Clone spack:
git clone https://github.com/spack/spack.git
#!/bin/bash
cmake_minimum_required(VERSION 3.18)
set(PROJECT_NAME test_scalar)
project(${PROJECT_NAME} LANGUAGES C CXX)
# Set C++20 standard
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
from mpi4py import MPI
import basix
from dolfinx import fem
import dolfinx
import ufl
import numpy as np
from numba import cuda
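The imports above pair DOLFINx with Numba's CUDA target. As a purely illustrative aside (this is not the project's assembly kernel), the lines below show the basic numba.cuda pattern: compile a trivial element-wise kernel, copy a NumPy array to the device, launch, and copy the result back.

# Illustrative only: a trivial element-wise kernel showing the launch pattern.
import numpy as np
from numba import cuda

@cuda.jit
def scale(x, alpha):
    # one thread per entry of x
    i = cuda.grid(1)
    if i < x.size:
        x[i] *= alpha

x = np.ones(1024)
d_x = cuda.to_device(x)                     # host -> device copy
threads = 256
blocks = (x.size + threads - 1) // threads
scale[blocks, threads](d_x, 2.0)            # kernel launch
print(d_x.copy_to_host()[:4])               # device -> host copy; prints [2. 2. 2. 2.]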
# Copyright (C) 2023 Igor A. Baratta
# SPDX-License-Identifier: MIT
from functools import partial
import dolfinx
import numpy as np
import scipy.sparse
from dolfinx.fem import Function, FunctionSpace
from dolfinx.io import XDMFFile, gmshio
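For context, the gmshio helper imported above can read a Gmsh .msh file into a distributed DOLFINx mesh together with its cell and facet tags. A minimal sketch follows; the file name is a placeholder and the exact return values of read_from_msh vary between DOLFINx releases, so treat it as an assumption rather than a fixed API.

# Sketch only: "mesh.msh" is a hypothetical file; check the installed DOLFINx
# version for the exact gmshio.read_from_msh return signature.
from mpi4py import MPI
from dolfinx.io import gmshio

msh, cell_tags, facet_tags = gmshio.read_from_msh(
    "mesh.msh", MPI.COMM_WORLD, rank=0, gdim=2)
print(msh.name, msh.topology.dim)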
cmake_minimum_required(VERSION 3.18)
set(PROJECT_NAME BMcublas)
project(${PROJECT_NAME})
include(CheckLanguage)
check_language(CUDA)
enable_language(CUDA)
# Set C++ standard
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/execution>
#include <CL/sycl.hpp>
#include <vector>
int main(int argc, char *argv[])
{
  std::size_t n = 100'000'000;
  int nrep = 10;
#!/bin/bash
module purge
module load gcc/9
module load intel/impi/2020.2/gnu
module load intel/mkl/2020.2
module load cmake
module load slurm
source spack/share/spack/setup-env.sh
from IPython import embed
from mpi4py import MPI
import dolfinx
import numpy
from pyvista.plotting.helpers import plot
import ufl
from dolfinx.io import XDMFFile
import dolfinx.plot
import pyvista
bc = 1
This blog post provides an overview of the work done during Google Summer of Code 2019.
The main goal of this project is to add the KaHIP partitioner to DOLFIN's graph wrappers and mesh partitioning, and to investigate whether the improvements expected from KaHIP for mesh partitioning carry over to DOLFIN's parallel toolchain. This is related to Issue #116.
A second goal is to add support for partitioning with a subset of processes. As currently implemented in DOLFIN-X, the mesh partitioners use all available MPI processes to perform the partitioning. Numerical experiments show that at scale this can demand a lot of memory (the memory footprint of the partitioning packages grows with the number of processes), and that the running time increases significantly with the number of processes for a fixed local mesh/graph size per process. This is related to Issue #9.
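To make the second goal concrete, the lines below give a purely illustrative mpi4py sketch (not DOLFIN's interface) of grouping a subset of ranks into a sub-communicator: only those ranks would call the graph partitioner, while the remaining ranks pass MPI.UNDEFINED, obtain MPI.COMM_NULL and simply wait for their share of the distributed mesh. The choice of k is arbitrary here.

from mpi4py import MPI

comm = MPI.COMM_WORLD
k = max(1, comm.size // 4)                      # e.g. use a quarter of the ranks
color = 0 if comm.rank < k else MPI.UNDEFINED   # non-participants get COMM_NULL
subcomm = comm.Split(color, key=comm.rank)

if subcomm != MPI.COMM_NULL:
    # only this subset would run the partitioner (SCOTCH/ParMETIS/KaHIP)
    print(f"rank {comm.rank}: partitioning on {subcomm.size} processes")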
In the following
#!/bin/sh
# Shell script to configure the CSD3 environment for running DOLFIN
# This script assumes that dolfinx is already installed.
# Set SPACK root directory
SPACK_ROOT=/home/hpcbara1/src/spack
# Reset the module environment and load only slurm and the compiler
module purge