Last active
March 13, 2025 05:09
-
-
Save pshriwise/f10effe745d8d939428c4950780055b6 to your computer and use it in GitHub Desktop.
A script for checking scaling of a DAGMC model.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import sys | |
from pathlib import Path | |
from matplotlib import pyplot as plt | |
import openmc | |
import numpy as np | |
def threaded_scaling(threads, model, openmc_exec='openmc', output_filename=None, particles_per_thread=10_000):
    """Run simulation with various threads and collect timing results

    Results are appended row-by-row to a CSV file so that partial data
    survives an interrupted scan.

    Parameters
    ----------
    threads : Iterable of int
        The thread counts to run with
    model : openmc.Model
        The model to run
    openmc_exec : str, optional
        Path to the openmc executable to invoke for each run
    output_filename : str or pathlib.Path, optional
        Name of the output file to be written in CSV
        (defaults to 'scaling.csv')
    particles_per_thread : int, optional
        Particles simulated per thread; the total particle count is
        scaled with the thread count (weak scaling)
    """
    filename = 'scaling.csv' if output_filename is None else output_filename
    print(f'Running with executable {openmc_exec}...')
    # Bug fix: the original printed the literal text "(unknown)" instead
    # of interpolating the output filename
    print(f'Writing results to {filename}...')
    # Context manager guarantees the file is closed even if a run fails
    with open(filename, 'w') as output:
        header = ('threads, particles, inactive batches, total batches, '
                  'time inactive (s), time active (s), time transport (s)\n')
        output.write(header)
        for thread_count in threads:
            # Keep per-thread work constant across runs (weak scaling)
            n_particles = thread_count * particles_per_thread
            sp_file = model.run(threads=thread_count, openmc_exec=openmc_exec,
                                particles=n_particles)
            # Open the statepoint file and collect timing
            with openmc.StatePoint(sp_file) as sp:
                inactive_time = sp.runtime['inactive batches']
                active_time = sp.runtime['active batches']
                transport_time = sp.runtime['transport']
            inactive = model.settings.inactive
            batches = model.settings.batches
            output_line = f'{thread_count}, {n_particles}, {inactive},' \
                          f'{batches}, {inactive_time}, {active_time}, {transport_time}\n'
            output.write(output_line)
            # Flush after every run so results are usable mid-scan
            output.flush()
def ax_plot(ax, filename, label=''):
    """Plot particle throughput versus thread count on an existing axes.

    Reads the CSV produced by a scaling run (one header row, comma
    delimited) and draws particles/s against the thread count.

    Parameters
    ----------
    ax : matplotlib axes
        Axes to draw on
    filename : str or pathlib.Path
        CSV file written by ``threaded_scaling``
    label : str, optional
        Legend label for the curve
    """
    columns = np.loadtxt(filename, skiprows=1, delimiter=',').T
    thread_counts = columns[0, :]
    # particles * inactive batches = total particles simulated in the
    # inactive phase; dividing by inactive time gives a particles/s rate
    throughput = columns[1, :] * columns[2, :] / columns[4, :]
    ax.set_xlabel('# threads')
    ax.set_ylabel('particles/s')
    ax.plot(thread_counts, throughput, label=label)
def fig_plot(title, filename, label):
    """Create a standalone figure of scaling results and display it.

    Parameters
    ----------
    title : str
        Title shown above the figure
    filename : str or pathlib.Path
        CSV file written by ``threaded_scaling``
    label : str
        Legend label for the plotted curve
    """
    figure, axes = plt.subplots()
    figure.suptitle(title)
    # Delegate the actual data loading and drawing to the shared helper
    ax_plot(axes, filename, label)
    plt.legend()
    plt.show()
def run(model_path, executable, threads, particles_per_thread=10_000):
    """Run a threaded scaling study on the given OpenMC model.

    Parameters
    ----------
    model_path : str or pathlib.Path
        Path to a single model XML file, or a directory containing the
        separate geometry/materials/settings XML files
    executable : str
        Path to the openmc executable
    threads : Iterable of int
        Thread counts to run with
    particles_per_thread : int, optional
        Particles simulated per thread (weak scaling)

    Raises
    ------
    FileNotFoundError
        If ``model_path`` does not exist
    """
    l_values = [1]
    # Bug fix: the original reassigned `threads` to a hard-coded list
    # here, silently ignoring the caller's (and the CLI's -t) value.
    model_path = Path(model_path).resolve()
    print(f'Running scaling test with model {model_path}...')
    if not model_path.exists():
        raise FileNotFoundError(f'No such file {model_path}')
    if model_path.is_dir():
        # Bug fix: from_xml() with no arguments reads XML files from the
        # current working directory, not from model_path
        model = openmc.Model.from_xml(
            geometry=model_path / 'geometry.xml',
            materials=model_path / 'materials.xml',
            settings=model_path / 'settings.xml',
        )
    else:
        model = openmc.Model.from_model_xml(model_path)
    model.settings.particles = 50_000
    model.settings.inactive = 10
    model.settings.batches = 11
    # Vestigial parameter-scan loop; kept so output filenames stay stable
    for l in l_values:
        scaling_filename = f'scaling_l_{l}.csv'
        print(f'Threaded scaling run l={l}...')
        threaded_scaling(threads, model, executable, scaling_filename, particles_per_thread)
if __name__ == '__main__':
    from argparse import ArgumentParser

    # Command-line interface: optionally run a scaling study, optionally
    # plot results from an existing CSV (the two are independent flags)
    cli = ArgumentParser()
    # Actions
    cli.add_argument('-r', '--run', action='store_true', help='Run scaling test')
    cli.add_argument('-p', '--plot', type=str, help='Plot scaling results from file')
    # Run configuration
    cli.add_argument('-m', '--model', type=str, help='Path to model XML file')
    cli.add_argument('-e', '--executable', type=str, default='openmc', help='Path to openmc executable')
    cli.add_argument('-t', '--threads', type=int, nargs='+', default=[1, 5, 10, 15, 20], help='Thread counts to run')
    cli.add_argument('-n', '--particles', type=int, default=10_000, help='Particles per thread')

    args = cli.parse_args()
    if args.run:
        run(args.model, args.executable, args.threads, args.particles)
    if args.plot:
        fig_plot('', args.plot, '')
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment