Add modifier docs #190

Merged · 19 commits · Apr 24, 2024
Changes from 18 commits
89 changes: 62 additions & 27 deletions bin/benchpark
@@ -73,7 +73,7 @@ def source_location():

def benchpark_list(subparsers, actions_dict):
list_parser = subparsers.add_parser(
"list", help="List available benchmarks and systems"
"list", help="List available experiments, systems, and modifiers"
)
list_parser.add_argument("sublist", nargs="?")
actions_dict["list"] = benchpark_list_handler
@@ -84,8 +84,7 @@ def benchpark_benchmarks():
benchmarks = []
experiments_dir = source_dir / "experiments"
for x in os.listdir(experiments_dir):
for y in os.listdir(experiments_dir / x):
benchmarks.append(f"{x}/{y}")
benchmarks.append(f"{x}")
return benchmarks


@@ -94,7 +93,8 @@ def benchpark_experiments():
experiments = []
experiments_dir = source_dir / "experiments"
for x in os.listdir(experiments_dir):
experiments.append(f"{x}")
for y in os.listdir(experiments_dir / x):
experiments.append(f"{x}/{y}")
return experiments


@@ -106,6 +106,14 @@ def benchpark_systems():
return systems


def benchpark_modifiers():
source_dir = source_location()
modifiers = []
for x in os.listdir(source_dir / "modifiers"):
modifiers.append(x)
return modifiers


def benchpark_get_tags():
f = source_location() / "tags.yaml"
tags = []
@@ -132,12 +140,14 @@ def benchpark_list_handler(args):
source_dir = source_location()
sublist = args.sublist
benchmarks = benchpark_benchmarks()
experiments = benchpark_experiments()
systems = benchpark_systems()
modifiers = benchpark_modifiers()

if sublist == None:
print("Benchmarks/ProgrammingModel:")
for benchmark in benchmarks:
print(f"\t{benchmark}")
print("Experiments:")
for experiment in experiments:
print(f"\t{experiment}")
print("Systems:")
for system in systems:
print(f"\t{system}")
@@ -147,21 +157,31 @@ def benchpark_list_handler(args):
for benchmark in benchmarks:
print(f"\t{benchmark}")
else:
if sublist == "systems":
print("Systems:")
for system in systems:
print(f"\t{system}")
if sublist == "experiments":
print("Experiments:")
for experiment in experiments:
print(f"\t{experiment}")
else:
raise ValueError(
f'Invalid benchpark list "{sublist}" - must choose [benchmarks], [systems], or leave empty'
)
if sublist == "systems":
print("Systems:")
for system in systems:
print(f"\t{system}")
else:
if sublist == "modifiers":
print("Modifiers:")
for modifier in modifiers:
print(f"\t{modifier}")
else:
raise ValueError(
f'Invalid benchpark list "{sublist}" - must choose [experiments], [systems], [modifiers] or leave empty'
)


def benchpark_check_benchmark(arg_str):
benchmarks = benchpark_benchmarks()
found = arg_str in benchmarks
if not found:
out_str = f'Invalid benchmark/experiment "{arg_str}" - must choose one of: '
out_str = f'Invalid benchmark "{arg_str}" - must choose one of: '
for benchmark in benchmarks:
out_str += f"\n\t{benchmark}"
raise ValueError(out_str)
@@ -172,7 +192,7 @@ def benchpark_check_experiment(arg_str):
experiments = benchpark_experiments()
found = arg_str in experiments
if not found:
out_str = f'Invalid benchmark/experiment "{arg_str}" - must choose one of: '
out_str = f'Invalid experiment (benchmark/ProgrammingModel) "{arg_str}" - must choose one of: '
for experiment in experiments:
out_str += f"\n\t{experiment}"
raise ValueError(out_str)
@@ -201,13 +221,26 @@ def benchpark_check_tag(arg_str):
return found


def benchpark_check_modifier(arg_str):
modifiers = benchpark_modifiers()
found = arg_str in modifiers
if not found:
out_str = f'Invalid modifier "{arg_str}" - must choose one of: '
for modifier in modifiers:
out_str += f"\n\t{modifier}"
raise ValueError(out_str)
return found


def benchpark_setup(subparsers, actions_dict):
create_parser = subparsers.add_parser(
"setup", help="Set up an experiment and prepare it to build/run"
)

create_parser.add_argument(
"benchmark", type=str, help="The experiment (benchmark/ProgrammingModel) to run"
"experiment",
type=str,
help="The experiment (benchmark/ProgrammingModel) to run",
)
create_parser.add_argument(
"system", type=str, help="The system on which to run the experiment"
@@ -311,32 +344,34 @@ def benchpark_setup_handler(args):
experiments_root/
spack/
ramble/
<benchmark>/
<experiment>/
<system>/
workspace/
configs/
(everything from source/configs/<system>)
(everything from source/experiments/<benchmark>)
(everything from source/experiments/<experiment>)
"""

benchmark = args.benchmark
experiment = args.experiment
system = args.system
experiments_root = pathlib.Path(os.path.abspath(args.experiments_root))
modifier = args.modifier
source_dir = source_location()
debug_print(f"source_dir = {source_dir}")
debug_print(f"specified benchmark/ProgrammingModel = {benchmark}")
valid_benchmark = benchpark_check_benchmark(benchmark)
debug_print(f"specified experiment (benchmark/ProgrammingModel) = {experiment}")
valid_experiment = benchpark_check_experiment(experiment)
debug_print(f"specified system = {system}")
valid_system = benchpark_check_system(system)
if not (valid_benchmark and valid_system):
debug_print(f"specified modifier = {modifier}")
valid_modifier = benchpark_check_modifier(modifier)
if not (valid_experiment and valid_system and valid_modifier):
raise ValueError(
"Invalid benchmark/experiment and system provided: {0} {1}".format(
benchmark, system
"Invalid experiment, system, or modifier provided: {0} {1} {2}".format(
experiment, system, modifier
)
)

workspace_dir = experiments_root / str(benchmark) / str(system)
workspace_dir = experiments_root / str(experiment) / str(system)

if workspace_dir.exists():
if workspace_dir.is_dir():
@@ -360,7 +395,7 @@ def benchpark_setup_handler(args):
print(f"Setting up configs for Ramble workspace {ramble_configs_dir}")

configs_src_dir = source_dir / "configs" / str(system)
experiment_src_dir = source_dir / "experiments" / benchmark
experiment_src_dir = source_dir / "experiments" / experiment
modifier_config_dir = source_dir / "modifiers" / modifier / "configs"
ramble_configs_dir.mkdir(parents=True)
ramble_logs_dir.mkdir(parents=True)
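As a quick usage sketch of the ``list`` subcommand added above (the modifier
names shown are illustrative and depend on what is in the checkout's
``modifiers/`` directory)::

   $ ./benchpark list modifiers
   Modifiers:
           caliper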
51 changes: 50 additions & 1 deletion docs/3-opt-edit-experiment.rst
@@ -31,7 +31,7 @@ Benchpark configuration files are organized as follows::
│ └── package.py
└── repo.yaml

You can edit any of them to change the behavior of your experiments.
You can edit these configuration files to change the behavior of your experiments.
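For example, an experiment's parameters live in its ``ramble.yaml``; a sketch
of the kind of edit this enables, assuming Ramble's workspace config schema
(the application, workload, and variable names are illustrative)::

   ramble:
     applications:
       saxpy:
         workloads:
           problem:
             experiments:
               saxpy_{n}:
                 variables:
                   n: ['512', '1024']   # sweep the problem size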

System specification
--------------------
@@ -54,3 +54,52 @@ If you would like to modify a specification of your benchmark,
you can do so by upstreaming changes to Spack and/or Ramble,
or working on your benchmark specification in ``benchpark/repo/${BENCHMARK}``
(see :doc:`add-a-benchmark` for details).

Modifiers
---------
In Benchpark, a ``modifier`` follows the `Ramble Modifier
<https://googlecloudplatform.github.io/ramble/tutorials/10_using_modifiers.html#modifiers>`_
model: an abstract object that can be applied to a large set of reproducible
specifications. Modifiers are intended to encapsulate reusable patterns that
perform a specific configuration of an experiment. This may include injecting
performance analysis or setting up system resources.
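For orientation, a minimal modifier sketch, assuming Ramble's ``modkit``
directives; the class, mode, and injected command are hypothetical, not an
actual Benchpark modifier::

   # modifiers/profiler-hooks/modifier.py  (hypothetical layout)
   from ramble.modkit import *  # noqa: F403


   class ProfilerHooks(BasicModifier):
       """Illustrative modifier that injects a profiling wrapper."""

       name = "profiler-hooks"

       mode("standard", description="Wrap launched commands with a profiler")

       # Prepend a (hypothetical) profiler launcher to the MPI command.
       variable_modification(
           "mpi_command", "profiler-wrap ", method="prepend", modes=["standard"]
       )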

Applying the Caliper modifier
-----------------------------
We have implemented a Caliper modifier to enable profiling of Caliper-instrumented
benchmarks in Benchpark. More documentation on Caliper can be found `here
<https://software.llnl.gov/Caliper>`_.

To turn on profiling with Caliper, add ``--modifier=<caliper_modifier>`` to the Benchpark
setup step::

./benchpark setup <benchmark/ProgrammingModel> <system> --modifier=<caliper_modifier> <workspace-dir>

Valid values for ``<caliper_modifier>`` are found in the **Caliper Modifier**
column of the table below. Benchpark will link the experiment to Caliper,
and inject the appropriate Caliper configuration at runtime. After the experiments
in the workspace have completed running, a ``.cali`` file
containing the collected performance metrics is created.
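The resulting file can be inspected with Caliper's ``cali-query`` tool or, as
sketched below, with the ``caliper-reader`` Python package
(``pip install caliper-reader``); the file name here is illustrative::

   import caliperreader

   r = caliperreader.CaliperReader()
   r.read("experiment.cali")    # illustrative path to a generated file

   print(r.globals)             # run metadata recorded by Caliper
   for rec in r.records[:5]:    # performance snapshots, one dict each
       print(rec)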

.. list-table:: Available Caliper modifiers
:widths: 20 20 50
:header-rows: 1

* - Caliper Modifier
- Where Applicable
- Metrics Collected
* - caliper
- Platform-independent
- | - Min time/rank: Minimum time (in seconds) across all ranks
| - Max time/rank: Maximum time (in seconds) across all ranks
| - Avg time/rank: Average time (in seconds) across all ranks
| - Total time: Aggregated time (in seconds) over all ranks
* - caliper-topdown
- x86 Intel CPUs
- | - Retiring
| - Bad speculation
| - Front end bound
| - Back end bound
* - caliper-cuda
- NVIDIA GPUs
- | - CUDA API functions (e.g., time.gpu)