Add modifier docs (#190)
* initial commit on caliper modifier

* add links

* updates

* remove extra line

* add following

* reconfigure, separate modifier and caliper modifier sections

- add table for different caliper modifiers

* multi-line list in table

* update table

* Update 3-opt-edit-experiment.rst

* Adding modifiers to the help menu

* List benchmarks and experiments

* Lint

* Flattening list options, removing redundant error output

---------

Co-authored-by: Stephanie Brink <[email protected]>
pearce8 and slabasan authored Apr 24, 2024
1 parent 24cda7b commit 15769ec
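
For illustration, the list subcommand extended by this commit can be invoked as follows (output abbreviated; the exact entries depend on the checkout):

    $ ./benchpark list modifiers
    Modifiers:
            caliper
            caliper-cuda
            caliper-topdown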
Showing 2 changed files with 109 additions and 35 deletions.
93 changes: 59 additions & 34 deletions bin/benchpark
@@ -73,7 +73,7 @@ def source_location():

def benchpark_list(subparsers, actions_dict):
list_parser = subparsers.add_parser(
"list", help="List available benchmarks and systems"
"list", help="List available experiments, systems, and modifiers"
)
list_parser.add_argument("sublist", nargs="?")
actions_dict["list"] = benchpark_list_handler
@@ -84,8 +84,7 @@ def benchpark_benchmarks():
benchmarks = []
experiments_dir = source_dir / "experiments"
for x in os.listdir(experiments_dir):
for y in os.listdir(experiments_dir / x):
benchmarks.append(f"{x}/{y}")
benchmarks.append(f"{x}")
return benchmarks


@@ -94,7 +93,8 @@ def benchpark_experiments():
experiments = []
experiments_dir = source_dir / "experiments"
for x in os.listdir(experiments_dir):
experiments.append(f"{x}")
for y in os.listdir(experiments_dir / x):
experiments.append(f"{x}/{y}")
return experiments


@@ -106,6 +106,14 @@ def benchpark_systems():
return systems


def benchpark_modifiers():
source_dir = source_location()
modifiers = []
for x in os.listdir(source_dir / "modifiers"):
modifiers.append(x)
return modifiers


def benchpark_get_tags():
f = source_location() / "tags.yaml"
tags = []
@@ -132,36 +140,44 @@ def benchpark_list_handler(args):
source_dir = source_location()
sublist = args.sublist
benchmarks = benchpark_benchmarks()
experiments = benchpark_experiments()
systems = benchpark_systems()
modifiers = benchpark_modifiers()

if sublist == None:
print("Benchmarks/ProgrammingModel:")
print("Experiments:")
for experiment in experiments:
print(f"\t{experiment}")
print("Systems:")
for system in systems:
print(f"\t{system}")
elif sublist == "benchmarks":
print("Benchmarks:")
for benchmark in benchmarks:
print(f"\t{benchmark}")
elif sublist == "experiments":
print("Experiments:")
for experiment in experiments:
print(f"\t{experiment}")
elif sublist == "systems":
print("Systems:")
for system in systems:
print(f"\t{system}")
elif sublist == "modifiers":
print("Modifiers:")
for modifier in modifiers:
print(f"\t{modifier}")
else:
if sublist == "benchmarks":
print("Benchmarks:")
for benchmark in benchmarks:
print(f"\t{benchmark}")
else:
if sublist == "systems":
print("Systems:")
for system in systems:
print(f"\t{system}")
else:
raise ValueError(
f'Invalid benchpark list "{sublist}" - must choose [benchmarks], [systems], or leave empty'
)
raise ValueError(
f'Invalid benchpark list "{sublist}" - must choose [experiments], [systems], [modifiers] or leave empty'
)


def benchpark_check_benchmark(arg_str):
benchmarks = benchpark_benchmarks()
found = arg_str in benchmarks
if not found:
out_str = f'Invalid benchmark/experiment "{arg_str}" - must choose one of: '
out_str = f'Invalid benchmark "{arg_str}" - must choose one of: '
for benchmark in benchmarks:
out_str += f"\n\t{benchmark}"
raise ValueError(out_str)
@@ -172,7 +188,7 @@ def benchpark_check_experiment(arg_str):
experiments = benchpark_experiments()
found = arg_str in experiments
if not found:
out_str = f'Invalid benchmark/experiment "{arg_str}" - must choose one of: '
out_str = f'Invalid experiment (benchmark/ProgrammingModel) "{arg_str}" - must choose one of: '
for experiment in experiments:
out_str += f"\n\t{experiment}"
raise ValueError(out_str)
@@ -201,13 +217,26 @@ def benchpark_check_tag(arg_str):
return found


def benchpark_check_modifier(arg_str):
modifiers = benchpark_modifiers()
found = arg_str in modifiers
if not found:
out_str = f'Invalid modifier "{arg_str}" - must choose one of: '
for modifier in modifiers:
out_str += f"\n\t{modifier}"
raise ValueError(out_str)
return found


def benchpark_setup(subparsers, actions_dict):
create_parser = subparsers.add_parser(
"setup", help="Set up an experiment and prepare it to build/run"
)

create_parser.add_argument(
"benchmark", type=str, help="The experiment (benchmark/ProgrammingModel) to run"
"experiment",
type=str,
help="The experiment (benchmark/ProgrammingModel) to run",
)
create_parser.add_argument(
"system", type=str, help="The system on which to run the experiment"
@@ -311,32 +340,28 @@ def benchpark_setup_handler(args):
experiments_root/
spack/
ramble/
<benchmark>/
<experiment>/
<system>/
workspace/
configs/
(everything from source/configs/<system>)
(everything from source/experiments/<benchmark>)
(everything from source/experiments/<experiment>)
"""

benchmark = args.benchmark
experiment = args.experiment
system = args.system
experiments_root = pathlib.Path(os.path.abspath(args.experiments_root))
modifier = args.modifier
source_dir = source_location()
debug_print(f"source_dir = {source_dir}")
debug_print(f"specified benchmark/ProgrammingModel = {benchmark}")
valid_benchmark = benchpark_check_benchmark(benchmark)
debug_print(f"specified experiment (benchmark/ProgrammingModel) = {experiment}")
valid_experiment = benchpark_check_experiment(experiment)
debug_print(f"specified system = {system}")
valid_system = benchpark_check_system(system)
if not (valid_benchmark and valid_system):
raise ValueError(
"Invalid benchmark/experiment and system provided: {0} {1}".format(
benchmark, system
)
)
debug_print(f"specified modifier = {modifier}")
valid_modifier = benchpark_check_modifier(modifier)

workspace_dir = experiments_root / str(benchmark) / str(system)
workspace_dir = experiments_root / str(experiment) / str(system)

if workspace_dir.exists():
if workspace_dir.is_dir():
@@ -360,7 +385,7 @@ def benchpark_setup_handler(args):
print(f"Setting up configs for Ramble workspace {ramble_configs_dir}")

configs_src_dir = source_dir / "configs" / str(system)
experiment_src_dir = source_dir / "experiments" / benchmark
experiment_src_dir = source_dir / "experiments" / experiment
modifier_config_dir = source_dir / "modifiers" / modifier / "configs"
ramble_configs_dir.mkdir(parents=True)
ramble_logs_dir.mkdir(parents=True)
51 changes: 50 additions & 1 deletion docs/3-opt-edit-experiment.rst
@@ -31,7 +31,7 @@ Benchpark configuration files are organized as follows::
│ └── package.py
└── repo.yaml

You can edit any of them to change the behavior of your experiments.
You can edit these configuration files to change the behavior of your experiments.

System specification
--------------------
@@ -54,3 +54,52 @@ If you would like to modify a specification of your benchmark,
you can do so by upstreaming changes to Spack and/or Ramble,
or working on your benchmark specification in ``benchpark/repo/${BENCHMARK}``
(see :doc:`add-a-benchmark` for details).

Modifiers
---------
In Benchpark, a ``modifier`` follows the `Ramble Modifier
<https://googlecloudplatform.github.io/ramble/tutorials/10_using_modifiers.html#modifiers>`_
model: an abstract object that can be applied to a large set of reproducible
specifications. Modifiers are intended to encapsulate reusable patterns that
perform a specific configuration of an experiment, such as injecting
performance analysis or setting up system resources.
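
For orientation only, a minimal Ramble-style modifier might look roughly like
the sketch below; the class, mode, and variable names are illustrative, and
the directive signatures follow Ramble's modifier kit as we understand it, so
treat this as a sketch rather than a definitive implementation::

   from ramble.modkit import *  # Ramble's modifier directives


   class ProfilerWrap(BasicModifier):
       """Hypothetical modifier that wraps MPI launches with a profiler."""

       name = "profiler-wrap"

       mode("standard", description="Standard execution mode")
       default_mode("standard")

       # Append a (fictional) profiler launcher to Ramble's mpi_command
       # variable when running in standard mode.
       variable_modification(
           "mpi_command",
           "myprofiler -o {experiment_run_dir}/profile ",
           method="append",
           modes=["standard"],
       )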

Applying the Caliper modifier
-----------------------------
We have implemented a Caliper modifier to enable profiling of Caliper-instrumented
benchmarks in Benchpark. More documentation on Caliper can be found `here
<https://software.llnl.gov/Caliper>`_.

To turn on profiling with Caliper, add ``--modifier=<caliper_modifier>`` to the Benchpark
setup step::

   ./benchpark setup benchmark/programmingmodel system --modifier=<caliper_modifier> <workspace-dir>
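
For example, with a hypothetical experiment, system, and workspace directory
(all three names below are illustrative)::

   ./benchpark setup saxpy/openmp MySystem --modifier=caliper workspace/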

Valid values for ``<caliper_modifier>`` are listed in the **Caliper Modifier**
column of the table below. Benchpark links the experiment to Caliper and
injects the appropriate Caliper configuration at runtime. After the experiments
in the workspace have finished running, a ``.cali`` file is created that
contains the collected performance metrics.
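
One way to inspect a ``.cali`` file afterwards is Caliper's ``caliper-reader``
Python package; a minimal sketch (the file name is illustrative) might look
like::

   import caliperreader as cr

   # Parse the Caliper output produced by one experiment.
   records, global_meta = cr.read_caliper_contents("experiment.cali")

   for record in records:
       # Each record is a dict of Caliper attribute names to collected
       # values, including the timing metrics listed in the table below.
       print(record)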

.. list-table:: Available Caliper modifiers
   :widths: 20 20 50
   :header-rows: 1

   * - Caliper Modifier
     - Where Applicable
     - Metrics Collected
   * - caliper
     - Platform-independent
     - | - Min time/rank: Minimum time (in seconds) across all ranks
       | - Max time/rank: Maximum time (in seconds) across all ranks
       | - Avg time/rank: Average time (in seconds) across all ranks
       | - Total time: Aggregated time (in seconds) over all ranks
   * - caliper-topdown
     - x86 Intel CPUs
     - | - Retiring
       | - Bad speculation
       | - Front end bound
       | - Back end bound
   * - caliper-cuda
     - NVIDIA GPUs
     - | - CUDA API functions (e.g., time.gpu)
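
For context, the platform-independent metrics in the first row resemble what
Caliper's built-in ``runtime-report`` configuration prints, so a rough manual
equivalent outside of Benchpark (assuming a Caliper-instrumented binary and an
MPI launcher; names are illustrative) would be::

   CALI_CONFIG=runtime-report mpirun -n 4 ./my-instrumented-app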
