-
Notifications
You must be signed in to change notification settings - Fork 32
/
Copy pathrun
executable file
·1837 lines (1621 loc) · 56.1 KB
/
run
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Error out if a command fails or a variable is not defined
set -eu
#===============================================================================
# Default values for environment variables.
#===============================================================================
#######################################
# Initialize the global environment variables used throughout this script.
# Globals (written/exported): CALLER, RUN_SCRIPT_FILE, TOP, HOLOSCAN_PY_EXE,
#   HOLOSCAN_DOCKER_EXE, DO_DRY_RUN, DO_STANDALONE, NO_CACHE, SDK_*_IMG,
#   SDK_*_DIR, DOCS_* constants, DOCKER_BUILDKIT, USE_TTY.
# Arguments: none. Outputs: nothing.
#######################################
init_globals() {
# Paths
# When this file is sourced (BASH_SOURCE differs from $0), remember the caller script.
if [[ "${BASH_SOURCE[0]}" != "${0}" ]]; then
CALLER=$(readlink -f "$0")
fi
# TOP is the repository root: the directory containing this script.
export RUN_SCRIPT_FILE="$(readlink -f "${BASH_SOURCE[0]}")"
export TOP=$(dirname "${RUN_SCRIPT_FILE}")
# Executables (overridable from the environment)
export HOLOSCAN_PY_EXE=${HOLOSCAN_PY_EXE:-"python3"}
export HOLOSCAN_DOCKER_EXE=${HOLOSCAN_DOCKER_EXE:-"docker"}
# Options
export DO_DRY_RUN="false" # print commands but do not execute them. Used by run_command
export DO_STANDALONE="false" # do not run prerequisite functions
export NO_CACHE="" # by default, use cache
# Define default img and dir names
export SDK_BUILD_IMG="holoscan-sdk-build"
export SDK_RUN_IMG="holoscan-sdk-run"
export SDK_BUILD_DIR="build"
export SDK_INSTALL_DIR="install"
# Define constants for docs
export DOCS_BASE_IMG="holoscan-docs-deps"
export DOCS_HTML_IMG="holoscan-docs-html-builder"
export DOCS_PDF_IMG="holoscan-docs-pdf-builder"
export DOCS_SRC_DIR="docs"
export LIVE_HTML_IP="0.0.0.0"
export LIVE_HTML_PORT="8888"
# Use buildkit for docker build
export DOCKER_BUILDKIT=1
# Only enabled TTY if supported
# (stdout is a terminal -> pass --tty to docker run; empty otherwise, e.g. in CI)
[ -t 1 ] && export USE_TTY="--tty" || export USE_TTY=""
}
#===============================================================================
# Utility functions
#===============================================================================
# Build Type ===================================================================
# Canonicalize a build-type alias to the CMake spelling.
# $1 : requested type (debug/release/rel-debug or already-canonical name)
# stdout: Debug | Release | RelWithDebInfo; unrecognized input falls back to
#         $CMAKE_BUILD_TYPE, or "Release" when that is unset/empty.
get_buildtype_str() {
local requested="${1:-}"
local canonical
if [ "$requested" = "debug" ] || [ "$requested" = "Debug" ]; then
canonical="Debug"
elif [ "$requested" = "release" ] || [ "$requested" = "Release" ]; then
canonical="Release"
elif [ "$requested" = "rel-debug" ] || [ "$requested" = "RelWithDebInfo" ]; then
canonical="RelWithDebInfo"
else
canonical="${CMAKE_BUILD_TYPE:-Release}"
fi
echo -n "${canonical}"
}
# Boolean ======================================================================
# Normalize a boolean-ish token.
# $1 : true/yes/1 or false/no/0
# stdout: "true" or "false"; returns 1 (printing nothing) for anything else.
get_boolean() {
local value="${1:-}"
case "${value}" in
true|yes|1) echo -n "true" ;;
false|no|0) echo -n "false" ;;
*) return 1 ;;
esac
}
# Archs & GPUs =================================================================
# Host-architecture predicates.
# Fix: the original left $(get_host_arch) and $ARCH unquoted inside [ ],
# which breaks if the expansion is empty or contains glob characters.
# The if/return-0/return-1 scaffolding is replaced by returning the test
# status directly (identical exit codes).

# Succeeds (returns 0) iff the build host is x86_64.
checkif_x86_64() {
[ "$(get_host_arch)" = "x86_64" ]
}
# Succeeds (returns 0) iff the build host is aarch64.
checkif_aarch64() {
[ "$(get_host_arch)" = "aarch64" ]
}
# Print the host machine architecture (uname -m), no trailing newline.
get_host_arch() {
echo -n "$(uname -m)"
}
# Succeeds iff the target $ARCH (global) differs from the host architecture.
is_cross_compiling() {
[ "$ARCH" != "$(get_host_arch)" ]
}
# Map an architecture alias (docker or GNU flavored) to its GNU name.
# $1 : amd64/x86_64/x86/linux/amd64 or arm64/aarch64/arm/linux/arm64
# stdout: "x86_64" or "aarch64"; returns 1 (printing nothing) otherwise.
get_gnu_arch_str() {
local alias_in="${1:-}"
case "${alias_in}" in
amd64|x86_64|x86|linux/amd64) echo -n "x86_64" ;;
arm64|aarch64|arm|linux/arm64) echo -n "aarch64" ;;
*) return 1 ;;
esac
}
# Map an architecture alias (GNU or docker flavored) to its docker name.
# $1 : amd64/x86_64/x86/linux/amd64 or arm64/aarch64/arm/linux/arm64
# stdout: "amd64" or "arm64"; returns 1 (printing nothing) otherwise.
get_docker_arch_str() {
local alias_in="${1:-}"
case "${alias_in}" in
amd64|x86_64|x86|linux/amd64) echo -n "amd64" ;;
arm64|aarch64|arm|linux/arm64) echo -n "arm64" ;;
*) return 1 ;;
esac
}
# Print the docker --platform string (e.g. "linux/amd64") for the target $ARCH.
# Fix: quote $ARCH so an empty/odd value is passed through as one argument.
get_platform_str() {
echo -n "linux/$(get_docker_arch_str "$ARCH")"
}
# Normalize a GPU-type alias.
# $1 : igpu/iGPU/integrated/Jetson, dgpu/dGPU/discrete/RTX, or anything else
# stdout: "igpu", "dgpu", or the input unchanged when not a known alias.
get_gpu_str() {
local requested="${1:-}"
local resolved="${requested}"
case "${requested}" in
igpu|iGPU|integrated|Jetson) resolved="igpu" ;;
dgpu|dGPU|discrete|RTX) resolved="dgpu" ;;
esac
echo -n "${resolved}"
}
# Detect the host GPU stack.
# stdout: "igpu" when nvidia-smi reports an integrated (nvgpu) device,
#         "dgpu" otherwise (including when no driver is found, with a warning).
# Fixes: standard `grep -q PATTERN` flag order; quote $(get_host_gpu) in the
# [ ] comparison below (the original left it unquoted).
get_host_gpu() {
local detected="dgpu"
if ! command -v nvidia-smi >/dev/null; then
c_echo_err Y "Could not find any GPU drivers on host. Defaulting build to target dGPU/CPU stack."
elif nvidia-smi 2>/dev/null | grep -q nvgpu; then
detected="igpu"
fi
echo -n "${detected}"
}
# Succeeds iff the target $GPU (global) differs from the detected host GPU.
is_cross_gpu() {
[ "$GPU" != "$(get_host_gpu)" ]
}
# Compose the "<arch>[-<gpu>]" suffix used to disambiguate images and
# build/install directories. The GPU part is only appended on aarch64,
# where igpu and dgpu stacks differ.
get_arch+gpu_str() {
local suffix="$ARCH"
if [ "$ARCH" = "aarch64" ]; then
suffix="$ARCH-$GPU"
fi
echo -n "$suffix"
}
# Name of the docker image used to build the SDK for the current target.
get_build_img_name() {
echo -n "${SDK_BUILD_IMG}-$(get_arch+gpu_str)"
}
# Name of the docker image used to run SDK applications for the current target.
get_run_img_name() {
echo -n "${SDK_RUN_IMG}-$(get_arch+gpu_str)"
}
# Per-target CMake build directory name.
get_build_dir() {
echo -n "${SDK_BUILD_DIR}-$(get_arch+gpu_str)"
}
# Per-target CMake install directory name.
get_install_dir() {
echo -n "${SDK_INSTALL_DIR}-$(get_arch+gpu_str)"
}
# CUDA =========================================================================
# Normalize a CUDA architecture specifier to the strings accepted by CMake's
# CMAKE_CUDA_ARCHITECTURES property.
# $1 : native/all/all-major (any case; 'major' is an alias of all-major),
#      or a custom semicolon-separated arch list (passed through verbatim).
# Fixes: declare cuda_archs_str local (it leaked into the global scope) and
# reuse the captured $cuda_archs in the default arm instead of re-reading $1.
get_cuda_archs() {
local cuda_archs="${1:-}"
local cuda_archs_str
# match https://cmake.org/cmake/help/latest/prop_tgt/CUDA_ARCHITECTURES.html
case "${cuda_archs}" in
native|NATIVE)
cuda_archs_str="native"
;;
all|ALL)
cuda_archs_str="all"
;;
all-major|ALL-MAJOR|major|MAJOR)
cuda_archs_str="all-major"
;;
*)
# Custom list (e.g. "75;80") is forwarded unchanged.
cuda_archs_str="${cuda_archs}"
;;
esac
echo -n "${cuda_archs_str}"
}
# ID ===========================================================================
# Print the numeric GID of a group from /etc/group (empty if absent).
# Fix: the original `cat /etc/group | grep $group` matched the group name
# anywhere on the line (including inside member lists or as a substring of
# another group name), potentially printing several or wrong GIDs. Anchor
# the match to the group-name field and drop the useless cat.
get_group_id() {
local group=$1
grep "^${group}:" /etc/group | cut -d: -f3
}
# Version ======================================================================
# Version/identity helpers. All print to stdout with no trailing newline.

# Short (9-char) SHA of the current HEAD.
get_git_sha() {
echo -n "$(git rev-parse --short=9 HEAD)"
}
# Current branch name, made usable as a docker tag.
get_git_branch() {
# Can't use "/" in tag, convert to "_"
echo -n "$(git branch --show-current | sed 's|/|_|g')"
}
# Tag(s) pointing at HEAD, empty if none.
get_git_tag() {
echo -n "$(git tag --points-at HEAD)"
}
# SDK version from the top-level VERSION file.
# Fix: quote the path so a $TOP containing spaces doesn't word-split.
get_version() {
echo -n "$(cat "$TOP/VERSION")"
}
# Build the `-t name:tag` flag string for docker build.
# $1 : image name
# stdout: one "-t <name>:<tag> " per tag: latest, VERSION file content,
#         branch, tag (if any), and short SHA. Trailing space preserved.
get_image_tag_flags() {
local img_name=$1
local tag
local flags=""
# Intentionally unquoted: empty expansions (e.g. no git tag) drop out.
for tag in "latest" $(get_version) $(get_git_branch) $(get_git_tag) $(get_git_sha); do
flags+="-t ${img_name}:${tag} "
done
echo -n "$flags"
}
#===============================================================================
# Section: CLI
#===============================================================================
#######################################
# Install the Python development requirements into the active virtual env.
# If no venv/conda env is active, create one at $TOP/.venv/dev (if missing),
# print activation instructions, and exit 1 so the user can re-run.
# Globals: VIRTUAL_ENV, CONDA_PREFIX (read), HOLOSCAN_PY_EXE, TOP.
#######################################
install_python_dev_deps() {
# Either an activated virtualenv or a conda environment counts as "in an env".
if [ -n "${VIRTUAL_ENV-}" ] || [ -n "${CONDA_PREFIX-}" ]; then
run_command ${HOLOSCAN_PY_EXE} -m pip install -q -r ${TOP}/python/requirements.txt
run_command ${HOLOSCAN_PY_EXE} -m pip install -q -r ${TOP}/python/requirements.dev.txt
else
c_echo_err R "You must be in a virtual environment to install dependencies."
# Create a dedicated venv once; subsequent runs only print the activation hint.
if [ ! -e "$TOP/.venv/dev/bin/python3" ]; then
c_echo_err W "Installing a virtual environment at " G "$TOP/.venv/dev" W " ..."
run_command ${HOLOSCAN_PY_EXE} -m venv "$TOP/.venv/dev"
fi
c_echo_err W "Please activate the virtual environment at " G "$TOP/.venv/dev" W " and execute this command again."
c_echo_err
c_echo_err G " source $TOP/.venv/dev/bin/activate"
c_echo_err G " $0 $@"
exit 1
fi
}
# Help text for the `setup_cli_dev` command (printed by the CLI dispatcher).
setup_cli_dev_desc() { c_echo 'Setup development environment for Holoscan CLI
'
}
#######################################
# Set up the Holoscan CLI development environment: install Python dev deps,
# then symlink the built extension modules into the source tree.
# Fix: `-name *.so` was unquoted — the shell would expand it against the
# current directory before find ran if any .so files were present there.
# Paths are now quoted throughout.
#######################################
setup_cli_dev() {
c_echo W "Setup Holoscan CLI development environment..."
if [ -f "/.dockerenv" ]; then
c_echo_err "WARNING: devcontainer is not supported for CLI development"
# NOTE(review): exits with status 0 here — presumably intentional for a
# warning-only path; confirm before changing.
exit
fi
install_python_dev_deps
local build_path=$TOP/$(get_build_dir)/python/lib/holoscan
if [ -d "${build_path}" ]; then
c_echo "Found build directory: $build_path"
else
c_echo_err "Build directory ${build_path} not found, please run ./run build first"
# NOTE(review): also exits 0 on this error path — confirm intent.
exit
fi
DEST=$TOP/python/holoscan
c_echo "Removing existing symlinks from $DEST/**/*.so"
find "$DEST" -name '*.so' -delete
c_echo "Creating symlinks from $build_path/* to $DEST"
# -a: recursive/preserving, -s: create symlinks instead of copying.
cp -as "$build_path"/* "$DEST" &> /dev/null
}
#===============================================================================
# Section: Build
#===============================================================================
# Help text for the `clear_cache` command (printed by the CLI dispatcher).
clear_cache_desc() { c_echo 'Clear cache folders (including build/install folders)
'
}
#######################################
# Remove all build/install outputs and cache folders under $TOP.
# Globs (build-*, install-*) cover the per-arch/gpu suffixed directories
# produced by get_build_dir/get_install_dir.
#######################################
clear_cache() {
c_echo W "Clearing cache..."
run_command rm -rf ${TOP}/build
run_command rm -rf ${TOP}/install
run_command rm -rf ${TOP}/build-*
run_command rm -rf ${TOP}/install-*
# Generated gRPC/protobuf sources for core services.
run_command rm -rf ${TOP}/src/core/services/generated/*
# Compiler, CPM (CMake package manager) and GXF download caches.
run_command rm -rf ${TOP}/.cache/ccache
run_command rm -rf ${TOP}/.cache/cpm
run_command rm -rf ${TOP}/.cache/gxf
}
# Help text for the `check_system_deps` command (printed by the CLI dispatcher).
check_system_deps_desc() { c_echo 'Check system dependencies
Ensure that the system has all adequate prerequisites to be
able to build the Holoscan SDK.
'
}
#######################################
# Verify build prerequisites: docker, docker buildx, docker group membership,
# qemu for aarch64 cross-builds, and a recent NVIDIA Container Toolkit.
# Calls fatal (exits) on any hard failure.
# Fixes:
#  - `if ! $(cmd &>/dev/null)` only worked through a bash quirk (the empty
#    command substitution propagates the substituted command's exit status);
#    run the probe directly instead.
#  - the "groups doesn't contain 'docker'" message was printed twice
#    (c_echo_err immediately followed by fatal with the same text).
#  - the CTK version gate rejected a version exactly equal to the minimum.
#######################################
check_system_deps() {
c_echo W "Setup development environment..."
if ! command -v ${HOLOSCAN_DOCKER_EXE} > /dev/null; then
fatal G "${HOLOSCAN_DOCKER_EXE}" W " doesn't exists. Please install Docker:
https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository"
fi
if ! ${HOLOSCAN_DOCKER_EXE} buildx version &> /dev/null ; then
fatal G "${HOLOSCAN_DOCKER_EXE} buildx plugin" W " is missing. Please install " G "docker-buildx-plugin" W ":
https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository"
fi
if ! groups | grep -e docker -e root -q; then
fatal G "groups" W " doesn't contain 'docker' group. Please add 'docker' group to your user." B '
# Create the docker group.
sudo groupadd docker
# Add your user to the docker group.
sudo usermod -aG docker $USER
newgrp docker
docker run hello-world'
fi
# Cross-building aarch64 images on x86_64 needs qemu registered with binfmt.
if checkif_x86_64 && is_cross_compiling && [ ! -f /proc/sys/fs/binfmt_misc/qemu-aarch64 ]; then
fatal G "qemu-aarch64" W " doesn't exists. Please install qemu with binfmt-support to run Docker container with aarch64 platform" B '
# Install the qemu packages
sudo apt-get install qemu binfmt-support qemu-user-static
# Execute the registering scripts
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes'
fi
# Check NVIDIA CTK version
min_ctk_version="1.12.0"
recommended_ctk_version="1.14.1"
if ! command -v nvidia-ctk > /dev/null; then
fatal G "nvidia-ctk" W " not found. Please install the NVIDIA Container Toolkit."
fi
ctk_version_output=$(nvidia-ctk --version | grep version)
ctk_version_pattern="([0-9]+\.[0-9]+\.[0-9]+)"
if [[ "$ctk_version_output" =~ $ctk_version_pattern ]]; then
ctk_version="${BASH_REMATCH[1]}"
# Fail only when strictly older than the minimum: sort -V puts the smaller
# version first, and an exactly-equal version must be accepted.
if [[ "$ctk_version" != "$min_ctk_version" && "$(echo -e "$ctk_version\n$min_ctk_version" | sort -V | head -n1)" == "$ctk_version" ]]; then
fatal "Found nvidia-ctk Version $ctk_version. Version $min_ctk_version+ is required ($recommended_ctk_version+ recommended)."
fi
else
c_echo_err R "Could not extract nvidia-ctk version number."
c_echo_err " Version $min_ctk_version+ required."
c_echo_err " Version $recommended_ctk_version+ recommended."
fi
}
# Help text for the `build_image` command (printed by the CLI dispatcher).
build_image_desc() { c_echo 'Build the image where we can build the SDK
This will generate an image will the following tags:
- `latest`
- from VERSION file
- from git branch name
- from git tag, if any
- from git commit sha - used in the build and launch methods
'
}
#######################################
# Build the SDK "builder" docker image for the current $ARCH/$GPU target.
# Extra CLI arguments are forwarded verbatim to `docker build`.
# Globals: DO_STANDALONE, ARCH, GPU, NO_CACHE, HOLOSCAN_DOCKER_EXE, TOP.
#######################################
build_image() {
# Prerequisite steps
if [ "$DO_STANDALONE" != "true" ]; then
check_system_deps
fi
# Error if requesting iGPU and x86_64 when cross-compiling
if [ "$ARCH" = "x86_64" ] && [ "$GPU" = "igpu" ]; then
fatal R "Can't combine 'x86_64' arch with 'igpu' GPU type"
fi
# Docker tags (-t name:versions)
# Intentionally unquoted below so each "-t name:tag" pair word-splits
# into separate docker arguments.
local docker_tags=$(get_image_tag_flags $(get_build_img_name))
# Docker build
local extra_args="$@"
run_command $HOLOSCAN_DOCKER_EXE build \
--build-arg BUILDKIT_INLINE_CACHE=1 \
--build-arg GPU_TYPE=${GPU} \
--platform $(get_platform_str) \
--network=host \
${NO_CACHE} \
${docker_tags} \
${extra_args} \
${TOP}
}
# Help text for the `build` command (printed by the CLI dispatcher).
build_desc() { c_echo 'Build the SDK with CMake
This command will build the SDK with optional configuration arguments.
Arguments:
--build_libtorch [true | false] : Build the SDK with libtorch support
Default: true
Associated environment variable: HOLOSCAN_BUILD_LIBTORCH
--cudaarchs [native | all | <custom_arch_list>]
Default: "native" when building for host architecture, "all" otherwise
Associated environment variable: CMAKE_CUDA_ARCHITECTURES
--type [debug | release | rel-debug] : Specify the type of build
Default: release
Associated environment variable: CMAKE_BUILD_TYPE
--parallel, -j <njobs> : Specify the maximum number of concurrent processes to be used when building
Default: maximum (number of CPUs, using 'nproc')
Associated environment variable: CMAKE_BUILD_PARALLEL_LEVEL
--buildpath, -d <build_directory> : Change the build path.
Default: build-<arch>[-<gpu>]
Associated environment variable: CMAKE_BUILD_PATH
--installprefix, -i <install_directory> : Specify the install directory
Default: install-<arch>[-<gpu>]
Associated environment variable: CMAKE_INSTALL_PREFIX
--reconfigure, -f: Force reconfiguration of the CMake project. By default CMake is only run if
a CMakeCache.txt does not exist in the build directory or if CMake detects a reconfigure
is needed.
Default: false
--tidy [true | false] : Build the SDK with clang-tidy (will be slower)
Default: false
Associated environment variable: HOLOSCAN_ENABLE_CLANG_TIDY
'
}
#######################################
# Configure, build and install the SDK inside the builder container.
# Precedence for settings: CLI flags > environment variables > defaults.
# Unrecognized CLI arguments are forwarded to `docker run`.
# Globals: DO_STANDALONE, GPU, ARCH, USE_TTY, TOP, HOLOSCAN_DOCKER_EXE, plus
# the CMAKE_*/HOLOSCAN_* environment overrides documented in build_desc.
#######################################
build() {
# Prerequisite steps
if [ "$DO_STANDALONE" != "true" ]; then
# Adjust final stage for igpu to support nvdla_compiler
local stage="build"
if [ "$GPU" = "igpu" ]; then
stage="build-igpu"
fi
build_image --target $stage
fi
# Default cuda architecture
# "native" requires a local GPU, so cross arch/GPU builds default to "all".
local default_cuda_archs="native"
if is_cross_compiling || is_cross_gpu; then
default_cuda_archs="all"
fi
# Parse env variables first or set default values
local cuda_archs=$(get_cuda_archs "${CMAKE_CUDA_ARCHITECTURES:-$default_cuda_archs}")
local build_type=$(get_buildtype_str "${CMAKE_BUILD_TYPE:-release}")
local build_njobs="${CMAKE_BUILD_PARALLEL_LEVEL:-$(nproc)}"
local build_path="${CMAKE_BUILD_PATH:-$(get_build_dir)}"
local install_prefix="${CMAKE_INSTALL_PREFIX:-$(get_install_dir)}"
local reconfigure=false
local build_libtorch="${HOLOSCAN_BUILD_LIBTORCH:-'ON'}"
local enable_clang_tidy="${HOLOSCAN_ENABLE_CLANG_TIDY:-'OFF'}"
local config_args=""
# Parse args
# NOTE(review): extra_args/config_args are initialized as strings then
# appended to with array syntax; bash converts them to arrays with an empty
# first element, which disappears on unquoted expansion below. Fragile but
# load-bearing — do not "fix" one side without the other.
local extra_args=""
while [[ $# -gt 0 ]]; do
case $1 in
--build_libtorch)
build_libtorch_val=$(get_boolean "$2")
if [ "$build_libtorch_val" == "false" ]; then
build_libtorch='OFF'
fi
reconfigure=true
shift
shift
;;
--tidy)
tidy_val=$(get_boolean "$2")
if [ "$tidy_val" == "true" ]; then
enable_clang_tidy='ON'
fi
reconfigure=true
shift
shift
;;
--cudaarchs)
cuda_archs=$(get_cuda_archs "$2")
reconfigure=true
shift
shift
;;
--type)
build_type=$(get_buildtype_str "$2")
reconfigure=true
shift
shift
;;
--parallel|-j)
build_njobs="$2"
shift
shift
;;
--buildpath|-d)
build_path="$2"
shift
shift
;;
--installprefix|-i)
install_prefix="$2"
shift
shift
;;
--reconfigure|-f)
reconfigure=true
shift
;;
--config | -c)
config_args+=("$2")
reconfigure=true
shift
shift
;;
*)
extra_args+=("$1")
shift
;;
esac
done
# Error if requesting native cuda arch explicitly when cross-compiling
if [ "$cuda_archs" = "native" ] && (is_cross_compiling || is_cross_gpu); then
fatal Y "Cross-compiling " W "(host: $(get_host_arch), target: $ARCH)" R " does not support 'native' cuda architecture."
fi
# Native means we need the container to access the GPU for CMake to choose the architecture
# to use for nvcc.
if [ "$cuda_archs" = "native" ]; then
runtime=nvidia
else
runtime=runc
fi
# DOCKER PARAMETERS
#
# --rm
# Deletes the container after the command runs
#
# --net=host
# Provide access to the host network
#
# --interactive (-i)
# Making the container interactive allows cancelling the build
#
# ${USE_TTY}
# Set --tty (-t) when TTY is possible only
#
# --entrypoint=bash
# Override the default entrypoint to hide the regular message from the base image
#
# --runtime ${runtime}
# Docker runtime. Should ideally be runc for build (no need for drivers).
#
# --platform $(get_platform_str)
# Platform to build
#
# -u $(id -u):$(id -g)
# Ensures the generated files (build, install...) are owned by $USER and not root
#
# -v ${TOP}:/workspace/holoscan-sdk
# Mount the source directory
#
# -w /workspace/holoscan-sdk
# Start in the source directory
#
#
# CMAKE PARAMETERS
#
# -S . -B ${build_path}
# Generic configuration
#
# -D CMAKE_BUILD_TYPE=${build_type}
# Define the build type (release, debug...). Can be passed as env to docker also.
#
# -D CMAKE_CUDA_ARCHITECTURES=${cuda_archs}
# Define the cuda architectures to build for (native, all, all-major, custom). Apart from
# native, the last arch will be used for real and virtual architectures (PTX, forward
# compatible) while the previous archs will be real only.
#
# CMAKE BUILD COMMAND
#
# cmake --build ${build_path} -j ${build_njobs}
# Builds the SDK using the build path at ${build_path} with number of concurrent
# jobs given by -j ${build_njobs}
#
img="$(get_build_img_name):$(get_git_sha)"
# Flatten the config_args array into a single string for the embedded script.
config_args="${config_args[@]}"
run_command $HOLOSCAN_DOCKER_EXE run \
--rm \
--net=host \
--interactive \
${USE_TTY} \
--entrypoint=bash \
--runtime=${runtime} \
--platform $(get_platform_str) \
-u $(id -u):$(id -g) \
-v ${TOP}:/workspace/holoscan-sdk \
-w /workspace/holoscan-sdk \
${extra_args[@]} \
$img \
-c "set -e
if [ ! -f '${build_path}/build.ninja' ] || ${reconfigure} ; then
cmake -S . -B ${build_path} -G Ninja \
-D HOLOSCAN_BUILD_LIBTORCH=${build_libtorch} \
-D HOLOSCAN_ENABLE_CLANG_TIDY=${enable_clang_tidy} \
-D CMAKE_CUDA_ARCHITECTURES='${cuda_archs}' \
-D CMAKE_BUILD_TYPE=${build_type} \
${config_args}
fi
cmake --build ${build_path} -j ${build_njobs}
cmake --install ${build_path} --prefix ${install_prefix} --component holoscan
cmake --install ${build_path} --prefix ${install_prefix} --component holoscan-core
cmake --install ${build_path} --prefix ${install_prefix} --component holoscan-gxf_extensions
cmake --install ${build_path} --prefix ${install_prefix} --component holoscan-examples
cmake --install ${build_path} --prefix ${install_prefix} --component holoscan-gxf_libs
cmake --install ${build_path} --prefix ${install_prefix} --component holoscan-gxf_bins
cmake --install ${build_path} --prefix ${install_prefix} --component holoscan-modules
cmake --install ${build_path} --prefix ${install_prefix} --component holoscan-dependencies
cmake --install ${build_path} --prefix ${install_prefix} --component holoscan-python_libs
"
}
# Help text for the `build_run_image` command (printed by the CLI dispatcher).
build_run_image_desc() { c_echo 'Build the runtime image
Build a lightweight docker image meant for running applications only.
Arguments:
--cpp : Only build with C++ dependencies
--cpp-no-mkl : (x86_64 only) Only build with C++ dependencies apart from MKL (torch dependency)
'
}
#######################################
# Build the lightweight runtime docker image, copying artifacts out of the
# builder image. --cpp/--cpp-no-mkl select leaner Dockerfile target stages;
# other arguments are forwarded to `docker build`.
# Globals: DO_STANDALONE, GPU, NO_CACHE, HOLOSCAN_DOCKER_EXE, TOP.
#######################################
build_run_image() {
# Prerequisite steps
if [ "$DO_STANDALONE" != "true" ]; then
build $@
fi
# Parse args
# Default stage includes both C++ and Python runtime dependencies.
local stage="runtime_cpp_py"
local extra_args=""
while [[ $# -gt 0 ]]; do
case $1 in
--cpp)
stage="runtime_cpp"
shift
;;
--cpp-no-mkl)
stage="runtime_cpp_no_mkl"
shift
;;
*)
extra_args+=("$1")
shift
;;
esac
done
# Current build image to copy from some stages
local current_build_img="$(get_build_img_name):$(get_git_sha)"
# Run image tags (-t name:versions)
local docker_tags=$(get_image_tag_flags $(get_run_img_name))
# Build the runtime image
run_command $HOLOSCAN_DOCKER_EXE build \
--build-arg BUILDKIT_INLINE_CACHE=1 \
--build-arg BUILD_IMAGE=${current_build_img} \
--build-arg HOST_INSTALL_DIR=$(get_install_dir) \
--build-arg GPU_TYPE=${GPU} \
--platform $(get_platform_str) \
--network=host \
--target ${stage} \
${NO_CACHE} \
${docker_tags} \
${extra_args[@]} \
-f ${TOP}/runtime_docker/Dockerfile \
${TOP}
}
#===============================================================================
# Section: Test
#===============================================================================
# Help text for the `install_lint_deps` command. Note this also prints the
# live contents of the lint requirements file as part of the help output.
install_lint_deps_desc() { c_echo 'Install lint dependencies
This command will install the dependencies required to run the linting tools:
'
cat ${TOP}/python/requirements.lint.txt
echo
}
# Install the Python linting tools (ruff, cpplint, codespell, cmakelint, ...)
# listed in python/requirements.lint.txt into the current environment.
install_lint_deps() {
c_echo W "Install Lint Dependencies"
run_command ${HOLOSCAN_PY_EXE} -m pip install -r ${TOP}/python/requirements.lint.txt
}
# Help text for the `lint` command (printed by the CLI dispatcher).
lint_desc() { c_echo 'Lint the repository
Python linting: ruff
C++ linting: cpplint
CMake linting: cmakelint
Spelling: codespell
Arguments:
$@ - directories to lint (default: .)
'
}
#######################################
# Run every linter (ruff, cpplint, codespell, cmakelint) over the given
# directories (default: repo root), accumulating failures so all tools run
# before exiting. Exits the shell with 0 on success, 1 if any linter failed.
#######################################
lint() {
local DIR_TO_RUN=${@:-"."}
# We use $(command) || exit_code=1 to run all linting tools, and exit
# with failure after all commands were executed if any of them failed
local exit_code=0
pushd ${TOP} > /dev/null
c_echo W "Linting Python"
run_command ruff --version
run_command ruff check $DIR_TO_RUN || exit_code=1
# Set --diff so formatting suggestions are not automatically applied, but the suggestions
# will be printed to screen.
run_command ruff format --diff $DIR_TO_RUN || exit_code=1
c_echo W "Linting C++"
# We use `grep -v` to hide verbose output that drowns actual errors
# Since we care about the success/failure of cpplint and not of grep, we:
# 1. use `set -o pipefail` to fail if `cpplint` fails
# 2. use `grep -v ... || true` to ignore whether grep hid any output
run_command set -o pipefail; ${HOLOSCAN_PY_EXE} -m cpplint \
--exclude .cache \
--exclude build \
--exclude install \
--exclude build-\* \
--exclude install-\* \
--exclude src/core/services/generated \
--exclude modules/holoviz/thirdparty/nvpro_core \
--recursive $DIR_TO_RUN \
| { grep -v "Ignoring\|Done processing" || true; } || exit_code=1
c_echo W "Code spelling"
# Supports inline comment to ignore code spell check for a line, add `// codespell-ignore` or
# `# codespell-ignore` at the end of the line
run_command codespell --version
run_command codespell $DIR_TO_RUN --skip="*.onnx,NOTICE.txt,*.toml,./docs/vale/styles/write-good" \
--ignore-regex=".*codespell-ignore$" || exit_code=1
c_echo W "Linting CMake"
# NOTE(review): `-name *.cmake` below is unquoted and could be expanded by
# the shell if a *.cmake file exists in the current directory — confirm and
# quote if so.
run_command cmakelint --filter=-whitespace/indent,-linelength,-readability/wonkycase,-convention/filename,-package/stdargs \
$(find $DIR_TO_RUN '(' -name CMakeLists.txt -o -name *.cmake ')' -not -path "*build-*/*" -not -path "*build/*" \
-not -path "*./.*" -not -path "*install/*" -not -path "*install-*/*") || exit_code=1
popd > /dev/null
exit $exit_code
}
# Help text for the `test` command (printed by the CLI dispatcher).
test_desc() { c_echo 'Execute test cases with CTest
This command will run the tests using CTest. It will run headless
if no DISPLAY is found.
Arguments:
--name, -n <regex> Name of test(s) to execute (regular expression)
Default: all tests
--verbose, -v Print the test outputs
--timeout, -t <seconds> CTest timeout in seconds
Default: 480
--options, -o <opts> Extra flags to pass to CTest
Default: None
'
}
#######################################
# Run the CTest suite inside the build container via `launch`.
# NOTE: this intentionally shadows the `test` shell builtin — the CLI
# dispatcher maps command names to functions; use [ ] instead of `test`
# elsewhere in this script.
# Unrecognized arguments are forwarded to `docker run` through `launch`.
#######################################
test() {
local test_regex_flag=""
local verbose=""
local timeout=480
local options=""
# Parse CLI arguments next
local extra_args=""
while [[ $# -gt 0 ]]; do
case $1 in
--name|-n)
# Match the given regex case-insensitively-ish by also trying its
# uppercased form (CTest -R is case-sensitive).
local test_regex="$2"
local test_regex_uppercase="${test_regex^^}"
test_regex_flag="-R '$test_regex|$test_regex_uppercase'"
shift
shift
;;
--verbose|-v)
verbose="--verbose"
shift
;;
--timeout|-t)
timeout="$2"
shift
shift
;;
--options|-o)
options="$2"
shift
shift
;;
*)
extra_args+=("$1")
shift
;;
esac
done
# --init
# Use tini as entrypoint to forward proper signals to xvfb (below)
#
# --cap-drop=NET_BIND_SERVICE
# needed for SYSTEM_TEST that depends on privileged ports being unbindable
#
# $run_headless (xvfb-run -a)
# creates a display buffer for Holoviz use, the `-a` flag automatically finds
# an open display port, and sets the DISPLAY env var. This is installed inside
# the container at runtime so it isn't installed in the public container.
# Additionally, `xvfb-run -a docker run ...` is not used, as no solution was found
# that could reliably set the `-e DISPLAY` parameter using this approach.
#
local run_headless=$([ -z ${DISPLAY-} ] && echo "xvfb-run -a")
launch $(get_build_dir) \
--init \
--cap-drop=NET_BIND_SERVICE \
${extra_args[@]} \
--run-cmd "$run_headless ctest . $test_regex_flag --timeout $timeout --output-on-failure $verbose $options"
}
#===============================================================================
# Section: Launch
#===============================================================================
# Help text for the `launch` command (printed by the CLI dispatcher).
launch_desc() { c_echo 'Launch the Holoscan build container
Note: any extraneous arguments not listed below will be added to `docker run`
Arguments:
$1 - Working directory (e.g, "install" => "/workspace/holoscan-sdk/install")
Default: build-<arch>[-<gpu>]
--mount-point - Specifies the mount point (default is the directory of this script)
--run-cmd - Specifies a command to run in the container instead of running interactively.
This is the equivalent of what you would put after `bash -c` with `docker run`.
--ssh-x11 : Enable X11 forwarding of graphical HoloHub applications over SSH
'
}
launch() {
# Prerequisite steps
if [ "$DO_STANDALONE" != "true" ]; then
build_image --target build
fi
local container_mount="/workspace/holoscan-sdk"
local mount_point="${TOP}"
local mount_device_opt=""
local extra_args=""
local run_cmd="bash"
local ssh_x11=0
# Skip the first argument to pass the remaining arguments to the docker command.
local working_dir=${1:-$(get_build_dir)}
if [ -n "${1-}" ]; then
shift
fi
# Parse CLI arguments next
while [[ $# -gt 0 ]]; do
case $1 in
--mount-point)
mount_point="$2"
shift
shift
;;
--run-cmd)
run_cmd="$2"
shift
shift
;;
--ssh-x11)
ssh_x11=1
shift
;;
*)
extra_args+=("$1")
shift
;;
esac
done
local host_mount_to_top=$(realpath --relative-to="$mount_point" "$TOP")
local container_top="${container_mount}/${host_mount_to_top}"
# Mount V4L2 device nodes
for video_dev in $(find /dev -regex '/dev/video[0-9]+'); do
mount_device_opt+=" --device $video_dev"
done
# Mount Tegra's Video Input unit (capture data from CSI) device nodes
for capture_dev in $(find /dev -regex '/dev/capture-vi-channel[0-9]+'); do
mount_device_opt+=" --device $capture_dev"
done
# Mount AJA device nodes
for aja_dev in $(find /dev -regex '/dev/ajantv2[0-9]+'); do
mount_device_opt+=" --device $aja_dev"
done
# Mount ConnectX device nodes
if [ -e /dev/infiniband/rdma_cm ]; then
mount_device_opt+=" --device /dev/infiniband/rdma_cm"
fi
for uverbs_dev in $(find /dev -regex '/dev/infiniband/uverbs[0-9]+'); do
mount_device_opt+=" --device $uverbs_dev"
done
# The device nodes under /dev/dri are owned by the root user and video and render groups,
# so using a non-root user in the container requires to access these groups. Adding the group
# names to `docker run --group-add` might not work as the group ids in the container and on the
# host device might not match. Mounting `/etc/group` doesn't address it since the mount occurs
# after the user and its groups are setup. Instead, we pass the group ids which we compute on
# the host before starting the container.
groups=""
video_id=$(get_group_id video)
if [ -n "$video_id" ]; then
groups+=" --group-add $video_id"
fi
render_id=$(get_group_id render)
if [ -n "$render_id" ]; then
groups+=" --group-add $render_id"