-
Notifications
You must be signed in to change notification settings - Fork 18
/
Copy pathsystemlink-values.yaml
1250 lines (1190 loc) · 51.3 KB
/
systemlink-values.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
## Default values for systemlink.
## This is a YAML-formatted file.
## Declare override values for variables.
## Global variables expected to be inherited from parent helm charts which apply to the entire application.
##
global:
## Host names for the cluster's UI ingress controller.
# <ATTENTION> - Set this to the DNS address where the SystemLink UI will be hosted.
##
hosts: &uiHosts
- &primaryUIHost "systemlink.example.com"
## Host names for the cluster's API ingress controller.
# <ATTENTION> - Set this to the DNS address to be used for communication between client systems and the SystemLink API.
##
apiHosts: &apiHosts
- &primaryApiHost "systemlink-api.example.com"
## Defines the secret required to pull containers hosted on a private image repository.
##
niImagePullSecret: &niPullSecret "niartifacts-secret"
## Defines secrets required if containers are hosted on a private image repository.
##
imagePullSecrets: &pullSecrets [*niPullSecret]
## Overrides the default image registry.
# <ATTENTION> - Use this override if mirroring the SystemLink container registry.
##
imageRegistry: &imageRegistryRef "niedge01.jfrog.io/ni-docker"
## Ingress settings that apply globally.
# <ATTENTION> - Use the following section to apply annotation-based configuration to all SystemLink ingresses.
# Configuration is specific to the ingress controller, and the defaults will be sufficient for many deployments.
##
ingress:
## Ingress settings that apply to the apiHosts endpoints.
##
api:
## Annotations for the ingress.
##
annotations: {}
## Ingress settings that apply to the hosts endpoints.
##
ui:
## @param global.ingress.ui.annotations Annotations for the ingress.
##
annotations: {}
## Disable secret deployment if you want to manually manage secrets on the cluster.
## WARNING: Changing this value from true to false during an upgrade will delete existing secrets.
# <ATTENTION> - Set to false if you do not want to manage secrets as part of the Helm installation.
##
deploySecrets: true
## Customize telemetry data collection.
##
telemetry:
## Specify whether to collect telemetry data.
## Requires deployment of Prometheus.
## https://prometheus.io/docs/introduction/overview/
##
enabled: false
## Url target of OpenTelemetry exporter. Corresponds to OTEL_EXPORTER_OTLP_ENDPOINT.
## https://opentelemetry.io/docs/concepts/sdk-configuration/otlp-exporter-configuration/#otel_exporter_otlp_endpoint
##
openTelemetryExporterOtlpEndpoint: ""
## Specify whether to include user IDs in traces.
## Turned off by default per recommendation of the OpenTelemetry Specification.
## https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/span-general.md#general-identity-attributes
##
includeUserInfo: false
## Allow the SystemLink web application to collect usage data to help our engineers improve the performance and
## capabilities of the product. NI will not share this information with third parties or use it to identify or
## contact users.
## https://www.ni.com/en/about-ni/legal/ceip.html
##
userTelemetry:
## Allow the SystemLink web application to send usage data to NI.
# <ATTENTION> - Opt out of usage data collection by setting this value to false.
##
enableFrontEndTelemetry: true
## Data will be uploaded to this URL. NI does not recommend changing this value.
##
telemetryEndpoint: "https://esp.ni.com/"
## CDN endpoint for telemetry-driven content. NI does not recommend changing this value.
##
cdnEndpoint: "https://web-sdk.ni.com/"
##
## MongoDB Database Configuration.
## Many SystemLink Enterprise services utilize MongoDB databases for data storage. Customers can deploy their own MongoDB instance
## or utilize a managed solution such as MongoDB Atlas. Once provisioned, provide the connection details in global.secrets.mongodb.connection_string.
##
mongodb:
## Enable or disable installation of per-service MongoDB instances.
# <ATTENTION> - If you do *not* have your own MongoDB instance set this (`global.mongodb.install`) to `true`.
##
install: false
##
## Specify secrets containing trusted certificates that should be installed in all the necessary applications.
## The secrets must be deployed manually prior to installing this chart. Config maps may be used when it is not possible to use a secret.
## Example:
## - secretName: "ca-secret"
## key: "cert"
## - secretName: "ca-config-map"
## key: "my-cert"
## isConfigMap: true
##
# <ATTENTION> - If one or more private Certificate Authorities are required for HTTPS access to this application
# or for TLS access to any external resources referenced by this application (databases, file storage, etc.),
# a copy of the certificate chain must be deployed as a secret to the cluster prior to deploying this application.
# For certificate authorities, only the public certificate needs to be deployed to the cluster.
#
# EXAMPLE: kubectl -n systemlink-namespace create secret generic my-ca-certificate --from-file=cert=path/to/my-ca.crt
#
# Use the 'trustedCertificatesSecrets', 'apiHostCertificateSecret' and 'deployApiHostCertificateToSystems'
# values below to configure the application and registered client systems with the required trust chain.
#
trustedCertificatesSecrets: []
##
## Secret containing the TLS certificate for the API host. This configuration is only necessary when the API
## host is using a private root CA certificate. When deployApiHostCertificateToSystems is true, this certificate
## will be deployed and installed on all managed clients to enable communication with the API host.
##
apiHostCertificateSecret:
# secretName: "ca-secret"
# key: "certificate"
##
## Deploy the certificate stored in apiHostCertificateSecret to all managed systems.
##
deployApiHostCertificateToSystems: false
## The following values are typically shared across multiple configurations. The values
## here are not used directly as configuration but provide a convenient way to share
## common configuration throughout this file. Individual references to these values can
## be overridden with custom values if required.
##
## Hostname of an external S3 provider.
# <ATTENTION> To connect to an external S3 provider, set s3Host, s3Scheme, s3Port and s3Region.
##
s3Host: &s3Host "s3.amazonaws.com"
## Service name of an on-cluster S3 provider.
# <ATTENTION> To connect to an S3 provider within the same cluster, set s3ServiceName, s3Scheme, s3Port and s3Region,
# and set s3Host to an empty string.
##
s3ServiceName: &s3ServiceName ""
## Scheme used to access the configured S3 provider ("http" or "https")
##
s3Scheme: &s3Scheme "https"
## Port number of the S3 provider
##
s3Port: &s3Port 443
## Region where S3 storage is located
##
s3Region: &s3Region "us-east-1"
## Core configuration for the RabbitMQ message broker
##
rabbitmq:
## Policy used when starting pods in the stateful set.
# <ATTENTION> - The "OrderedReady" podManagementPolicy must be used for the first deployment
# of the application. Once the application has been deployed, change the
# podManagementPolicy to "Parallel" to avoid synchronization issues when upgrading
# RabbitMQ instances. It will be necessary to manually delete the rabbitmq stateful
# set from the deployment prior to redeploying with the new setting.
##
podManagementPolicy: "OrderedReady"
# podManagementPolicy: "Parallel"
clustering:
## Force cluster to boot after an unexpected shutdown (in an unexpected order).
# <ATTENTION> - If RabbitMQ fails to restart after an unexpected shutdown, setting forceBoot to
# true temporarily may resolve the issue. The RabbitMQ maintainers recommend only
# setting this flag to recover from extraordinary circumstances. It should not be used
# for regular application maintenance.
# https://www.rabbitmq.com/docs/clustering#restarting
##
forceBoot: false
## Core configuration for the SystemLink web application.
##
webserver:
## Ingress configuration
##
ingress:
## Increase the read timeout from the nginx default to give proxied services more time to respond to requests.
## Applies only if you are using an nginx ingress controller.
##
annotations:
nginx.ingress.kubernetes.io/proxy-read-timeout: '90'
## Values used to configure OpenID Connect providers.
# <ATTENTION> - Configure your OpenID Connect provider here, as well as the claims to make available to the SystemLink application.
# Consult your provider's documentation regarding exposing scopes to clients.
##
oidc:
## Required - Secret name that holds client ID, client secret, and JWKs.
##
secretName: "oidc-secret"
## Required - Include either issuer URL (for discovery) or the provider configuration as JSON wrapped in single quotes.
##
issuer: "https://oidc.example.com/"
# provider: '<provider-config-json>'
## Optional - Claim to use for user ID.
##
# userIDClaim: "email"
## Optional - Claim to use for user name.
##
# usernameClaim: "name"
## Optional - Scopes requested from the provider.
## At minimum, the "openid" scope is required; the "email" and "profile" scopes are required to populate user preferences
##
scope: "openid email profile"
## Optional - Pass the id_token_hint to provider when logging out.
## https://openid.net/specs/openid-connect-rpinitiated-1_0.html#RPLogout
## The id_token_hint parameter is recommended but may cause issues with some identity provider configurations.
# <ATTENTION> - Set enableLogoutToken to "true" if the identity provider supports it.
enableLogoutToken: "false"
# Optional - Enable the user-info end-point to return the user's claims
##
# enableUserInfo: "false"
## If access to the OpenID Connect provider requires a proxy server, provide proxy service configuration here.
# <ATTENTION> - Configure the proxy server for OpenID Connect access here.
##
proxy:
## The host and port of the proxy, no schema
##
# authority: "example.com:8080"
## Optional - The name of the secret that has the proxy credentials
##
# secretName: "webserver-proxy-credentials"
## Optional - The key for the username in the credentials secret
##
userSecretKey: "username"
## Optional - The key for the password in the credentials secret
##
passwordSecretKey: "password"
## Defines an initial administrator role mapping, allowing an initial user or users to access the system.
## At least one user must be defined during installation to allow access to the application. The initial user(s) can then provision
## access for further users. By default, users are only created on install, not during upgrades.
##
userservicesetup:
initialAdministratorMapping:
## Overrides default behavior and creates or updates an administrator user mapping on upgrade.
# <ATTENTION> - If true, the configured administrator will be reconfigured on upgrade.
##
createOnUpgrade: false
## The mapping type used to define the administrator user.
##
mappingType: "oidc-claim"
## The property used to match mappingValue.
##
mappingKey: "email"
## The property value used to identify admin users. This value is required when creating a new user mapping.
## To avoid unwanted information disclosure, it is recommended to only set this value when creating a new mapping.
# <ATTENTION> - Choose an initial system administrator here. You must configure an administrator to enable access
# to the SystemLink application. This example shows configuring an administrator by email address, but any
# OpenID Connect claim can be used to define this mapping.
##
mappingValue: "[email protected]"
## Shared database configuration.
##
database:
## If using an externally managed PostgreSQL database, this value can be used to define a public TLS certificate used
## to authenticate with that database. The following argument can be used to load a certificate file from disk as part of a
## Helm install/upgrade command:
## --set-file database.postgresCertificate my-cert.pem
##
postgresCertificate: ""
## Name that will be used for this certificate when mounted on disk.
##
postgresCertificateFileName: &postgresCertificateFileName "postgres-tls-certificate.pem"
## Name of the ConfigMap used to deploy the certificate.
##
postgresCertificateConfigMapName: &postgresCertificateConfigMap "postgres-tls-certificate"
## Configuration for test result storage.
##
testmonitorservice:
## Database configuration
##
database:
## The PostgreSQL database connection string
## NOTE: If specified, the `database.connectionInfo` parameters are ignored. If
## `database.tls.enabled` is set to `true`, the connection string must include the
## appropriate SSL Mode (Prefer, Require, VerifyCA, or VerifyFull).
# <ATTENTION> - If connecting to an external PostgreSQL database, you must configure either
# the connectionString or the connectionInfo section with the details of your database.
##
connectionString:
secretName: "testmonitorservicedb-connection"
## Key from the secret to retrieve the connection string the service will use to connect to PostgreSQL.
## NOTE: Ignored unless `database.connectionstring.secretName` parameter is set.
connectionStringKey: "connection-string"
## Key from the secret to retrieve the connection string that will be used to perform migrations managed by this chart.
## If unset database.connectionString.connectionStringKey is used instead.
## NOTE: Ignored unless `database.connectionstring.secretName` parameter is set.
migrationConnectionStringKey: null
## The PostgreSQL database connection info.
## NOTE: If the `database.connectionString` parameters are specified, the `database.connectionInfo`
## parameters are ignored.
##
# connectionInfo:
# ## PostgreSQL host name.
# ##
# host: ""
# ## PostgreSQL port.
# ##
# port: ""
# ## PostgreSQL database name.
# ##
# dbName: "nisystemlink"
# ## PostgreSQL user name.
# ##
# user: "nisystemlink"
# ## PostgreSQL username used to perform migration managed by this chart.
# ## If unset database.connectionInfo.user is used instead.
# migrationUser: ""
# ## The name of an existing secret with PostgreSQL connection credentials.
# ##
# secretName: "testmonitorservicedb-connection"
# ## @param database.connectionInfo.passwordKey Password key for database.connectionInfo.user to be retrieved from existing secret
# ## NOTE: Ignored unless `database.connectionInfo.secretName` parameter is set.
# ##
# passwordKey: "password"
# ## @param database.connectionInfo.migrationPasswordKey Password key for database.connectionInfo.migrationUser to be retrieved from existing secret
# ## If unset database.connectionInfo.passwordKey is used instead.
# ## NOTE: Ignored unless `database.connectionInfo.secretName` parameter is set.
# ##
# migrationPasswordKey: null
## The PostgreSQL database TLS configuration
##
tls:
## Enable TLS communication with the PostgreSQL database.
## NOTE: If true, the public TLS certificate from the server (.pem or .crt)
## must be uploaded to the cluster as a Kubernetes ConfigMap and its name and key
## be set under the `database.tls.existingConfigMap` and `database.tls.certificateSubPath`
## parameters respectively.
## Use the database.postgresCertificate value to automatically upload this certificate.
##
enabled: true
## The name of an existing ConfigMap with a TLS certificate for the database.
##
existingConfigMap: *postgresCertificateConfigMap
## The Certificate key to be retrieved from existing ConfigMap
##
certificateSubPath: *postgresCertificateFileName
## Configuration for the Alarm Service
##
alarmservice:
## Per-replica rate limiting configuration
##
rateLimits:
## Configuration for the global rate limiter that applies to all requests
##
global:
## Enable the rate limiter
##
enabled: true
## Number of tokens to replenish per second
##
tokensPerSecond: 750
## Maximum number of tokens that can accumulate
##
tokenLimit: 200
## Number of requests that may queue when no tokens are available
##
queueLimit: 0
## Configuration for the per-user rate limiter for acknowledging alarms
##
acknowledge:
## Enable the rate limiter
##
enabled: true
## Number of tokens to replenish per second
##
tokensPerSecond: 5
## Maximum number of tokens that can accumulate
##
tokenLimit: 5
## Number of requests that may queue when no tokens are available
##
queueLimit: 1
## Configuration for the per-API key rate limiter for creating or updating alarms
##
createOrUpdate:
## Enable the rate limiter
##
enabled: true
## Number of tokens to replenish per second
##
tokensPerSecond: 700
## Maximum number of tokens that can accumulate
##
tokenLimit: 200
## Number of requests that may queue when no tokens are available
##
queueLimit: 0
## Configuration for the per-user rate limiter for deleting alarms
##
delete:
## Enable the rate limiter
##
enabled: true
## Number of tokens to replenish per second
##
tokensPerSecond: 10
## Maximum number of tokens that can accumulate
##
tokenLimit: 10
## Number of requests that may queue when no tokens are available
##
queueLimit: 2
## Configuration for the per-user rate limiter for querying alarms
##
getAndQuery:
## Enable the rate limiter
##
enabled: true
## Number of tokens to replenish per second
##
tokensPerSecond: 10
## Maximum number of tokens that can accumulate
##
tokenLimit: 10
## Number of requests that may queue when no tokens are available
##
queueLimit: 2
database:
## The amount of time inactive alarms will be retained in the database ([d.]hh:mm[:ss] format).
##
inactiveAlarmCleanupInterval: 30.00:00
activeAlarmCleanup:
## The amount of time active alarms will be retained in the database since they were
## last updated ([d.]hh:mm[:ss] format).
##
interval: 90.00:00
## Whether to limit active alarm cleanup to only affect active alarms whose most recent
## transition has a CLEAR transition type.
##
onlyCleanUpClearAlarms: false
## The total number of alarms the service supports creating, including
## both active and inactive alarms. Must be greater than activeAlarmLimit.
## The service will return an error if this limit is exceeded. Increasing
## this limit requires tuning of database resources.
##
alarmLimit: 100000
## The total number of active alarms the service supports creating. Must be less
## than alarmLimit. The service will return an error if this limit is exceeded.
## Increasing this limit requires tuning of database resources.
##
activeAlarmLimit: 10000
## Configuration for the Grafana dashboard provider.
##
dashboardhost:
## Name to use for the database access secret.
##
databaseSecretName: &dashboardhostdbSecret "dashboardhost-postgres-secrets"
## Override Grafana ingress to enable pulling in globally-defined ingress annotation.
## The Grafana ingress is disabled by default so no further configuration is required under the "grafana" section.
##
ingress:
## Values can be templated.
##
annotations: {}
hosts: *uiHosts
## This API ingress makes it possible to expose elements of the Grafana backend API via the API ingress.
##
apiIngress:
## Values can be templated.
##
annotations: {}
hosts: *apiHosts
## Grafana community chart configuration. See https://github.com/grafana/helm-charts/blob/main/charts/grafana/README.md
## for more documentation and examples for these values.
##
grafana:
## Configure access to the Grafana container.
##
image:
## Image pull secrets required by Grafana
pullSecrets: *pullSecrets
## Node count. This must be set to 1 if Grafana is not configured to use an external database.
## This value is only used if autoscaling is disabled.
##
replicas: 1
## Create HorizontalPodAutoscaler object for deployment type
##
autoscaling:
# <ATTENTION> - If not using an external database, you must disable autoscaling and use a single Grafana instance.
enabled: true
## Use an existing secret for the admin user.
# <ATTENTION> - Uncomment this section to use a different secret to configure the admin user.
##
# admin:
# ## The name of an existing secret containing the admin credentials.
# existingSecret: "dashboardhost-login"
# ## The key in the existing admin secret containing the user name.
# userKey: admin-user
# ## The key in the existing admin secret containing the password.
# passwordKey: "admin-password"
## Uncomment this to provision additional datasources (the chart provisions ni-slnotebook-datasource by default)
## When adding additional datasources, take care to leave the ni-slnotebook-datasource entry in place, and add
## new entries after it.
# <ATTENTION> - Uncomment this to provision additional data sources (the chart provisions systemlink-notebook-datasource by default)
##
# datasources:
# datasources.yaml:
# apiVersion: 1
# datasources:
# - name: "SystemLink Notebooks"
# type: "ni-slnotebook-datasource"
# access: "direct"
# url: /
# version: 1
## Defines additional mounts from Secrets.
# <ATTENTION> - This configures a connection to an external PostgreSQL database. Remove this section if not using an external database.
##
extraSecretMounts:
- name: *dashboardhostdbSecret
secretName: *dashboardhostdbSecret
defaultMode: 0440
mountPath: "/etc/secrets/dashboardhost"
readOnly: true
## Defines additional mounts from ConfigMaps.
# <ATTENTION> - This configures a connection to an external PostgreSQL database. Remove this section if not using an external database.
##
extraConfigmapMounts:
- name: *postgresCertificateConfigMap
mountPath: "/etc/ssl/certs/dashboardhost/"
subPath: *postgresCertificateFileName
configMap: *postgresCertificateConfigMap
readOnly: true
## This sets the GF_INSTALL_PLUGINS env variable
## https://grafana.com/docs/grafana/latest/installation/docker/#install-plugins-from-other-sources
## <ATTENTION> - To include additional Grafana plugins, uncomment this section, update the existing URLs where
## X.X.X.zip is the latest version in the values.yaml file, and add the URLs for any additional plugins.
##
# plugins:
# - http://localhost:8080/ni/plugins/systemlink-notebook-datasource/X.X.X.zip;ni-slnotebook-datasource
# - http://localhost:8080/ni/plugins/systemlink-dataframe-datasource/X.X.X.zip;ni-sldataframe-datasource
# - http://localhost:8080/ni/plugins/plotly-panel/X.X.X.zip;ni-plotly-panel
## Customize the grafana.ini file.
##
grafana.ini:
server:
## Host name for the Grafana instance.
# <ATTENTION> - The DNS address of the SystemLink application must be duplicated here.
##
domain: *primaryUIHost
## Database configuration. See here for more documentation: https://grafana.com/docs/grafana/latest/administration/configuration/#database
# <ATTENTION> - This configures a connection to an external PostgreSQL database. Remove this section if not using an external database.
##
database:
## Either mysql, postgres or sqlite3.
##
type: "postgres"
## The database user (not applicable for sqlite3).
##
user: $__file{/etc/secrets/dashboardhost/user}
## The database user's password (not applicable for sqlite3). If the password contains # or ; you have to wrap it with triple quotes. For example """#password;""".
##
password: $__file{/etc/secrets/dashboardhost/password}
## Only applicable to MySQL or Postgres. Includes IP or host name and port or in case of Unix sockets the path to it.
## For example, for MySQL running on the same host as Grafana: host = 127.0.0.1:3306 or with Unix sockets: host = /var/run/mysqld/mysqld.sock.
##
host: $__file{/etc/secrets/dashboardhost/host}
## The name of the Grafana database. Leave it set to grafana (default) or some other name.
## <ATTENTION> - You must create the database manually before deploying. If you are using the default database
## name, you must create a database named "grafana". The database user, if not a superuser, will require USAGE
## and CREATE privileges on the "public" schema and SELECT, INSERT, UPDATE, and DELETE privileges on all tables
## in the "public" schema.
# name: "database-name"
## Use either URL or the other fields above to configure the database.
## url: postgres://dashboardhost:[email protected]:5432/grafana
## For PostgreSQL, use either disable, require or verify-full. For MySQL, use either true, false, or skip-verify.
##
ssl_mode: "require"
## The path to the CA certificate to use. On many Linux systems, certs can be found in /etc/ssl/certs.
# <ATTENTION> - The filename here must match database.postgresCertificateFileName.
##
ca_cert_path: "/etc/ssl/certs/dashboardhost/postgres-tls-certificate.pem"
## Plugin configuration
## Uncomment to set Grafana plugin configuration.
##
# plugins:
## Enter a comma-separated list of plugin identifiers to identify plugins that are allowed to be loaded even if they lack a valid signature.
## <ATTENTION> - Uncomment to set the list of unsigned plugins to load. This will override the plugins included by default so ensure you maintain
## the list of plugins listed in the default values file.
##
# allow_loading_unsigned_plugins: ni-slnotebook-datasource,ni-sldataframe-datasource,ni-plotly-panel
## Grafana provisioning
##
grafanaprovisioning:
grafanaDashboardsProvisioning:
## Dashboards listed are enumerated over the dashboard-configmap-template.yaml
## Each key under the "dashboards" item will be the name of a ConfigMap.
## It is recommended to add the "-dashboard" suffix to each key to avoid collision with other ConfigMaps
## Dashboard json files can be placed under "dashboards/[project-name]/[dashboard-file].json"
## By default, all "enabled" parameters are considered as true even if they are not mentioned. You can disable this by assigning "whitelistMode: true"
##
whitelistMode: true
grafanaDatasourcesProvisioning:
## Datasources listed are enumerated over the datasource-configmap-template.yaml
## Each key under the "datasources" item will be the name of a datasource in the ConfigMap.
## By default, all "enabled" parameters are considered as true even if they are not mentioned. You can disable this by assigning "whitelistMode: true"
##
whitelistMode: true
projects:
## Datasources must be grouped under projects
## There is an optional "enabled" parameter that defaults to true which can be used to toggle the project/dashboard on or off.
# some-project:
# enabled: false | true
# datasources:
# some-datasource:
# enabled: false | true
# name: Some Datasource
# type: some-datasource
# ## Each datasource is then copied as written into the template. You can follow the format here to define an entry: https://github.com/grafana/helm-charts/blob/307aae1ba29039c4c80581d457299c0a126b55c1/charts/grafana/values.yaml#L462
# ## The main difference is that instead of a list of datasources we use a key-value pair. The values for it can be defined the same way as in the grafana docs.
## <ATTENTION> - Uncomment to override the enabled plugins. This will override the plugins enabled by default so ensure you enable the plugins listed in the default values file.
# systemlink:
# enabled: true
# datasources:
# ni-slnotebook-datasource:
# enabled: true
# ni-sldataframe-datasource:
# enabled: true
## Configuration for the Data Frame service.
##
dataframeservice:
  ## Ingress configuration
  ##
  ingress:
    ## Increase the maximum HTTP request body size from the nginx default. Only applies if an nginx
    ## ingress controller is used. Should be set to the same size as requestBodySizeLimit.
    ##
    annotations:
      nginx.ingress.kubernetes.io/proxy-body-size: 256m
  ## <ATTENTION> - Configure rate limiting. Limits are enforced per-replica.
  ## Each replica of the dataframe service applies its own limit.
  ## Considering load-balancing, the effective rate will be higher than the
  ## individual rates configured here.
  ##
  rateLimits:
    ## Configure rate limits for ingestion
    ##
    ingestion:
      ## Number of concurrent requests that a single replica can serve for ingesting data.
      ## Subsequent requests will be put in a queue.
      ## If you increase the request limit, you may need to increase "resources.requests.memory" proportionally.
      ## Should be configured to the same value as "ingestion.s3StreamPool.maximumPooledStreams".
      ##
      requestsLimit: &dataFrameIngestionRateLimit 20
      ## Size of the queue for concurrent requests. If a request arrives at a replica whose queue is
      ## full, that replica will return an HTTP 429 (Too Many Requests) error code.
      ##
      queueSize: 0
  ingestion:
    ## Configuration for the pool of streams used to upload the data to S3.
    ##
    s3StreamPool:
      ## Maximum number of streams that will be pooled.
      ## The recommendation is to provide the same number of pool streams as the limit of requests that
      ## can be processed in "rateLimits.ingestion.requestsLimit".
      ## If you increase the number of pooled streams, you may need to increase "resources.requests.memory" proportionally.
      ## WARNING: Setting this value to 0 would leave the pool unbounded, which could cause high memory usage.
      ##
      maximumPooledStreams: *dataFrameIngestionRateLimit
    ## Limits the body size for requests. The ingress may also impose a request body size
    ## limit, which should be set to the same value (see "ingress.annotations" above).
    ## Accepts units in "MiB" (Mebibytes, 1024 KiB) or in "MB" (Megabytes, 1000 KB).
    ##
    requestBodySizeLimit: 256MiB
  ## Configure S3 access.
  ##
  s3:
    auth:
      ## Name of the secret containing the S3 login credentials
      ##
      secretName: "nidataframe-s3-credentials"
    ## The name of the S3 bucket that the service should connect to.
    ##
    bucket: &dataframeBucket "systemlink-dataframe"
    ## This should just be the name of the scheme, without the trailing ://.
    ##
    schemeName: *s3Scheme
    ## Set this value to connect to an external S3 instance.
    ##
    host: *s3Host
    ## Set this value to connect to an S3 instance which is internal to the cluster. Ignored if host is set.
    ##
    service: *s3ServiceName
    ## S3 port number.
    ##
    port: *s3Port
    ## Maximum number of concurrent connections to S3 per replica.
    ##
    maximumConnections: 32
    ## S3 Region
    # <ATTENTION> This must be set to the region of the S3 instance.
    ##
    region: *s3Region
  ## Configure Dremio access
  ##
  sldremio:
    ## Uncomment this section to adjust the resource requests for the Dremio executor and coordinator.
    ## Refer to Dremio documentation at https://docs.dremio.com/software/deployment/system-requirements/#server-or-instance-hardware
    ## for a description of the recommended minimum values.
    # <ATTENTION> - These are the recommended values for the executor and the coordinator where the coordinator
    ## can act as an executor. Although 16 CPU cores and 128GB of RAM are recommended, 1 CPU core and 8GB of RAM
    ## are left for the OS.
    ##
    # coordinator:
    #   cpu: 15
    #   memory: 122800
    #   volumeSize: 128Gi
    # executor:
    #   count: 4
    #   cpu: 15
    #   memory: 122800
    #   volumeSize: 128Gi
    ## CPU and memory allocated to each zookeeper pod, expressed in CPU cores and MB respectively.
    ## Count should correspond with the number of nodes that received the "high.mem" label.
    # zookeeper:
    #   cpu: 0.5
    #   memory: 1024
    #   count: 3
    auth:
      ## Name of the secret containing the Dremio login credentials
      ##
      secretName: "nidataframe-dremio-credentials"
      ## The name of the key in the above secret whose value contains the Dremio username
      ##
      usernameKey: "username"
      ## The name of the key in the above secret whose value contains the Dremio password
      ##
      passwordKey: "password"
    distStorage:
      # Dremio distributed storage configuration. This must be configured for the service to perform acceptably.
      # See https://github.com/dremio/dremio-cloud-tools/blob/master/charts/dremio_v2/docs/Values-Reference.md#distributed-storage-values
      # <ATTENTION>: This only partially configures distributed storage. Credentials must be configured in systemlink-secrets.yaml under
      # sldremio.distStorage.aws.credentials.
      type: "aws" # <ATTENTION> - change this if not using Amazon S3 or an equivalent, like MinIO
      aws:
        bucketName: *dataframeBucket
        path: "/dremio/distStorage"
        authentication: "accessKeySecret"
        # extraProperties are only necessary if not using Amazon S3
        # <ATTENTION>: If using MinIO or an equivalent, set fs.s3a.endpoint to the FQDN of MinIO or the equivalent service.
        # extraProperties: |
        #   <property>
        #     <name>fs.s3a.endpoint</name>
        #     <value><ATTENTION> - set to the FQDN of the S3 endpoint, including the port, but without an HTTP or HTTPS prefix. Example: {svc-name}.{namespace}.svc.cluster.local:9000</value>
        #   </property>
        #   <property>
        #     <name>fs.s3a.path.style.access</name>
        #     <description>Value has to be set to true.</description>
        #     <value>true</value>
        #   </property>
        #   <property>
        #     <name>dremio.s3.compat</name>
        #     <description>Value has to be set to true.</description>
        #     <value>true</value>
        #   </property>
        #   <property>
        #     <name>fs.s3a.connection.ssl.enabled</name>
        #     <description>Value can either be true or false, set to true to use SSL with a secure MinIO server.</description>
        #     <value>false</value>
        #   </property>
## Salt configuration.
##
saltmaster:
  ## Configure the TCP ingress for the Salt API.
  ##
  serviceTCP:
    annotations:
      # <ATTENTION> - Set to the name of a MetalLB address pool configured to allow TCP access to the Salt API.
      metallb.universe.tf/address-pool: "systemlink"
## Feed configuration.
##
feedservice:
  ## Configure S3 access.
  ##
  s3:
    ## Secret name for S3 credentials.
    ##
    secretName: "feeds-s3-credentials"
    ## The name of the S3 bucket that the service should connect to.
    ##
    bucket: "systemlink-feeds"
    ## S3 connection scheme.
    ##
    scheme: *s3Scheme
    ## Set this value to connect to an external S3 instance.
    ##
    host: *s3Host
    ## Set this value to connect to an S3 instance which is internal to the cluster. Ignored if host is set.
    ##
    service: *s3ServiceName
    ## S3 Port
    ##
    port: *s3Port
    ## S3 Region
    ##
    region: *s3Region
  ## Proxy configuration to be used when the service needs to go through a proxy to have access to external services like ni.com.
  httpProxy:
    ## @param httpProxy.address Address of the HTTP proxy in the $host:$port format. Example: "1.1.1.1:2222"
    ##
    address: ""
    ## @param httpProxy.includeS3HostInNoProxy Set to true to include the S3 host in the noProxy list. Use this if the S3 host is internal to the cluster so we don't need to go through the proxy to access it.
    ##
    includeS3HostInNoProxy: false
    ## @param httpProxy.includeApiHostsInNoProxy Set to true to include the API hosts in the noProxy list. Use this if the API host is internal to the cluster so we don't need to go through the proxy to access it.
    ## This is needed for the service to communicate with itself when replicating a feed hosted in the service. The URL of the feed will have the API host as its host.
    ##
    includeApiHostsInNoProxy: false
    ## @param httpProxy.additionalNoProxy List of hosts that should not be proxied. Example: ["localserver1","localserver2"]
    ## For example, we would need to set the host of the AWS Security Token Service if we use AWS_WEB_IDENTITY_TOKEN for the S3 auth type -> "sts.us-east-1.amazonaws.com"
    ##
    additionalNoProxy: []
## File upload configuration.
##
fileingestion:
  ## Ingress configuration
  ##
  ingress:
    ## Override the default upload limit for an nginx ingress controller.
    ##
    annotations:
      nginx.ingress.kubernetes.io/proxy-body-size: 2000m
      nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
      nginx.ingress.kubernetes.io/proxy-buffering: "off"
  ## Configure S3 access.
  ##
  s3:
    ## Secret name for S3 credentials.
    ##
    secretName: "fileingestion-s3-credentials"
    ## The name of the S3 bucket that the service should connect to.
    ##
    bucket: "systemlink-file-ingestion"
    ## Set this to true to limit each user to a maximum of 1 GB of file storage.
    ##
    storageLimitsEnabled: false
    ## S3 connection scheme.
    ##
    scheme: *s3Scheme
    ## Set this value to connect to an external S3 instance.
    ##
    host: *s3Host
    ## Set this value to connect to an S3 instance which is internal to the cluster. Ignored if host is set.
    ##
    service: *s3ServiceName
    ## S3 Port
    ##
    port: *s3Port
    ## S3 Region
    ##
    region: *s3Region
  ## Configure rate limiting. Limits are enforced per-user. Each replica of the file ingestion service
  ## applies its own per-user limit. With load-balancing, the effective rate will be higher than the
  ## individual rates configured here.
  ## Configure rate limits.
  ##
  rateLimits:
    ## Upload file
    ##
    upload: 3
    ## Download file
    ##
    download: 3
    ## Delete file
    ##
    delete: 5
    ## Delete many files
    ##
    deleteMany: 5
    ## Query files
    ##
    query: 5
    ## Get download tokens for downloading file
    ##
    getDownloadToken: 5
    ## Update file
    ##
    update: 5
    ## List files
    ##
    list: 5
  # Configure the maximum accepted size of uploaded files, expressed in gigabytes.
  uploadLimitGB: 10
  ## Default values for the token used in downloading a file
  ##
  downloadToken:
    ## Validity period of the token, expressed in seconds
    ##
    expirySeconds: 60
    ## The secret used to create the encrypted token used for downloading files from the UI
    ##
    encryptionKeySecret: "fileingestionservices-download-encryption-key"
## Configuration for JupyterHub.
##
sl-jupyterhub:
  jupyterhub:
    imagePullSecrets: *pullSecrets
    hub:
      extraEnv:
        ## Setting this to "true" will enable the legacy implementation of Jupyter usernames.
        JUPYTER_USERNAME_AS_SYSTEMLINK_USER_ID: "false"
    singleuser:
      networkPolicy:
        egressAllowRules:
          # <ATTENTION> - Set to true to enable JupyterHub user pods to establish outbound connections to private IP addresses.
          privateIPs: false
      # JupyterHub storage configuration.
      storage:
        ## PVC Storage Request for every JupyterHub user pod.
        # <ATTENTION> - Adjust this size to match the needs of your application.
        capacity: 1Gi
  ## Ingress configuration.
  ##
  ingress:
    ## Override the default upload limit for an nginx ingress controller.
    ##
    annotations:
      nginx.ingress.kubernetes.io/proxy-body-size: 100m
## Configuration for Argo Workflows.
##
argoworkflows:
  argo-workflows:
    images:
      pullSecrets: *pullSecrets
    controller:
      image:
        registry: *imageRegistryRef
      ## Maximum number of workflows that can run in parallel.
      ## This value is also consumed (via the &workflowParallelism anchor) by
      ## "nbexecservice.maxNumberOfWorkflowsToSchedule"; keep them in sync by editing it here.
      ##
      parallelism: &workflowParallelism 300
      ## Uncomment to specify instanceID for Argo Workflows. This is needed to avoid conflicts with already existing
      ## Argo Workflows deployments in the cluster.
      # instanceID:
      #   enabled: true
      #   explicitID: sl-notebook-execution-0
    executor:
      image:
        registry: *imageRegistryRef
    server:
      image:
        registry: *imageRegistryRef
## Configuration for Notebook Execution service.
##
nbexecservice:
maxNumberOfWorkflowsToSchedule: *workflowParallelism
## Configure S3 access.
##
s3:
## Secret name for S3 credentials.
##
secretName: "nbexecservice-s3-credentials"
## The name of the S3 bucket that the service should connect to.
##
bucket: "systemlink-executions"
## S3 connection scheme.
##
scheme: *s3Scheme
## Set this value to connect to an external S3 instance.
##
host: *s3Host