diff --git a/config.js b/config.js index 182055133f..88e63dedbb 100644 --- a/config.js +++ b/config.js @@ -165,9 +165,11 @@ config.STS_CORS_EXPOSE_HEADERS = 'ETag'; ///////////////////// // SECRETS CONFIG // ///////////////////// -config.JWT_SECRET = process.env.JWT_SECRET || _get_data_from_file(`/etc/noobaa-server/jwt`); -config.SERVER_SECRET = process.env.SERVER_SECRET || _get_data_from_file(`/etc/noobaa-server/server_secret`); -config.NOOBAA_AUTH_TOKEN = process.env.NOOBAA_AUTH_TOKEN || _get_data_from_file(`/etc/noobaa-auth-token/auth_token`); +if (process.env.CONTAINER_PLATFORM || process.env.LOCAL_MD_SERVER) { + config.JWT_SECRET = process.env.JWT_SECRET || _get_data_from_file(`/etc/noobaa-server/jwt`); + config.SERVER_SECRET = process.env.SERVER_SECRET || _get_data_from_file(`/etc/noobaa-server/server_secret`); + config.NOOBAA_AUTH_TOKEN = process.env.NOOBAA_AUTH_TOKEN || _get_data_from_file(`/etc/noobaa-auth-token/auth_token`); +} config.ROOT_KEY_MOUNT = '/etc/noobaa-server/root_keys'; diff --git a/docs/dev_guide/non_containerized_NSFS_events.md b/docs/dev_guide/non_containerized_NSFS_events.md index 2c6de9a50b..ccb1b4cf8e 100644 --- a/docs/dev_guide/non_containerized_NSFS_events.md +++ b/docs/dev_guide/non_containerized_NSFS_events.md @@ -5,7 +5,7 @@ This document will list all the possible Noobaa non-containerized NSFS events an ## Events ### 1. noobaa_nsfs_crashed -#### Reseason +#### Reasons - Noobaa endpoint module failed to load. - High Noobaa resource utilization. @@ -14,27 +14,27 @@ This document will list all the possible Noobaa non-containerized NSFS events an ### 2. noobaa_gpfslib_missing arguments: `gpfs_dl_path` -#### Reseason +#### Reasons - Missing gpfslib in `GPFS_DL_PATH` path. #### Resolutions - Add gpfslib in `GPFS_DL_PATH` path. ### 3. noobaa_started -#### Reseason +#### Reasons - Noobaa started without any issues. #### Resolutions - Nil ### 4. noobaa_account_created arguments: `account_name` -#### Reseason +#### Reasons - Noobaa user account created. #### Resolutions - Nil ### 5. noobaa_bucket_creation_failed arguments: `bucket_name` -#### Reseason +#### Reasons - User does not have permission to update `noobaa.conf.d` dir and its redirect path if present. - User does not have permission to create the bucket's underlying storage directory. @@ -44,7 +44,7 @@ arguments: `bucket_name` ### 6. noobaa_bucket_delete_failed arguments: `bucket_name`, `bucket_path` -#### Reseason +#### Reasons - User does not have permission to delete the bucket config file from `noobaa.conf.d` dir and its redirect path if present. - User does not have permission to delete the bucket's underlying storage directory. - Bucket storage dir is missing. @@ -56,7 +56,7 @@ arguments: `bucket_name`, `bucket_path` ### 7. noobaa_bucket_not_found arguments: `bucket_name` -#### Reseason +#### Reasons - Bucket config file in config_root path is missing. - Bucket config JSON schema validation failed. - Bucket's underlying storage directory not found @@ -67,7 +67,7 @@ arguments: `bucket_name` ### 8. noobaa_object_get_failed arguments : `bucket_path`, `object_name` -#### Reseason +#### Reasons - Noobaa bucket path is missing. - Bucket I/O operation is failed. #### Resolutions @@ -75,7 +75,7 @@ arguments : `bucket_path`, `object_name` ### 9. noobaa_object_uplaod_failed arguments : `bucket_path`, `object_name` -#### Reseason +#### Reasons - Bucket path is outside the bucket boundaries. - Bucket storage class is not supported. - Object I/O operation is failed. 
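For context on the `config.js` hunk above: the three secrets are now read only when `CONTAINER_PLATFORM` or `LOCAL_MD_SERVER` is set, so a standalone (non-containerized) NSFS process no longer tries to read the `/etc/noobaa-server` secret files at startup. A minimal sketch of the read-with-fallback pattern this relies on, assuming a hypothetical `_get_data_from_file` helper that returns `undefined` instead of throwing when the file is absent (the real helper lives elsewhere in `config.js`):

```js
const fs = require('fs');

// Hypothetical sketch of the helper referenced above: read a secret file,
// falling back to undefined when it does not exist, so that
// `process.env.X || _get_data_from_file(...)` resolves quietly on hosts
// where the secret is provided via the environment instead.
function _get_data_from_file(file_name) {
    try {
        return fs.readFileSync(file_name, 'utf8').trim();
    } catch (err) {
        return undefined;
    }
}
```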
diff --git a/docs/non_containerized_NSFS.md b/docs/non_containerized_NSFS.md
index 49ff5cbec8..7e66348eaf 100644
--- a/docs/non_containerized_NSFS.md
+++ b/docs/non_containerized_NSFS.md
@@ -216,31 +216,176 @@ Output -
 2023-09-21 11:55:01          31 object1.txt
 ```
 
-
 ## Health script
-Health status of the NSFS can be fetched using the command line.
+NSFS health status can be fetched using the command line. Run `--help` to get all the available options.
 ```
- node usr/local/noobaa-core/src/cmd/health
+ node usr/local/noobaa-core/src/cmd/health [--https_port, --all_account_details, --all_bucket_details]
 ```
-
- Valid example output of a health script run -
-
+ output:
 ```
-    {
-      service_name: 'nsfs',
-      status: 'OK',
-      memory: '137.9M',
-      checks: {
-        service: {
-          service_status: 'active',
-          pid: '90743'
-        },
-        endpoint: {
-          endpoint_response: 200
-        }
-      }
+{
+  "service_name": "nsfs",
+  "status": "NOTOK",
+  "memory": "88.6M",
+  "error": {
+    "error_code": "RSYSLOG_SERVICE_FAILED",
+    "error_message": "RSYSLOG service is not started properly. Please verify the service with the status command."
+  },
+  "checks": {
+    "services": [
+      {
+        "name": "nsfs",
+        "service_status": "active",
+        "pid": "1204"
+      },
+      {
+        "name": "rsyslog",
+        "service_status": "inactive",
+        "pid": "0"
+      }
+    ],
+    "endpoint": {
+      "endpoint_state": {
+        "response": {
+          "response_code": "RUNNING",
+          "response_message": "Endpoint running successfully."
+        },
+        "total_fork_count": 0,
+        "running_workers": []
+      }
+    },
+    "invalid_accounts": [
+      {
+        "name": "naveen",
+        "storage_path": "/tmp/nsfs_root_invalid/",
+        "code": "STORAGE_NOT_EXIST"
+      }
+    ],
+    "valid_accounts": [
+      {
+        "name": "naveen",
+        "storage_path": "/tmp/nsfs_root"
+      }
+    ],
+    "invalid_buckets": [
+      {
+        "name": "bucket1.json",
+        "config_path": "/etc/noobaa.conf.d/buckets/bucket1.json",
+        "code": "INVALID_CONFIG"
+      },
+      {
+        "name": "bucket3",
+        "storage_path": "/tmp/nsfs_root/bucket3",
+        "code": "STORAGE_NOT_EXIST"
+      }
+    ],
+    "valid_buckets": [
+      {
+        "name": "bucket2",
+        "storage_path": "/tmp/nsfs_root/bucket2"
+      }
+    ]
+  }
 }
 ```
+`status`: Overall status of the system.
+
+`error_code`: Error code for the specific issue found by the health check.
+
+`error_message`: Message explaining the issue the health script found.
+
+`service_status`: NSFS/rsyslog systemd service status; verifies that the service is up and running.
+
+`pid`: NSFS/rsyslog systemd process id.
+
+`response_code`: Noobaa endpoint web service response code.
+
+`total_fork_count`: Total number of forks in NSFS.
+
+`running_workers`: List of the running endpoint worker ids.
+
+`invalid_buckets`: List of buckets with a missing or invalid storage path, or an invalid JSON schema definition.
+
+`invalid_accounts`: List of accounts with a missing or invalid storage path, or an invalid JSON schema definition.
+
+`valid_accounts`: Lists all the valid accounts if the `all_account_details` flag is `true`.
+
+`valid_buckets`: Lists all the valid buckets if the `all_bucket_details` flag is `true`.
+
+In this example output, `bucket1`'s config file is not valid JSON, `bucket3`'s storage path does not exist, and the directory mentioned in `new_buckets_path` for account `naveen` is missing or not accessible. The endpoint responds with an error if one or more buckets point to an invalid bucket storage path.
+
+### Health Error Codes
+These are the error codes populated in the health output when the system is facing issues. If any of these error codes is present in the health status, the overall status will be `NOTOK`.
+#### 1. `NSFS_SERVICE_FAILED`
+#### Reasons
+- NSFS service is not started properly.
+- Stopped NSFS service is not removed.
+
+#### Resolutions
+- Verify the NSFS service is running by checking the status and logs commands.
+```
+systemctl status nsfs
+journalctl -xeu nsfs.service
+```
+If the NSFS service is not started, start the service
+```
+systemctl enable nsfs
+systemctl start nsfs
+```
+#### 2. `RSYSLOG_SERVICE_FAILED`
+#### Reasons
+- Rsyslog service is not started properly.
+- Stopped Rsyslog service is not removed.
+
+#### Resolutions
+- Verify the Rsyslog service is running by checking the status and logs commands.
+```
+systemctl status rsyslog
+journalctl -xeu rsyslog.service
+```
+If the Rsyslog service is not started, start the service
+```
+systemctl enable rsyslog
+systemctl start rsyslog
+```
+
+#### 3. `NSFS_ENDPOINT_FORK_MISSING`
+#### Reasons
+- One or more endpoint forks are not started properly.
+- Number of workers running is less than the configured `forks` value.
+
+#### Resolutions
+- Restart the NSFS service and check the logs to verify whether any NSFS fork exited with an error.
+```
+systemctl status nsfs
+journalctl -xeu nsfs.service
+```
+
+#### 4. `NSFS_ENDPOINT_FAILED`
+#### Reasons
+- NSFS endpoint process is not running, and it is not able to respond to any requests.
+
+#### Resolutions
+- Restart the NSFS service and check the logs to verify whether the NSFS process exited with errors.
+```
+systemctl status nsfs
+journalctl -xeu nsfs.service
+```
+### Health Schema Error Codes
+These error codes are attached to a specific bucket or account entry inside the `invalid_buckets` or `invalid_accounts` property.
+#### 1. `STORAGE_NOT_EXIST`
+#### Reasons
+- Storage path mentioned in the Bucket/Account schema points to an invalid directory.
+#### Resolutions
+- Make sure the path mentioned in the Bucket/Account schema is a valid directory path.
+- Make sure the user has sufficient access to it.
+
+#### 2. `INVALID_CONFIG`
+#### Reasons
+- Bucket/Account schema is not valid JSON.
+#### Resolutions
+- Check for JSON syntax errors in the schema structure for the Bucket/Account.
 
 ## Bucket and Account Manage CLI
 Users can create, update, delete, and list buckets and accounts using CLI. If the config directory is missing CLI will create one and also create accounts and buckets sub-directories in it and default config directory is `/etc/noobaa.conf.d`.
diff --git a/src/cmd/health.js b/src/cmd/health.js
index f9bfb74489..593b515777 100644
--- a/src/cmd/health.js
+++ b/src/cmd/health.js
@@ -4,8 +4,13 @@
 const os_util = require('../util/os_utils');
 const { make_https_request } = require('../util/http_utils');
 const minimist = require('minimist');
-
-
+const config = require('../../config');
+const path = require('path');
+const nb_native = require('../util/nb_native');
+const native_fs_utils = require('../util/native_fs_utils');
+const dbg = require('../util/debug_module')(__filename);
+const { read_stream_join } = require('../util/buffer_utils');
+const P = require('../util/promise');
 
 const HELP = `
 Help:
@@ -25,106 +30,334 @@ Usage:
 const OPTIONS = `
 Options:
-    --deployment_type         (default nc)                                Set the nsfs type for heath check.
-
+    --deployment_type         (default nc)                                Set the nsfs type for health check.
+    --config_root             (default config.NSFS_NC_DEFAULT_CONF_DIR)   Configuration files path for Noobaa standalone NSFS.
+    --https_port              (default 6443)                              Set the S3 endpoint listening HTTPS port to serve.
+    --all_account_details     (default false)                             Set a flag for returning all account details.
+    --all_bucket_details      (default false)                             Set a flag for returning all bucket details.
`;
 
 function print_usage() {
-    console.warn(HELP);
-    console.warn(USAGE.trimStart());
-    console.warn(OPTIONS.trimStart());
+    process.stdout.write(HELP);
+    process.stdout.write(USAGE.trimStart());
+    process.stdout.write(OPTIONS.trimStart());
     process.exit(1);
 }
 
 const HOSTNAME = "localhost";
-const PORT = 6443;
-const SERVICE = "nsfs";
+const NSFS_SERVICE = "nsfs";
+const RSYSLOG_SERVICE = "rsyslog";
+const health_errors = {
+    NSFS_SERVICE_FAILED: {
+        error_code: 'NSFS_SERVICE_FAILED',
+        error_message: 'NSFS service is not started properly. Please verify the service with the status command.',
+    },
+    RSYSLOG_SERVICE_FAILED: {
+        error_code: 'RSYSLOG_SERVICE_FAILED',
+        error_message: 'RSYSLOG service is not started properly. Please verify the service with the status command.',
+    },
+    NSFS_ENDPOINT_FAILED: {
+        error_code: 'NSFS_ENDPOINT_FAILED',
+        error_message: 'NSFS endpoint process is not running. Restart the endpoint process.',
+    },
+    NSFS_ENDPOINT_FORK_MISSING: {
+        error_code: 'NSFS_ENDPOINT_FORK_MISSING',
+        error_message: 'One or more endpoint forks are not started properly. Verify the total and missing fork count in the response.',
+    },
+    STORAGE_NOT_EXIST: {
+        error_code: 'STORAGE_NOT_EXIST',
+        error_message: 'Storage path mentioned in the schema points to an invalid directory.',
+    },
+    INVALID_CONFIG: {
+        error_code: 'INVALID_CONFIG',
+        error_message: 'Schema JSON is not valid. Please check the JSON format.',
+    },
+};
 
-async function nc_nsfs_health() {
+const fork_response_code = {
+    RUNNING: {
+        response_code: 'RUNNING',
+        response_message: 'Endpoint running successfully.',
+    },
+    MISSING_FORKS: {
+        response_code: 'MISSING_FORKS',
+        response_message: 'Number of running forks is less than the expected fork count.',
+    },
+    NOT_RUNNING: {
+        response_code: 'NOT_RUNNING',
+        response_message: 'Endpoint process not running.',
+    },
+};
 
-    const {service_status, pid} = await get_service_state();
-    const status_code = await get_endpoint_response();
-    const memory = await get_service_memory_usage();
+// suppress the aws-sdk maintenance mode message in command output.
+process.env.AWS_SDK_JS_SUPPRESS_MAINTENANCE_MODE_MESSAGE = '1';
+
+/**
+ * NSFSHealth aggregates the health of an NC NSFS deployment:
+ * systemd services, endpoint forks, and account/bucket config files.
+ */
+class NSFSHealth {
+    constructor(options) {
+        this.https_port = options.https_port;
+        this.config_root = options.config_root;
+        this.all_account_details = options.all_account_details;
+        this.all_bucket_details = options.all_bucket_details;
+    }
+    async nc_nsfs_health() {
+        let endpoint_state;
+        let memory;
+        const {service_status, pid} = await this.get_service_state(NSFS_SERVICE);
+        if (pid !== '0') {
+            endpoint_state = await this.get_endpoint_response();
+            memory = await this.get_service_memory_usage();
+        }
+        const response_code = endpoint_state ?
endpoint_state.response.response_code : 'NOT_RUNNING';
+        const rsyslog = await this.get_service_state(RSYSLOG_SERVICE);
         let service_health = "OK";
-    if (service_status !== "active" || pid === "0" || status_code !== 200) {
+        if (service_status !== "active" || pid === "0" || response_code !== 'RUNNING' ||
+            rsyslog.service_status !== "active" || rsyslog.pid === "0") {
             service_health = "NOTOK";
         }
-    const helath = {
-        service_name: 'nsfs',
+        const error_code = await this.get_error_code(service_status, pid, rsyslog.service_status, response_code);
+        const bucket_details = await this.get_bucket_storage_status(this.config_root);
+        const account_details = await this.get_account_storage_status(this.config_root);
+        const health = {
+            service_name: NSFS_SERVICE,
             status: service_health,
             memory: memory,
+            error: error_code,
             checks: {
-            service: {
-                service_status: service_status,
-                pid: pid,
+                services: [{
+                    name: NSFS_SERVICE,
+                    service_status: service_status,
+                    pid: pid,
                 },
+                {
+                    name: RSYSLOG_SERVICE,
+                    service_status: rsyslog.service_status,
+                    pid: rsyslog.pid,
+                }],
                 endpoint: {
-                endpoint_response: status_code,
+                    endpoint_state
                 },
+                invalid_accounts: account_details.invalid_storages,
+                valid_accounts: account_details.valid_storages,
+                invalid_buckets: bucket_details.invalid_storages,
+                valid_buckets: bucket_details.valid_storages,
             }
-    };
-    console.log(helath);
+        };
+        return health;
+    }
+
+    async get_error_code(nsfs_status, pid, rsyslog_status, endpoint_response_code) {
+        if (nsfs_status !== "active" || pid === "0") {
+            return health_errors.NSFS_SERVICE_FAILED;
+        } else if (rsyslog_status !== "active") {
+            return health_errors.RSYSLOG_SERVICE_FAILED;
+        } else if (endpoint_response_code === 'NOT_RUNNING') {
+            return health_errors.NSFS_ENDPOINT_FAILED;
+        } else if (endpoint_response_code === 'MISSING_FORKS') {
+            return health_errors.NSFS_ENDPOINT_FORK_MISSING;
+        }
+    }
+
+    async get_service_state(service_name) {
+        const service_status = await os_util.exec('systemctl show -p ActiveState --value ' + service_name, {
+            ignore_rc: true,
+            return_stdout: true,
+            trim_stdout: true,
+        });
+        const pid = await os_util.exec('systemctl show --property MainPID --value ' + service_name, {
+            ignore_rc: true,
+            return_stdout: true,
+            trim_stdout: true,
+        });
+        return { service_status: service_status, pid: pid };
+    }
+
+    async make_endpoint_health_request(url_path) {
+        const response = await make_https_request(
+            { HOSTNAME,
+            port: this.https_port,
+            path: url_path,
+            method: 'GET',
+            rejectUnauthorized: false,
+        });
+        if (response && response.statusCode === 200) {
+            const buffer = await read_stream_join(response);
+            const body = buffer.toString('utf8');
+            return JSON.parse(body);
+        }
+    }
+
+    async get_endpoint_response() {
+        let url_path = '/total_fork_count';
+        const worker_ids = [];
+        let total_fork_count = 0;
+        let response;
+        try {
+            const fork_count_response = await this.make_endpoint_health_request(url_path);
+            if (!fork_count_response) {
+                return {
+                    response: fork_response_code.NOT_RUNNING,
+                    total_fork_count: total_fork_count,
+                    running_workers: worker_ids,
+                };
+            }
+            total_fork_count = fork_count_response.fork_count;
+            if (total_fork_count > 0) {
+                url_path = '/endpoint_fork_id';
+                await P.retry({
+                    attempts: total_fork_count * 2,
+                    delay_ms: 1,
+                    func: async () => {
+                        const fork_id_response = await this.make_endpoint_health_request(url_path);
+                        if (fork_id_response.worker_id && !worker_ids.includes(fork_id_response.worker_id)) {
+                            worker_ids.push(fork_id_response.worker_id);
+                        }
+                        if
(worker_ids.length < total_fork_count) { + throw new Error('Number of running forks is less than the expected fork count.'); + } + } + }); + if (worker_ids.length === total_fork_count) { + response = fork_response_code.RUNNING; + } else { + response = fork_response_code.MISSING_FORKS; + } + } else { + response = fork_response_code.RUNNING; + } + } catch (err) { + dbg.log1('Error while pinging endpoint host :' + HOSTNAME + ', port ' + this.https_port, err); + response = fork_response_code.NOT_RUNNING; + } + return { + response: response, + total_fork_count: total_fork_count, + running_workers: worker_ids, + }; + } + + async get_service_memory_usage() { + const memory_status = await os_util.exec('systemctl status ' + NSFS_SERVICE + ' | grep Memory ', { + ignore_rc: true, + return_stdout: true, + trim_stdout: true, + }); + if (memory_status) { + const memory = memory_status.split("Memory: ")[1].trim(); + return memory; + } + } + + get_root_fs_context() { + return { + uid: process.getuid(), + gid: process.getgid(), + warn_threshold_ms: config.NSFS_WARN_THRESHOLD_MS, + }; +} + + async get_bucket_storage_status(config_root) { + const bucket_details = await this.get_storage_status(config_root, 'bucket', this.all_bucket_details); + return bucket_details; } -async function get_service_state() { - const service_status = await os_util.exec('systemctl show -p ActiveState --value ' + SERVICE, { - ignore_rc: true, - return_stdout: true, - trim_stdout: true, - }); - const pid = await os_util.exec('systemctl show --property MainPID --value ' + SERVICE, { - ignore_rc: true, - return_stdout: true, - trim_stdout: true, - }); - return { service_status: service_status, pid: pid }; + async get_account_storage_status(config_root) { + const account_details = await this.get_storage_status(config_root, 'account', this.all_account_details); + return account_details; } -async function get_endpoint_response() { - const path = ''; + async get_storage_status(config_root, type, all_details) { + const fs_context = this.get_root_fs_context(); + const entries = await nb_native().fs.readdir(fs_context, this.get_config_path(config_root, type)); + const config_files = entries.filter(entree => !native_fs_utils.isDirectory(entree) && entree.name.endsWith('.json')); + const invalid_storages = []; + const valid_storages = []; + for (const config_file of config_files) { + const config_file_path = path.join(this.get_config_path(config_root, type), config_file.name); + let config_data; + let storage_path; try { - const response = await make_https_request({ HOSTNAME, port: PORT, path, method: 'GET', - rejectUnauthorized: false }); - if (response) { - const status_code = response.statusCode; - return status_code; - } + const { data } = await nb_native().fs.readFile(fs_context, config_file_path); + config_data = JSON.parse(data.toString()); + storage_path = type === 'bucket' ? 
config_data.path : config_data.nsfs_account_config.new_buckets_path;
+                const dir_stat = await nb_native().fs.stat(fs_context, storage_path);
+                if (dir_stat && all_details) {
+                    const valid_storage = {
+                        name: config_data.name,
+                        storage_path: storage_path,
+                    };
+                    valid_storages.push(valid_storage);
+                }
             } catch (err) {
-        console.debug('Error while pinging endpoint host :' + HOSTNAME + ', port ' + PORT);
+                let invalid_storage;
+                if (err.code === 'ENOENT') {
+                    dbg.log1(`Error: Storage path should be a valid dir path`, storage_path);
+                    invalid_storage = {
+                        name: config_data.name,
+                        storage_path: storage_path,
+                        'code': 'STORAGE_NOT_EXIST',
+                    };
+                } else {
+                    dbg.log1('Error while accessing the config file: ', config_file, err);
+                    invalid_storage = {
+                        name: config_file.name,
+                        config_path: config_file_path,
+                        'code': 'INVALID_CONFIG',
+                    };
+                }
+                invalid_storages.push(invalid_storage);
             }
-    return 0;
+        }
+        return {
+            invalid_storages: invalid_storages,
+            valid_storages: valid_storages
+        };
     }
 
-async function get_service_memory_usage() {
-    const memory_status = await os_util.exec('systemctl status ' + SERVICE + ' | grep Memory ', {
-        ignore_rc: true,
-        return_stdout: true,
-        trim_stdout: true,
-    });
-    if (memory_status) {
-        const memory = memory_status.split("Memory: ")[1].trim();
-        return memory;
+    get_config_path(config_root, type) {
+        return path.join(config_root, type === 'bucket' ? '/buckets' : '/accounts');
     }
 }
 
-
 async function main(argv = minimist(process.argv.slice(2))) {
     try {
         if (argv.help || argv.h) return print_usage();
-
+        const config_root = argv.config_root ? String(argv.config_root) : config.NSFS_NC_DEFAULT_CONF_DIR;
+        // disable console log to avoid unwanted logs in console.
+        await disable_console_log();
+        const https_port = Number(argv.https_port) || 6443;
         const deployment_type = argv.deployment_type || 'nc';
+        const all_account_details = argv.all_account_details || false;
+        const all_bucket_details = argv.all_bucket_details || false;
         if (deployment_type === 'nc') {
-            return nc_nsfs_health();
+            const health = new NSFSHealth({https_port, config_root, all_account_details, all_bucket_details});
+            const health_status = await health.nc_nsfs_health();
+            process.stdout.write(JSON.stringify(health_status) + '\n');
         } else {
-            console.log('Health is not supported for simple nsfs deployment.');
+            dbg.log0('Health is not supported for simple nsfs deployment.');
         }
     } catch (err) {
-        console.error('Helath: exit on error', err.stack || err);
+        dbg.error('Health: exit on error', err.stack || err);
         process.exit(2);
     }
 }
 
+async function disable_console_log() {
+    console.log = function() {
+        //empty function, lint fix
+    };
+    console.error = function() {
+        //empty function, lint fix
+    };
+    console.warn = function() {
+        //empty function, lint fix
+    };
+}
+
 exports.main = main;
 if (require.main === module) main();
+
+module.exports = NSFSHealth;
+// keep the CLI entrypoint exported as well, since the line above replaces
+// the earlier `exports.main` assignment.
+module.exports.main = main;
diff --git a/src/deploy/NVA_build/standalone_deploy_nsfs.sh b/src/deploy/NVA_build/standalone_deploy_nsfs.sh
index 37c40d7009..d3e2dfae92 100755
--- a/src/deploy/NVA_build/standalone_deploy_nsfs.sh
+++ b/src/deploy/NVA_build/standalone_deploy_nsfs.sh
@@ -10,8 +10,8 @@ function execute() {
 
 function main() {
     # Add accounts to run ceph tests
-    execute "node src/cmd/manage_nsfs account add --config_root ./standalone/config_root --name cephalt --email ceph.alt@noobaa.com --new_buckets_path ./standalone/nsfs_root --access_key abcd --secret_key abcd" nsfs_cephalt.log
-    execute "node src/cmd/manage_nsfs account add --config_root ./standalone/config_root --name
cephtenant --email ceph.tenant@noobaa.com --new_buckets_path ./standalone/nsfs_root --access_key efgh --secret_key efgh" nsfs_cephtenant.log + execute "node src/cmd/manage_nsfs account add --config_root ./standalone/config_root --name cephalt --email ceph.alt@noobaa.com --new_buckets_path ./standalone/nsfs_root --access_key abcd --secret_key abcd --uid 100 --gid 100" nsfs_cephalt.log + execute "node src/cmd/manage_nsfs account add --config_root ./standalone/config_root --name cephtenant --email ceph.tenant@noobaa.com --new_buckets_path ./standalone/nsfs_root --access_key efgh --secret_key efgh --uid 200 --gid 200" nsfs_cephtenant.log # Start nsfs server execute "node src/cmd/nsfs --config_root ./standalone/config_root" nsfs.log # Wait for sometime to process to start diff --git a/src/endpoint/endpoint.js b/src/endpoint/endpoint.js index 753171d283..03d18d01a1 100755 --- a/src/endpoint/endpoint.js +++ b/src/endpoint/endpoint.js @@ -41,6 +41,9 @@ const background_scheduler = require('../util/background_scheduler').get_instanc const endpoint_stats_collector = require('../sdk/endpoint_stats_collector'); const { NamespaceMonitor } = require('../server/bg_services/namespace_monitor'); const { SemaphoreMonitor } = require('../server/bg_services/semaphore_monitor'); +const cluster = /** @type {import('node:cluster').Cluster} */ ( + /** @type {unknown} */ (require('node:cluster')) +); if (process.env.NOOBAA_LOG_LEVEL) { const dbg_conf = debug_config.get_debug_config(process.env.NOOBAA_LOG_LEVEL); @@ -49,6 +52,7 @@ if (process.env.NOOBAA_LOG_LEVEL) { const new_umask = process.env.NOOBAA_ENDPOINT_UMASK || 0o000; const old_umask = process.umask(new_umask); +let fork_count; dbg.log0('endpoint: replacing old umask: ', old_umask.toString(8), 'with new umask: ', new_umask.toString(8)); /** @@ -86,7 +90,8 @@ dbg.log0('endpoint: replacing old umask: ', old_umask.toString(8), 'with new uma async function main(options = {}) { try { // the primary just forks and returns, workers will continue to serve - if (fork_utils.start_workers((options.forks ?? config.ENDPOINT_FORKS))) return; + fork_count = options.forks ?? config.ENDPOINT_FORKS; + if (fork_utils.start_workers(fork_count)) return; const http_port = options.http_port || Number(process.env.ENDPOINT_PORT) || 6001; const https_port = options.https_port || Number(process.env.ENDPOINT_SSL_PORT) || 6443; @@ -257,6 +262,10 @@ function create_endpoint_handler(init_request_sdk, virtual_hosts, sts) { return lambda_rest_handler(req, res); } else if (req.headers['x-ms-version']) { return blob_rest_handler(req, res); + } else if (req.url.startsWith('/total_fork_count')) { + return fork_count_handler(req, res); + } else if (req.url.startsWith('/endpoint_fork_id')) { + return endpoint_fork_id_handler(req, res); } else { return s3_rest.handler(req, res); } @@ -271,6 +280,30 @@ function create_endpoint_handler(init_request_sdk, virtual_hosts, sts) { return sts ? 
endpoint_sts_request_handler : endpoint_request_handler;
 }
 
+function endpoint_fork_id_handler(req, res) {
+    let reply = {};
+    if (cluster.isWorker) {
+        reply = {
+            worker_id: cluster.worker.id,
+        };
+    }
+    P.delay(500);
+    res.statusCode = 200;
+    res.setHeader('Content-Type', 'application/json');
+    res.setHeader('Content-Length', Buffer.byteLength(JSON.stringify(reply)));
+    res.end(JSON.stringify(reply));
+}
+
+function fork_count_handler(req, res) {
+    const reply = {
+        fork_count: fork_count,
+    };
+    res.statusCode = 200;
+    res.setHeader('Content-Type', 'application/json');
+    res.setHeader('Content-Length', Buffer.byteLength(JSON.stringify(reply)));
+    res.end(JSON.stringify(reply));
+}
+
 /**
  * @param {typeof server_rpc.rpc} rpc
  * @param {nb.APIClient} internal_rpc_client
diff --git a/src/native/fs/fs_napi.cpp b/src/native/fs/fs_napi.cpp
index 7626c4c0d8..e31655d54a 100644
--- a/src/native/fs/fs_napi.cpp
+++ b/src/native/fs/fs_napi.cpp
@@ -1981,8 +1981,8 @@ static Napi::Value
 set_debug_level(const Napi::CallbackInfo& info)
 {
     int level = info[0].As<Napi::Number>();
-    LOG("FS::set_debug_level " << level);
     DBG_SET_LEVEL(level);
+    DBG1("FS::set_debug_level " << level);
     return info.Env().Undefined();
 }
@@ -2046,7 +2046,7 @@ fs_napi(Napi::Env env, Napi::Object exports)
             exports_fs["gpfs"] = gpfs;
         }
     } else {
-        LOG("FS::GPFS GPFS_DL_PATH=NULL, fs_napi will call default posix system calls");
+        DBG1("FS::GPFS GPFS_DL_PATH=NULL, fs_napi will call default posix system calls");
     }
 
     exports_fs["stat"] = Napi::Function::New(env, api);
diff --git a/src/native/tools/ssl_napi.cpp b/src/native/tools/ssl_napi.cpp
index 0de0a942f0..e0bcb58ad4 100644
--- a/src/native/tools/ssl_napi.cpp
+++ b/src/native/tools/ssl_napi.cpp
@@ -191,7 +191,7 @@ rand_seed(const Napi::CallbackInfo& info)
 {
     Napi::Env env = info.Env();
 
-    printf("rand_seed: %s seeding randomness\n", SSLeay_version(SSLEAY_VERSION));
+    DBG1("rand_seed: " << SSLeay_version(SSLEAY_VERSION) << " seeding randomness");
 
     if (!info[0].IsBuffer()) {
         Napi::TypeError::New(env, "rand_seed: expected buffer").ThrowAsJavaScriptException();
diff --git a/src/sdk/bucketspace_fs.js b/src/sdk/bucketspace_fs.js
index 208febc4e2..3689b7bfa7 100644
--- a/src/sdk/bucketspace_fs.js
+++ b/src/sdk/bucketspace_fs.js
@@ -48,19 +48,6 @@ function prepare_fs_context(object_sdk) {
     return fs_context;
 }
 
-function isDirectory(ent) {
-    if (!ent) throw new Error('isDirectory: ent is empty');
-    if (ent.mode) {
-        // eslint-disable-next-line no-bitwise
-        return (((ent.mode) & nb_native().fs.S_IFMT) === nb_native().fs.S_IFDIR);
-    } else if (ent.type) {
-        return ent.type === nb_native().fs.DT_DIR;
-    } else {
-        throw new Error(`isDirectory: ent ${ent} is not supported`);
-    }
-}
-
-
 class BucketSpaceFS extends BucketSpaceSimpleFS {
 
     constructor({config_root}) {
@@ -229,7 +216,7 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
     async list_buckets(object_sdk) {
         try {
             const entries = await nb_native().fs.readdir(this.fs_context, this.bucket_schema_dir);
-            const bucket_config_files = entries.filter(entree => !isDirectory(entree) && entree.name.endsWith('.json'));
+            const bucket_config_files = entries.filter(entree => !native_fs_utils.isDirectory(entree) && entree.name.endsWith('.json'));
             //TODO : we need to add pagination support to list buckets for more than 1000 buckets.
let buckets = await P.map(bucket_config_files, bucket_config_file => this.get_bucket_name(bucket_config_file.name)); buckets = buckets.filter(bucket => bucket.name.unwrap()); diff --git a/src/sdk/bucketspace_simple_fs.js b/src/sdk/bucketspace_simple_fs.js index b7a9a9889e..20b9d67e3c 100644 --- a/src/sdk/bucketspace_simple_fs.js +++ b/src/sdk/bucketspace_simple_fs.js @@ -6,19 +6,8 @@ const config = require('../../config'); const nb_native = require('../util/nb_native'); const SensitiveString = require('../util/sensitive_string'); const { S3Error } = require('../endpoint/s3/s3_errors'); +const native_fs_utils = require('../util/native_fs_utils'); -//TODO: dup from namespace_fs - need to handle and not dup code -function isDirectory(ent) { - if (!ent) throw new Error('isDirectory: ent is empty'); - if (ent.mode) { - // eslint-disable-next-line no-bitwise - return (((ent.mode) & nb_native().fs.S_IFMT) === nb_native().fs.S_IFDIR); - } else if (ent.type) { - return ent.type === nb_native().fs.DT_DIR; - } else { - throw new Error(`isDirectory: ent ${ent} is not supported`); - } -} /** * @implements {nb.BucketSpace} @@ -51,7 +40,7 @@ class BucketSpaceSimpleFS { async list_buckets() { try { const entries = await nb_native().fs.readdir(this.fs_context, this.fs_root); - const dirs_only = entries.filter(entree => isDirectory(entree)); + const dirs_only = entries.filter(entree => native_fs_utils.isDirectory(entree)); const buckets = dirs_only.map(e => ({ name: new SensitiveString(e.name) })); return { buckets }; } catch (err) { @@ -69,7 +58,7 @@ class BucketSpaceSimpleFS { const bucket_path = path.join(this.fs_root, name); console.log(`BucketSpaceSimpleFS: read_bucket ${bucket_path}`); const bucket_dir_stat = await nb_native().fs.stat(this.fs_context, bucket_path); - if (!isDirectory(bucket_dir_stat)) { + if (!native_fs_utils.isDirectory(bucket_dir_stat)) { throw new S3Error(S3Error.NoSuchBucket); } const owner_account = { diff --git a/src/sdk/namespace_fs.js b/src/sdk/namespace_fs.js index 6efd7185a0..e32997ff25 100644 --- a/src/sdk/namespace_fs.js +++ b/src/sdk/namespace_fs.js @@ -212,17 +212,6 @@ async function _rename_null_version(old_versions, fs_context, version_path) { return { renamed_null_versions_set, old_versions_after_rename }; } -function isDirectory(ent) { - if (!ent) throw new Error('isDirectory: ent is empty'); - if (ent.mode) { - // eslint-disable-next-line no-bitwise - return (((ent.mode) & nb_native().fs.S_IFMT) === nb_native().fs.S_IFDIR); - } else if (ent.type) { - return ent.type === nb_native().fs.DT_DIR; - } else { - throw new Error(`isDirectory: ent ${ent} is not supported`); - } -} /** * @@ -233,11 +222,11 @@ function isDirectory(ent) { */ async function is_directory_or_symlink_to_directory(stat, fs_context, entry_path) { try { - let r = isDirectory(stat); + let r = native_fs_utils.isDirectory(stat); if (!r && is_symbolic_link(stat)) { const targetStat = await nb_native().fs.stat(fs_context, entry_path); if (!targetStat) throw new Error('is_directory_or_symlink_to_directory: targetStat is empty'); - r = isDirectory(targetStat); + r = native_fs_utils.isDirectory(targetStat); } return r; } catch (err) { @@ -864,7 +853,7 @@ class NamespaceFS { await this._check_path_in_bucket_boundaries(fs_context, file_path); await this._load_bucket(params, fs_context); let stat = await nb_native().fs.stat(fs_context, file_path); - const isDir = isDirectory(stat); + const isDir = native_fs_utils.isDirectory(stat); if (isDir) { if (!stat.xattr?.[XATTR_DIR_CONTENT]) { throw 
error_utils.new_error_code('ENOENT', 'NoSuchKey'); @@ -2359,7 +2348,7 @@ class NamespaceFS { const entries = await nb_native().fs.readdir(fs_context, dir); const results = await Promise.all(entries.map(entry => { const fullPath = path.join(dir, entry.name); - const task = isDirectory(entry) ? this._folder_delete(fullPath, fs_context) : + const task = native_fs_utils.isDirectory(entry) ? this._folder_delete(fullPath, fs_context) : nb_native().fs.unlink(fs_context, fullPath); return task.catch(error => ({ error })); })); diff --git a/src/test/unit_tests/index.js b/src/test/unit_tests/index.js index 32b65ba891..b7a7234d0c 100644 --- a/src/test/unit_tests/index.js +++ b/src/test/unit_tests/index.js @@ -62,6 +62,7 @@ require('./test_namespace_fs_mpu'); require('./test_nb_native_fs'); require('./test_s3select'); require('./test_nc_nsfs_cli'); +require('./test_nc_nsfs_health'); // // SERVERS require('./test_agent'); diff --git a/src/test/unit_tests/test_nc_nsfs_health.js b/src/test/unit_tests/test_nc_nsfs_health.js new file mode 100644 index 0000000000..01bc815303 --- /dev/null +++ b/src/test/unit_tests/test_nc_nsfs_health.js @@ -0,0 +1,206 @@ +/* Copyright (C) 2016 NooBaa */ +'use strict'; + +const _ = require('lodash'); +const path = require('path'); +const mocha = require('mocha'); +const assert = require('assert'); +const sinon = require('sinon'); +const P = require('../../util/promise'); +const fs_utils = require('../../util/fs_utils'); +const nb_native = require('../../util/nb_native'); +const NSFSHealth = require('../../cmd/health'); +const native_fs_utils = require('../../util/native_fs_utils'); +const config = require('../../../config'); + +const MAC_PLATFORM = 'darwin'; +let tmp_fs_path = '/tmp/test_bucketspace_fs'; +if (process.platform === MAC_PLATFORM) { + tmp_fs_path = '/private/' + tmp_fs_path; +} +const DEFAULT_FS_CONFIG = { + uid: process.getuid(), + gid: process.getgid(), + backend: '', + warn_threshold_ms: 100, +}; + + +mocha.describe('nsfs nc health', function() { + + const config_root = path.join(tmp_fs_path, 'config_root_nsfs_health'); + const root_path = path.join(tmp_fs_path, 'root_path_nsfs_health/'); + const accounts_schema_dir = 'accounts'; + const buckets_schema_dir = 'buckets'; + let health; + + mocha.before(async () => { + await P.all(_.map([accounts_schema_dir, buckets_schema_dir], async dir => + fs_utils.create_fresh_path(`${config_root}/${dir}`))); + await fs_utils.create_fresh_path(root_path); + }); + mocha.after(async () => { + fs_utils.folder_delete(`${config_root}`); + fs_utils.folder_delete(`${root_path}`); + }); + + mocha.describe('health check', async function() { + const acount_name = 'account1'; + const bucket_name = 'bucket1'; + const new_buckets_path = `${root_path}new_buckets_path_user1/`; + const account1 = { name: acount_name, nsfs_account_config: { new_buckets_path: new_buckets_path } }; + const bucket1 = { name: bucket_name, path: new_buckets_path + '/bucket1' }; + mocha.before(async () => { + const https_port = 6443; + health = new NSFSHealth({ config_root, https_port }); + await fs_utils.create_fresh_path(new_buckets_path); + await fs_utils.file_must_exist(new_buckets_path); + await fs_utils.create_fresh_path(new_buckets_path + '/bucket1'); + await fs_utils.file_must_exist(new_buckets_path + '/bucket1'); + await write_config_file(config_root, accounts_schema_dir, acount_name, account1); + await write_config_file(config_root, buckets_schema_dir, bucket_name, bucket1); + const get_service_memory_usage = sinon.stub(health, 
"get_service_memory_usage"); + get_service_memory_usage.onFirstCall().returns(Promise.resolve(100)); + }); + + mocha.after(async () => { + fs_utils.folder_delete(new_buckets_path); + fs_utils.folder_delete(path.join(new_buckets_path, 'bucket1')); + fs_utils.file_delete(path.join(config_root, buckets_schema_dir, bucket1.name + '.json')); + fs_utils.file_delete(path.join(config_root, accounts_schema_dir, account1.name + '.json')); + }); + + mocha.it('Health all condition is success', async function() { + const get_service_state = sinon.stub(health, "get_service_state"); + get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 100 })) + .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 200 })); + const get_endpoint_response = sinon.stub(health, "get_endpoint_response"); + get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); + health.all_account_details = true; + health.all_bucket_details = true; + const health_status = await health.nc_nsfs_health(); + assert.strictEqual(health_status.status, 'OK'); + assert.strictEqual(health_status.checks.invalid_buckets.length, 0); + assert.strictEqual(health_status.checks.valid_accounts.length, 1); + assert.strictEqual(health_status.checks.valid_accounts[0].name, 'account1'); + assert.strictEqual(health_status.checks.valid_buckets.length, 1); + assert.strictEqual(health_status.checks.valid_buckets[0].name, 'bucket1'); + }); + + mocha.it('NSFS service is inactive', async function() { + health.get_service_state.restore(); + health.get_endpoint_response.restore(); + const get_service_state = sinon.stub(health, "get_service_state"); + get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'inactive', pid: 0 })) + .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 200 })); + const get_endpoint_response = sinon.stub(health, "get_endpoint_response"); + get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); + const health_status = await health.nc_nsfs_health(); + assert.strictEqual(health_status.status, 'NOTOK'); + assert.strictEqual(health_status.error.error_code, 'NSFS_SERVICE_FAILED'); + }); + + mocha.it('NSFS rsyslog service is inactive', async function() { + health.get_service_state.restore(); + health.get_endpoint_response.restore(); + const get_service_state = sinon.stub(health, "get_service_state"); + get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 1000 })) + .onSecondCall().returns(Promise.resolve({ service_status: 'inactive', pid: 0 })); + const get_endpoint_response = sinon.stub(health, "get_endpoint_response"); + get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); + const health_status = await health.nc_nsfs_health(); + assert.strictEqual(health_status.status, 'NOTOK'); + assert.strictEqual(health_status.error.error_code, 'RSYSLOG_SERVICE_FAILED'); + }); + + mocha.it('NSFS endpoint return error response is inactive', async function() { + health.get_service_state.restore(); + health.get_endpoint_response.restore(); + const get_service_state = sinon.stub(health, "get_service_state"); + get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 1000 })) + .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 2000 })); + const get_endpoint_response = 
sinon.stub(health, "get_endpoint_response"); + get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'MISSING_FORKS', total_fork_count: 3, running_workers: ['1', '3']}})); + const health_status = await health.nc_nsfs_health(); + assert.strictEqual(health_status.status, 'NOTOK'); + assert.strictEqual(health_status.error.error_code, 'NSFS_ENDPOINT_FORK_MISSING'); + }); + + mocha.it('NSFS account with invalid storage path', async function() { + health.get_service_state.restore(); + health.get_endpoint_response.restore(); + const account_invalid = { name: 'account_invalid', nsfs_account_config: { new_buckets_path: new_buckets_path + '/invalid' } }; + await write_config_file(config_root, accounts_schema_dir, account_invalid.name, account_invalid); + const get_service_state = sinon.stub(health, "get_service_state"); + get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 1000 })) + .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 2000 })); + const get_endpoint_response = sinon.stub(health, "get_endpoint_response"); + get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); + const health_status = await health.nc_nsfs_health(); + assert.strictEqual(health_status.status, 'OK'); + assert.strictEqual(health_status.checks.invalid_accounts.length, 1); + assert.strictEqual(health_status.checks.invalid_accounts[0].name, 'account_invalid'); + fs_utils.file_delete(path.join(config_root, accounts_schema_dir, account_invalid.name + '.json')); + }); + + mocha.it('NSFS bucket with invalid storage path', async function() { + health.get_service_state.restore(); + health.get_endpoint_response.restore(); + const bucket_invalid = { name: 'bucket_invalid', path: new_buckets_path + '/bucket1/invalid' }; + await write_config_file(config_root, buckets_schema_dir, bucket_invalid.name, bucket_invalid); + const get_service_state = sinon.stub(health, "get_service_state"); + get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 1000 })) + .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 2000 })); + const get_endpoint_response = sinon.stub(health, "get_endpoint_response"); + get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); + const health_status = await health.nc_nsfs_health(); + assert.strictEqual(health_status.status, 'OK'); + assert.strictEqual(health_status.checks.invalid_buckets.length, 1); + assert.strictEqual(health_status.checks.invalid_buckets[0].name, 'bucket_invalid'); + fs_utils.file_delete(path.join(config_root, buckets_schema_dir, bucket_invalid.name + '.json')); + }); + + mocha.it('NSFS invalid bucket schema json', async function() { + health.get_service_state.restore(); + health.get_endpoint_response.restore(); + const bucket_invalid_schema = { name: 'bucket_invalid_schema', path: new_buckets_path }; + await write_config_file(config_root, buckets_schema_dir, bucket_invalid_schema.name, bucket_invalid_schema, "invalid"); + const get_service_state = sinon.stub(health, "get_service_state"); + get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 1000 })) + .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 2000 })); + const get_endpoint_response = sinon.stub(health, "get_endpoint_response"); + get_endpoint_response.onFirstCall().returns(Promise.resolve({response: 
{response_code: 'RUNNING', total_fork_count: 0}}));
+            const health_status = await health.nc_nsfs_health();
+            assert.strictEqual(health_status.status, 'OK');
+            assert.strictEqual(health_status.checks.invalid_buckets.length, 1);
+            assert.strictEqual(health_status.checks.invalid_buckets[0].name, 'bucket_invalid_schema.json');
+            fs_utils.file_delete(path.join(config_root, buckets_schema_dir, bucket_invalid_schema.name + '.json'));
+        });
+
+        mocha.it('NSFS invalid account schema json', async function() {
+            health.get_service_state.restore();
+            health.get_endpoint_response.restore();
+            const account_invalid_schema = { name: 'account_invalid_schema', path: new_buckets_path };
+            await write_config_file(config_root, accounts_schema_dir, account_invalid_schema.name, account_invalid_schema, "invalid");
+            const get_service_state = sinon.stub(health, "get_service_state");
+            get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 1000 }))
+                .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 2000 }));
+            const get_endpoint_response = sinon.stub(health, "get_endpoint_response");
+            get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}}));
+            const health_status = await health.nc_nsfs_health();
+            assert.strictEqual(health_status.status, 'OK');
+            assert.strictEqual(health_status.checks.invalid_accounts.length, 1);
+            assert.strictEqual(health_status.checks.invalid_accounts[0].name, 'account_invalid_schema.json');
+            fs_utils.file_delete(path.join(config_root, accounts_schema_dir, account_invalid_schema.name + '.json'));
+        });
+    });
+});
+
+async function write_config_file(config_root, schema_dir, config_file_name, config_data, invalid_str = '') {
+    const config_path = path.join(config_root, schema_dir, config_file_name + '.json');
+    await nb_native().fs.writeFile(
+        DEFAULT_FS_CONFIG,
+        config_path, Buffer.from(JSON.stringify(config_data) + invalid_str), {
+            mode: native_fs_utils.get_umasked_mode(config.BASE_MODE_FILE),
+        });
+}
diff --git a/src/util/native_fs_utils.js b/src/util/native_fs_utils.js
index 359fdfd1b8..0a283a506f 100644
--- a/src/util/native_fs_utils.js
+++ b/src/util/native_fs_utils.js
@@ -415,6 +415,18 @@ async function get_user_by_distinguished_name({ distinguished_name }) {
     }
 }
 
+function isDirectory(ent) {
+    if (!ent) throw new Error('isDirectory: ent is empty');
+    if (ent.mode) {
+        // eslint-disable-next-line no-bitwise
+        return (((ent.mode) & nb_native().fs.S_IFMT) === nb_native().fs.S_IFDIR);
+    } else if (ent.type) {
+        return ent.type === nb_native().fs.DT_DIR;
+    } else {
+        throw new Error(`isDirectory: ent ${ent} is not supported`);
+    }
+}
+
 exports.get_umasked_mode = get_umasked_mode;
 exports._make_path_dirs = _make_path_dirs;
 exports._create_path = _create_path;
@@ -440,3 +452,4 @@ exports.gpfs_unlink_retry_catch = gpfs_unlink_retry_catch;
 exports.create_config_file = create_config_file;
 exports.delete_config_file = delete_config_file;
 exports.update_config_file = update_config_file;
+exports.isDirectory = isDirectory;
diff --git a/src/util/nb_native.js b/src/util/nb_native.js
index 9fc1f36c45..b98be4069c 100644
--- a/src/util/nb_native.js
+++ b/src/util/nb_native.js
@@ -46,8 +46,9 @@ function inherits(target, source)
 // https://wiki.openssl.org/index.php/Random_Numbers#Entropy
 // doing as suggested and seeding with /dev/random
 async function init_rand_seed() {
-
-    console.log('init_rand_seed: starting ...');
+    if (process.env.LOCAL_MD_SERVER) {
+
console.log('init_rand_seed: starting ...'); + } let still_reading = true; const promise = generate_entropy(() => still_reading); @@ -83,7 +84,9 @@ async function read_rand_seed(seed_bytes) { const count = buf.length - offset; const random_dev = process.env.DISABLE_DEV_RANDOM_SEED ? '/dev/urandom' : '/dev/random'; if (!fh) { - console.log(`read_rand_seed: opening ${random_dev} ...`); + if (process.env.LOCAL_MD_SERVER) { + console.log(`read_rand_seed: opening ${random_dev} ...`); + } fh = await fs.promises.open(random_dev, 'r'); // Ignore seed in standalone due to pkg issue: https://github.com/noobaa/noobaa-core/issues/6476 if (Number.isInteger(fh)) break;
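Taken together, the health changes above expose the checker both as a CLI (`node src/cmd/health`) and as a class (`module.exports = NSFSHealth`). A sketch of programmatic use, mirroring how `test_nc_nsfs_health.js` constructs the class; the port, config path, and flag values below are illustrative assumptions:

```js
'use strict';
// Programmatic NSFS health check sketch; the require path assumes the caller
// sits at the noobaa-core repository root.
const NSFSHealth = require('./src/cmd/health');

async function check_nsfs_health() {
    const health = new NSFSHealth({
        https_port: 6443,                  // S3 endpoint HTTPS port (default)
        config_root: '/etc/noobaa.conf.d', // default NC config directory
        all_account_details: true,         // also list valid_accounts
        all_bucket_details: true,          // also list valid_buckets
    });
    const status = await health.nc_nsfs_health();
    // status.status is 'OK' or 'NOTOK'; status.error holds the error code, if any.
    console.log(JSON.stringify(status, null, 2));
    return status;
}

check_nsfs_health().catch(err => {
    console.error('health check failed', err);
    process.exit(2);
});
```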