diff --git a/.github/workflows/candig-testing.yml b/.github/workflows/candig-testing.yml
index 7809b7bc1..fe0b4aed0 100644
--- a/.github/workflows/candig-testing.yml
+++ b/.github/workflows/candig-testing.yml
@@ -76,6 +76,6 @@ jobs:
         with:
           name: Post-build error log
           path: |
-            tmp/error.txt
+            tmp/progress.txt
             tmp/container_logs.txt
             tmp/vault_audit.log
diff --git a/.gitignore b/.gitignore
index 5d8e035ba..fd03f5829 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,3 +50,8 @@ pnpm-debug.log*
 
 # macOS-specific files
 .DS_Store
+
+# minio-related files
+lib/minio/access-key
+lib/minio/secret-key
+lib/minio/aws-credentials
diff --git a/Makefile b/Makefile
index 7b94d2cbd..fdcbc7d23 100644
--- a/Makefile
+++ b/Makefile
@@ -17,7 +17,6 @@ SHELL = bash
 CONDA = $(CONDA_INSTALL)/bin/conda
 CONDA_ENV_SETTINGS = $(CONDA_INSTALL)/etc/profile.d/conda.sh
-LOGFILE = tmp/progress.txt
 
 .PHONY: all
 all:
@@ -56,7 +55,7 @@ ifndef CONDA_INSTALL
 	echo "ERROR: Conda install location not specified. Do you have a .env?"
 	exit 1
 endif
-	echo " started bin-conda" >> $(LOGFILE)
+	@printf "\nOutput of bin-conda:\n" | tee -a $(LOGFILE)
 ifeq ($(VENV_OS), linux)
 	curl -Lo bin/miniconda_install.sh \
 		https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
@@ -84,7 +83,6 @@ endif
 	$(CONDA) config --remove channels defaults
 	$(CONDA) config --add channels conda-forge
 	$(CONDA) config --set channel_priority strict
-	echo " finished bin-conda" >> $(LOGFILE)
 
 
 #>>>
@@ -93,7 +91,7 @@ endif
 #<<<
 .PHONY: build-all
 build-all: mkdir
-	printf "Build started at `date '+%D %T'`.\n\n" >> $(ERRORLOG)
+	@printf "Build started at `date '+%D %T'`.\n\n" >> $(LOGFILE)
 	./pre-build-check.sh $(ARGS)
 
 # Setup the entire stack
@@ -132,15 +130,14 @@ build-images: #toil-docker
 
 #<<<
 build-%:
-	printf "\nOutput of build-$*: \n" >> $(ERRORLOG)
-	echo " started build-$*" >> $(LOGFILE)
+	@printf "\nOutput of build-$*: \n" | tee -a $(LOGFILE)
 	source setup_hosts.sh
 	if [ -f lib/$*/$*_preflight.sh ]; then \
-		source lib/$*/$*_preflight.sh 2>&1 | tee -a $(ERRORLOG); \
+		source lib/$*/$*_preflight.sh 2>&1 | tee -a $(LOGFILE); \
 	fi
 	export SERVICE_NAME=$*; \
 	DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 \
-	docker compose -f lib/candigv2/docker-compose.yml -f lib/$*/docker-compose.yml build $(BUILD_OPTS) 2>&1 | tee -a $(ERRORLOG)
+	docker compose -f lib/candigv2/docker-compose.yml -f lib/$*/docker-compose.yml build $(BUILD_OPTS) 2>&1 | tee -a $(LOGFILE)
 	echo " finished build-$*" >> $(LOGFILE)
@@ -188,7 +185,6 @@ clean-authx:
 
 # Empties error and progress logs
 .PHONY: clean-logs
 clean-logs:
-	> $(ERRORLOG)
 	> $(LOGFILE)
 
 #>>>
@@ -259,7 +255,7 @@ clean-secrets:
 
 #>>>
-# remove all peristant volumes and local data
+# remove all persistent volumes and local data
 # make clean-volumes
 
 #<<<
@@ -289,20 +285,18 @@ containers=$(shell cat lib/$*/docker-compose.yml | yq -ojson '.services' | jq '
 found=$(shell grep -ch $(containers) tmp/containers.txt)
 #<<<
 compose-%:
-	printf "\nOutput of compose-$*: \n" >> $(ERRORLOG)
-	echo " started compose-$*" >> $(LOGFILE)
+	@printf "\nOutput of compose-$*: \n" | tee -a $(LOGFILE)
 	source setup_hosts.sh; \
 	python settings.py; source env.sh; \
 	export SERVICE_NAME=$*; \
-	docker compose -f lib/candigv2/docker-compose.yml -f lib/$*/docker-compose.yml --compatibility up -d 2>&1 | tee -a $(ERRORLOG)
+	docker compose -f lib/candigv2/docker-compose.yml -f lib/$*/docker-compose.yml --compatibility up -d 2>&1 | tee -a $(LOGFILE)
 	cat tmp/containers.txt
 	if [ $(found) -eq 0 ]; then \
 		echo $(containers) >> tmp/containers.txt; \
 	fi
 	if [ -f lib/$*/$*_setup.sh ]; then \
-		source lib/$*/$*_setup.sh 2>&1 | tee -a $(ERRORLOG); \
+		source lib/$*/$*_setup.sh 2>&1 | tee -a $(LOGFILE); \
 	fi
-	echo " finished compose-$*" >> $(LOGFILE)
 
 
 #>>>
@@ -324,8 +318,7 @@ recompose-%:
 
 #<<<
 down-%:
-	printf "\nOutput of down-$*: \n" >> $(ERRORLOG)
-	echo " started down-$*" >> $(LOGFILE)
+	@printf "\nOutput of down-$*: \n" | tee -a $(LOGFILE)
 	source setup_hosts.sh; \
 	export SERVICE_NAME=$*; \
 	docker compose -f lib/candigv2/docker-compose.yml -f lib/$*/docker-compose.yml --compatibility down 2>&1
@@ -360,7 +353,7 @@ docker-push:
 
 #<<<
 .PHONY: docker-secrets
-docker-secrets: mkdir authx-secrets data-secrets #minio-secrets
+docker-secrets: mkdir authx-secrets data-secrets
 
 data-secrets: mkdir
@@ -383,11 +376,12 @@ authx-secrets: mkdir
 
 minio-secrets: mkdir
 	@echo "making minio secrets"
-	@echo $(DEFAULT_ADMIN_USER) > tmp/secrets/minio-access-key
+	@echo $(DEFAULT_ADMIN_USER) > lib/minio/access-key
 	$(MAKE) secret-minio-secret-key
-	@echo '[default]' > tmp/secrets/aws-credentials
-	@echo "aws_access_key_id=`cat tmp/secrets/minio-access-key`" >> tmp/secrets/aws-credentials
-	@echo "aws_secret_access_key=`cat tmp/secrets/minio-secret-key`" >> tmp/secrets/aws-credentials
+	mv tmp/secrets/minio-secret-key lib/minio/secret-key
+	@echo '[default]' > lib/minio/aws-credentials
+	@echo "aws_access_key_id=`cat lib/minio/access-key`" >> lib/minio/aws-credentials
+	@echo "aws_secret_access_key=`cat lib/minio/secret-key`" >> lib/minio/aws-credentials
 
 #>>>
@@ -399,8 +393,6 @@ minio-secrets: mkdir
 docker-volumes:
 	docker volume create grafana-data --label candigv2=volume
 	docker volume create jupyter-data --label candigv2=volume
-	# docker volume create minio-config --label candigv2=volume
-	# docker volume create minio-data $(MINIO_VOLUME_OPT) --label candigv2=volume
 	docker volume create prometheus-data --label candigv2=volume
 	docker volume create toil-jobstore --label candigv2=volume
 	docker volume create keycloak-data --label candigv2=volume
@@ -425,6 +417,17 @@ init-authx: mkdir
 	$(foreach MODULE, $(CANDIG_AUTH_MODULES), $(MAKE) build-$(MODULE); $(MAKE) compose-$(MODULE); python settings.py;)
 
+#>>>
+# create a minio container (that won't be removed as part of clean-all)
+# make init-minio
+
+#<<<
+init-minio: minio-secrets
+	docker volume create minio-config
+	docker volume create minio-data $(MINIO_VOLUME_OPT)
+	docker compose -f lib/candigv2/docker-compose.yml -f lib/minio/docker-compose.yml --compatibility up -d 2>&1 | tee -a $(LOGFILE)
+
+
 #>>>
 # initialize conda environment
 # make init-conda
@@ -432,7 +435,7 @@ init-authx: mkdir
 
 #<<<
 .PHONY: init-conda
 init-conda:
-	echo " started init-conda" >> $(LOGFILE)
+	@printf "\nOutput of init-conda: \n" | tee -a $(LOGFILE)
 	# source conda's script to be safe, so the conda command is found
 	source $(CONDA_ENV_SETTINGS) \
 	&& $(CONDA) create -y -n $(VENV_NAME) python=$(VENV_PYTHON) pip=$(VENV_PIP)
@@ -445,7 +448,6 @@ init-conda:
 	#@echo "Load local conda: source bin/miniconda3/etc/profile.d/conda.sh"
 	#@echo "Activate conda env: conda activate $(VENV_NAME)"
 	#@echo "Install requirements: pip install -U -r etc/venv/requirements.txt"
-	echo " finished init-conda" >> $(LOGFILE)
 
 
 #>>>
@@ -494,7 +496,7 @@ secret-%:
 #<<<
 .PHONY: toil-docker
 toil-docker:
-	echo " started toil-docker" >> $(LOGFILE)
+	@printf "\nOutput of toil-docker: \n" | tee -a $(LOGFILE)
 	VIRTUAL_ENV=1 DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 TOIL_DOCKER_REGISTRY=$(DOCKER_REGISTRY) \
 	$(MAKE) -C lib/toil/toil-docker docker
 	$(foreach MODULE,$(TOIL_MODULES), \
@@ -504,7 +506,6 @@ toil-docker:
 		docker tag $(DOCKER_REGISTRY)/$(MODULE):$(TOIL_VERSION) \
 		$(DOCKER_REGISTRY)/$(MODULE):latest;)
 	$(foreach MODULE, $(TOIL_MODULES), docker push $(DOCKER_REGISTRY)/$(MODULE):latest;)
-	echo " finished toil-docker" >> $(LOGFILE)
 
 
 #>>>
diff --git a/docs/astro.config.mjs b/docs/astro.config.mjs
index 241af8d1c..b8558f944 100644
--- a/docs/astro.config.mjs
+++ b/docs/astro.config.mjs
@@ -41,31 +41,31 @@ export default defineConfig({
       }),
       starlightOpenAPI([
         {
-          base: 'technical/ingest',
+          base: 'technical/ingest-api',
           label: 'ingest api',
          schema: 'https://raw.githubusercontent.com/CanDIG/candigv2-ingest/refs/heads/develop/ingest_openapi.yaml',
           collapsed: true
         },
         {
-          base: 'technical/query',
+          base: 'technical/query-api',
           label: 'query api',
           schema: 'https://raw.githubusercontent.com/CanDIG/candigv2-query/refs/heads/stable/query_server/openapi.yaml',
           collapsed: true
         },
         {
-          base: 'technical/katsu',
+          base: 'technical/katsu-api',
           label: 'katsu api',
           schema: 'https://raw.githubusercontent.com/CanDIG/katsu/refs/heads/stable/chord_metadata_service/mohpackets/docs/schemas/schema.yml',
           collapsed: true
         },
         {
-          base: 'technical/htsget/drs',
+          base: 'technical/htsget/drs-api',
           label: 'htsget drs api',
           schema: 'https://raw.githubusercontent.com/CanDIG/htsget_app/refs/heads/stable/htsget_server/drs_openapi.yaml',
           collapsed: true
         },
         {
-          base: 'technical/htsget/beacon',
+          base: 'technical/htsget/beacon-api',
           label: 'htsget beacon api',
           schema: 'https://raw.githubusercontent.com/CanDIG/htsget_app/refs/heads/stable/htsget_server/beacon_openapi.yaml',
           collapsed: true
@@ -76,6 +76,12 @@ export default defineConfig({
           schema: 'https://raw.githubusercontent.com/CanDIG/htsget_app/refs/heads/stable/htsget_server/htsget_openapi.yaml',
           collapsed: true
         },
+        {
+          base: 'technical/federation-api',
+          label: 'federation api',
+          schema: 'https://raw.githubusercontent.com/CanDIG/federation_service/refs/heads/develop/candig_federation/federation.yaml',
+          collapsed: true
+        },
       ])
   ],
   sidebar: [
@@ -131,4 +137,4 @@ export default defineConfig({
     },
   ],
 })]
-});
\ No newline at end of file
+});
diff --git a/docs/src/content/docs/ingest/ingest-help.mdx b/docs/src/content/docs/ingest/ingest-help.mdx
index 0d4a664ec..a10817c53 100644
--- a/docs/src/content/docs/ingest/ingest-help.mdx
+++ b/docs/src/content/docs/ingest/ingest-help.mdx
@@ -95,7 +95,7 @@ If you receive an error when using the `/ingest/genomic` endpoint something like
 
 This means that the token used by ingest to submit to hts-get has expired. At the moment this happens after 30 minutes. Get a fresh token and try again.
 
-### What if I need to delete or edit data that I already ingested into the system?
+## What if I need to delete or edit data that I already ingested into the system?
 
 Currently, there is no way to edit data that is already ingested into CanDIG. To change any data, the data must be deleted and re-ingested. Follow the steps below in order to delete data in CanDIG.
 
@@ -125,7 +125,7 @@ This token should be kept secure, it lasts for 30 mins
 
 ```bash
 curl --request DELETE \
---url $CANDIG_URL'/katsu/v2/ingest/program/$PROGRAM_ID/' \
+--url $CANDIG_URL'/katsu/v3/ingest/program/$PROGRAM_ID/' \
 -H 'accept: application/json' \
 -H 'Content-Type: application/json' \
 -H 'Authorization: Bearer '$TOKEN
@@ -179,7 +179,3 @@ When attempting clinical data ingest into katsu, if you get a response such as b
 ```
 It means you have not yet registered your program before ingesting.
 Please follow the instructions in [Register Programs](register-programs/) to submit a program authorization before attempting clinical ingest again.
-
-:::danger
-This token should be kept secure, it lasts for 30 mins.
-:::
diff --git a/etc/env/example.env b/etc/env/example.env
index aa3dfb893..7d4b3c8b2 100644
--- a/etc/env/example.env
+++ b/etc/env/example.env
@@ -3,7 +3,7 @@
 # site options
 CANDIG_MODULES=logging keycloak vault redis postgres htsget katsu query tyk opa federation candig-ingest candig-data-portal
- #minio drs-server wes-server monitoring
+ #drs-server wes-server monitoring
 CANDIG_AUTH_MODULES=keycloak vault tyk opa federation
 CANDIG_DATA_MODULES=keycloak vault redis postgres logging
 
@@ -301,7 +301,9 @@ CANDIG_DATA_PORTAL_PRIVATE_URL=http://candig-data-portal:3000
 TOKEN_PATH = ${PWD}/Vault-Helper-Tool/token.txt
 PROGRESS_FILE = ${PWD}/tmp/progress.txt
 
-# error logging
-ERRORLOG=tmp/error.txt
+# install logging
+LOGFILE = tmp/progress.txt
 
 CONDA_INSTALL=bin/miniconda3
+
+COMPOSE_IGNORE_ORPHANS=True
diff --git a/etc/tests/test_integration.py b/etc/tests/test_integration.py
index 0803fd4d2..d486455a3 100644
--- a/etc/tests/test_integration.py
+++ b/etc/tests/test_integration.py
@@ -443,6 +443,7 @@ def test_ingest_not_admin_katsu():
     while response.status_code == 200 and "status" in response.json():
         time.sleep(2)
         response = requests.get(f"{ENV['CANDIG_URL']}/ingest/status/{queue_id}", headers=headers)
+    print(response.text)
     assert len(response.json()[f"{ENV['CANDIG_ENV']['CANDIG_SITE_LOCATION']}-SYNTH_01"]["errors"]) == 0
     assert len(response.json()[f"{ENV['CANDIG_ENV']['CANDIG_SITE_LOCATION']}-SYNTH_01"]["results"]) == 13
     katsu_response = requests.get(f"{ENV['CANDIG_ENV']['KATSU_INGEST_URL']}/v3/discovery/programs/")
@@ -1077,6 +1078,7 @@ def test_query_donor_search():
             assert summary_stats[category][value] == expected_response[category][value]
 
+# Can we find donors by querying a specific region of the genome?
 def test_query_genomic():
     # tests that a request sent via query to htsget-beacon properly prunes the data
     token = get_token(username=ENV['CANDIG_NOT_ADMIN2_USER'],
                       password=ENV['CANDIG_NOT_ADMIN2_PASSWORD'])
@@ -1150,29 +1152,8 @@ def test_query_genomic():
             print(f"{donor["program_id"]}: {donor["submitter_donor_id"]}")
     assert response and len(response.json()["results"]) == 1
 
-    # token = get_token(username=ENV['CANDIG_NOT_ADMIN_USER'],
-    #                   password=ENV['CANDIG_NOT_ADMIN_PASSWORD'])
-    # headers = {
-    #     "Authorization": f"Bearer {token}",
-    #     "Content-Type": "application/json; charset=utf-8",
-    # }
-    # params = {
-    #     "gene": "TP53",
-    #     "assembly": "hg38"
-    # }
-    # response = requests.get(
-    #     f"{ENV['CANDIG_URL']}/query/query", headers=headers, params=params
-    # )
-    # pprint.pprint(response.json())
-    # if len(response.json()["results"]) != 0:
-    #     print(f"\n\nExpected 0 results from the genomic query using gene name 'TP53' but got {len(response.json()["results"])}")
-    # if len(response.json()["results"]) > 0:
-    #     print("Got results from:")
-    #     for donor in response.json()["results"]:
-    #         print(f"{donor["program_id"]}: {donor["submitter_donor_id"]}")
-    # assert response and len(response.json()["results"]) == 0
-
+# Can we use a discovery query to get counts of donors we do not have access to?
 def test_query_discovery():
     katsu_response = requests.get(
         f"{ENV['CANDIG_ENV']['KATSU_INGEST_URL']}/v3/discovery/programs/"
     )
@@ -1203,6 +1184,18 @@
             assert field in query_response["site"]["required_but_missing"][category]
 
 
+# Can we check how many donors have genomics data?
+def test_query_completeness():
+    query_response = requests.get(
+        f"{ENV['CANDIG_ENV']['QUERY_INTERNAL_URL']}/genomic_completeness").json()
+    pprint.pprint(query_response)
+    # Verify that the synthetic data shows up
+    assert "LOCAL-SYNTH_01" in query_response
+    assert query_response["LOCAL-SYNTH_01"]["genomes"] == 6
+    assert "LOCAL-SYNTH_02" in query_response
+    assert query_response["LOCAL-SYNTH_02"]["genomes"] == 5
+
+
 def test_clean_up():
     clean_up_program(f"{ENV['CANDIG_ENV']['CANDIG_SITE_LOCATION']}-SYNTH_01")
     clean_up_program(f"{ENV['CANDIG_ENV']['CANDIG_SITE_LOCATION']}-SYNTH_02")
diff --git a/lib/candigv2/docker-compose.yml b/lib/candigv2/docker-compose.yml
index d9aa723d5..4669f8547 100644
--- a/lib/candigv2/docker-compose.yml
+++ b/lib/candigv2/docker-compose.yml
@@ -1,8 +1,4 @@
 volumes:
-  # minio-data:
-  #   external: true
-  # minio-config:
-  #   external: true
   toil-jobstore:
     external: true
   prometheus-data:
     external: true
@@ -31,14 +27,6 @@ secrets:
     file: $PWD/tmp/postgres/db-secret
     labels:
       - "candigv2=secret"
-  # minio-access-key:
-  #   file: $PWD/tmp/secrets/minio-access-key
-  #   labels:
-  #     - "candigv2=secret"
-  # minio-secret-key:
-  #   file: $PWD/tmp/secrets/minio-secret-key
-  #   labels:
-  #     - "candigv2=secret"
   wes-dependency-resolver:
     file: $PWD/etc/yml/${WES_DEPENDENCY_RESOLVER}.yml
     labels:
diff --git a/lib/minio/docker-compose.yml b/lib/minio/docker-compose.yml
index fbb70ae37..af66340fd 100644
--- a/lib/minio/docker-compose.yml
+++ b/lib/minio/docker-compose.yml
@@ -1,8 +1,16 @@
+volumes:
+  minio-data:
+    external: true
+  minio-config:
+    external: true
+secrets:
+  minio-access-key:
+    file: $PWD/lib/minio/access-key
+  minio-secret-key:
+    file: $PWD/lib/minio/secret-key
 services:
   minio:
     image: minio/minio:${MINIO_VERSION:-latest}
-    labels:
-      - "candigv2=minio"
     volumes:
       - minio-data:/data
       - minio-config:/root/.minio
diff --git a/lib/toil/docker-compose.yml b/lib/toil/docker-compose.yml
index afbef2209..c65008485 100644
--- a/lib/toil/docker-compose.yml
+++ b/lib/toil/docker-compose.yml
@@ -1,3 +1,10 @@
+secrets:
+  minio-access-key:
+    file: $PWD/lib/minio/access-key
+  minio-secret-key:
+    file: $PWD/lib/minio/secret-key
+  aws-credentials:
+    file: $PWD/lib/minio/aws-credentials
 services:
   toil-server:
     image: ${DOCKER_REGISTRY}/toil:${TOIL_VERSION:-latest}
diff --git a/lib/wes-server/docker-compose.yml b/lib/wes-server/docker-compose.yml
index 0d858e112..72943f6ff 100644
--- a/lib/wes-server/docker-compose.yml
+++ b/lib/wes-server/docker-compose.yml
@@ -1,3 +1,10 @@
+secrets:
+  minio-access-key:
+    file: $PWD/lib/minio/access-key
+  minio-secret-key:
+    file: $PWD/lib/minio/secret-key
+  aws-credentials:
+    file: $PWD/lib/minio/aws-credentials
 services:
   wes-server:
     build:
diff --git a/post_build.sh b/post_build.sh
index 2819810eb..b00ff32e4 100755
--- a/post_build.sh
+++ b/post_build.sh
@@ -6,7 +6,8 @@
 # Also prints out all relevant logs from the error logging file (i.e., all lines
 # that contain the phrases 'error' or 'warn').
 
-source <(grep --color=never "ERRORLOG" .env)
+python settings.py
+source env.sh
 
 RED='\033[0;31m'
 YELLOW='\033[1;33m'
@@ -16,7 +17,8 @@ DEFAULT='\033[0m'
 
 function print_module_logs() {
     MODULE=$1
-    BUILD_LINE=$(grep -n build-${MODULE} ${ERRORLOG} | tail -1 | cut -d ':' -f 1)
+    output=""
+    BUILD_LINE=$(grep -n build-${MODULE} ${LOGFILE} | tail -1 | cut -d ':' -f 1)
     if [[ $BUILD_LINE != "" ]]; then
         LNO=$BUILD_LINE
         while read -r LINE; do
@@ -24,13 +26,13 @@ function print_module_logs() {
                 break
             else
                 if [[ ${LINE} =~ .*([Ee]rror|[Ww]arn).* ]]; then
-                    printf "${GREEN}${LNO}${DEFAULT} ${LINE}\n"
+                    output="${output}${GREEN}${LNO}${DEFAULT} ${LINE}\n"
                 fi
             fi
             LNO=$((LNO+1))
-        done < <(tail -n "+$((BUILD_LINE + 1))" $ERRORLOG)
+        done < <(tail -n "+$((BUILD_LINE + 1))" $LOGFILE)
     fi
-    COMPOSE_LINE=$(grep -n compose-${MODULE} ${ERRORLOG} | tail -1 | cut -d ':' -f 1)
+    COMPOSE_LINE=$(grep -n compose-${MODULE} ${LOGFILE} | tail -1 | cut -d ':' -f 1)
     if [[ $COMPOSE_LINE != "" ]]; then
         LNO=$COMPOSE_LINE
         while read -r LINE; do
@@ -38,41 +40,54 @@ function print_module_logs() {
                 break
             else
                 if [[ ${LINE} =~ .*([Ee]rror|[Ww]arn).* ]]; then
-                    printf "${GREEN}${LNO}${DEFAULT} ${LINE}\n"
+                    output="${output}${GREEN}${LNO}${DEFAULT} ${LINE}\n"
                 fi
             fi
             LNO=$((LNO+1))
-        done < <(tail -n "+$((COMPOSE_LINE+1))" $ERRORLOG)
+        done < <(tail -n "+$((COMPOSE_LINE+1))" $LOGFILE)
+    fi
+    if [[ $output != "" ]]; then
+        printf "\n\n${RED}Error logs for ${MODULE}:\n--------------------\n${DEFAULT}"
+        printf "${output}"
+        printf "${RED}--------------------\n${DEFAULT}\n"
     fi
 }
 
 MODULES=$(cat .env | grep CANDIG_MODULES | cut -c 16- | cut -d '#' -f 1)
 ALL_MODULES="${MODULES}"
-SERVICE_COUNT=0
+EXPECTED_CONTAINERS=""
 for MODULE in $ALL_MODULES; do
+    services=$(cat lib/$MODULE/docker-compose.yml | yq -ojson '.services' | jq 'keys' | jq -r @sh | sed s/^\'/candigv2_/g | sed s/\'$/_1/g | sed "s/\'\ \'/_1\\ candigv2_/g" | sed "s/'\\s'/_1\\ candigv2_/g")
+    EXPECTED_CONTAINERS=$(echo $EXPECTED_CONTAINERS $services)
     sc=$(cat lib/$MODULE/docker-compose.yml | yq -ojson '.services' | jq 'keys' | jq -r @sh | wc -w | tr -d ' ')
-    SERVICE_COUNT=`expr $SERVICE_COUNT + $sc`
 done
 
-RUNNING_MODULES=$(docker ps --format "{{.Names}}")
+EXPECTED_COUNT=$(echo $EXPECTED_CONTAINERS | wc -w)
+
+RUNNING_CONTAINERS=$(docker ps --format "{{.Names}}")
+RUNNING_COUNT=$(echo $RUNNING_CONTAINERS | wc -w)
 
-if [ $(docker ps -q | wc -l) == $SERVICE_COUNT ]
+# figure out any containers that should've been there but aren't
+for i in $EXPECTED_CONTAINERS
+do
+    [[ ! $RUNNING_CONTAINERS =~ $i ]] && MISSING_CONTAINERS="${MISSING_CONTAINERS:+${MISSING_CONTAINERS} }$i"
+done
+# echo expected: $EXPECTED_CONTAINERS
+# echo running: $RUNNING_CONTAINERS
+# echo missing: $MISSING_CONTAINERS
+if [[ $(echo $MISSING_CONTAINERS | wc -w | tr -d ' ') == "0" ]]
 then
     for MODULE in $ALL_MODULES; do
-        printf "\n\n${BLUE}Error logs for ${MODULE}:\n--------------------\n${DEFAULT}"
-        print_module_logs $MODULE
-        printf "${BLUE}--------------------\n${DEFAULT}"
+        print_module_logs $MODULE
     done
-    echo -e "${GREEN}Number of expected CanDIG services matches number of containers running!${DEFAULT} Potentially useful error log segments listed above for debugging."
+    echo -e "${GREEN}Number of expected CanDIG services matches number of containers running!${DEFAULT} Lines above are in ${LOGFILE} and may be helpful for debugging."
     exit 0
 else
     for MODULE in $ALL_MODULES; do
-        printf "\n\n${RED}Error logs for ${MODULE}:\n--------------------\n${DEFAULT}"
         print_module_logs $MODULE
-        printf "${RED}--------------------\n${DEFAULT}"
     done
-    echo -e "${RED}WARNING: ${YELLOW}The number of CanDIG containers running does not match the number of expected services.\nRunning: ${BLUE}$(docker ps -q | wc -l) ${YELLOW}Expected: ${BLUE}${SERVICE_COUNT}
-${DEFAULT}Check your build/docker logs. Potentially offending service logs shown above. View ${ERRORLOG} for more information."
+    echo -e "${RED}WARNING: ${YELLOW}Some containers that are expected to be running are missing:\n${MISSING_CONTAINERS}
+${DEFAULT}Lines above are in ${LOGFILE} and may be helpful for debugging."
     exit 1
 fi
diff --git a/settings.py b/settings.py
index 476627d79..f09644bf5 100755
--- a/settings.py
+++ b/settings.py
@@ -41,6 +41,7 @@ def get_env_value(key):
 
 def get_env():
     vars = {}
+    vars["LOGFILE"] = get_env_value("LOGFILE")
     vars["CANDIG_URL"] = get_env_value("TYK_LOGIN_TARGET_URL")
     vars["CANDIG_CLIENT_ID"] = get_env_value("KEYCLOAK_CLIENT_ID")
     vars["KEYCLOAK_PUBLIC_URL"] = get_env_value("KEYCLOAK_PUBLIC_URL")
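
For readers of the post_build.sh changes above: the yq/jq/sed chain builds EXPECTED_CONTAINERS by turning every service name in a module's compose file into the `candigv2_<service>_1` container name that `docker compose --compatibility` assigns under the candigv2 project. A minimal, more readable sketch of that expansion follows; the compose file path and its two service names are invented for illustration, and the loop is an equivalent rewrite of the script's sed chain rather than the exact command it runs.

```bash
#!/usr/bin/env bash
# Sketch only: expand compose service names into the container names that
# post_build.sh expects to see in `docker ps`. The demo compose file and its
# services are hypothetical stand-ins for lib/<module>/docker-compose.yml.
cat > /tmp/demo-compose.yml <<'EOF'
services:
  minio:
    image: minio/minio
  minio-client:
    image: minio/mc
EOF

EXPECTED_CONTAINERS=""
for svc in $(cat /tmp/demo-compose.yml | yq -ojson '.services' | jq -r 'keys[]'); do
    # candigv2 is the compose project name; --compatibility appends the _1 replica suffix
    EXPECTED_CONTAINERS="${EXPECTED_CONTAINERS:+${EXPECTED_CONTAINERS} }candigv2_${svc}_1"
done
echo "$EXPECTED_CONTAINERS"   # -> candigv2_minio_1 candigv2_minio-client_1
```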