diff --git a/docker/conf/nginx.conf b/docker/conf/nginx.conf
index dcfcaf7..b851484 100644
--- a/docker/conf/nginx.conf
+++ b/docker/conf/nginx.conf
@@ -1,108 +1,109 @@
 worker_processes 1;

 # Show startup logs on stderr; switch to debug to print, well, debug logs when
 # running nginx-debug
 error_log /dev/stderr info;

 events {
   worker_connections 1024;
 }

 http {
   include mime.types;
   default_type application/octet-stream;
   sendfile on;
   keepalive_timeout 65;
+  client_max_body_size 100M;

   # Built-in Docker resolver. Needed to allow on-demand resolution of proxy
   # upstreams.
   resolver 127.0.0.11 valid=30s;

   server {
-    listen 80 default_server;
+    listen 5080 default_server;

     # Add a trailing slash to top level requests (e.g. http://localhost:5080/flower)
     rewrite ^/([^/]+)$ /$1/ permanent;

     # In this pile of proxies, all upstreams are set using a variable. This
     # makes nginx DNS-resolve the name of the upstream when clients request
     # them, rather than on start. This avoids an unstarted container preventing
     # nginx from starting.
     #
     # Variables need to be set as early as possible, as they're statements from
     # the rewrite module and `rewrite [...] break;` will prevent these
     # statements from being executed.

     location /flower/ {
       set $upstream "http://flower:5555";
       rewrite ^/flower/(.*)$ /$1 break;
       proxy_pass $upstream;
       proxy_set_header X-Real-IP $remote_addr;
       proxy_set_header Host $host;
       proxy_redirect off;
       proxy_http_version 1.1;
       proxy_set_header Upgrade $http_upgrade;
       proxy_set_header Connection "upgrade";
     }

     location /rabbitmq/ {
       set $upstream "http://amqp:15672";
       rewrite ^ $request_uri;
       rewrite ^/rabbitmq(/.*)$ $1 break;
       proxy_pass $upstream$uri;
     }

     location /scheduler {
       set $upstream "http://swh-scheduler:5008";
       rewrite ^/scheduler/(.*)$ /$1 break;
       proxy_pass $upstream;
     }

     location /storage {
       set $upstream "http://swh-storage:5002";
       rewrite ^/storage/(.*)$ /$1 break;
       proxy_pass $upstream;
     }

     location /indexer-storage {
       set $upstream "http://swh-idx-storage:5007";
       rewrite ^/indexer-storage/(.*)$ /$1 break;
       proxy_pass $upstream;
     }

     location /deposit {
       set $upstream "http://swh-deposit:5006";
       rewrite ^/deposit/(.*)$ /deposit/$1 break;
       proxy_pass $upstream;
       proxy_set_header X-Real-IP $remote_addr;
       proxy_set_header X-Forwarded-Host $host:5080;
       proxy_set_header SCRIPT_NAME /deposit;
       proxy_redirect off;
     }

     location /objstorage {
       set $upstream "http://swh-objstorage:5003";
       rewrite ^/objstorage/(.*)$ /$1 break;
       proxy_pass $upstream;
     }

     location /prometheus {
       set $upstream "http://prometheus:9090";
       proxy_pass $upstream;
     }

     location /grafana {
       set $upstream "http://grafana:3000";
       rewrite ^/grafana/(.*)$ /$1 break;
       proxy_pass $upstream;
     }

     location / {
       set $upstream "http://swh-web:5004";
       proxy_pass $upstream;
     }
   }
 }
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 452dbb6..93390cb 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -1,415 +1,415 @@
 version: '2'

 services:
   amqp:
     image: rabbitmq:3.6-management
     ports:
       - 5072:5672

   # flower:
   #   image: mher/flower
   #   command: --broker=amqp://guest:guest@amqp:5672// --url_prefix=flower
   #   ports:
   #     - 5055:5555
   #   depends_on:
   #     - amqp

   zookeeper:
     image: wurstmeister/zookeeper
     restart: always

   kafka:
     image: wurstmeister/kafka
     ports:
       - "5092:9092"
     env_file: ./env/kafka.env
     depends_on:
       - zookeeper

   kafka-manager:
     image: hlebalbau/kafka-manager:stable
     ports:
       - "5093:9000"
     environment:
       ZK_HOSTS: zookeeper:2181
       APPLICATION_SECRET: random-secret
     command: -Dpidfile.path=/dev/null

   prometheus:
     image: prom/prometheus
     depends_on:
       - prometheus-statsd-exporter
     command:
       # Needed for the reverse-proxy
       - "--web.external-url=/prometheus"
       - "--config.file=/etc/prometheus/prometheus.yml"
     volumes:
       - "./conf/prometheus.yml:/etc/prometheus/prometheus.yml:ro"
     restart: unless-stopped

   prometheus-statsd-exporter:
     image: prom/statsd-exporter
     command:
       - "--statsd.mapping-config=/etc/prometheus/statsd-mapping.yml"
     volumes:
       - "./conf/prometheus-statsd-mapping.yml:/etc/prometheus/statsd-mapping.yml:ro"
     restart: unless-stopped

   prometheus-rabbitmq-exporter:
     image: kbudde/rabbitmq-exporter
     restart: unless-stopped
     environment:
       SKIP_QUEUES: "RPC_.*"
       MAX_QUEUES: 5000
       RABBIT_URL: http://amqp:15672

   grafana:
     image: grafana/grafana
     restart: unless-stopped
     depends_on:
       - prometheus
     environment:
       GF_SERVER_ROOT_URL: http://localhost:5080/grafana
     volumes:
       - "./conf/grafana/provisioning:/etc/grafana/provisioning:ro"
       - "./conf/grafana/dashboards:/var/lib/grafana/dashboards"

   nginx:
     image: nginx
     volumes:
       - "./conf/nginx.conf:/etc/nginx/nginx.conf:ro"
     ports:
-      - 5080:80
+      - 5080:5080

   # Scheduler
   swh-scheduler-db:
     image: postgres:11
     env_file:
       - ./env/common_python.env
       - ./env/scheduler-db.env
     environment:
       # unset PGHOST as db service crashes otherwise
       PGHOST:

   swh-scheduler:
     image: swh/stack
     build: ./
     env_file:
       - ./env/common_python.env
       - ./env/scheduler-db.env
       - ./env/scheduler.env
     environment:
       SWH_CONFIG_FILENAME: /scheduler.yml
       SWH_SCHEDULER_CONFIG_FILE: /scheduler.yml
     entrypoint: /entrypoint.sh
     depends_on:
       - swh-scheduler-db
     ports:
       - 5008:5008
     volumes:
       - "./conf/scheduler.yml:/scheduler.yml:ro"
       - "./services/swh-scheduler/entrypoint.sh:/entrypoint.sh:ro"

   swh-scheduler-listener:
     image: swh/stack
     build: ./
     env_file:
       - ./env/common_python.env
       - ./env/scheduler-db.env
       - ./env/scheduler.env
     environment:
       SWH_CONFIG_FILENAME: /scheduler.yml
       SWH_SCHEDULER_CONFIG_FILE: /scheduler.yml
     entrypoint: /entrypoint.sh
     command: start-listener
     depends_on:
       - swh-scheduler
       - amqp
     volumes:
       - "./conf/scheduler.yml:/scheduler.yml:ro"
       - "./services/swh-scheduler-worker/entrypoint.sh:/entrypoint.sh:ro"

   swh-scheduler-runner:
     image: swh/stack
     build: ./
     env_file:
       - ./env/common_python.env
       - ./env/scheduler-db.env
       - ./env/scheduler.env
     environment:
       SWH_CONFIG_FILENAME: /scheduler.yml
       SWH_SCHEDULER_CONFIG_FILE: /scheduler.yml
     entrypoint: /entrypoint.sh
     command: start-runner -p 10
     depends_on:
       - swh-scheduler
       - amqp
     volumes:
       - "./conf/scheduler.yml:/scheduler.yml:ro"
       - "./services/swh-scheduler-worker/entrypoint.sh:/entrypoint.sh:ro"

   # Graph storage
   swh-storage-db:
     image: postgres:11
     env_file:
       - ./env/storage-db.env
     environment:
       # unset PGHOST as db service crashes otherwise
       PGHOST:

   swh-storage:
     image: swh/stack
     build: ./
     ports:
       - 5002:5002
     depends_on:
       - swh-storage-db
       - swh-objstorage
       - kafka
     env_file:
       - ./env/common_python.env
       - ./env/storage-db.env
     environment:
       SWH_CONFIG_FILENAME: /storage.yml
       STORAGE_BACKEND: postgresql
     entrypoint: /entrypoint.sh
     volumes:
       - "./conf/storage.yml:/storage.yml:ro"
       - "./services/swh-storage/entrypoint.sh:/entrypoint.sh:ro"

   # Object storage
   swh-objstorage:
     build: ./
     image: swh/stack
     ports:
       - 5003:5003
     env_file:
       - ./env/common_python.env
     environment:
       SWH_CONFIG_FILENAME: /objstorage.yml
     entrypoint: /entrypoint.sh
     volumes:
       - "./conf/objstorage.yml:/objstorage.yml:ro"
       - "./services/swh-objstorage/entrypoint.sh:/entrypoint.sh:ro"

   # Indexer storage
   swh-idx-storage-db:
     image: postgres:11
     env_file:
       - ./env/indexers-db.env
     environment:
       # unset PGHOST as db service crashes otherwise
       PGHOST:

   swh-idx-storage:
     image: swh/stack
     build: ./
     ports:
       - 5007:5007
     depends_on:
       - swh-idx-storage-db
     env_file:
       - ./env/common_python.env
       - ./env/indexers-db.env
     environment:
       SWH_CONFIG_FILENAME: /indexer_storage.yml
     entrypoint: /entrypoint.sh
     volumes:
       - "./conf/indexer_storage.yml:/indexer_storage.yml:ro"
       - "./services/swh-indexer-storage/entrypoint.sh:/entrypoint.sh:ro"

   # Web interface
   swh-web:
     build: ./
     image: swh/stack
     ports:
       - 5004:5004
     depends_on:
       - swh-objstorage
       - swh-storage
       - swh-idx-storage
     env_file:
       - ./env/common_python.env
     environment:
       VERBOSITY: 3
       DJANGO_SETTINGS_MODULE: swh.web.settings.development
       SWH_CONFIG_FILENAME: /web.yml
     entrypoint: /entrypoint.sh
     volumes:
       - "./conf/web.yml:/web.yml:ro"
       - "./services/swh-web/entrypoint.sh:/entrypoint.sh:ro"

   swh-deposit-db:
     image: postgres:11
     env_file:
       - ./env/deposit-db.env
     environment:
       # unset PGHOST as db service crashes otherwise
       PGHOST:

   swh-deposit:
     image: swh/stack
     build: ./
     ports:
       - 5006:5006
     depends_on:
       - swh-deposit-db
       - swh-scheduler
     env_file:
       - ./env/common_python.env
       - ./env/deposit-db.env
     environment:
       VERBOSITY: 3
       SWH_CONFIG_FILENAME: /deposit.yml
       DJANGO_SETTINGS_MODULE: swh.deposit.settings.production
     entrypoint: /entrypoint.sh
     volumes:
       - "./conf/deposit.yml:/deposit.yml:ro"
       - "./services/swh-deposit/entrypoint.sh:/entrypoint.sh:ro"

   swh-vault-db:
     image: postgres:11
     env_file:
       - ./env/vault-db.env
     environment:
       # unset PGHOST as db service crashes otherwise
       PGHOST:

   swh-vault:
     image: swh/stack
     build: ./
     env_file:
       - ./env/common_python.env
       - ./env/vault-db.env
     environment:
       SWH_CONFIG_FILENAME: /vault.yml
     command: server
     ports:
       - 5005:5005
     depends_on:
       - swh-vault-db
       - swh-objstorage
       - swh-storage
       - swh-scheduler
     entrypoint: /entrypoint.sh
     volumes:
       - "./conf/vault.yml:/vault.yml:ro"
       - "./services/swh-vault/entrypoint.sh:/entrypoint.sh:ro"

   swh-vault-worker:
     image: swh/stack
     build: ./
     command: worker
     env_file:
       - ./env/common_python.env
     environment:
       SWH_CONFIG_FILENAME: /cooker.yml
     depends_on:
       - swh-vault
       - swh-storage
     entrypoint: /entrypoint.sh
     volumes:
       - "./conf/vault-worker.yml:/cooker.yml:ro"
       - "./services/swh-vault/entrypoint.sh:/entrypoint.sh:ro"

   # Lister Celery workers
   swh-listers-db:
     image: postgres:11
     env_file:
       - ./env/listers-db.env
     environment:
       # unset PGHOST as db service crashes otherwise
       PGHOST:

   swh-lister:
     image: swh/stack
     build: ./
     env_file:
       - ./env/common_python.env
       - ./env/listers-db.env
       - ./env/workers.env
     user: swh
     environment:
       SWH_WORKER_INSTANCE: listers
       SWH_CONFIG_FILENAME: /lister.yml
     depends_on:
       - swh-listers-db
       - swh-scheduler
       - swh-scheduler-runner
       - swh-storage
       - amqp
     entrypoint: /entrypoint.sh
     volumes:
       - "./conf/lister.yml:/lister.yml:ro"
       - "./services/swh-listers-worker/entrypoint.sh:/entrypoint.sh:ro"

   # Loader + deposit checker Celery workers
   swh-loader:
     image: swh/stack
     build: ./
     env_file:
       - ./env/common_python.env
       - ./env/workers.env
     user: swh
     environment:
       SWH_WORKER_INSTANCE: loader
       SWH_CONFIG_FILENAME: /loader.yml
     entrypoint: /entrypoint.sh
     depends_on:
       - swh-storage
       - swh-scheduler
       - swh-deposit
       - amqp
     volumes:
       - "./conf/loader.yml:/loader.yml:ro"
       - "./services/swh-worker/entrypoint.sh:/entrypoint.sh:ro"

   # Indexer Celery workers
   swh-indexer:
     image: swh/stack
     build: ./
     user: swh
     env_file:
       - ./env/common_python.env
       - ./env/indexers-db.env
       - ./env/workers.env
     environment:
       SWH_WORKER_INSTANCE: indexer
       SWH_CONFIG_FILENAME: /indexer.yml
       CONCURRENCY: 4
     entrypoint: /entrypoint.sh
     depends_on:
       - swh-scheduler-runner
       - swh-idx-storage
       - swh-storage
       - swh-objstorage
       - amqp
     volumes:
       - "./conf/indexer.yml:/indexer.yml:ro"
       - "./services/swh-indexer-worker/entrypoint.sh:/entrypoint.sh:ro"

   # Journal related
   swh-indexer-journal-client:
     image: swh/stack
     build: ./
     entrypoint: /entrypoint.sh
     env_file:
       - ./env/common_python.env
     depends_on:
       - kafka
       - swh-storage
       - swh-scheduler
     volumes:
       - "./conf/indexer_journal_client.yml:/etc/softwareheritage/indexer/journal_client.yml:ro"
       - "./services/swh-indexer-journal-client/entrypoint.sh:/entrypoint.sh:ro"
diff --git a/docker/services/swh-deposit/entrypoint.sh b/docker/services/swh-deposit/entrypoint.sh
index 07b7cf7..e0bff60 100755
--- a/docker/services/swh-deposit/entrypoint.sh
+++ b/docker/services/swh-deposit/entrypoint.sh
@@ -1,36 +1,41 @@
 #!/bin/bash

 set -ex

 source /srv/softwareheritage/utils/pyutils.sh
 setup_pip

 source /srv/softwareheritage/utils/pgsql.sh
 setup_pgsql

 if [ "$1" = 'shell' ] ; then
-    exec bash -i
+    shift
+    if (( $# == 0)); then
+        exec bash -i
+    else
+        "$@"
+    fi
 else
     wait_pgsql

     echo "Migrating db"
     django-admin migrate --settings=${DJANGO_SETTINGS_MODULE}

     swh-deposit admin user exists test || \
         swh-deposit admin user create \
             --username test \
             --password test \
             --provider-url https://softwareheritage.org \
             --domain softwareheritage.org

     echo "starting swh-deposit server"
     exec gunicorn --bind 0.0.0.0:5006 \
         --reload \
         --threads 2 \
         --workers 2 \
         --log-level DEBUG \
         --timeout 3600 \
         --config 'python:swh.core.api.gunicorn_config' \
         'django.core.wsgi:get_wsgi_application()'
 fi
diff --git a/docker/tests/run_tests.sh b/docker/tests/run_tests.sh
index 06e774c..e8fe3e8 100755
--- a/docker/tests/run_tests.sh
+++ b/docker/tests/run_tests.sh
@@ -1,182 +1,182 @@
 #!/bin/bash

 # Main script to run high level tests on the Software Heritage stack

 # Use a temporary directory as working directory
 WORKDIR=/tmp/swh-docker-dev_tests
 # Create it if it does not exist
 mkdir $WORKDIR 2>/dev/null
 # Ensure it is empty before running the tests
 rm -rf $WORKDIR/*

 # We want the script to exit at the first encountered error
 set -e

 # Get test scripts directory
 TEST_SCRIPTS_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)

 # Set the docker-compose.yml file to use
 export COMPOSE_FILE=$TEST_SCRIPTS_DIR/../docker-compose.yml

 # Useful global variables
-SWH_WEB_API_BASEURL="http://localhost:5004/api/1"
+SWH_WEB_API_BASEURL="http://localhost:5080/api/1"
 CURRENT_TEST_SCRIPT=""

 # Colored output related variables and functions (only if stdout is a terminal)
 if test -t 1; then
     GREEN='\033[0;32m'
     RED='\033[0;31m'
     NC='\033[0m'
 else
     DOCO_OPTIONS='--no-ansi'
 fi

 # Remove previously dumped service logs file if any
 rm -f $TEST_SCRIPTS_DIR/swh-docker-compose.logs

 function colored_output {
     local msg="$2"
     if [ "$CURRENT_TEST_SCRIPT" != "" ]; then
         msg="[$CURRENT_TEST_SCRIPT] $msg"
     fi
     echo -e "${1}${msg}${NC}"
 }

 function status_message {
     colored_output ${GREEN} "$1"
 }

 function error_message {
     colored_output ${RED} "$1"
 }

 function dump_docker_logs {
     error_message "Dumping logs for all services in file $TEST_SCRIPTS_DIR/swh-docker-compose.logs"
     docker-compose logs > $TEST_SCRIPTS_DIR/swh-docker-compose.logs
 }

 # Exit handler that will get called when this script terminates
 function finish {
     if [ $? -ne 0 ] && [ "$CURRENT_TEST_SCRIPT" != "" ]; then
         local SCRIPT_NAME=$CURRENT_TEST_SCRIPT
         CURRENT_TEST_SCRIPT=""
         error_message "An error occurred when running test script ${SCRIPT_NAME}"
         dump_docker_logs
     fi
     docker-compose $DOCO_OPTIONS down
     rm -rf $WORKDIR
 }
 trap finish EXIT

 # Docker-compose events listener that will be executed in background
 # Parameters:
 #   $1: PID of parent process
 function listen_docker_events {
     docker-compose $DOCO_OPTIONS events | while read event
     do
         service=$(echo $event | cut -d " " -f7 | sed 's/^name=swh-docker-dev_\(.*\)_1)/\1/')
         event_type=$(echo $event | cut -d ' ' -f4)
         # "docker-compose down" has been called, exiting this child process
         if [ "$event_type" = "kill" ] ; then
             exit
         # a swh service crashed, sending signal to parent process to exit with error
         elif [ "$event_type" = "die" ]; then
             if [[ "$service" =~ ^swh.* ]]; then
                 exit_code=$(docker-compose ps | grep $service | awk '{print $4}')
                 if [ "$exit_code" != "0" ]; then
                     error_message "Service $service died unexpectedly, exiting"
                     dump_docker_logs
                     kill -s SIGUSR1 $1; exit
                 fi
             fi
         fi
     done
 }
 trap "exit 1" SIGUSR1

 declare -A SERVICE_LOGS_NB_LINES_READ

 # Function to wait for a specific string to be outputted in a specific
 # docker-compose service logs.
 # When called multiple times on the same service, only the newly outputted
 # logs since the last call will be processed.
 # Parameters:
 #   $1: a timeout value in seconds to stop waiting and exit with error
 #   $2: docker-compose service name
 #   $3: the string to look for in the produced logs
 function wait_for_service_output {
     local nb_lines_to_skip=0
     if [[ -v "SERVICE_LOGS_NB_LINES_READ[$2]" ]]; then
         let nb_lines_to_skip=${SERVICE_LOGS_NB_LINES_READ[$2]}+1
     fi
     SECONDS=0
     local service_logs=$(docker-compose $DOCO_OPTIONS logs $2 | tail -n +$nb_lines_to_skip)
     until echo -ne "$service_logs" | grep -m 1 "$3" >/dev/null ; do
         sleep 1;
         if (( $SECONDS > $1 )); then
             error_message "Could not find pattern \"$3\" in $2 service logs after $1 seconds"
             exit 1
         fi
         let nb_lines_to_skip+=$(echo -ne "$service_logs" | wc -l)
         service_logs=$(docker-compose $DOCO_OPTIONS logs $2 | tail -n +$nb_lines_to_skip)
     done
     let nb_lines_to_skip+=$(echo -ne "$service_logs" | wc -l)
     SERVICE_LOGS_NB_LINES_READ[$2]=$nb_lines_to_skip
 }

 # Function to make an HTTP request and get its response.
 # It should be used the following way:
 #   response=$(http_request <method> <url>)
 # Parameters:
 #   $1: http method name (GET, POST, ...)
 #   $2: request url
 function http_request {
     local response=$(curl -sS -X $1 $2)
     echo $response
 }

 # Function to check that an HTTP request ends up with no errors.
 # If the HTTP response code is different from 200, an error will
 # be raised and the main script will terminate.
 # Parameters:
 #   $1: http method name (GET, POST, ...)
 #   $2: request url
 function http_request_check {
     curl -sSf -X $1 $2 > /dev/null
 }

 # Function to run the content of a script dedicated to test a specific
 # part of the Software Heritage stack.
 function run_test_script {
     local SCRIPT_NAME=$(basename $1)
     status_message "Executing test script $SCRIPT_NAME"
     CURRENT_TEST_SCRIPT=$SCRIPT_NAME
     source $1
 }

 # Move to work directory
 cd $WORKDIR

 # Start the docker-compose event handler as a background process
 status_message "Starting docker-compose events listener"
 listen_docker_events $$ &

 # Start the docker-compose environment including the full Software Heritage stack
 status_message "Starting swh docker-compose environment"
 docker-compose $DOCO_OPTIONS up -d

 # Ensure all swh services are up before running tests
 status_message "Waiting for swh services to be up"
 docker-compose $DOCO_OPTIONS exec -T swh-storage wait-for-it localhost:5002 -s --timeout=0
 docker-compose $DOCO_OPTIONS exec -T swh-objstorage wait-for-it localhost:5003 -s --timeout=0
 docker-compose $DOCO_OPTIONS exec -T swh-web wait-for-it localhost:5004 -s --timeout=0
 docker-compose $DOCO_OPTIONS exec -T swh-vault wait-for-it localhost:5005 -s --timeout=0
 docker-compose $DOCO_OPTIONS exec -T swh-deposit wait-for-it localhost:5006 -s --timeout=0
 docker-compose $DOCO_OPTIONS exec -T swh-idx-storage wait-for-it localhost:5007 -s --timeout=0
 docker-compose $DOCO_OPTIONS exec -T swh-scheduler wait-for-it localhost:5008 -s --timeout=0

 # Execute test scripts
 for test_script in $TEST_SCRIPTS_DIR/test_*; do
     run_test_script ${test_script}
     CURRENT_TEST_SCRIPT=""
 done
diff --git a/docker/tests/test_deposit.py b/docker/tests/test_deposit.py
new file mode 100644
index 0000000..be0bf3d
--- /dev/null
+++ b/docker/tests/test_deposit.py
@@ -0,0 +1,143 @@
+# Copyright (C) 2019-2020 The Software Heritage developers
+# See the AUTHORS file at the top-level directory of this distribution
+# License: GNU General Public License version 3, or any later version
+# See top-level LICENSE file for more information
+
+import json
+import subprocess
+import time
+
+import pytest
+import testinfra
+
+
+SAMPLE_METADATA = '''\
+<?xml version="1.0" encoding="utf-8"?>
+<entry xmlns="http://www.w3.org/2005/Atom"
+       xmlns:codemeta="https://doi.org/10.5063/SCHEMA/CODEMETA-2.0">
+    <title>Test Software</title>
+    <client>swh</client>
+    <external_identifier>test-software</external_identifier>
+    <codemeta:author>
+        <codemeta:name>No One</codemeta:name>
+    </codemeta:author>
+</entry>
+'''
+
+
+# scope='session' so we use the same container for all the tests;
+@pytest.fixture(scope='session')
+def deposit_host(request):
+    # start the whole cluster
+    subprocess.check_output(['docker-compose', 'up', '-d'])
+    # run a container in which test commands are executed
+    docker_id = subprocess.check_output(
+        ['docker-compose', 'run', '-d',
+         'swh-deposit', 'shell', 'sleep', '1h']).decode().strip()
+    deposit_host = testinfra.get_host("docker://" + docker_id)
+    deposit_host.check_output(
+        'echo \'print("Hello World!")\n\' > /tmp/hello.py')
+    deposit_host.check_output(
+        'tar -C /tmp -czf /tmp/archive.tgz /tmp/hello.py')
+    deposit_host.check_output(
+        f'echo \'{SAMPLE_METADATA}\' > /tmp/metadata.xml')
+    deposit_host.check_output('wait-for-it swh-deposit:5006 -t 30')
+    # return a testinfra connection to the container
+    yield deposit_host
+
+    # at the end of the test suite, destroy the container
+    subprocess.check_call(['docker', 'rm', '-f', docker_id])
+    # and the whole cluster
+    subprocess.check_call(['docker-compose', 'down'])
+
+
+def test_admin_collection(deposit_host):
+    # 'deposit_host' binds to the container
+    assert deposit_host.check_output(
+        'swh deposit admin collection list') == 'test'
+
+
+def test_admin_user(deposit_host):
+    assert deposit_host.check_output('swh deposit admin user list') == 'test'
+
+
+def test_create_deposit_simple(deposit_host):
+    deposit = deposit_host.check_output(
+        'swh deposit upload --format json --username test --password test '
+        '--url http://nginx:5080/deposit/1 '
+        '--archive /tmp/archive.tgz '
+        '--name test_deposit --author somebody')
+    deposit = json.loads(deposit)
+
+    assert set(deposit.keys()) == {'deposit_id', 'deposit_status',
+                                   'deposit_status_detail', 'deposit_date'}
+    assert deposit['deposit_status'] == 'deposited'
+    deposit_id = deposit['deposit_id']
+
+    for i in range(60):
+        status = json.loads(deposit_host.check_output(
+            'swh deposit status --format json --username test --password test '
+            '--url http://nginx:5080/deposit/1 --deposit-id %s' % deposit_id))
+        if status['deposit_status'] == 'done':
+            break
+        time.sleep(1)
+    else:
+        assert False, "Deposit loading failed"
+
+
+def test_create_deposit_with_metadata(deposit_host):
+    deposit = deposit_host.check_output(
+        'swh deposit upload --format json --username test --password test '
+        '--url http://nginx:5080/deposit/1 '
+        '--archive /tmp/archive.tgz '
+        '--metadata /tmp/metadata.xml')
+    deposit = json.loads(deposit)
+
+    assert set(deposit.keys()) == {'deposit_id', 'deposit_status',
+                                   'deposit_status_detail', 'deposit_date'}
+    assert deposit['deposit_status'] == 'deposited'
+    deposit_id = deposit['deposit_id']
+
+    for i in range(60):
+        status = json.loads(deposit_host.check_output(
+            'swh deposit status --format json --username test --password test '
+            '--url http://nginx:5080/deposit/1 --deposit-id %s' % deposit_id))
+        if status['deposit_status'] == 'done':
+            break
+        time.sleep(1)
+    else:
+        assert False, "Deposit loading failed"
+
+
+def test_create_deposit_multipart(deposit_host):
+    deposit = deposit_host.check_output(
+        'swh deposit upload --format json --username test --password test '
+        '--url http://nginx:5080/deposit/1 '
+        '--archive /tmp/archive.tgz '
+        '--partial')
+    deposit = json.loads(deposit)
+
+    assert set(deposit.keys()) == {'deposit_id', 'deposit_status',
+                                   'deposit_status_detail', 'deposit_date'}
+    assert deposit['deposit_status'] == 'partial'
+    deposit_id = deposit['deposit_id']
+
+    deposit = deposit_host.check_output(
+        'swh deposit upload --format json --username test --password test '
+        '--url http://nginx:5080/deposit/1 '
+        '--metadata /tmp/metadata.xml '
+        '--deposit-id %s'
+        % deposit_id)
+    deposit = json.loads(deposit)
+    assert deposit['deposit_status'] == 'deposited'
+    assert deposit['deposit_id'] == deposit_id
+
+    for i in range(60):
+        status = json.loads(deposit_host.check_output(
+            'swh deposit status --format json --username test --password test '
+            '--url http://nginx:5080/deposit/1 --deposit-id %s' % deposit_id))
+        if status['deposit_status'] == 'done':
+            break
+        time.sleep(1)
+    else:
+        assert False, "Deposit loading failed; current status is %s" % status
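
For reference, a rough sketch of how the new pieces might be exercised by hand. It assumes the commands are run from the docker/ directory of this repository (where docker-compose.yml lives) and that pytest and testinfra are installed on the host; neither the working directory nor the exact flags are mandated by this patch.

    # Run the new end-to-end deposit tests; the session-scoped fixture above
    # brings the whole compose stack up and tears it down again by itself.
    pytest tests/test_deposit.py

    # The reworked swh-deposit entrypoint now forwards any arguments given
    # after "shell" instead of always starting an interactive bash, which is
    # what the fixture relies on to keep a long-lived helper container around:
    docker-compose run -d swh-deposit shell sleep 1h
    docker-compose run swh-deposit shell    # no extra arguments: interactive bash, as before

    # With the nginx and port changes, everything is reachable through port
    # 5080, e.g. the API base URL used by run_tests.sh:
    curl -sS http://localhost:5080/api/1/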