[fix] better logging of wcce-handler #105
name: arm64_tests
on:
  push:
    branches:
      - "devel"
jobs:
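  # instance_start: boot the (arm64) EC2 test host via the aws_ec_mgr.py helper script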
  instance_start:
    name: instance_start
    runs-on: ubuntu-latest
    steps:
      - name: "install awscli"
        run: |
          sudo apt-get update
          pip3 install awscli --upgrade --user
          pip3 install boto3 --upgrade --user
          export PATH=$PATH:$HOME/.local/bin
      - name: "configure awscli"
        run: |
          aws --version
          aws configure set aws_access_key_id ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws configure set aws_secret_access_key ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws configure set default.region ${{ secrets.AWS_REGION }}
      - name: "check instance status (expect stopped)"
        run: |
          wget https://raw.githubusercontent.com/grindsa/aws_ec2_mgr/main/aws_ec_mgr.py
          chmod a+rx ./aws_ec_mgr.py
          python3 ./aws_ec_mgr.py -a state -r ${{ secrets.AWS_REGION }} -i ${{ secrets.AWS_INSTANCE_ID }} | grep -i "stopped"
      - name: "start instance"
        run: |
          python3 ./aws_ec_mgr.py -a start -r ${{ secrets.AWS_REGION }} -i ${{ secrets.AWS_INSTANCE_ID }}
      - name: "[ WAIT ] Sleep for 10s"
        uses: juliangruber/sleep-action@v2.0.3
        with:
          time: 10s
      - name: "check instance status (expect running)"
        run: |
          python3 ./aws_ec_mgr.py -a state -r ${{ secrets.AWS_REGION }} -i ${{ secrets.AWS_INSTANCE_ID }} | grep -i "running"
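  # build_test: cross-build the arm64 container images (apache2/nginx x wsgi/django) and test them on the remote host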
  build_test:
    name: build_test
    runs-on: ubuntu-latest
    needs: instance_start
    strategy:
      fail-fast: false
      matrix:
        websrv: ['apache2', 'nginx']
        dbhandler: ['wsgi', 'django']
    steps:
      - name: "checkout GIT"
        uses: actions/checkout@v4
      - name: "Retrieve Version from version.py"
        run: |
          echo TAG_NAME=$(cat acme_srv/version.py | grep -i __version__ | head -n 1 | sed 's/__version__ = //g' | sed s/\'//g) >> $GITHUB_ENV
          echo UUID=$(uuidgen) >> $GITHUB_ENV
      - run: echo "Repo is at version ${{ env.TAG_NAME }}"
      - run: echo "UUID ${{ env.UUID }}"
- name: "Prepare ssh environment in ramdisk" | |
run: | | |
sudo mkdir -p /tmp/rd | |
sudo mount -t tmpfs -o size=5M none /tmp/rd | |
sudo echo "$SSH_KEY" > /tmp/rd/ak.tmp | |
sudo chmod 600 /tmp/rd/ak.tmp | |
sudo echo "$KNOWN_HOSTS" > /tmp/rd/known_hosts | |
env: | |
SSH_KEY: ${{ secrets.AWS_SSH_KEY }} | |
KNOWN_HOSTS: ${{ secrets.AWS_SSH_KNOWN_HOSTS }} | |
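      # Cross-build the linux/arm64 image on the x86_64 runner via QEMU emulation and buildx, then export it as a tarball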
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
        with:
          platforms: 'arm64'
      - uses: docker/setup-buildx-action@v3
        with:
          version: latest
          buildkitd-flags: --debug
      - name: Build
        uses: docker/build-push-action@v5
        with:
          load: true
          tags: grindsa/acme2certifier:${{ matrix.websrv }}-${{ matrix.dbhandler }}-${{ env.UUID }}
          file: examples/Docker/${{ matrix.websrv }}/${{ matrix.dbhandler }}/Dockerfile
          platforms: linux/arm64
      - name: "Check if image is built"
        run: |
          docker image save -o /tmp/a2c-image-$WEB_SRV-$DB_HANDLER.tar grindsa/acme2certifier:$WEB_SRV-$DB_HANDLER-$UUID
          ls -la /tmp/a2c-image-$WEB_SRV-$DB_HANDLER.tar
        env:
          WEB_SRV: ${{ matrix.websrv }}
          DB_HANDLER: ${{ matrix.dbhandler }}
          UUID: ${{ env.UUID }}
      - name: "Compress image"
        run: |
          gzip /tmp/a2c-image-$WEB_SRV-$DB_HANDLER.tar
          ls -la /tmp/a2c-image-$WEB_SRV-$DB_HANDLER.tar.gz
        env:
          WEB_SRV: ${{ matrix.websrv }}
          DB_HANDLER: ${{ matrix.dbhandler }}
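      # Ship the compressed image to the remote arm64 host over SSH and load it into its local Docker daemon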
- name: "Create working directory on remote host" | |
run: sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts mkdir -p /tmp/a2c/$UUID | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
UUID: ${{ env.UUID }} | |
- name: "Copy image to remote host" | |
run: sudo scp -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts /tmp/a2c-image-$WEB_SRV-$DB_HANDLER.tar.gz $SSH_USER@$SSH_HOST:/tmp/a2c/$UUID/ | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
WEB_SRV: ${{ matrix.websrv }} | |
DB_HANDLER: ${{ matrix.dbhandler }} | |
UUID: ${{ env.UUID }} | |
- name: "Unpack image on remote host" | |
run: sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts gunzip /tmp/a2c/$UUID/a2c-image-$WEB_SRV-$DB_HANDLER.tar.gz | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
WEB_SRV: ${{ matrix.websrv }} | |
DB_HANDLER: ${{ matrix.dbhandler }} | |
UUID: ${{ env.UUID }} | |
- name: "Load image on remote host" | |
run: sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker load < /tmp/a2c/$UUID/a2c-image-$WEB_SRV-$DB_HANDLER.tar" | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
WEB_SRV: ${{ matrix.websrv }} | |
DB_HANDLER: ${{ matrix.dbhandler }} | |
UUID: ${{ env.UUID }} | |
- name: "Prepare and data package" | |
run: | | |
sudo mkdir -p /tmp/data/acme_ca/certs | |
sudo cp test/ca/sub-ca-key.pem test/ca/sub-ca-crl.pem test/ca/sub-ca-cert.pem test/ca/root-ca-cert.pem /tmp/data/acme_ca/ | |
sudo cp .github/openssl_ca_handler.py_acme_srv_choosen_handler.cfg /tmp/data/acme_srv.cfg | |
sudo cp .github/acme2certifier.pem /tmp/data/acme2certifier.pem | |
sudo cp .github/django_settings.py /tmp/data/settings.py | |
sudo cp .github/acme2certifier_cert.pem /tmp/data/acme2certifier_cert.pem | |
sudo cp .github/acme2certifier_key.pem /tmp/data/acme2certifier_key.pem | |
- name: "Copy data package to remote host" | |
run: sudo scp -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts -r /tmp/data $SSH_USER@$SSH_HOST:/tmp/a2c/$UUID/ | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
WEB_SRV: ${{ matrix.websrv }} | |
DB_HANDLER: ${{ matrix.dbhandler }} | |
UUID: ${{ env.UUID }} | |
- run: echo "Image name - grindsa/acme2certifier:$WEB_SRV-$DB_HANDLER-$UUID" | |
env: | |
WEB_SRV: ${{ matrix.websrv }} | |
DB_HANDLER: ${{ matrix.dbhandler }} | |
UUID: ${{ env.UUID }} | |
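      # Run the freshly built image on the remote host inside a per-run Docker network named after the UUID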
- name: "Start container on remote host" | |
run: | | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker network create $UUID" | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker run -d --rm -id --platform linux/arm64 --network $UUID --name=acme-srv-$UUID -v /tmp/a2c/$UUID/data:/var/www/acme2certifier/volume/ grindsa/acme2certifier:$WEB_SRV-$DB_HANDLER-$UUID" | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
WEB_SRV: ${{ matrix.websrv }} | |
DB_HANDLER: ${{ matrix.dbhandler }} | |
UUID: ${{ env.UUID }} | |
- name: "Sleep for 5s" | |
uses: juliangruber/sleep-action@v2.0.3 | |
with: | |
time: 5s | |
- name: "Test http://acme-srv/directory internally" | |
run: sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker run -i --rm --network $UUID curlimages/curl -f http://acme-srv-$UUID/directory" | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
UUID: ${{ env.UUID }} | |
- name: "Test if https://acme-srv/directory internally" | |
run: sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker run -i --rm --network $UUID curlimages/curl --insecure -f https://acme-srv-$UUID/directory" | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
UUID: ${{ env.UUID }} | |
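      # Enrollment and revocation tests with three ACME clients (acme.sh, Certbot, lego) against the containerized server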
- name: "acme.sh enroll" | |
run: | | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "mkdir -p /tmp/a2c/$UUID/acme-sh" | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker run --rm -id -v /tmp/a2c/$UUID/acme-sh:/acme.sh --network $UUID --name=acme-sh-$UUID neilpang/acme.sh:latest daemon" | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker exec -i acme-sh-$UUID acme.sh --server http://acme-srv-$UUID --accountemail 'acme-sh@example.com' --issue -d acme-sh-$UUID --standalone --debug 3 --output-insecure --force" | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
UUID: ${{ env.UUID }} | |
- name: "acme.sh revoke" | |
run: | | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker exec -i acme-sh-$UUID acme.sh --server http://acme-srv-$UUID --revoke -d acme-sh-$UUID --standalone --debug 3 --output-insecure" | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
UUID: ${{ env.UUID }} | |
- name: "Certbot enroll" | |
run: | | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "mkdir -p /tmp/a2c/$UUID/certbot" | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker run -i --rm --name certbot-$UUID --network $UUID -v /tmp/a2c/$UUID/certbot:/etc/letsencrypt/ certbot/certbot register --agree-tos -m 'certbot@example.com' --server http://acme-srv-$UUID --no-eff-email" | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker run -i --rm --name certbot-$UUID --network $UUID -v /tmp/a2c/$UUID/certbot:/etc/letsencrypt/ certbot/certbot certonly --server http://acme-srv-$UUID --standalone --preferred-challenges http -d certbot-$UUID --cert-name certbot-$UUID" | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
UUID: ${{ env.UUID }} | |
- name: "Certbot revoke" | |
run: | | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker run -i --rm --name certbot-$UUID --network $UUID -v /tmp/a2c/$UUID/certbot:/etc/letsencrypt/ certbot/certbot revoke --delete-after-revoke --server http://acme-srv-$UUID -d certbot-$UUID --cert-name certbot-$UUID" | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
UUID: ${{ env.UUID }} | |
- name: "Lego enroll" | |
run: | | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "mkdir -p /tmp/a2c/$UUID/lego" | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker run -i -v /tmp/a2c/$UUID/lego:/.lego/ --rm --name lego-$UUID --network $UUID goacme/lego -s http://acme-srv-$UUID/directory -a --email lego@example.com -d lego-$UUID --http run" | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
UUID: ${{ env.UUID }} | |
- name: "Lego revoke" | |
run: | | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker run -i -v /tmp/a2c/$UUID/lego:/.lego/ --rm --name lego-$UUID --network $UUID goacme/lego -s http://acme-srv-$UUID -a --email "lego@example.com" -d lego-$UUID revoke" | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
UUID: ${{ env.UUID }} | |
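      # Tear down the test containers, network, image and working directory on the remote host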
- name: "Cleanup on remote host" | |
run: | | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker stop acme-sh-$UUID" | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker stop acme-srv-$UUID" | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker network rm $UUID" | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "docker image rm grindsa/acme2certifier:$WEB_SRV-$DB_HANDLER-$UUID" | |
sudo ssh $SSH_USER@$SSH_HOST -i /tmp/rd/ak.tmp -o UserKnownHostsFile=/tmp/rd/known_hosts "sudo rm -rf /tmp/a2c/$UUID" | |
env: | |
SSH_USER: ${{ secrets.AWS_SSH_USER }} | |
SSH_HOST: ${{ secrets.AWS_SSH_HOST }} | |
WEB_SRV: ${{ matrix.websrv }} | |
DB_HANDLER: ${{ matrix.dbhandler }} | |
UUID: ${{ env.UUID }} | |
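  # instance_stop: shut the EC2 test host down again once all build_test matrix jobs have finished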
  instance_stop:
    name: instance_stop
    runs-on: ubuntu-latest
    needs: build_test
    steps:
      - name: "install awscli"
        run: |
          sudo apt-get update
          pip3 install awscli --upgrade --user
          pip3 install boto3 --upgrade --user
          export PATH=$PATH:$HOME/.local/bin
      - name: "configure awscli"
        run: |
          aws --version
          aws configure set aws_access_key_id ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws configure set aws_secret_access_key ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws configure set default.region ${{ secrets.AWS_REGION }}
      - name: "stop instance"
        run: |
          wget https://raw.githubusercontent.com/grindsa/aws_ec2_mgr/main/aws_ec_mgr.py
          chmod a+rx ./aws_ec_mgr.py
          python3 ./aws_ec_mgr.py -a stop -r ${{ secrets.AWS_REGION }} -i ${{ secrets.AWS_INSTANCE_ID }}
          python3 ./aws_ec_mgr.py -a state -r ${{ secrets.AWS_REGION }} -i ${{ secrets.AWS_INSTANCE_ID }}