-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy path.gitlab-ci.yml
executable file
·1067 lines (1027 loc) · 35.9 KB
/
.gitlab-ci.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
### Global variables inherited by every job in this pipeline.
variables:
  ### AWS
  AWS_REGION: us-east-1
  ### Application
  # API endpoints the React client is built against (exported as REACT_APP_API_URL).
  LOCAL_API_URL: http://localhost:5000/posts
  REVIEW_API_URL: https://review.devops-product-hunting.com:5000/posts
  PROD_API_URL: https://devops-product-hunting.com:5000/posts
  ### Environments
  # Base URLs registered on the GitLab "review" and "production" environments.
  REVIEW_URL: https://review.devops-product-hunting.com
  PROD_URL: https://devops-product-hunting.com
  ### FinOps
  # Terraform directory that Infracost analyses during merge requests.
  TF_ROOT: infrastructure/prod
  ### Project CI/CD variables
  # Defined in the GitLab project settings (not in this file); listed here
  # for reference with their Protected/Masked flags.
  # AWS_ACCESS_KEY_ID - Protected
  # AWS_ACCOUNT_ID - Protected
  # AWS_SECRET_ACCESS_KEY - Protected/Masked
  # GITLAB_TOKEN - Masked
  # INFRACOST_API_KEY - Masked
  # PUBLIC_IP - Protected/Masked
  # POSTGRES_DB - Protected
  # POSTGRES_PASSWORD - Protected/Masked
  # POSTGRES_USER - Protected
  # PRODUCT_HUNT_API_ACCESS_TOKEN - Protected/Masked
  # SQLIZER_API_KEY - Protected/Masked
### Stage order. The first seven stages serve merge requests and the review
### deployment on main; the "prod *" stages run the production rollout, which
### is gated behind the manual "push to prod" job.
stages:
  - prerequisites
  - finops
  - infrastructure
  - build
  - test
  - release
  - deploy
  - prod prerequisites
  - prod infrastructure
  - prod build
  - prod release
  - prod deploy
  - prod operate
  - prod monitor
  - prod finops
  - prod destroy
### ###
### Dev - Merge request
### ###
# Post an Infracost cost-change comment on the merge request by comparing the
# prod Terraform (TF_ROOT) on this branch against the MR target branch.
infracost on prod infra:
  stage: finops
  image:
    name: infracost/infracost:ci-0.10
    entrypoint: [""]
  only:
    - merge_requests
  script:
    # Clone the base branch of the pull request into a temp directory.
    - git clone $CI_REPOSITORY_URL --branch=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME --single-branch /tmp/base
    ### Generate an Infracost cost snapshot from the comparison branch, so that Infracost can compare the cost difference.
    - |
      infracost breakdown \
        --path=/tmp/base/${TF_ROOT} \
        --format=json \
        --out-file=infracost-base.json
    ### Generate an Infracost diff and save it to a JSON file.
    - |
      infracost diff \
        --path=${TF_ROOT} \
        --compare-to=infracost-base.json \
        --format=json \
        --out-file=infracost.json
    ### Post a comment to the PR using the 'update' behavior.
    # Needs GITLAB_TOKEN (project CI/CD variable) to write the MR comment.
    - |
      infracost comment gitlab \
        --path=infracost.json \
        --repo=$CI_PROJECT_PATH \
        --merge-request=$CI_MERGE_REQUEST_IID \
        --gitlab-server-url=$CI_SERVER_URL \
        --gitlab-token=$GITLAB_TOKEN \
        --behavior=update
# Install server and client npm dependencies. node_modules are cached per
# pipeline (key includes $CI_PIPELINE_ID) so the "smoke test" job reuses them.
build client-server app:
  stage: build
  image: alpine:3.16
  only:
    - merge_requests
  cache:
    - key: DEV-NODE-MODULES-$CI_PIPELINE_ID
      paths:
        - $CI_PROJECT_DIR/application/backend/api/node_modules/
        - $CI_PROJECT_DIR/application/frontend/client/node_modules/
  before_script:
    - apk add --no-cache nodejs npm
  script:
    ### ### Server-side
    - cd $CI_PROJECT_DIR/application/backend/api
    ### Base dependencies
    - npm install express pg cors
    ### OpenTelemetry dependencies
    - npm install --save @opentelemetry/api @opentelemetry/sdk-node @opentelemetry/auto-instrumentations-node @opentelemetry/exporter-jaeger
    ### ### Client-side
    - cd $CI_PROJECT_DIR/application/frontend/client
    - npm install --save react-icons bootswatch
# End-to-end smoke test: seed a job-local Postgres service with sample data,
# start the API and the React client in the background, then scrape the page
# with Selenium and assert the top-ranked post rendered.
smoke test:
  stage: test
  image: alpine:3.16
  services:
    - postgres:alpine3.16
  variables:
    # Throwaway credentials for the job-local Postgres service container only.
    POSTGRES_USER: test_user
    POSTGRES_PASSWORD: test_password
    POSTGRES_DB: test_db
  only:
    - merge_requests
  cache:
    - key: DEV-NODE-MODULES-$CI_PIPELINE_ID
      paths:
        - $CI_PROJECT_DIR/application/backend/api/node_modules/
        - $CI_PROJECT_DIR/application/frontend/client/node_modules/
  before_script:
    - apk add --no-cache postgresql-client nodejs npm
    ### Packages for Selenium
    - apk add --no-cache python3 py3-pip chromium-chromedriver gcc python3-dev libffi-dev musl-dev
    - pip3 install selenium
  script:
    ### Push .sql file to Postgres DB
    - cd $CI_PROJECT_DIR/merge-request
    - export PGPASSWORD="$POSTGRES_PASSWORD"
    - psql -h postgres -U $POSTGRES_USER -d $POSTGRES_DB -f sample_posts.sql
    ### Launch server API
    - cd $CI_PROJECT_DIR/application/backend/api
    - export POSTGRES_HOST="postgres"
    - export POSTGRES_PORT=5432
    - node --require './tracing.js' index.js &
    ### Launch client Web application
    - cd $CI_PROJECT_DIR/application/frontend/client
    - export REACT_APP_API_URL="$LOCAL_API_URL"
    - npm start &
    ### Wait for application to start
    # NOTE(review): fixed sleep is a startup race; polling the URL until it
    # responds would be sturdier.
    - sleep 15
    ### Smoke test with Selenium
    - cd $CI_PROJECT_DIR/test/smoke-test
    - python3 extractWebPage.py
    # Job fails (grep exit 1) if the first-ranked post is missing from the page.
    - cat page.html | grep "Rank 1"
  artifacts:
    paths:
      - test/smoke-test/page.html
### Pull in GitLab's managed Code Quality job template.
include:
  - template: Code-Quality.gitlab-ci.yml
# Customize the included code_quality job: merge requests only, HTML report.
code_quality:
  ### Execute this job only during merge requests
  rules:
    - if: $CODE_QUALITY_DISABLED
      when: never
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
  variables:
    REPORT_FORMAT: html
  artifacts:
    paths: [gl-code-quality-report.html]
### ###
### Review
### ###
# Check that the Terraform remote-state backend exists (DynamoDB lock table
# and S3 state bucket). A failure triggers "apply state locking" below.
verify state locking:
  stage: prerequisites
  image:
    name: hashicorp/terraform:1.2.2
    entrypoint: [""]
  only:
    - main
  before_script:
    - apk add --no-cache python3 py3-pip
    - pip3 install awscli
    - aws configure set region $AWS_REGION
  script:
    ### Verify if state-lock table exists (i.e. if state lock applied)
    - aws dynamodb describe-table --table-name product-hunting-terraform-state-lock
    ### Verify is S3 state storage exists
    - aws s3 ls product-hunting-terraform-state
# Create the state-lock resources via Terraform, only when the job above failed.
apply state locking:
  stage: prerequisites
  needs: ["verify state locking"]
  when: on_failure
  image:
    name: hashicorp/terraform:1.2.2
    entrypoint: [""]
  only:
    - main
  before_script:
    - apk add --no-cache python3 py3-pip
    - pip3 install awscli
    - aws configure set region $AWS_REGION
  script:
    - cd ./state-lock
    - terraform init
    - terraform apply -auto-approve
# Provision the shared (review) infrastructure with Terraform.
create main infra:
  stage: infrastructure
  image:
    name: hashicorp/terraform:1.2.2
    entrypoint: [""]
  only:
    - main
  before_script:
    - apk add --no-cache python3 py3-pip
    - pip3 install awscli
    - aws configure set region $AWS_REGION
  script:
    - cd ./infrastructure/main
    - terraform init
    - terraform apply -auto-approve
# Fetch the top posts from the Product Hunt API and convert them to a .sql
# seed file (cached under REVIEW-POSTGRES for the release job). A second cache
# (API-LIMITATION) persists the last API call's timestamp across pipelines.
store top posts in sql file:
  stage: build
  image: alpine:3.16
  only:
    - main
  cache:
    - key: REVIEW-POSTGRES
      paths:
        - $CI_PROJECT_DIR/application/backend/worker/posts.sql
    - key: API-LIMITATION
      paths:
        - $CI_PROJECT_DIR/application/backend/worker/api_last_use.txt
  before_script:
    ### ### Product Hunt API limits large requests to every 15 minutes
    ### Stop if API limitation still running
    # exit 0 ends the job successfully without running the script section,
    # keeping the previously cached posts.sql.
    - |
      if [ -f $CI_PROJECT_DIR/application/backend/worker/api_last_use.txt ]; then
        API_LIMIT_EXPIRE=$(($(cat $CI_PROJECT_DIR/application/backend/worker/api_last_use.txt) + 900))
        NOW=$(date "+%s")
        if [ $NOW -lt $API_LIMIT_EXPIRE ]; then
          exit 0
        fi
      fi
    ### Install dependencies
    - apk add --no-cache python3 py3-pip
    - pip3 install sqlizer-io-client
  script:
    - cd ./application/backend/worker
    ### GET top 500 most voted posts from Product Hunt API (output .json file)
    - export API_ACCESS_TOKEN=$PRODUCT_HUNT_API_ACCESS_TOKEN
    - python3 getTopPosts.py
    ### Define API last use by adding actual timestamp (needed for API limitation)
    - date '+%s' > api_last_use.txt
    ### Delete emojis inside .json file
    # Strips \uXXXX escape sequences so the SQL conversion does not choke.
    - sed -i -e 's/\(\\u\).\{4\}//g' posts.json
    ### Convert JSON to SQL
    - export API_KEY=$SQLIZER_API_KEY
    - python3 convertJsonToSql.py
    ### Reformat fields name & add id field
    - sed -i 's/list_node_//g' posts.sql
    - sed -i '2s/^/    "id" SERIAL PRIMARY KEY,\n/' posts.sql
    - sed -i "s/('/(DEFAULT,'/g" posts.sql
# Build the API dependencies and the production React bundle for the review
# environment (REACT_APP_API_URL points at the review API).
build project:
  stage: build
  image: alpine:3.16
  only:
    - main
  cache:
    - key: REVIEW-NODE-MODULES-$CI_PIPELINE_ID
      paths:
        - $CI_PROJECT_DIR/application/backend/api/node_modules/
    - key: REVIEW-BUILD-$CI_PIPELINE_ID
      paths:
        - $CI_PROJECT_DIR/application/frontend/client/build
  before_script:
    - apk add --no-cache npm
  script:
    ### ### Server-side
    - cd $CI_PROJECT_DIR/application/backend/api
    ### Base dependencies
    - npm install express pg cors
    ### OpenTelemetry dependencies
    - npm install --save @opentelemetry/api @opentelemetry/sdk-node @opentelemetry/auto-instrumentations-node @opentelemetry/exporter-jaeger
    ### ### Client-side
    - cd $CI_PROJECT_DIR/application/frontend/client
    - npm install --save react-icons bootswatch
    - export REACT_APP_API_URL="$REVIEW_API_URL"
    - npm run build
# Build the three service images (postgres seeded with posts.sql, api, client)
# and push them to ECR tagged review-<short sha>.
create review docker images:
  stage: release
  image: docker:20.10
  services:
    - docker:20.10-dind
  only:
    - main
  cache:
    - key: REVIEW-POSTGRES
      paths:
        - $CI_PROJECT_DIR/application/backend/worker/posts.sql
    - key: REVIEW-NODE-MODULES-$CI_PIPELINE_ID
      paths:
        - $CI_PROJECT_DIR/application/backend/api/node_modules/
    - key: REVIEW-BUILD-$CI_PIPELINE_ID
      paths:
        - $CI_PROJECT_DIR/application/frontend/client/build/
  before_script:
    - apk add --no-cache python3 py3-pip
    - pip3 install awscli
    - aws configure set region $AWS_REGION
  script:
    - cd ./release
    ### Build images
    - |
      docker-compose build \
        --build-arg POSTGRES_USER=$POSTGRES_USER \
        --build-arg POSTGRES_PASSWORD=$POSTGRES_PASSWORD \
        --build-arg POSTGRES_DB=$POSTGRES_DB \
        --no-cache
    ### Connect to AWS ECR registry
    - aws ecr get-login-password --region $AWS_REGION | docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
    ### Create image tags
    - docker tag product-hunting-postgres:latest $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/product-hunting-postgres:review-$CI_COMMIT_SHORT_SHA
    - docker tag product-hunting-api:latest $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/product-hunting-api:review-$CI_COMMIT_SHORT_SHA
    - docker tag product-hunting-client:latest $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/product-hunting-client:review-$CI_COMMIT_SHORT_SHA
    ### Push images to ECR
    - docker push $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/product-hunting-postgres:review-$CI_COMMIT_SHORT_SHA
    - docker push $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/product-hunting-api:review-$CI_COMMIT_SHORT_SHA
    - docker push $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/product-hunting-client:review-$CI_COMMIT_SHORT_SHA
# Deploy the review images to ECS via Terraform; registers the GitLab
# "review" environment, torn down by "destroy review" (on_stop).
deploy review on ecs:
  stage: deploy
  image:
    name: hashicorp/terraform:1.2.2
    entrypoint: [""]
  only:
    - main
  environment:
    name: review
    url: $REVIEW_URL
    on_stop: destroy review
  before_script:
    - apk add --no-cache python3 py3-pip
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
  script:
    ### Configure Terraform
    - cd ./deploy/ecs
    # Render main.tfvars from CI variables (tee also prints it to the job log).
    - |
      cat <<EOF | tee ./main.tfvars
      ecr_registry = "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"
      aws_account_id = "$AWS_ACCOUNT_ID"
      public_ip = "$PUBLIC_IP"
      ci_commit_short_sha = "$CI_COMMIT_SHORT_SHA"
      EOF
    - terraform init
    - terraform apply -var-file=main.tfvars -auto-approve
# Manual teardown of the review ECS deployment (environment stop action).
destroy review:
  stage: deploy
  needs: ["deploy review on ecs"]
  when: manual
  image:
    name: hashicorp/terraform:1.2.2
    entrypoint: [""]
  only:
    - main
  environment:
    name: review
    action: stop
  before_script:
    - apk add --no-cache python3 py3-pip
    - pip3 install awscli
    - aws configure set region $AWS_REGION
  script:
    - cd ./deploy/ecs
    # Same tfvars as the deploy job so destroy targets the same resources.
    - |
      cat <<EOF | tee ./main.tfvars
      ecr_registry = "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"
      aws_account_id = "$AWS_ACCOUNT_ID"
      public_ip = "$PUBLIC_IP"
      ci_commit_short_sha = "$CI_COMMIT_SHORT_SHA"
      EOF
    - terraform init
    - terraform destroy -var-file=main.tfvars -auto-approve
### ###
### Production
### ###
# Manual gate: an empty job whose approval unlocks every "prod *" stage.
# GIT_STRATEGY: none skips the checkout since no files are needed.
push to prod:
  stage: prod prerequisites
  needs: ["deploy review on ecs"]
  when: manual
  variables:
    GIT_STRATEGY: none
  only:
    - main
  script:
    - ""
# Provision the production infrastructure (EKS etc.) with Terraform.
create prod infra:
  stage: prod infrastructure
  needs: ["push to prod"]
  when: on_success
  image:
    name: hashicorp/terraform:1.2.2
    entrypoint: [""]
  only:
    - main
  before_script:
    - apk add --no-cache python3 py3-pip
    - pip3 install awscli
    - aws configure set region $AWS_REGION
  script:
    - cd ./infrastructure/prod
    - terraform init
    - terraform apply -auto-approve
# Rebuild only the React bundle for prod: the API URL is baked in at build
# time, so the client must be rebuilt against PROD_API_URL.
rebuild project for prod:
  stage: prod build
  needs: ["push to prod"]
  when: on_success
  image: alpine:3.16
  only:
    - main
  cache:
    - key: PROD-BUILD-$CI_PIPELINE_ID
      paths:
        - $CI_PROJECT_DIR/application/frontend/client/build
  before_script:
    - apk add --no-cache npm
  script:
    - cd $CI_PROJECT_DIR/application/frontend/client
    - npm install --save react-icons bootswatch
    - export REACT_APP_API_URL="$PROD_API_URL"
    - npm run build
# Promote images to prod: retag the existing review postgres/api manifests in
# ECR (no rebuild), and rebuild + push only the client image.
create prod docker images:
  stage: prod release
  needs: ["rebuild project for prod"]
  when: on_success
  image: docker:20.10
  services:
    - docker:20.10-dind
  only:
    - main
  cache:
    - key: PROD-BUILD-$CI_PIPELINE_ID
      paths:
        - $CI_PROJECT_DIR/application/frontend/client/build/
  before_script:
    - apk add --no-cache python3 py3-pip jq
    - pip3 install awscli
    - aws configure set region $AWS_REGION
  script:
    ### Add tag to postgres image
    - MANIFEST=$(aws ecr batch-get-image --repository-name product-hunting-postgres --image-ids imageTag=review-$CI_COMMIT_SHORT_SHA --output json | jq --raw-output --join-output '.images[0].imageManifest')
    - aws ecr put-image --repository-name product-hunting-postgres --image-tag prod-$CI_COMMIT_SHORT_SHA --image-manifest "$MANIFEST"
    ### Add tag to api image
    - MANIFEST=$(aws ecr batch-get-image --repository-name product-hunting-api --image-ids imageTag=review-$CI_COMMIT_SHORT_SHA --output json | jq --raw-output --join-output '.images[0].imageManifest')
    - aws ecr put-image --repository-name product-hunting-api --image-tag prod-$CI_COMMIT_SHORT_SHA --image-manifest "$MANIFEST"
    ### Recreate client image
    - cd ./release
    - docker-compose build --no-cache client
    ### Connect to AWS ECR registry
    - aws ecr get-login-password --region $AWS_REGION | docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com
    ### Create image tag
    - docker tag product-hunting-client:latest $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/product-hunting-client:prod-$CI_COMMIT_SHORT_SHA
    ### Push image to ECR
    - docker push $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/product-hunting-client:prod-$CI_COMMIT_SHORT_SHA
# Deploy the prod images to EKS via Terraform; registers the GitLab
# "production" environment, torn down by "destroy prod deploy" (on_stop).
deploy prod on eks:
  stage: prod deploy
  needs: ["create prod infra", "create prod docker images"]
  when: on_success
  image:
    name: hashicorp/terraform:1.2.2
    entrypoint: [""]
  only:
    - main
  environment:
    name: production
    url: $PROD_URL
    on_stop: destroy prod deploy
  before_script:
    - apk add --no-cache curl python3 py3-pip openssl
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    # NOTE(review): the storage.googleapis.com/kubernetes-release host is the
    # legacy download location; dl.k8s.io is the current official one — verify.
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### Configure Terraform
    - cd ./deploy/eks
    - |
      cat <<EOF | tee ./main.tfvars
      ecr_registry = "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"
      ci_commit_short_sha = "$CI_COMMIT_SHORT_SHA"
      EOF
    - terraform init
    - terraform refresh -var-file=main.tfvars
    - terraform apply -var-file=main.tfvars -auto-approve
    ### Install Kubernetes Metrics Server
    - kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# Install the Kubernetes Vertical Pod Autoscaler from upstream and create the
# project's VPA object.
apply vertical pod autoscaler:
  stage: prod operate
  needs: ["deploy prod on eks"]
  when: on_success
  image:
    name: golang:1.18-alpine3.16
    entrypoint: [""]
  only:
    - main
  before_script:
    - apk add --no-cache curl python3 py3-pip git bash openssl
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### Install Vertical Pod Autoscaler (VPA)
    - cd ./operate/vpa
    - git clone https://github.com/kubernetes/autoscaler.git
    - bash ./autoscaler/vertical-pod-autoscaler/hack/vpa-up.sh
    ### Verify VPA installation
    - kubectl get pods -n kube-system | grep vpa
    ### Create VPA object
    - kubectl apply -f product-hunting-vpa.yaml
# Apply the Horizontal Pod Autoscaler configuration via Terraform.
apply horizontal pod autoscaler:
  stage: prod operate
  needs: ["deploy prod on eks"]
  when: on_success
  image:
    name: hashicorp/terraform:1.2.2
    entrypoint: [""]
  only:
    - main
  before_script:
    - apk add --no-cache curl python3 py3-pip
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### Configure Terraform
    - cd ./operate/hpa
    - terraform init
    - terraform apply -auto-approve
# Manual escape hatch: roll the prod deployment back to its previous revision.
rollback to previous revision:
  stage: prod operate
  needs: ["deploy prod on eks"]
  when: manual
  image: alpine:3.16
  only:
    - main
  before_script:
    - apk add --no-cache curl python3 py3-pip
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### Perform a rollback to the previous revision
    - kubectl rollout undo deployment product-hunting
# Shared setup for the monitoring stack: kube-prometheus CRDs, persistent
# storage, cert-manager and the Elasticsearch (ECK) operator. The cloned
# kube-prometheus manifests are cached for the later deploy/destroy jobs.
monitoring prerequisites:
  stage: prod monitor
  needs: ["deploy prod on eks"]
  when: on_success
  image: alpine:3.16
  only:
    - main
  cache:
    - key: PROD-PROMETHEUS-$CI_PIPELINE_ID
      paths:
        - $CI_PROJECT_DIR/monitor/kube-prometheus/default/manifests/
  before_script:
    - apk add --no-cache curl python3 py3-pip git
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### ### Needed by 'monitor kube with kube-prometheus' job
    ### Install kube-prometheus release-0.10 compatible with Kubernetes 1.22
    - cd $CI_PROJECT_DIR/monitor/kube-prometheus/default
    - git clone --depth 1 https://github.com/prometheus-operator/kube-prometheus.git -b release-0.10 /tmp/prometheus
    - cp -R /tmp/prometheus/manifests .
    - kubectl apply --server-side -f manifests/setup
    ### Add persistent storage
    - cd $CI_PROJECT_DIR/monitor/
    - kubectl apply -f persistent-storage/
    ### ### Needed by 'app tracing with jaeger' job
    ### Install cert-manager
    - kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.6.3/cert-manager.yaml
    ### Install Elasticsearch Operator
    - kubectl apply -f https://download.elastic.co/downloads/eck/2.3.0/crds.yaml
    - kubectl apply -f https://download.elastic.co/downloads/eck/2.3.0/operator.yaml
# Install the Goldilocks dashboard to visualize VPA recommendations.
monitor vertical pod autoscaler using goldilocks:
  stage: prod monitor
  needs: ["apply vertical pod autoscaler"]
  when: on_success
  image:
    name: golang:1.18-alpine3.16
    entrypoint: [""]
  only:
    - main
  before_script:
    - apk add --no-cache curl python3 py3-pip git bash
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### Install Goldilocks
    - cd ./monitor/vpa
    - git clone https://github.com/FairwindsOps/goldilocks.git
    - cd goldilocks
    # --dry-run=client | apply makes namespace creation idempotent on re-runs.
    - kubectl create namespace goldilocks --dry-run=client -o yaml | kubectl apply -f -
    - kubectl -n goldilocks apply -f hack/manifests/controller
    - kubectl -n goldilocks apply -f hack/manifests/dashboard
    ### Enable 'default' namespace to Goldilocks Dashboard
    - kubectl label --overwrite ns default goldilocks.fairwinds.com/enabled=true
    ### ### Goldilocks is now accessible using port-forwarding
    ### kubectl -n goldilocks port-forward svc/goldilocks-dashboard 8080:80
# Deploy kube-prometheus with the project's Prometheus/Grafana customizations,
# delayed 5 minutes so the CRDs applied by "monitoring prerequisites" settle.
monitor kube with kube-prometheus:
  stage: prod monitor
  needs: ["monitoring prerequisites"]
  when: delayed
  start_in: 5 minutes
  image: alpine:3.16
  only:
    - main
  cache:
    - key: PROD-PROMETHEUS-$CI_PIPELINE_ID
      paths:
        - $CI_PROJECT_DIR/monitor/kube-prometheus/default/manifests/
  before_script:
    - apk add --no-cache curl python3 py3-pip
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### ### kube-prometheus customizations
    - cd ./monitor/kube-prometheus/custom
    ### Edit Prometheus config to make it persistent using Storage Class
    - cp -f ./prometheus/* ../default/manifests
    ### Edit Grafana config to add Loki as datasource and make Grafana persistent with PVC
    - cp -f ./grafana/* ../default/manifests/
    ### Deploy kube-prometheus
    - cd ../default
    - kubectl apply -f manifests/
    ### ### Prometheus, Grafana and Alert Manager are now accessible using port-forwarding
    ### kubectl -n monitoring port-forward svc/prometheus-operated 9090
    ### kubectl -n monitoring port-forward svc/grafana 3000
    ### kubectl -n monitoring port-forward svc/alertmanager-main 9093
# Deploy the Jaeger operator plus an Elasticsearch-backed production Jaeger,
# delayed 5 minutes so cert-manager and the ECK operator are ready.
app tracing with jaeger:
  stage: prod monitor
  needs: ["monitoring prerequisites"]
  when: delayed
  start_in: 5 minutes
  image: alpine:3.16
  only:
    - main
  before_script:
    - apk add --no-cache curl python3 py3-pip
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### Install Jaeger Operator
    - cd ./monitor/jaeger
    - kubectl apply -f jaeger-operator-role-binding.yml
    - kubectl create namespace observability --dry-run=client -o yaml | kubectl apply -f -
    - kubectl apply -f https://github.com/jaegertracing/jaeger-operator/releases/download/v1.35.0/jaeger-operator.yaml -n observability
    ### Create Elasticsearch production cluster
    - kubectl apply -f elasticsearch-prod.yml
    ### Create secret for Jaeger, based on Elasticsearch credentials
    - PASSWORD=$(kubectl get secret jaeger-es-elastic-user -o go-template='{{.data.elastic | base64decode}}')
    - kubectl create secret generic jaeger-es-secret --from-literal=ES_PASSWORD=$PASSWORD --from-literal=ES_USERNAME=elastic --dry-run=client -o yaml | kubectl apply -f -
    ### Wait for installation to finish
    # NOTE(review): fixed sleep; the ES secret/operator may need longer — confirm.
    - sleep 30
    ### Deploy Jaeger for Production
    - kubectl apply -f jaeger-prod.yml
    ### ### Jaeger is now accessible using port-forwarding
    ### kubectl port-forward svc/simple-prod-query 16686
# Install Loki via Helm with persistent storage; Grafana (deployed above with
# a Loki datasource) consumes it.
log management with loki:
  stage: prod monitor
  needs: ["monitor kube with kube-prometheus"]
  when: on_success
  image: alpine:3.16
  only:
    - main
  before_script:
    - apk add --no-cache curl python3 py3-pip
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
    ### Install helm
    - apk add --no-cache tar
    - wget https://get.helm.sh/helm-v3.9.1-linux-amd64.tar.gz
    - tar -zxvf helm-v3.9.1-linux-amd64.tar.gz
    - chmod +x linux-amd64/helm
    - mv linux-amd64/helm /usr/local/bin/helm
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### Install Loki
    - helm repo add grafana https://grafana.github.io/helm-charts
    - helm repo update
    - helm upgrade --install loki grafana/loki-stack --namespace monitoring --set loki.persistence.enabled=true,loki.persistence.storageClassName=ssd,loki.persistence.size=10Gi
    ### ### Loki is now linked to Grafana
# Install Kubecost via Helm (OCI chart) with the EKS cost-monitoring values.
integrate kubecost:
  stage: prod finops
  needs: ["log management with loki"]
  when: on_success
  image: alpine:3.16
  only:
    - main
  before_script:
    - apk add --no-cache curl python3 py3-pip
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
    ### Install helm
    - apk add --no-cache tar
    - wget https://get.helm.sh/helm-v3.9.1-linux-amd64.tar.gz
    - tar -zxvf helm-v3.9.1-linux-amd64.tar.gz
    - chmod +x linux-amd64/helm
    - mv linux-amd64/helm /usr/local/bin/helm
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### Install Kubecost
    - |
      helm upgrade -i kubecost \
        oci://public.ecr.aws/kubecost/cost-analyzer --version 1.96.0 \
        --namespace kubecost --create-namespace \
        -f https://raw.githubusercontent.com/kubecost/cost-analyzer-helm-chart/develop/cost-analyzer/values-eks-cost-monitoring.yaml
    ### ### Kubecost is now accessible using port-forwarding
    ### kubectl port-forward --namespace kubecost deployment/kubecost-cost-analyzer 9090
### ### WARNING
### The next jobs are part of the 'prod destroy' stage.
### In a real use case, it is strongly discouraged to create stages/jobs destroying resources in production.
### Exceptionally for this practical project made for learning, this stage brings a comfort of use, especially for debugging.
### ###
# Manual: uninstall the Kubecost Helm release.
destroy kubecost:
  stage: prod destroy
  needs: ["integrate kubecost"]
  when: manual
  image: alpine:3.16
  only:
    - main
  before_script:
    - apk add --no-cache curl python3 py3-pip
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
    ### Install helm
    - apk add --no-cache tar
    - wget https://get.helm.sh/helm-v3.9.1-linux-amd64.tar.gz
    - tar -zxvf helm-v3.9.1-linux-amd64.tar.gz
    - chmod +x linux-amd64/helm
    - mv linux-amd64/helm /usr/local/bin/helm
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### Destroy Kubecost
    - helm uninstall kubecost --namespace kubecost
# Manual: remove Goldilocks. Re-clones the repo to delete with the same
# manifests the install job applied, in reverse order.
destroy goldilocks:
  stage: prod destroy
  needs: ["monitor vertical pod autoscaler using goldilocks"]
  when: manual
  image:
    name: golang:1.18-alpine3.16
    entrypoint: [""]
  only:
    - main
  before_script:
    - apk add --no-cache curl python3 py3-pip git bash
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### Destroy Goldilocks
    - cd ./monitor/vpa
    - git clone https://github.com/FairwindsOps/goldilocks.git
    - cd goldilocks
    - kubectl -n goldilocks delete -f hack/manifests/dashboard
    - kubectl -n goldilocks delete -f hack/manifests/controller
    - kubectl delete namespace goldilocks
# Manual: delete the kube-prometheus stack using the manifests cached by
# "monitoring prerequisites" (same PROD-PROMETHEUS cache key).
destroy kube-prometheus:
  stage: prod destroy
  needs: ["monitor kube with kube-prometheus"]
  when: manual
  image: alpine:3.16
  only:
    - main
  cache:
    - key: PROD-PROMETHEUS-$CI_PIPELINE_ID
      paths:
        - $CI_PROJECT_DIR/monitor/kube-prometheus/default/manifests/
  before_script:
    - apk add --no-cache curl python3 py3-pip
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### Destroy kube-prometheus
    - cd ./monitor/kube-prometheus/default
    - kubectl delete --ignore-not-found=true -f manifests/
# Manual: tear down Jaeger, its secret, Elasticsearch and the operator — the
# reverse order of the "app tracing with jaeger" install.
destroy jaeger:
  stage: prod destroy
  needs: ["app tracing with jaeger"]
  when: manual
  image: alpine:3.16
  only:
    - main
  before_script:
    - apk add --no-cache curl python3 py3-pip
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### Destroy Jaeger
    - cd ./monitor/jaeger
    - kubectl delete -f jaeger-prod.yml
    - kubectl delete secret jaeger-es-secret
    - kubectl delete -f elasticsearch-prod.yml
    - kubectl delete -f https://github.com/jaegertracing/jaeger-operator/releases/download/v1.35.0/jaeger-operator.yaml -n observability
    - kubectl delete namespace observability
    - kubectl delete -f jaeger-operator-role-binding.yml
# Manual: uninstall the Loki Helm release.
destroy loki:
  stage: prod destroy
  needs: ["log management with loki"]
  when: manual
  image: alpine:3.16
  only:
    - main
  before_script:
    - apk add --no-cache curl python3 py3-pip
    ### Install awscli
    - pip3 install awscli
    - aws configure set region $AWS_REGION
    ### Install Docker
    - apk add --no-cache docker openrc
    ### Install kubectl
    - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
    - chmod +x ./kubectl
    - mv ./kubectl /usr/local/bin/kubectl
    ### Install helm
    - apk add --no-cache tar
    - wget https://get.helm.sh/helm-v3.9.1-linux-amd64.tar.gz
    - tar -zxvf helm-v3.9.1-linux-amd64.tar.gz
    - chmod +x linux-amd64/helm
    - mv linux-amd64/helm /usr/local/bin/helm
  script:
    ### Install EKS kubeconfig file locally
    - aws eks update-kubeconfig --name product-hunting-eks-cluster
    ### Destroy Loki
    - helm uninstall loki --namespace monitoring
destroy prod deploy:
stage: prod destroy
needs: ["deploy prod on eks"]
when: manual
image:
name: hashicorp/terraform:1.2.2
entrypoint: [""]
only:
- main