# Navigate to locust python directory
cd locust/locust-tasks
# Create virtual environment
virtualenv -p python3 venv
# Activate virtualenv
source venv/bin/activate
# Install dependencies
pip install -r requirements.txt
# Run locust
locust -f locustfile.py
Access the Locust web UI in a browser at http://localhost:8089 and start swarming!
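For reference, here is a minimal sketch of what locust/locust-tasks/locustfile.py might contain. The paths mirror the goexample routes listed later in this guide, and the pre-1.0 Locust API (HttpLocust/TaskSet) is assumed to match the master/slave terminology used below; the actual file in the repo may differ.

```python
from locust import HttpLocust, TaskSet, task


class UserBehavior(TaskSet):
    """Simulated user browsing a few goexample endpoints."""

    @task(2)
    def home(self):
        self.client.get("/home")

    @task(1)
    def profile(self):
        self.client.get("/profile")

    @task(1)
    def article(self):
        self.client.get("/article/1")


class WebsiteUser(HttpLocust):
    task_set = UserBehavior
    min_wait = 1000  # wait between 1 and 3 seconds between tasks
    max_wait = 3000
```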
This guide walks through setting up distributed load testing with Locust running on Google Kubernetes Engine (GKE).
We will deploy the target app and Locust in separate clusters.
# create cluster
gcloud container clusters create example-cluster --zone your-gcp-zone
# resize the cluster to the desired number of nodes
gcloud container clusters resize example-cluster --node-pool default-pool --num-nodes 3 --zone your-gcp-zone
# configure kubectl to connect to the cluster
gcloud container clusters get-credentials example-cluster --zone your-gcp-zone --project your-gcp-project
# build docker containers
docker build -t asatrya/goexample goexample/
# push to docker registry
docker push asatrya/goexample
# apply goexample deployment and service
kubectl apply -f kubernetes/goexample-deployment.yaml
kubectl apply -f kubernetes/goexample-service.yaml
Check the service's external IP using
kubectl get svc
and access the app in a browser at the following endpoints (a scripted smoke test follows the list):
- http://goexample-service-ip:8081/home
- http://goexample-service-ip:8081/profile
- http://goexample-service-ip:8081/login
- http://goexample-service-ip:8081/article/1
- http://goexample-service-ip:8081/article/2
- http://goexample-service-ip:8081/article/3
- http://goexample-service-ip:8081/article/4
- http://goexample-service-ip:8081/article/5
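Instead of clicking through each URL, a short script can smoke-test all routes at once. This is a hypothetical helper, not part of the repo; replace the placeholder IP with the EXTERNAL-IP from kubectl get svc:

```python
import requests

# Placeholder: substitute the EXTERNAL-IP reported by `kubectl get svc`
BASE = "http://goexample-service-ip:8081"
PATHS = ["/home", "/profile", "/login"] + ["/article/%d" % i for i in range(1, 6)]

for path in PATHS:
    resp = requests.get(BASE + path, timeout=5)
    print("%s -> %d" % (path, resp.status_code))
```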
# create cluster
gcloud container clusters create locust-cluster --zone your-gcp-zone
# resize the cluster to the desired number of nodes
gcloud container clusters resize locust-cluster --node-pool default-pool --num-nodes 3 --zone your-gcp-zone
# configure kubectl to connect to the cluster
gcloud container clusters get-credentials locust-cluster --zone your-gcp-zone --project your-gcp-project
# build docker containers
docker build -t asatrya/locust locust/
# push to docker registry
docker push asatrya/locust
# if you run on Minikube, apply the MetalLB configuration
kubectl apply -f kubernetes/metallb-config.yaml
# clone source code and navigate into the folder
git clone https://github.com/asatrya/locust_k8s
cd locust_k8s
# create a ConfigMap from locustfile.py
kubectl create configmap locust-task-conf --from-file=locust/locust-tasks/locustfile.py -o=yaml --dry-run=client | kubectl apply -f -
# or replace the ConfigMap when you have an updated locustfile.py
kubectl create configmap locust-task-conf --from-file=locust/locust-tasks/locustfile.py -o=yaml --dry-run=client | kubectl replace -f -
# set TARGET_HOST to the goexample service's external IP, then apply the ConfigMap
nano kubernetes/locust-config.yaml
kubectl apply -f kubernetes/locust-config.yaml
# apply locust master deployment and service
kubectl apply -f kubernetes/locust-master-deployment.yaml
kubectl apply -f kubernetes/locust-master-service.yaml
# apply locust slave deployment
kubectl apply -f kubernetes/locust-slave-deployment.yaml
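How TARGET_HOST reaches Locust depends on the image's entrypoint; one common pattern, assumed here rather than taken from this repo, is to read the variable inside the locustfile and fall back to a local default:

```python
import os

from locust import HttpLocust, TaskSet, task


class UserBehavior(TaskSet):
    @task
    def home(self):
        self.client.get("/home")


class WebsiteUser(HttpLocust):
    task_set = UserBehavior
    # Assumed pattern: use the TARGET_HOST value injected via
    # kubernetes/locust-config.yaml, or a local default for dev runs.
    host = os.getenv("TARGET_HOST", "http://localhost:8081")
    min_wait = 1000
    max_wait = 3000
```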
Check the service's external IP using
kubectl get svc
and access the Locust UI in a browser at http://locust-service-ip:8089.
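Besides the dashboard, Locust's web server exposes live stats as JSON at /stats/requests, which is handy for scripted checks (the service IP below is a placeholder):

```python
import requests

# Placeholder: substitute the EXTERNAL-IP of the locust master service
LOCUST = "http://locust-service-ip:8089"

stats = requests.get(LOCUST + "/stats/requests", timeout=5).json()
for entry in stats.get("stats", []):
    print(entry["name"], entry["num_requests"], entry["avg_response_time"])
```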
Adjust the number of locust-slave replicas
to accommodate the desired number of concurrent users (e.g., one slave per 500 users):
kubectl scale --replicas=8 deployment/locust-slave
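The replica count follows directly from that rule of thumb, e.g.:

```python
import math

desired_users = 4000   # target number of concurrent users
users_per_slave = 500  # rule of thumb above: one slave per 500 users

replicas = math.ceil(desired_users / users_per_slave)
print(replicas)  # 8, matching the kubectl scale command above
```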
Watch resource usage in locust-cluster
per node or per pod using these commands:
# per node
watch -n 1 'kubectl top nodes'
# per pod
watch -n 1 'kubectl top pods'
Adjust the number of instances of the tested application to reach the desired response time:
kubectl scale --replicas=4 deployment/goexample
Watch resource usage in example-cluster
to decide whether to scale instances up or down.
Spot bottlenecks by watching the metrics on the Locust dashboard.
# delete clusters
gcloud container clusters delete example-cluster --zone your-gcp-zone
gcloud container clusters delete locust-cluster --zone your-gcp-zone