-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathscript.sh
executable file
·259 lines (211 loc) · 10.7 KB
/
script.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
#! /bin/bash
# Kepler overhead benchmark driver: scales a workload (a dummy deployment, or
# node-by-node Kepler scrape exposure) while Prometheus records the resource
# overhead of the Kepler and Prometheus pods, then exports the data as files.
# Exit on error and fail a pipeline if any stage of it fails.
set -e
set -o pipefail
# Run ctrl_c() on SIGINT so a half-finished experiment is cleaned up.
trap ctrl_c INT
# If an interrupt signal is received, perform the appropriate cleanup
function ctrl_c() {
  # SIGINT handler: run the cleanup routine(s) matching whichever experiment
  # EXPERIMENT_CMD launched, then exit successfully.
  case "$EXPERIMENT_CMD" in
    *scale_dummy_deployment*) scale_dummy_deployment_cleanup ;;
  esac
  case "$EXPERIMENT_CMD" in
    *scale_cluster*) scale_cluster_cleanup ;;
  esac
  exit 0
}
# --- Configuration: every value below is overridable via the environment ---
export RESULTS_DIR=${RESULTS_DIR:-'./_output/results/'}
# Command executed by run_benchmark; also inspected by ctrl_c to decide cleanup.
export EXPERIMENT_CMD=${EXPERIMENT_CMD:-'./script.sh scale_dummy_deployment'}
export EXPERIMENT_DIR_NAME=${EXPERIMENT_DIR_NAME:-'scale-dummy-deployment'}
# Replica sweep bounds/step used by scale_dummy_deployment (and INCREMENT by scale_cluster).
export FIRST=${FIRST:-0}
export LAST=${LAST:-40}
export INCREMENT=${INCREMENT:-10}
# Seconds to dwell at each step so Prometheus can accumulate samples.
export INCREMENT_INTERVAL=${INCREMENT_INTERVAL:-1200}
export PROM_SERVER=${PROM_SERVER:-http://localhost:9090}
export KEPLER_PROM_SERVER=${KEPLER_PROM_SERVER:-${PROM_SERVER}} # Specify if different from default prometheus server
export KEPLER_LABEL_MATCHER=${KEPLER_LABEL_MATCHER:-'pod=~"kepler-exporter.*"'}
export KEPLER_PROM_LABEL_MATCHER=${KEPLER_PROM_LABEL_MATCHER:-'pod=~"prometheus.*"'} # Should only match prometheus instance targeting Kepler
export MULTIPLE_KEPLER_PROM_INSTANCES=${MULTIPLE_KEPLER_PROM_INSTANCES:-false}
# NOTE(review): the default is read from HOURS, not HOURS_TO_SAVE, so setting
# HOURS_TO_SAVE in the environment is silently ignored — confirm intent.
export HOURS_TO_SAVE=${HOURS:-1}
function prepare_output_dir(){
  # Resolve and create the output directory for this run.
  # Defaults to RESULTS_DIR/EXPERIMENT_DIR_NAME/<START formatted as local time>/
  # unless OUTPUT_DIR is already set by the caller/environment.
  # Reads global START (epoch seconds); uses GNU date's `-d @<epoch>` form.
  export OUTPUT_DIR=${OUTPUT_DIR:-"${RESULTS_DIR}${EXPERIMENT_DIR_NAME}/$(date -d @${START} +"%Y_%m_%d_%I_%M_%p")/"}
  # Quoted (was unquoted) so paths containing spaces are created correctly.
  mkdir -p "$OUTPUT_DIR"
}
function create_timestamp_file(){
  # Create the CSV that records one row per measurement interval:
  # replica/node count, interval start epoch, interval end epoch.
  # Reads global OUTPUT_DIR; exports TIMESTAMP_OUTPUT_FILE for later writers.
  export TIMESTAMP_OUTPUT_FILE=${OUTPUT_DIR}timestamps.csv
  # Quoted redirect target (was unquoted) so paths with spaces work.
  echo "Replicas,Start time,End time" > "$TIMESTAMP_OUTPUT_FILE"
}
function validate_cluster(){
  # Verify the cluster is ready to benchmark:
  #   1. the Kepler Prometheus server is reachable and scraping Kepler metrics,
  #   2. Prometheus has container metrics for the Kepler pod(s),
  #   3. Prometheus has container metrics for the Prometheus pod(s) scraping Kepler.
  # Exits non-zero with help text on any failure.
  # Temporarily disable -e so curl/jq failures are reported with guidance.
  set +e
  # Check if Prometheus is scraping Kepler metrics
  curl -s -g "${KEPLER_PROM_SERVER}/api/v1/query?query=kepler_container_package_joules_total" | jq '.data.result[].value' --exit-status > /dev/null
  EXIT_STATUS=$?
  if [ "$EXIT_STATUS" -ne 0 ]; then
    if [ "$EXIT_STATUS" -eq 7 ]; then
      # curl exit code 7: failed to connect to the host
      echo "Error: Could not reach Kepler Prometheus server at ${KEPLER_PROM_SERVER}"
      echo 'Help: If not using a ClusterIP/NodePort service to expose Prometheus ensure you forward the port. e.g. `kubectl --insecure-skip-tls-verify -n monitoring port-forward service/kube-prom-stack-prometheus-prometheus 9090:9090`'
      echo 'Help: Rerun with `PROM_SERVER=[your Prometheus url] ./script.sh`'
    elif [ "$EXIT_STATUS" -eq 4 ]; then
      # jq --exit-status exit code 4: query produced no valid result
      echo "Error: Prometheus is not scraping Kepler metrics"
      echo "Help: https://github.com/sustainable-computing-io/kepler/issues/767#issuecomment-1717990301"
    fi
    exit "$EXIT_STATUS"
  fi
  # Check if Prometheus is collecting metrics on the Kepler pod(s)
  CURL_OUTPUT=$(curl -s -g "${PROM_SERVER}/api/v1/query?query=container_cpu_usage_seconds_total{${KEPLER_LABEL_MATCHER}}")
  # $CURL_OUTPUT quoted (was unquoted) so the JSON reaches jq unmangled.
  MATCHING_KEPLER_INSTANCE_COUNT=$(echo "$CURL_OUTPUT" | jq '.data.result | length')
  MATCHING_KEPLER_INSTANCES=$(echo "$CURL_OUTPUT" | jq '.data.result[].metric.pod')
  if [ "$MATCHING_KEPLER_INSTANCE_COUNT" -eq 0 ]; then
    echo "Error: No Kepler pods are being targeted"
    echo "Help: Set KEPLER_LABEL_MATCHER (currently KEPLER_LABEL_MATCHER='${KEPLER_LABEL_MATCHER}') to match your desired Kepler pod(s)"
    exit 1
  elif [ "$MATCHING_KEPLER_INSTANCE_COUNT" -eq 1 ]; then
    echo "Measuring overhead of a single Kepler pod: ${MATCHING_KEPLER_INSTANCES}"
  else
    echo -e "Measuring average overhead of multiple Kepler pods:\n${MATCHING_KEPLER_INSTANCES}"
  fi
  # Check if Prometheus is collecting metrics on the Prometheus pod(s) scraping Kepler metrics
  CURL_OUTPUT=$(curl -s -g "${PROM_SERVER}/api/v1/query?query=container_cpu_usage_seconds_total{${KEPLER_PROM_LABEL_MATCHER}}")
  MATCHING_PROM_INSTANCE_COUNT=$(echo "$CURL_OUTPUT" | jq '.data.result | length')
  MATCHING_PROM_INSTANCES=$(echo "$CURL_OUTPUT" | jq '.data.result[].metric.pod')
  if [ "$MATCHING_PROM_INSTANCE_COUNT" -eq 0 ]; then
    echo "Error: No Prometheus pods are being targeted"
    echo "Help: Set KEPLER_PROM_LABEL_MATCHER (currently KEPLER_PROM_LABEL_MATCHER='${KEPLER_PROM_LABEL_MATCHER}') to match your desired Prometheus pod(s)"
    exit 1
  elif [ "$MATCHING_PROM_INSTANCE_COUNT" -eq 1 ]; then
    echo "Measuring overhead of a single Prometheus pod: ${MATCHING_PROM_INSTANCES}"
  elif [ "${MULTIPLE_KEPLER_PROM_INSTANCES,,}" = true ]; then
    # Multiple Prometheus pods were matched, and the caller opted in to that.
    echo -e "Measuring average overhead of multiple Prometheus pods:\n${MATCHING_PROM_INSTANCES}"
  else
    echo -e "Error: Multiple Prometheus pods are being targeted:\n${MATCHING_PROM_INSTANCES}"
    echo "Help: If intentionally scraping Kepler metrics from multiple Prometheus instances, set MULTIPLE_KEPLER_PROM_INSTANCES=true"
    echo "Help: If more Prometheus instances are being targeted than desired set KEPLER_PROM_LABEL_MATCHER (currently KEPLER_PROM_LABEL_MATCHER='${KEPLER_PROM_LABEL_MATCHER}') to only match desired Prometheus pod(s)"
    exit 1
  fi
  # Restore exit-on-error for the rest of the script.
  set -e
}
function scale_dummy_deployment(){
  # Sweep the dummy deployment from FIRST to LAST replicas in INCREMENT
  # steps, dwelling INCREMENT_INTERVAL seconds at each step and recording
  # the start/end timestamps of each interval to TIMESTAMP_OUTPUT_FILE.
  kubectl apply -f dummy-container-deployment.yaml
  # Wait for pods to delete if a deployment already exists
  kubectl wait --for=delete pod -l type=dummy --timeout 5m
  # (Re)write the CSV header; TIMESTAMP_OUTPUT_FILE is exported by create_timestamp_file.
  echo "Replicas,Start time,End time" > "$TIMESTAMP_OUTPUT_FILE"
  # $(...) replaces deprecated backticks; expansions quoted.
  for REPLICAS in $(seq "$FIRST" "$INCREMENT" "$LAST")
  do
    kubectl scale deployment dummy-container-deployment --replicas="$REPLICAS"
    # Wait for containers to be ready and mark start time with this replica count
    kubectl rollout status deployment dummy-container-deployment
    echo -n "${REPLICAS},$(date +%s)," >> "$TIMESTAMP_OUTPUT_FILE"
    # Allow Prometheus to collect overhead measurements for INCREMENT_INTERVAL seconds
    sleep "$INCREMENT_INTERVAL"
    # Mark end time with this replica count
    date +%s >> "$TIMESTAMP_OUTPUT_FILE"
  done
  # Cleanup
  scale_dummy_deployment_cleanup
}
function scale_dummy_deployment_cleanup(){
  # Tear down the dummy workload; tolerate it already being absent.
  kubectl delete --ignore-not-found deployment dummy-container-deployment
}
function restrict_kepler_metrics_by_node(){
  # Add an `exposenode: "true"` selector to the kepler-exporter Service so
  # only pods explicitly labelled exposenode=true are selected for scraping.
  PATCH='[{"op": "add", "path": "/spec/selector/exposenode", "value": "true"}]'
  kubectl patch service kepler-exporter --namespace=kepler --type=json --patch="$PATCH" 2>/dev/null
  # Sleep for one Prometheus scrape interval so the change takes effect.
  sleep 30
}
function unrestrict_kepler_metrics_by_node(){
  # Remove the exposenode selector from the kepler-exporter Service so all
  # Kepler pods are selected for scraping again.
  PATCH='[{"op": "remove", "path": "/spec/selector/exposenode"}]'
  kubectl patch service kepler-exporter --namespace=kepler --type=json --patch="$PATCH" 2>/dev/null
}
function enable_kepler_pod_sraping(){
  # NOTE(review): name has a typo ("sraping" -> "scraping"); kept as-is
  # because the caller in scale_cluster invokes this exact name.
  # Label the pod named by the global POD so the restricted Service selector
  # matches it again. $POD quoted (was unquoted).
  kubectl label pod "$POD" exposenode=true --overwrite -n kepler
  # Sleep for one Prometheus scrape interval so the change takes effect.
  sleep 30
}
function record_current_interval(){
  # Append one CSV row to TIMESTAMP_OUTPUT_FILE: the current KEPLER_POD_COUNT,
  # an interval start timestamp, then — after sleeping INCREMENT_INTERVAL
  # seconds so Prometheus accumulates samples — the interval end timestamp.
  echo -n "${KEPLER_POD_COUNT},$(date +%s)," >> "$TIMESTAMP_OUTPUT_FILE"
  sleep "$INCREMENT_INTERVAL"
  # Direct redirection replaces the redundant echo "$(date +%s)".
  date +%s >> "$TIMESTAMP_OUTPUT_FILE"
}
function scale_cluster(){
  # Benchmark Kepler overhead as scrape exposure changes across the cluster:
  # start with every kepler-exporter pod unexposed, then walk the pod list,
  # re-exposing one pod each time COUNT wraps and recording an interval.
  # Intentional word-splitting: kubectl pod names contain no whitespace.
  KEPLER_PODS=($(kubectl get pods -l app.kubernetes.io/name=kepler-exporter -n kepler -o custom-columns="NAME:.metadata.name" --no-headers))
  KEPLER_POD_COUNT=0
  # Array expansion quoted (was unquoted).
  kubectl label pods "${KEPLER_PODS[@]}" exposenode="false" --overwrite -n kepler
  # Only expose kepler metrics to Prometheus for nodes with a kepler-exporter with the label exposenode="true"
  restrict_kepler_metrics_by_node
  echo "Nodes,Start time,End time" > "$TIMESTAMP_OUTPUT_FILE"
  # Baseline interval. NOTE(review): the original comment claimed "all nodes
  # are being scraped", but at this point every pod is labelled
  # exposenode=false — confirm which baseline was intended.
  record_current_interval
  COUNT=$INCREMENT
  for POD in "${KEPLER_PODS[@]}"
  do
    if [ "$COUNT" -eq 0 ]; then
      COUNT="$INCREMENT"
      # Reads the loop variable POD; function name typo preserved from its definition.
      enable_kepler_pod_sraping
      record_current_interval
    fi
    COUNT=$((COUNT - 1))
    KEPLER_POD_COUNT=$((KEPLER_POD_COUNT + 1))
  done
  # Record a final interval after the walk completes.
  record_current_interval
  # Cleanup
  scale_cluster_cleanup
}
function scale_cluster_cleanup(){
  # Restore the Service selector and strip the exposenode label from every
  # pod scale_cluster touched.
  unrestrict_kepler_metrics_by_node
  # Test the array's element count; the original [ -n "${KEPLER_PODS}" ]
  # only inspected element 0.
  if [ "${#KEPLER_PODS[@]}" -gt 0 ]; then
    # Array expansion quoted (was unquoted).
    kubectl label pods "${KEPLER_PODS[@]}" exposenode- -n kepler
  fi
}
function save_overhead_data(){
  # Export each overhead query over [START, END] to its own file under
  # OUTPUT_DIR. Reads globals: START, END (epoch seconds), OUTPUT_DIR,
  # PROM_SERVER, KEPLER_LABEL_MATCHER, KEPLER_PROM_LABEL_MATCHER.
  # The plotting functions expect a single timeseries
  # Wrapping queries in a final aggregation function guarantees this
  QUERIES=(
    "max(avg(rate(container_cpu_usage_seconds_total{${KEPLER_LABEL_MATCHER}}[2m])) by (pod))" # max Kepler cpu
    "avg(avg(rate(container_cpu_usage_seconds_total{${KEPLER_LABEL_MATCHER}}[2m])) by (pod))" # average Kepler cpu
    "avg(rate(container_cpu_usage_seconds_total{${KEPLER_PROM_LABEL_MATCHER}}[2m]))" # average Prometheus cpu (in case of multiple Prometheus instances)
    "max(avg(rate(container_memory_usage_bytes{${KEPLER_LABEL_MATCHER}}[2m])) by (pod))" # max Kepler memory
    "avg(avg(rate(container_memory_usage_bytes{${KEPLER_LABEL_MATCHER}}[2m])) by (pod))" # average Kepler memory
    "avg(rate(container_memory_usage_bytes{${KEPLER_PROM_LABEL_MATCHER}}[2m]))" # average Prometheus memory (in case of multiple Prometheus instances)
    "max(rate(container_network_receive_bytes_total{${KEPLER_LABEL_MATCHER}}[2m]))" # max Kepler network receive
    "avg(rate(container_network_receive_bytes_total{${KEPLER_LABEL_MATCHER}}[2m]))" # avg Kepler network receive
    "avg(rate(container_network_receive_bytes_total{${KEPLER_PROM_LABEL_MATCHER}}[2m]))" # Prometheus network receive
    "max(rate(container_network_transmit_bytes_total{${KEPLER_LABEL_MATCHER}}[2m]))" # max Kepler network transmit
    "avg(rate(container_network_transmit_bytes_total{${KEPLER_LABEL_MATCHER}}[2m]))" # avg Kepler network transmit
    # Fixed: this query previously duplicated the receive metric, so the
    # avg-prometheus-network-transmit file actually contained receive data.
    "avg(rate(container_network_transmit_bytes_total{${KEPLER_PROM_LABEL_MATCHER}}[2m]))" # Prometheus network transmit
  )
  # Output filename stems; index-aligned with QUERIES.
  QUERY_NAMES=(
    "max-kepler-cpu"
    "avg-kepler-cpu"
    "avg-prometheus-cpu"
    "max-kepler-memory"
    "avg-kepler-memory"
    "avg-prometheus-memory"
    "max-kepler-network-receive"
    "avg-kepler-network-receive"
    "avg-prometheus-network-receive"
    "max-kepler-network-transmit"
    "avg-kepler-network-transmit"
    "avg-prometheus-network-transmit"
  )
  for i in "${!QUERIES[@]}"; do
    OUTPUT_FILE=${OUTPUT_DIR}${QUERY_NAMES[$i]}.csv
    # Range query at 30s resolution; keep only the value series from the JSON.
    curl -X POST -d "query=${QUERIES[$i]}&start=${START}&end=${END}&step=30s" "${PROM_SERVER}/api/v1/query_range" | jq '.data.result[].values' > "$OUTPUT_FILE"
  done
}
function run_benchmark(){
# Full experiment driver: sanity-check the cluster, prepare the output files,
# run EXPERIMENT_CMD, then export overhead metrics for [START, END].
validate_cluster
# START/END are globals consumed by prepare_output_dir and save_overhead_data;
# START must be set before prepare_output_dir (it names the output directory).
START=$(date +%s)
prepare_output_dir
create_timestamp_file
# Runs the experiment in a child shell; the exported configuration variables
# above are inherited by it. NOTE(review): this executes arbitrary shell from
# the environment — only run with trusted EXPERIMENT_CMD values.
bash -c "$EXPERIMENT_CMD"
END=$(date +%s)
save_overhead_data
}
function save_current_overhead(){
  # Export the last HOURS_TO_SAVE hours of overhead metrics without running
  # an experiment. Sets the START/END globals read by save_overhead_data.
  END=$(date +%s)
  # Single arithmetic expansion (was a redundantly nested $(( ... $((...)) ))).
  START=$((END - HOURS_TO_SAVE * 3600))
  # Snapshots get their own directory unless OUTPUT_DIR is preset; because
  # OUTPUT_DIR is now set, prepare_output_dir only creates the directory.
  export OUTPUT_DIR=${OUTPUT_DIR:-"./_output/overhead-snapshots/$(date -d @${END} +"%Y_%m_%d_%I_%M_%p")/"}
  prepare_output_dir
  save_overhead_data
}
"$@"