From b7ebc79b5c2325f2b3b2ce80e284ebced62362f7 Mon Sep 17 00:00:00 2001
From: EC2 Default User
Date: Wed, 7 Aug 2024 04:28:50 +0000
Subject: [PATCH] TCU testing

---
 .../create_timestream_resource.py   |  54 ++++
 .../create_timestream_resource.tf   |  22 ++
 .../create_timestream_resource.yaml |  26 ++
 .../ingestion.py                    | 192 ++++++++++++++
 .../lastpoint-query.ipynb           | 232 +++++++++++++++++
 .../single-groupby-orderby.ipynb    | 234 ++++++++++++++++++
 6 files changed, 760 insertions(+)
 create mode 100644 tools/python/timestream-compute-units-testing/create_timestream_resource.py
 create mode 100644 tools/python/timestream-compute-units-testing/create_timestream_resource.tf
 create mode 100644 tools/python/timestream-compute-units-testing/create_timestream_resource.yaml
 create mode 100644 tools/python/timestream-compute-units-testing/ingestion.py
 create mode 100644 tools/python/timestream-compute-units-testing/lastpoint-query.ipynb
 create mode 100644 tools/python/timestream-compute-units-testing/single-groupby-orderby.ipynb

diff --git a/tools/python/timestream-compute-units-testing/create_timestream_resource.py b/tools/python/timestream-compute-units-testing/create_timestream_resource.py
new file mode 100644
index 00000000..ad7d3da2
--- /dev/null
+++ b/tools/python/timestream-compute-units-testing/create_timestream_resource.py
@@ -0,0 +1,54 @@
+import boto3
+
+database = "devops"
+table = "sample_devops"
+region = "us-east-1"
+memory_store_retention_in_hours = 24
+magnetic_store_retention_in_days = 365
+partition_key = "hostname"
+
+timestream_client = boto3.client('timestream-write', region_name=region)
+
+# create database
+try:
+    timestream_client.create_database(DatabaseName=database)
+    print(f"Database {database} created successfully")
+except timestream_client.exceptions.ConflictException:
+    print(f"Database {database} exists. Skipping database creation")
+except Exception as err:
+    print(f"Create database failed with error: {err}")
+    raise
+
+# create table
+print("Creating table")
+retention_properties = {
+    'MemoryStoreRetentionPeriodInHours': memory_store_retention_in_hours,
+    'MagneticStoreRetentionPeriodInDays': magnetic_store_retention_in_days
+}
+magnetic_store_write_properties = {
+    'EnableMagneticStoreWrites': True
+}
+
+schema = {
+    "CompositePartitionKey": [
+        {
+            "EnforcementInRecord": "REQUIRED",
+            "Name": partition_key,
+            "Type": "DIMENSION"
+        }
+    ]
+}
+
+try:
+    timestream_client.create_table(DatabaseName=database, TableName=table,
+                                   RetentionProperties=retention_properties,
+                                   MagneticStoreWriteProperties=magnetic_store_write_properties,
+                                   Schema=schema
+                                   )
+    print(f"Table {table} successfully created")
+except timestream_client.exceptions.ConflictException:
+    print(
+        f"Table {table} exists on database {database}. Skipping table creation")
+except Exception as err:
+    print(f"Create table failed: {err}")
+    raise
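A newly created table can take a moment to become available, so it is worth confirming it is ready before starting ingestion. A minimal sketch, assuming the same region and resource names as the script above, that polls DescribeTable until the table reports ACTIVE:

    import time
    import boto3

    client = boto3.client('timestream-write', region_name='us-east-1')

    # Poll the table status; a freshly created table may not be ACTIVE immediately.
    while True:
        table = client.describe_table(DatabaseName='devops',
                                      TableName='sample_devops')['Table']
        print(f"Table status: {table['TableStatus']}")
        if table['TableStatus'] == 'ACTIVE':
            break
        time.sleep(5)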
Skipping table creation") +except Exception as err: + print(f"Create table failed: {err}") + raise diff --git a/tools/python/timestream-compute-units-testing/create_timestream_resource.tf b/tools/python/timestream-compute-units-testing/create_timestream_resource.tf new file mode 100644 index 00000000..9be56b07 --- /dev/null +++ b/tools/python/timestream-compute-units-testing/create_timestream_resource.tf @@ -0,0 +1,22 @@ +resource "aws_timestreamwrite_database" "tcu_testing" { + database_name = "devops" +} + + +resource "aws_timestreamwrite_table" "tcu_testing" { + database_name = aws_timestreamwrite_database.tcu_testing.database_name + table_name = "sample_devops" + + retention_properties { + magnetic_store_retention_period_in_days = 365 + memory_store_retention_period_in_hours = 24 + } + + schema { + composite_partition_key { + enforcement_in_record = "REQUIRED" + name = "hostname" + type = "DIMENSION" + } + } +} \ No newline at end of file diff --git a/tools/python/timestream-compute-units-testing/create_timestream_resource.yaml b/tools/python/timestream-compute-units-testing/create_timestream_resource.yaml new file mode 100644 index 00000000..6d5e9701 --- /dev/null +++ b/tools/python/timestream-compute-units-testing/create_timestream_resource.yaml @@ -0,0 +1,26 @@ +AWSTemplateFormatVersion: 2010-09-09 +Description: Create Timestream Resources + + +Resources: + MyDatabase: + Type: AWS::Timestream::Database + Properties: + DatabaseName: "devops1" + + MyTable: + DependsOn: MyDatabase + Type: AWS::Timestream::Table + Properties: + DatabaseName: !Ref MyDatabase + TableName : "sample_devops" + RetentionProperties: + MemoryStoreRetentionPeriodInHours: "24" + MagneticStoreRetentionPeriodInDays: "7300" + MagneticStoreWriteProperties: + EnableMagneticStoreWrites: true + Schema: + CompositePartitionKey: + - EnforcementInRecord: "REQUIRED" + Name: "hostname" + Type: "DIMENSION" \ No newline at end of file diff --git a/tools/python/timestream-compute-units-testing/ingestion.py b/tools/python/timestream-compute-units-testing/ingestion.py new file mode 100644 index 00000000..9f3f6699 --- /dev/null +++ b/tools/python/timestream-compute-units-testing/ingestion.py @@ -0,0 +1,192 @@ +import random +import time +import boto3 +import datetime +import threading +from botocore.config import Config + +class Generator: + INTERVAL = 1 # Seconds + INTERVAL_MILLI = 100 + BATCH_SIZE = 100 + + + def __init__(self): + self.time_lock = threading.Lock() + + + def prepare_common_attributes(self): + common_attributes = { + 'MeasureName': self.measure_name, + 'MeasureValueType': 'MULTI' + } + print(common_attributes) + return common_attributes + + def prepare_record(self, current_time): + record = { + 'Time': str(current_time), + 'TimeUnit': 'SECONDS', + 'MeasureValues': [], + 'Dimensions':[] + } + return record + + def prepare_measure(self, measure_name, measure_value): + measure = { + 'Name': measure_name, + 'Value': str(measure_value), + 'Type': 'DOUBLE' + } + return measure + + def prepare_dimension(self, name, value): + dimension = { + 'Name': name, + 'Value': str(value), + } + return dimension + + def write_records(self, records, common_attributes): + try: + result = self.write_client.write_records(DatabaseName=self.database_name, + TableName=self.table_name, + CommonAttributes=common_attributes, + Records=records) + status = result['ResponseMetadata']['HTTPStatusCode'] + print("Processed %d records. 
WriteRecords HTTPStatusCode: %s" % + (len(records), status)) + print(result) + except Exception as err: + print("Error:", err) + print(f'Error ingesting data for : {str(err.response["RejectedRecords"])}') + + def unix_time_millis(self, dt): + epoch = datetime.datetime.utcfromtimestamp(0) + return (dt - epoch).total_seconds() * 1000.0 + + def write_buffer(self, buffer, common_attributes): + start_time = time.time() + total_records = 0 + for records in buffer: + elapsed_time = time.time() - start_time + self.write_records(records, common_attributes) + total_records += len(records) + if elapsed_time == 0.0: + elapsed_time = 0.00001 + rps = total_records/elapsed_time + print(f'{total_records} written rps = {rps}') + + def generate_data(self, pid, region, database_name, table_name, max_records): + + self.database_name = database_name + self.table_name = table_name + print(f'writing data to database {self.database_name} table {self.table_name}') + + session = boto3.Session(region_name=region) + self.write_client = session.client('timestream-write', config=Config( + read_timeout=20, max_pool_connections=5000, retries={'max_attempts': 10})) + + self.measure_name = f"metric{pid % 8192}" + common_attributes = self.prepare_common_attributes() + + records = [] + + total_records = 0 + + launch_time = time.time() + current_time_seconds = int(datetime.datetime.now().timestamp()) + current_time_nanoseconds = current_time_seconds * 10**9 + time_delta_seconds = 365 * 24 * 60 * 60 # 365 days in seconds + host_number = 0 + + + for i in range(0, int(max_records)): + current_time = int(time.time()) + record = self.prepare_record(current_time) + host_number += 1 + + record['Dimensions'].append(self.prepare_dimension('hostname', f"host{host_number}")) + record['Dimensions'].append(self.prepare_dimension('region', "us-east-1")) + record['Dimensions'].append(self.prepare_dimension('az', f"az{int(random.randint(1,6))}")) + record['MeasureValues'].append(self.prepare_measure('cpu_utilization', float(random.randint(0, 1000)) / 10.0)) + record['MeasureValues'].append(self.prepare_measure('memory', float(random.randint(0, 1000)) / 10.0)) + record['MeasureValues'].append(self.prepare_measure('network_in', float(random.randint(0, 1000)) / 10.0)) + record['MeasureValues'].append(self.prepare_measure('network_out', float(random.randint(0, 1000)) / 10.0)) + record['MeasureValues'].append(self.prepare_measure('disk_read_ops', float(random.randint(0, 1000)) / 10.0)) + record['MeasureValues'].append(self.prepare_measure('dicsk_write_iops', float(random.randint(0, 1000)) / 10.0)) + records.append(record) + + + if len(records) == self.BATCH_SIZE: + total_records += len(records) + + self.write_records(records, common_attributes) + + if self.INTERVAL > 0.0: + time.sleep(self.INTERVAL) + host_number = 0 + records = [] + + if len(records) > 0: + total_records += len(records) + self.write_records(records, common_attributes) + + total_time = time.time() - launch_time + rps = total_records / total_time + print(f'Total Records in thread: {total_records:,} in {rps} rps') + + return total_records + + +def lambda_handler(event, context): + lambda_time = time.time() + pid = 1 + max_threads = event['threads'] + threads = [] + record_counts = [] + + for i in range(0, max_threads): + id = i + thread = threading.Thread(target=thread_handler, args=(id, event, context)) + thread.start() + print( + f'[{pid}] process_record: Started process #{i} with pid={thread} to dump data chunk to timestream') + time.sleep(0.1) + threads.append(thread) + + for 
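ingestion.py is written as a Lambda handler but also runs locally via the __main__ block. If it is deployed as a Lambda function, one way to kick off a load test is an asynchronous invoke; a sketch, where 'tcu-ingestion' is a placeholder function name:

    import json
    import boto3

    lambda_client = boto3.client('lambda', region_name='us-east-2')

    # Same event shape the __main__ block uses.
    event = {'threads': 4, 'records': 1000000, 'region': 'us-east-2',
             'database': 'devops', 'table': 'sample_devops'}

    # InvocationType='Event' returns immediately; the function writes in the background.
    lambda_client.invoke(FunctionName='tcu-ingestion',  # placeholder name
                         InvocationType='Event',
                         Payload=json.dumps(event))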
diff --git a/tools/python/timestream-compute-units-testing/lastpoint-query.ipynb b/tools/python/timestream-compute-units-testing/lastpoint-query.ipynb
new file mode 100644
index 00000000..76ffe12f
--- /dev/null
+++ b/tools/python/timestream-compute-units-testing/lastpoint-query.ipynb
@@ -0,0 +1,232 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d55b79d3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import boto3\n",
+    "import concurrent.futures\n",
+    "import time\n",
+    "from datetime import datetime\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "from botocore.exceptions import ClientError\n",
+    "\n",
+    "# Initialize the Timestream client\n",
+    "client = boto3.client('timestream-query', region_name='us-east-2')\n",
+    "\n",
+    "# List of different Timestream queries\n",
+    "queries = [\n",
+    "    'select memory from \"devops\".\"sample_devops\" where time > ago(10m) and hostname=\\'host1\\' order by time desc limit 1',\n",
+    "]\n",
+    "\n",
+    "# Function to run a single query and measure its duration\n",
+    "def run_query(query, worker_id):\n",
+    "    start_time = time.time()\n",
+    "    try:\n",
+    "        response = client.query(QueryString=query)\n",
+    "        duration = time.time() - start_time\n",
+    "        return worker_id, response, duration, None\n",
+    "    except ClientError as e:\n",
+    "        duration = time.time() - start_time\n",
+    "        if e.response['Error']['Code'] == 'ThrottlingException':\n",
+    "            return worker_id, None, duration, 'ThrottlingException'\n",
+    "        else:\n",
+    "            raise e\n",
+    "\n",
+    "# Function to run queries in parallel\n",
+    "def run_parallel_queries(duration_seconds, queries, max_workers):\n",
+    "    end_time = time.time() + duration_seconds\n",
+    "    total_queries = 0\n",
+    "    query_durations = []\n",
+    "    throttling_count = 0\n",
+    "\n",
+    "    print(f\"\\nStart time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\")\n",
+    "\n",
+    "    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n",
+    "        while time.time() < end_time:\n",
+    "            # Launch each query in parallel with a worker ID\n",
+    "            futures = {executor.submit(run_query, queries[i % len(queries)], i): i for i in range(max_workers)}\n",
+    "\n",
+    "            # Process results\n",
+    "            for future in concurrent.futures.as_completed(futures):\n",
+    "                try:\n",
+    "                    worker_id, result, duration, error = future.result()\n",
+    "                    query_durations.append(duration)\n",
+    "                    total_queries += 1\n",
+    "                    if error == 'ThrottlingException':\n",
+    "                        throttling_count += 1\n",
+    "                    #print(f\"Worker {worker_id}: Duration: {duration:.2f} seconds, results: {result}\")\n",
+    "                except Exception as e:\n",
+    "                    print(f\"Worker {futures[future]}: Query failed: {e}\")\n",
+    "\n",
+    "    print(f\"End time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\")\n",
+    "\n",
+    "    if query_durations:\n",
+    "        p50 = np.percentile(query_durations, 50)\n",
+    "        p90 = np.percentile(query_durations, 90)\n",
+    "        p99 = np.percentile(query_durations, 99)\n",
+    "    else:\n",
+    "        p50 = p90 = p99 = None\n",
+    "\n",
+    "    return total_queries, p50, p90, p99, throttling_count\n",
+    "\n",
+    "# Number of threads (workers) to test\n",
+    "worker_counts = [7, 14, 21, 28, 42, 50, 60]\n",
+    "duration_seconds = 60\n",
+    "\n",
+    "results = []\n",
+    "\n",
+    "# Run the tests for different worker counts\n",
+    "for max_workers in worker_counts:\n",
+    "    total_queries, p50, p90, p99, throttling_count = run_parallel_queries(duration_seconds, queries, max_workers)\n",
+    "    results.append((max_workers, total_queries, p50, p90, p99, throttling_count))\n",
+    "    print(f\"num_workers: {max_workers}\")\n",
+    "    print(f\"Total number of queries run in {duration_seconds} seconds: {total_queries}\")\n",
+    "    print(f\"p50 (50th percentile) of query durations: {p50:.2f} seconds\")\n",
+    "    print(f\"p90 (90th percentile) of query durations: {p90:.2f} seconds\")\n",
+    "    print(f\"p99 (99th percentile) of query durations: {p99:.2f} seconds\")\n",
+    "    print(f\"Throttling count: {throttling_count}\")\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "48f18dcd",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "worker_counts = [result[0] for result in results]\n",
+    "total_queries = [result[1] for result in results]\n",
+    "p50s = [result[2] for result in results]\n",
+    "p90s = [result[3] for result in results]\n",
+    "p99s = [result[4] for result in results]\n",
+    "throttling_counts = [result[5] for result in results]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "123c3d90",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(worker_counts)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "55f0a802",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(total_queries)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "52a4013f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(p50s)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "203ed0b4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(p90s)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6a07cca8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(p99s)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a7363b0b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(12, 8))\n",
+    "\n",
+    "# Plot latency percentiles\n",
+    "plt.subplot(3, 1, 1)\n",
+    "plt.plot(worker_counts, p50s, label='p50')\n",
+    "plt.plot(worker_counts, p90s, label='p90')\n",
+    "plt.plot(worker_counts, p99s, label='p99')\n",
+    "plt.xlabel('Number of Workers')\n",
+    "plt.ylabel('Latency (seconds)')\n",
+    "plt.title('Latency Percentiles')\n",
+    "plt.legend()\n",
+    "\n",
+    "# Plot Queries Per Minute (QPM)\n",
+    "plt.subplot(3, 1, 2)\n",
+    "qpm = [q / (duration_seconds / 60) for q in total_queries]\n",
+    "plt.plot(worker_counts, qpm, label='Queries Per Minute (QPM)')\n",
+    "plt.xlabel('Number of Workers')\n",
+    "plt.ylabel('Queries Per Minute')\n",
+    "plt.title('Queries Per Minute')\n",
+    "plt.legend()\n",
+    "\n",
+    "# Plot Throttling Counts\n",
+    "plt.subplot(3, 1, 3)\n",
+    "plt.plot(worker_counts, throttling_counts, label='Throttling Count', color='red')\n",
+    "plt.xlabel('Number of Workers')\n",
+    "plt.ylabel('Throttling Count')\n",
+    "plt.title('Throttling Count')\n",
+    "plt.legend()\n",
+    "\n",
+    "plt.tight_layout()\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1c78828c",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "conda_python3",
+   "language": "python",
+   "name": "conda_python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.14"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
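The notebook above only times client.query and discards the result. If the returned values are also of interest, a small helper along these lines flattens a Timestream query response (scalar columns only) into dicts keyed by column name:

    def rows_to_dicts(response):
        # Column names come from ColumnInfo; each row carries values in the same order.
        columns = [col['Name'] for col in response['ColumnInfo']]
        return [dict(zip(columns, (datum.get('ScalarValue') for datum in row['Data'])))
                for row in response['Rows']]

    # e.g. rows_to_dicts(client.query(QueryString=queries[0]))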
diff --git a/tools/python/timestream-compute-units-testing/single-groupby-orderby.ipynb b/tools/python/timestream-compute-units-testing/single-groupby-orderby.ipynb
new file mode 100644
index 00000000..7ca5f5bc
--- /dev/null
+++ b/tools/python/timestream-compute-units-testing/single-groupby-orderby.ipynb
@@ -0,0 +1,234 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "d55b79d3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import boto3\n",
+    "import concurrent.futures\n",
+    "import time\n",
+    "from datetime import datetime\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "from botocore.exceptions import ClientError\n",
+    "\n",
+    "# Initialize the Timestream client\n",
+    "client = boto3.client('timestream-query', region_name='us-east-2')\n",
+    "\n",
+    "# List of different Timestream queries\n",
+    "queries = [\n",
+    "    'select BIN(time, 1m) AS binned_time, max(cpu_utilization) as max_cpu_utilization from \"devops\".\"sample_devops\" where time > ago(10m) and hostname=\\'host2\\' group by BIN(time, 1m) order by binned_time asc',\n",
+    "]\n",
+    "\n",
+    "# Function to run a single query and measure its duration\n",
+    "def run_query(query, worker_id):\n",
+    "    start_time = time.time()\n",
+    "    try:\n",
+    "        response = client.query(QueryString=query)\n",
+    "        duration = time.time() - start_time\n",
+    "        return worker_id, response, duration, None\n",
+    "    except ClientError as e:\n",
+    "        duration = time.time() - start_time\n",
+    "        if e.response['Error']['Code'] == 'ThrottlingException':\n",
+    "            return worker_id, None, duration, 'ThrottlingException'\n",
+    "        else:\n",
+    "            raise e\n",
+    "\n",
+    "# Function to run queries in parallel\n",
+    "def run_parallel_queries(duration_seconds, queries, max_workers):\n",
+    "    end_time = time.time() + duration_seconds\n",
+    "    total_queries = 0\n",
+    "    query_durations = []\n",
+    "    throttling_count = 0\n",
+    "\n",
+    "    print(f\"\\nStart time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\")\n",
+    "\n",
+    "    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n",
+    "        while time.time() < end_time:\n",
+    "            # Launch each query in parallel with a worker ID\n",
+    "            futures = {executor.submit(run_query, queries[i % len(queries)], i): i for i in range(max_workers)}\n",
+    "\n",
+    "            # Process results\n",
+    "            for future in concurrent.futures.as_completed(futures):\n",
+    "                try:\n",
+    "                    worker_id, result, duration, error = future.result()\n",
+    "                    query_durations.append(duration)\n",
+    "                    total_queries += 1\n",
+    "                    if error == 'ThrottlingException':\n",
+    "                        throttling_count += 1\n",
+    "                    #print(f\"Worker {worker_id}: Duration: {duration:.2f} seconds, Error: {error}\")\n",
+    "                except Exception as e:\n",
+    "                    print(f\"Worker {futures[future]}: Query failed: {e}\")\n",
+    "\n",
+    "    print(f\"End time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\")\n",
+    "\n",
+    "    if query_durations:\n",
+    "        p50 = np.percentile(query_durations, 50)\n",
+    "        p90 = np.percentile(query_durations, 90)\n",
+    "        p99 = np.percentile(query_durations, 99)\n",
+    "    else:\n",
+    "        p50 = p90 = p99 = None\n",
+    "\n",
+    "    return total_queries, p50, p90, p99, throttling_count\n",
+    "\n",
+    "# Number of threads (workers) to test\n",
+    "worker_counts = [7, 14, 21, 28, 42, 50, 60]\n",
+    "duration_seconds = 60\n",
+    "\n",
+    "results = []\n",
+    "\n",
+    "# Run the tests for different worker counts\n",
+    "for max_workers in worker_counts:\n",
+    "    total_queries, p50, p90, p99, throttling_count = run_parallel_queries(duration_seconds, queries, max_workers)\n",
+    "    results.append((max_workers, total_queries, p50, p90, p99, throttling_count))\n",
+    "    print(f\"num_workers: {max_workers}\")\n",
+    "    print(f\"Total number of queries run in {duration_seconds} seconds: {total_queries}\")\n",
+    "    print(f\"p50 (50th percentile) of query durations: {p50:.2f} seconds\")\n",
+    "    print(f\"p90 (90th percentile) of query durations: {p90:.2f} seconds\")\n",
+    "    print(f\"p99 (99th percentile) of query durations: {p99:.2f} seconds\")\n",
+    "    print(f\"Throttling count: {throttling_count}\")\n",
+    "\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "48f18dcd",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "worker_counts = [result[0] for result in results]\n",
+    "total_queries = [result[1] for result in results]\n",
+    "p50s = [result[2] for result in results]\n",
+    "p90s = [result[3] for result in results]\n",
+    "p99s = [result[4] for result in results]\n",
+    "throttling_counts = [result[5] for result in results]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "123c3d90",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(worker_counts)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "55f0a802",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(total_queries)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "52a4013f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(p50s)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "203ed0b4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(p90s)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6a07cca8",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(p99s)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a7363b0b",
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(12, 8))\n",
+    "\n",
+    "# Plot latency percentiles\n",
+    "plt.subplot(3, 1, 1)\n",
+    "plt.plot(worker_counts, p50s, label='p50')\n",
+    "plt.plot(worker_counts, p90s, label='p90')\n",
+    "plt.plot(worker_counts, p99s, label='p99')\n",
+    "plt.xlabel('Number of Workers')\n",
+    "plt.ylabel('Latency (seconds)')\n",
+    "plt.title('Latency Percentiles')\n",
+    "plt.legend()\n",
+    "\n",
+    "# Plot Queries Per Minute (QPM)\n",
+    "plt.subplot(3, 1, 2)\n",
+    "qpm = [q / (duration_seconds / 60) for q in total_queries]\n",
+    "plt.plot(worker_counts, qpm, label='Queries Per Minute (QPM)')\n",
+    "plt.xlabel('Number of Workers')\n",
+    "plt.ylabel('Queries Per Minute')\n",
+    "plt.title('Queries Per Minute')\n",
+    "plt.legend()\n",
+    "\n",
+    "# Plot Throttling Counts\n",
+    "plt.subplot(3, 1, 3)\n",
+    "plt.plot(worker_counts, throttling_counts, label='Throttling Count', color='red')\n",
+    "plt.xlabel('Number of Workers')\n",
+    "plt.ylabel('Throttling Count')\n",
+    "plt.title('Throttling Count')\n",
+    "plt.legend()\n",
+    "\n",
+    "plt.tight_layout()\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1c78828c",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "conda_python3",
+   "language": "python",
+   "name": "conda_python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.14"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
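One caveat that applies to both notebooks: a single client.query call returns at most one page of results, so the measured durations cover only the first page. The last-point query is capped at one row, but for queries whose results can paginate, a fairer timing loop would drain NextToken, along these lines:

    def run_query_all_pages(client, query_string):
        # Follow NextToken until Timestream stops returning one.
        rows, token = [], None
        while True:
            kwargs = {'QueryString': query_string}
            if token:
                kwargs['NextToken'] = token
            response = client.query(**kwargs)
            rows.extend(response['Rows'])
            token = response.get('NextToken')
            if not token:
                return rows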