From 730d61ed8ceb1126a0c258fcc7261de78b04de91 Mon Sep 17 00:00:00 2001
From: Geoffrey Yu
Date: Thu, 28 Mar 2019 14:04:32 -0400
Subject: [PATCH] Improve batch size selection for throughput profiling

---
 server/lib/config/__init__.py     |  4 ++--
 server/lib/profiler/throughput.py | 20 ++++++++++++++++----
 2 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/server/lib/config/__init__.py b/server/lib/config/__init__.py
index bc43968..eb1e50f 100644
--- a/server/lib/config/__init__.py
+++ b/server/lib/config/__init__.py
@@ -5,8 +5,8 @@ class _Config:
 
     def __init__(self):
         self.Hints = None
-        self.warm_up = 200
-        self.measure_for = 200
+        self.warm_up = 100
+        self.measure_for = 10
 
     def initialize_hints_config(self, hints_file):
         with open(hints_file, 'r') as f:
diff --git a/server/lib/profiler/throughput.py b/server/lib/profiler/throughput.py
index b40d65f..655a625 100644
--- a/server/lib/profiler/throughput.py
+++ b/server/lib/profiler/throughput.py
@@ -18,9 +18,10 @@ def get_throughput_info(model, annotation_info, memory_info):
     runtime_model_ms = _get_runtime_model(model, input_size)
     throughput = input_size[0] / runtime_ms * 1000
 
-    # NOTE: Reduce the theoretical max by 5%, since it is asymptotic
+    # Reduce maximum throughput by 0.1% since throughput vs. batch size
+    # will asymptotically approach the theoretical max value
     max_throughput = (
-        1.0 / runtime_model_ms.coefficient * 1000 * .95
+        1.0 / runtime_model_ms.coefficient * 1000 * 0.999
     )
 
     return ThroughputInfo(throughput, max_throughput, runtime_model_ms)
@@ -32,8 +33,7 @@ def get_throughput_info(model, annotation_info, memory_info):
 
 
 def _get_runtime_model(model, input_size):
-    # TODO: Select batch sizes for this more intelligently
-    batches = [8, 16, 32]
+    batches = _batch_size_selector(input_size)
     runtimes_ms = list(map(
         lambda batch_size: _measure_runtime(model, batch_size, input_size),
         batches,
@@ -84,6 +84,18 @@ def iteration():
     return start_event.elapsed_time(end_event) / Config.measure_for
 
 
+def _batch_size_selector(input_size):
+    # TODO: Select batch sizes more intelligently
+    batch_size = input_size[0]
+    if batch_size < 128:
+        smaller = max(batch_size // 2, 1)
+        larger = batch_size * 2
+    else:
+        smaller = max(batch_size - 50, 1)
+        larger = batch_size + 50
+    return [smaller, batch_size, larger]
+
+
 def main():
     import argparse
     import code
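
As a quick illustration of the behavior this patch introduces, here is a minimal, self-contained sketch of the new batch size selection and of the asymptotic max-throughput calculation it feeds. The RuntimeModel stand-in (with its coefficient and bias fields), the example input sizes, and the sample numbers below are assumptions for illustration only, not part of the patch.

# Sketch of the selection logic added by this patch, plus the max-throughput
# calculation it feeds into. RuntimeModel is a hypothetical stand-in for the
# fitted linear runtime model returned by _get_runtime_model:
#     runtime_ms(batch) ~= coefficient * batch + bias
from collections import namedtuple

RuntimeModel = namedtuple('RuntimeModel', ['coefficient', 'bias'])


def batch_size_selector(input_size):
    # Mirrors _batch_size_selector: probe batch sizes around the annotated
    # batch size instead of the old fixed list [8, 16, 32].
    batch_size = input_size[0]
    if batch_size < 128:
        smaller = max(batch_size // 2, 1)
        larger = batch_size * 2
    else:
        smaller = max(batch_size - 50, 1)
        larger = batch_size + 50
    return [smaller, batch_size, larger]


def max_throughput(runtime_model_ms):
    # Throughput is batch / (coefficient * batch + bias) samples per ms, which
    # approaches 1 / coefficient as the batch size grows. The patch reports
    # 99.9% of that limit (converted to samples per second) because the limit
    # itself is never reached.
    return 1.0 / runtime_model_ms.coefficient * 1000 * 0.999


if __name__ == '__main__':
    print(batch_size_selector((32, 3, 224, 224)))   # [16, 32, 64]
    print(batch_size_selector((256, 3, 224, 224)))  # [206, 256, 306]
    # e.g. 0.75 ms of runtime per extra sample -> ~1332 samples/s ceiling
    print(max_throughput(RuntimeModel(coefficient=0.75, bias=20.0)))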