Skip to content

Commit

Permalink
Bug fix: Always update source code locations
Browse files Browse the repository at this point in the history
  • Loading branch information
geoffxy committed Mar 28, 2019
1 parent 730d61e commit 3519958
Show file tree
Hide file tree
Showing 2 changed files with 34 additions and 14 deletions.
30 changes: 16 additions & 14 deletions server/lib/analysis/request_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,33 +65,36 @@ def _handle_analysis_request(self, analysis_request, state, address):
if not state.is_request_current(analysis_request):
return

# 0. If the parse tree has not changed, use our cached response
# 1. Parse the code to extract source locations
class_name, annotation_info, model_operations = analyze_code(
tree, source_map)
if not state.is_request_current(analysis_request):
return

# 2. If the parse tree has not changed, use our cached response
cached_results = self._source_cache.query(tree)
if cached_results is not None:
logger.debug(
'Using cached response for request %d from (%s:%d).',
analysis_request.sequence_number,
*address,
)
# cached_results[0] is the cached model_operations map
model_operations.set_runtimes_from_cache(cached_results[0])
self._enqueue_response(
self._send_analysis_response,
*cached_results,
annotation_info,
model_operations,
*cached_results[1:],
analysis_request.sequence_number,
address,
)
return

# 1. Parse the code to extract relevant information
class_name, annotation_info, model_operations = analyze_code(
tree, source_map)
if not state.is_request_current(analysis_request):
return

model = to_trainable_model(tree, class_name)
if not state.is_request_current(analysis_request):
return

# 2. Profile the model layer by layer
# 3. Profile the model layer by layer
# NOTE: This function makes in-place changes to model_operations
# NOTE: This function will attach hooks to the model
get_operation_runtimes(
Expand All @@ -110,7 +113,7 @@ def _handle_analysis_request(self, analysis_request, state, address):
del model
model = to_trainable_model(tree, class_name)

# 3. Profile the model's overall memory usage
# 4. Profile the model's overall memory usage
memory_info = get_memory_info(
analysis_request.source_code,
class_name,
Expand All @@ -127,7 +130,7 @@ def _handle_analysis_request(self, analysis_request, state, address):
address,
)

# 4. Profile the model's throughput
# 5. Profile the model's throughput
throughput_info = get_throughput_info(
model, annotation_info, memory_info)
perf_limits = get_performance_limits(memory_info, throughput_info)
Expand All @@ -139,9 +142,8 @@ def _handle_analysis_request(self, analysis_request, state, address):
address,
)

# 5. Cache the overall results
# 6. Cache the overall results
results = (
annotation_info,
model_operations,
memory_info,
throughput_info,
Expand Down
18 changes: 18 additions & 0 deletions server/lib/models/analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,16 @@ def get_operation_info_by_bound_name(self, bound_name):
def get_operations(self):
    """Return a view over all tracked per-operation info objects."""
    return self.operations.values()

def set_runtimes_from_cache(self, cached_info_map):
    """
    Copy cached per-operation runtimes onto this map's operations.

    Used when the parsed code has not changed, so the previously
    measured runtimes are still valid and re-profiling can be skipped.
    """
    # Hoist the lookup; each of our operations is matched to its cached
    # counterpart by bound name.
    lookup = cached_info_map.get_operation_info_by_bound_name
    for name, operation_info in self.operations.items():
        operation_info.runtime_us = lookup(name).runtime_us

def fill_protobuf(self, operation_list_pb):
for operation in self.get_operations():
pb = operation_list_pb.add()
Expand Down Expand Up @@ -157,6 +167,14 @@ def __init__(self, max_batch_size, throughput_limit):
self.max_batch_size = max_batch_size
self.throughput_limit = throughput_limit

def __repr__(self):
    """Debug string showing the batch-size and throughput limits."""
    template = 'PerformanceLimits(max_batch={:.2f}, thpt_limit={:.2f})'
    return template.format(self.max_batch_size, self.throughput_limit)

def fill_protobuf(self, limits_pb):
    """Copy the stored performance limits onto the given protobuf message."""
    pb = limits_pb
    pb.max_batch_size = self.max_batch_size
    pb.throughput_limit = self.throughput_limit

0 comments on commit 3519958

Please sign in to comment.