Commit

updated
robertgshaw2-redhat committed Jan 3, 2025
1 parent 884879a commit bb86a03
Showing 2 changed files with 1 addition and 6 deletions.
1 change: 1 addition & 0 deletions vllm/v1/engine/llm_engine.py
@@ -97,6 +97,7 @@ def from_engine_args(
             logger.debug("Enabling multiprocessing for LLMEngine.")
             enable_multiprocessing = True
 
+        print(f"{enable_multiprocessing=}")
         # Create the LLMEngine.
         return cls(vllm_config=vllm_config,
                    executor_class=executor_class,
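For context on the added line: the {enable_multiprocessing=} form is Python's self-documenting f-string specifier (Python 3.8+), which prints the expression text together with its value. A minimal standalone sketch, independent of vLLM:

    enable_multiprocessing = True
    # The trailing '=' inside the f-string echoes the expression alongside its value.
    print(f"{enable_multiprocessing=}")  # prints: enable_multiprocessing=True
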
6 changes: 0 additions & 6 deletions vllm/v1/worker/gpu_worker.py
@@ -34,8 +34,6 @@ def __init__(
         rank: int,
         distributed_init_method: str,
     ):
-
-        self.i = 0
 
         # TODO: use WorkerBase.__init__(self, vllm_config=vllm_config)
         self.vllm_config = vllm_config
@@ -203,10 +201,6 @@ def execute_model(
         self,
         scheduler_output: "SchedulerOutput",
     ) -> ModelRunnerOutput:
-        if self.rank == 0 and self.i == 10:
-            raise ValueError("ERROR FROM HERE :)")
-        self.i += 1
-
         output = self.model_runner.execute_model(scheduler_output)
         return output if self.rank == 0 else None
