# Inference config for Llama 3.2 1B Instruct.
#
# Requirements:
# - Install SGLang: https://docs.sglang.ai/start/install.html
# - Log into HF: `huggingface-cli login`
# - Request access to Llama 3.2: https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct
#
# Usage:
# oumi infer -i -c configs/recipes/llama3_2/inference/1b_sglang_infer.yaml
#
# Sample command to start an SGLang server using Docker
# (the recommended method; avoids Python dependency conflicts):
# https://docs.sglang.ai/start/install.html#method-3-using-docker
#
# docker run --gpus all --shm-size 32g -p 6864:6864 --ipc=host \
# -v ~/.cache/huggingface:/root/.cache/huggingface \
# lmsysorg/sglang:latest \
# python3 -m sglang.launch_server \
# --model-path meta-llama/Llama-3.2-1B-Instruct \
# --host 0.0.0.0 --port 6864 --disable-cuda-graph \
# --mem-fraction-static=0.9
#
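# Once the container is up, a quick readiness probe (a minimal sanity check,
# assuming SGLang's standard /health endpoint; adjust host/port to match your
# launch command):
#
# curl http://127.0.0.1:6864/health
#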
# Sample command to start an SGLang server using Python:
# CUDA_VISIBLE_DEVICES=0 python -m sglang.launch_server \
# --model-path meta-llama/Llama-3.2-1B-Instruct \
# --port 6864 --disable-cuda-graph --mem-fraction-static=0.9
#
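# Sample request to smoke-test the running server before using this config.
# The prompt and sampling parameters below are illustrative; see SGLang's
# native /generate API docs for the full request schema:
#
# curl http://127.0.0.1:6864/generate \
#   -H "Content-Type: application/json" \
#   -d '{"text": "The capital of France is", "sampling_params": {"max_new_tokens": 16, "temperature": 0}}'
#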
# See Also:
# - Documentation: https://oumi.ai/docs/en/latest/user_guides/infer/infer.html
# - Config class: oumi.core.configs.InferenceConfig
# - Config source: https://github.com/oumi-ai/oumi/blob/main/src/oumi/core/configs/inference_config.py
# - Other inference configs: configs/**/inference/

model:
  model_name: "meta-llama/Llama-3.2-1B-Instruct"
  model_max_length: 2048
  torch_dtype_str: "bfloat16"
  attn_implementation: "sdpa"
  load_pretrained_weights: True
  trust_remote_code: True

generation:
  max_new_tokens: 2048
  batch_size: 4

remote_params:
  # This address matches the sample launch commands above (a local server).
  # For production use, set it to the remote server's address/port.
  # For more details, see:
  # https://sgl-project.github.io/references/sampling_params.html
  # https://github.com/skypilot-org/skypilot/blob/master/llm/sglang/README.md
  api_url: "http://127.0.0.1:6864/generate"

engine: SGLANG