diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml
index e84fc0da5..1a5d1a6db 100644
--- a/.github/configs/amd-master.yaml
+++ b/.github/configs/amd-master.yaml
@@ -132,7 +132,7 @@ qwen3.5-bf16-mi355x-sglang:
     - { tp: 8, conc-start: 4, conc-end: 64 }
 
 qwen3.5-bf16-mi300x-sglang:
-  image: lmsysorg/sglang:v0.5.9-rocm720-mi30x
+  image: lmsysorg/sglang:v0.5.10rc0-rocm720-mi30x
   model: Qwen/Qwen3.5-397B-A17B
   model-prefix: qwen3.5
   runner: mi300x
@@ -150,7 +150,7 @@ qwen3.5-bf16-mi300x-sglang:
     - { tp: 8, conc-start: 4, conc-end: 64 }
 
 qwen3.5-bf16-mi325x-sglang:
-  image: lmsysorg/sglang:v0.5.9-rocm720-mi30x
+  image: lmsysorg/sglang:v0.5.10rc0-rocm720-mi30x
   model: Qwen/Qwen3.5-397B-A17B
   model-prefix: qwen3.5
   runner: mi325x
@@ -168,7 +168,7 @@ qwen3.5-bf16-mi325x-sglang:
     - { tp: 8, conc-start: 4, conc-end: 64 }
 
 qwen3.5-fp8-mi325x-sglang:
-  image: lmsysorg/sglang:v0.5.9-rocm720-mi30x
+  image: lmsysorg/sglang:v0.5.10rc0-rocm720-mi30x
   model: Qwen/Qwen3.5-397B-A17B-FP8
   model-prefix: qwen3.5
   runner: mi325x
@@ -204,7 +204,7 @@ qwen3.5-fp8-mi355x-sglang:
     - { tp: 8, conc-start: 4, conc-end: 64 }
 
 qwen3.5-fp8-mi300x-sglang:
-  image: lmsysorg/sglang:v0.5.9-rocm720-mi30x
+  image: lmsysorg/sglang:v0.5.10rc0-rocm720-mi30x
   model: Qwen/Qwen3.5-397B-A17B-FP8
   model-prefix: qwen3.5
   runner: mi300x
diff --git a/benchmarks/single_node/qwen3.5_bf16_mi300x.sh b/benchmarks/single_node/qwen3.5_bf16_mi300x.sh
index 8aca9860a..c4d971a41 100755
--- a/benchmarks/single_node/qwen3.5_bf16_mi300x.sh
+++ b/benchmarks/single_node/qwen3.5_bf16_mi300x.sh
@@ -19,11 +19,14 @@ hf download "$MODEL"
 
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
+CONTEXT_LENGTH=$((ISL + OSL + 20))
+MAX_PREFILL_TOKENS=32768
 EVAL_CONTEXT_ARGS=""
 if [ "${EVAL_ONLY}" = "true" ]; then
   setup_eval_context
   EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
+else EVAL_CONTEXT_ARGS="--context-length $CONTEXT_LENGTH"
 fi
 
 # Start GPU monitoring (power, temperature, clocks every second)
 start_gpu_monitor
@@ -35,9 +38,15 @@
 python3 -m sglang.launch_server \
   --host=0.0.0.0 \
   --port $PORT \
   --tensor-parallel-size $TP \
+  --data-parallel-size 1 \
   --trust-remote-code \
-  --mem-fraction-static 0.8 \
-  --disable-radix-cache $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
+  --tokenizer-worker-num 6 \
+  --enable-aiter-allreduce-fusion \
+  --cuda-graph-max-bs $CONC \
+  --disable-radix-cache \
+  --max-prefill-tokens $MAX_PREFILL_TOKENS \
+  --scheduler-recv-interval 30 \
+  --mem-fraction-static 0.75 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
 SERVER_PID=$!
 
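
The new CONTEXT_LENGTH line sizes the server's context window from the benchmark's own sequence lengths instead of the model default: input length plus output length plus a small safety margin. A minimal sketch of the arithmetic, with hypothetical ISL/OSL values standing in for the sweep config:

    # Hypothetical sequence lengths for illustration only; real runs inherit
    # ISL/OSL from the benchmark harness.
    ISL=1024
    OSL=1024
    CONTEXT_LENGTH=$((ISL + OSL + 20))   # 1024 + 1024 + 20 = 2068 tokens
    echo "$CONTEXT_LENGTH"               # prints 2068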
diff --git a/benchmarks/single_node/qwen3.5_bf16_mi325x.sh b/benchmarks/single_node/qwen3.5_bf16_mi325x.sh
index 8aca9860a..c4d971a41 100644
--- a/benchmarks/single_node/qwen3.5_bf16_mi325x.sh
+++ b/benchmarks/single_node/qwen3.5_bf16_mi325x.sh
@@ -19,11 +19,14 @@ hf download "$MODEL"
 
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
+CONTEXT_LENGTH=$((ISL + OSL + 20))
+MAX_PREFILL_TOKENS=32768
 EVAL_CONTEXT_ARGS=""
 if [ "${EVAL_ONLY}" = "true" ]; then
   setup_eval_context
   EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
+else EVAL_CONTEXT_ARGS="--context-length $CONTEXT_LENGTH"
 fi
 
 # Start GPU monitoring (power, temperature, clocks every second)
 start_gpu_monitor
@@ -35,9 +38,15 @@
 python3 -m sglang.launch_server \
   --host=0.0.0.0 \
   --port $PORT \
   --tensor-parallel-size $TP \
+  --data-parallel-size 1 \
   --trust-remote-code \
-  --mem-fraction-static 0.8 \
-  --disable-radix-cache $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
+  --tokenizer-worker-num 6 \
+  --enable-aiter-allreduce-fusion \
+  --cuda-graph-max-bs $CONC \
+  --disable-radix-cache \
+  --max-prefill-tokens $MAX_PREFILL_TOKENS \
+  --scheduler-recv-interval 30 \
+  --mem-fraction-static 0.75 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
 SERVER_PID=$!
 
diff --git a/benchmarks/single_node/qwen3.5_bf16_mi355x.sh b/benchmarks/single_node/qwen3.5_bf16_mi355x.sh
index 701695def..ce82b9a53 100755
--- a/benchmarks/single_node/qwen3.5_bf16_mi355x.sh
+++ b/benchmarks/single_node/qwen3.5_bf16_mi355x.sh
@@ -9,7 +9,8 @@ check_env_vars \
   ISL \
   OSL \
   RANDOM_RANGE_RATIO \
-  RESULT_FILENAME
+  RESULT_FILENAME \
+  EP_SIZE
 
 if [[ -n "$SLURM_JOB_ID" ]]; then
   echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
@@ -19,11 +20,14 @@ hf download "$MODEL"
 
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
+CONTEXT_LENGTH=$((ISL + OSL + 20))
+MAX_PREFILL_TOKENS=32768
 EVAL_CONTEXT_ARGS=""
 if [ "${EVAL_ONLY}" = "true" ]; then
   setup_eval_context
   EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
+else EVAL_CONTEXT_ARGS="--context-length $CONTEXT_LENGTH"
 fi
 
 # Start GPU monitoring (power, temperature, clocks every second)
 start_gpu_monitor
@@ -34,8 +38,16 @@
 python3 -m sglang.launch_server \
   --host=0.0.0.0 \
   --port $PORT \
   --tensor-parallel-size $TP \
+  --ep-size $EP_SIZE \
+  --data-parallel-size 1 \
   --trust-remote-code \
-  --mem-fraction-static 0.8 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
+  --tokenizer-worker-num 6 \
+  --enable-aiter-allreduce-fusion \
+  --cuda-graph-max-bs $CONC \
+  --disable-radix-cache \
+  --max-prefill-tokens $MAX_PREFILL_TOKENS \
+  --scheduler-recv-interval 30 \
+  --mem-fraction-static 0.75 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
 SERVER_PID=$!
 
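
Beyond the shared flag changes, the MI355X scripts now require EP_SIZE and pass it as --ep-size, spreading the experts of the MoE model Qwen3.5-397B-A17B across GPUs. A hedged invocation sketch, assuming the script is driven entirely by environment variables as its check_env_vars call implies (the values below are illustrative, not the tuned sweep settings):

    MODEL=Qwen/Qwen3.5-397B-A17B TP=8 EP_SIZE=8 CONC=64 \
    ISL=1024 OSL=1024 RANDOM_RANGE_RATIO=0.5 \
    RESULT_FILENAME=result.json \
    ./benchmarks/single_node/qwen3.5_bf16_mi355x.sh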
diff --git a/benchmarks/single_node/qwen3.5_fp8_mi300x.sh b/benchmarks/single_node/qwen3.5_fp8_mi300x.sh
index 00cc9cf91..406631bbb 100755
--- a/benchmarks/single_node/qwen3.5_fp8_mi300x.sh
+++ b/benchmarks/single_node/qwen3.5_fp8_mi300x.sh
@@ -19,11 +19,14 @@ hf download "$MODEL"
 
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
+CONTEXT_LENGTH=$((ISL + OSL + 20))
+MAX_PREFILL_TOKENS=32768
 EVAL_CONTEXT_ARGS=""
 if [ "${EVAL_ONLY}" = "true" ]; then
   setup_eval_context
   EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
+else EVAL_CONTEXT_ARGS="--context-length $CONTEXT_LENGTH"
 fi
 
 # Start GPU monitoring (power, temperature, clocks every second)
 start_gpu_monitor
@@ -36,9 +39,15 @@
 python3 -m sglang.launch_server \
   --host=0.0.0.0 \
   --port $PORT \
   --tensor-parallel-size $TP \
+  --data-parallel-size 1 \
   --trust-remote-code \
-  --mem-fraction-static 0.8 \
-  --disable-radix-cache $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
+  --tokenizer-worker-num 6 \
+  --enable-aiter-allreduce-fusion \
+  --cuda-graph-max-bs $CONC \
+  --disable-radix-cache \
+  --max-prefill-tokens $MAX_PREFILL_TOKENS \
+  --scheduler-recv-interval 30 \
+  --mem-fraction-static 0.75 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
 SERVER_PID=$!
 
diff --git a/benchmarks/single_node/qwen3.5_fp8_mi325x.sh b/benchmarks/single_node/qwen3.5_fp8_mi325x.sh
index 00cc9cf91..406631bbb 100755
--- a/benchmarks/single_node/qwen3.5_fp8_mi325x.sh
+++ b/benchmarks/single_node/qwen3.5_fp8_mi325x.sh
@@ -19,11 +19,14 @@ hf download "$MODEL"
 
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
+CONTEXT_LENGTH=$((ISL + OSL + 20))
+MAX_PREFILL_TOKENS=32768
 EVAL_CONTEXT_ARGS=""
 if [ "${EVAL_ONLY}" = "true" ]; then
   setup_eval_context
   EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
+else EVAL_CONTEXT_ARGS="--context-length $CONTEXT_LENGTH"
 fi
 
 # Start GPU monitoring (power, temperature, clocks every second)
 start_gpu_monitor
@@ -36,9 +39,15 @@
 python3 -m sglang.launch_server \
   --host=0.0.0.0 \
   --port $PORT \
   --tensor-parallel-size $TP \
+  --data-parallel-size 1 \
   --trust-remote-code \
-  --mem-fraction-static 0.8 \
-  --disable-radix-cache $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
+  --tokenizer-worker-num 6 \
+  --enable-aiter-allreduce-fusion \
+  --cuda-graph-max-bs $CONC \
+  --disable-radix-cache \
+  --max-prefill-tokens $MAX_PREFILL_TOKENS \
+  --scheduler-recv-interval 30 \
+  --mem-fraction-static 0.75 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
 SERVER_PID=$!
 
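
Every variant backgrounds launch_server and records SERVER_PID, so the server comes up asynchronously. When reproducing a run by hand, poll it before sending traffic; a small sketch, assuming SGLang's /health endpoint (present in the pinned releases):

    PORT=${PORT:-8888}
    # Poll until the server answers, checking every 5 s for up to ~5 minutes.
    for _ in $(seq 1 60); do
        curl -sf "http://localhost:${PORT}/health" > /dev/null && break
        sleep 5
    done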
diff --git a/benchmarks/single_node/qwen3.5_fp8_mi355x.sh b/benchmarks/single_node/qwen3.5_fp8_mi355x.sh
index 701695def..ce82b9a53 100644
--- a/benchmarks/single_node/qwen3.5_fp8_mi355x.sh
+++ b/benchmarks/single_node/qwen3.5_fp8_mi355x.sh
@@ -9,7 +9,8 @@ check_env_vars \
   ISL \
   OSL \
   RANDOM_RANGE_RATIO \
-  RESULT_FILENAME
+  RESULT_FILENAME \
+  EP_SIZE
 
 if [[ -n "$SLURM_JOB_ID" ]]; then
   echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
@@ -19,11 +20,14 @@ hf download "$MODEL"
 
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
+CONTEXT_LENGTH=$((ISL + OSL + 20))
+MAX_PREFILL_TOKENS=32768
 EVAL_CONTEXT_ARGS=""
 if [ "${EVAL_ONLY}" = "true" ]; then
   setup_eval_context
   EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
+else EVAL_CONTEXT_ARGS="--context-length $CONTEXT_LENGTH"
 fi
 
 # Start GPU monitoring (power, temperature, clocks every second)
 start_gpu_monitor
@@ -34,8 +38,16 @@
 python3 -m sglang.launch_server \
   --host=0.0.0.0 \
   --port $PORT \
   --tensor-parallel-size $TP \
+  --ep-size $EP_SIZE \
+  --data-parallel-size 1 \
   --trust-remote-code \
-  --mem-fraction-static 0.8 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
+  --tokenizer-worker-num 6 \
+  --enable-aiter-allreduce-fusion \
+  --cuda-graph-max-bs $CONC \
+  --disable-radix-cache \
+  --max-prefill-tokens $MAX_PREFILL_TOKENS \
+  --scheduler-recv-interval 30 \
+  --mem-fraction-static 0.75 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
 SERVER_PID=$!
 
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 967edc19c..8d6cd2c9b 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1220,3 +1220,13 @@
     - "Uses nvidia/GLM-5-NVFP4 model with modelopt_fp4 quantization"
     - "Image: lmsysorg/sglang:nightly-dev-cu13-20260328-a27651d5"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/973
+- config-keys:
+    - qwen3.5-bf16-mi300x-sglang
+    - qwen3.5-bf16-mi325x-sglang
+    - qwen3.5-fp8-mi300x-sglang
+    - qwen3.5-fp8-mi325x-sglang
+  description:
+    - "Update cli args of Qwen3.5 FP8 and BF16 SGLang benchmarks for MI300X and MI325X to achieve better performance"
+    - "Use lmsysorg/sglang:v0.5.10rc0-rocm720-mi30x"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/986
+
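
The changelog entry cites the four config keys whose image and CLI args changed; those keys sit at column 0 of .github/configs/amd-master.yaml. A quick consistency check under that assumption:

    # Confirm each config key named in the changelog entry exists in the
    # AMD master config (job keys start at column 0 and end with ':').
    for key in qwen3.5-bf16-mi300x-sglang qwen3.5-bf16-mi325x-sglang \
               qwen3.5-fp8-mi300x-sglang qwen3.5-fp8-mi325x-sglang; do
        grep -q "^${key}:" .github/configs/amd-master.yaml || echo "missing: ${key}"
    done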