diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml
index 72ef89c09..584fbcab9 100644
--- a/.github/configs/amd-master.yaml
+++ b/.github/configs/amd-master.yaml
@@ -186,7 +186,7 @@ qwen3.5-fp8-mi325x-sglang:
       - { tp: 8, conc-start: 4, conc-end: 64 }
 
 qwen3.5-fp8-mi355x-sglang:
-  image: rocm/sgl-dev:v0.5.8.post1-rocm720-mi35x-20260218
+  image: lmsysorg/sglang-rocm:v0.5.10rc0-rocm720-mi35x-20260330
   model: Qwen/Qwen3.5-397B-A17B-FP8
   model-prefix: qwen3.5
   runner: mi355x
@@ -197,11 +197,15 @@ qwen3.5-fp8-mi355x-sglang:
   - isl: 1024
     osl: 1024
     search-space:
-      - { tp: 8, conc-start: 4, conc-end: 64 }
+      - { tp: 2, conc-start: 8, conc-end: 128 }
+      - { tp: 4, conc-start: 4, conc-end: 32 }
+      - { tp: 8, conc-start: 4, conc-end: 4 }
   - isl: 8192
     osl: 1024
     search-space:
-      - { tp: 8, conc-start: 4, conc-end: 64 }
+      - { tp: 2, conc-start: 8, conc-end: 128 }
+      - { tp: 4, conc-start: 4, conc-end: 16 }
+      - { tp: 8, conc-start: 4, conc-end: 4 }
 
 qwen3.5-fp8-mi300x-sglang:
   image: lmsysorg/sglang:v0.5.9-rocm720-mi30x
diff --git a/benchmarks/single_node/qwen3.5_fp8_mi355x.sh b/benchmarks/single_node/qwen3.5_fp8_mi355x.sh
old mode 100644
new mode 100755
index 701695def..6f5565784
--- a/benchmarks/single_node/qwen3.5_fp8_mi355x.sh
+++ b/benchmarks/single_node/qwen3.5_fp8_mi355x.sh
@@ -19,29 +19,42 @@ hf download "$MODEL"
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
+MEM_FRAC_STATIC=${MEM_FRAC_STATIC:-0.8}
+CHUNK_SIZE=32768
 
 EVAL_CONTEXT_ARGS=""
-if [ "${EVAL_ONLY}" = "true" ]; then
-  setup_eval_context
-  EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
-fi
+#if [ "${EVAL_ONLY}" = "true" ]; then
+#  setup_eval_context
+#  EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
+#fi
 
 # Start GPU monitoring (power, temperature, clocks every second)
 start_gpu_monitor
 
-python3 -m sglang.launch_server \
+set -x
+sglang serve \
     --attention-backend triton \
     --model-path $MODEL \
     --host=0.0.0.0 \
     --port $PORT \
     --tensor-parallel-size $TP \
     --trust-remote-code \
-    --mem-fraction-static 0.8 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
+    --mem-fraction-static $MEM_FRAC_STATIC \
+    --kv-cache-dtype fp8_e4m3 \
+    --cuda-graph-max-bs $CONC \
+    --max-running-requests $CONC \
+    --chunked-prefill-size $CHUNK_SIZE \
+    --max-prefill-tokens $CHUNK_SIZE \
+    --disable-radix-cache \
+    --num-continuous-decode-steps 2 \
+    $EVAL_CONTEXT_ARGS \
+    > $SERVER_LOG 2>&1 &
 
 SERVER_PID=$!
 
 # Wait for server to be ready
 wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
 
+export PYTHONDONTWRITEBYTECODE=1
 run_benchmark_serving \
     --model "$MODEL" \
     --port "$PORT" \
@@ -52,7 +65,8 @@ run_benchmark_serving \
     --num-prompts "$((CONC * 10))" \
     --max-concurrency "$CONC" \
     --result-filename "$RESULT_FILENAME" \
-    --result-dir /workspace/
+    --result-dir /workspace/ \
+    --trust-remote-code
 
 # After throughput, run evaluation only if RUN_EVAL is true
 if [ "${RUN_EVAL}" = "true" ]; then
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index a82882f61..5723d9801 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1235,3 +1235,10 @@
     - "New model support on ATOM framework"
     - "Kimi-K2.5 FP4, and MiniMax-M2.5 FP8 configs added for MI355X ATOM"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/963
+
+- config-keys:
+    - qwen3.5-fp8-mi355x-sglang
+  description:
+    - "Qwen3.5 fp8 mi355x performance update"
+    - "Relevant Issue: https://github.com/sgl-project/sglang/issues/19633"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/995
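
The widened search space in amd-master.yaml replaces the single tp=8 sweep with tp 2/4/8, each with its own concurrency range. A sketch of how one 1024/1024 entry could expand into sweep points, assuming the harness doubles concurrency from conc-start to conc-end (consistent with the power-of-two endpoints used throughout this config); the loop below is illustrative, not the harness code:

# Hypothetical expansion of the new 1024/1024 search-space entry,
# assuming concurrency doubles between conc-start and conc-end.
for entry in "2 8 128" "4 4 32" "8 4 4"; do
    set -- $entry
    tp=$1 conc=$2 conc_end=$3
    while [ "$conc" -le "$conc_end" ]; do
        echo "tp=$tp conc=$conc"
        conc=$((conc * 2))
    done
done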
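
The image change moves the MI355X config from the rocm/sgl-dev development tag to an lmsysorg/sglang-rocm release candidate. A minimal sketch for pulling and entering that image by hand, using the standard ROCm device-passthrough flags; the host model path is illustrative:

docker pull lmsysorg/sglang-rocm:v0.5.10rc0-rocm720-mi35x-20260330

# Standard ROCm container setup: pass through /dev/kfd and /dev/dri,
# add the video group, and relax seccomp for GPU queue management.
docker run -it --rm --network=host --ipc=host \
    --device=/dev/kfd --device=/dev/dri \
    --group-add video --security-opt seccomp=unconfined \
    -v /data/models:/workspace/models \
    lmsysorg/sglang-rocm:v0.5.10rc0-rocm720-mi35x-20260330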
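
Once wait_for_server_ready returns, the relaunched server can be smoke-tested directly, assuming the image's SGLang build exposes the usual /health and OpenAI-compatible /v1/completions routes; the prompt and token count below are illustrative, and the port is the script's default:

# Liveness check: returns once the engine is serving.
curl -s http://localhost:8888/health

# One short completion through the OpenAI-compatible endpoint.
curl -s http://localhost:8888/v1/completions \
    -H "Content-Type: application/json" \
    -d '{"model": "Qwen/Qwen3.5-397B-A17B-FP8", "prompt": "Hello", "max_tokens": 16}'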