runtime: trtllm
compile_backend: torch-cudagraph
attn_backend: trtllm
max_seq_len: 262144
max_num_tokens: 16000
max_batch_size: 256
cuda_graph_config:
  batch_sizes: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 64, 128, 256]
world_size: 8
enable_chunked_prefill: true
# For text-only mode, use AutoModelForCausalLM until issue #12699 is resolved
# Once issue #12699 is resolved, consider unifying the factory to Qwen3_5MoeForConditionalGeneration for both VLM and text-only modes
# model_factory: Qwen3_5MoeForConditionalGeneration
model_factory: AutoModelForCausalLM
kv_cache_config:
  enable_block_reuse: false
  free_gpu_memory_fraction: 0.8
  tokens_per_block: 32
model_kwargs:
  torch_dtype: bfloat16
transforms:
  # disabled for the text-only use case (AutoModelForCausalLM does not consume mrope_delta_cache)
  initialize_mrope_delta_cache:
    enabled: false
  export_to_gm:
    num_moe_experts_for_export: 2
  fuse_gemms_mixed_children:
    enabled: true
  fuse_nvfp4_moe:
    backend: trtllm_gen
  detect_sharding:
    # for long input, tp8ep1 gives better performance
    # dist_mapping: {moe_tp: 8, moe_ep: 1}
    allreduce_strategy: SYMM_MEM
    shard_all_unprocessed: true
    simple_shard_filter: "lm_head"
    sharding_dims: ['tp', 'ep', 'bmm']
    # use only manual config for TP sharding
    sharding_source: ['manual']
    manual_config:
      tp_plan:
        # GDN layer
        "in_proj_qkv": "delta"
        # attention layer
        "q_proj": "colwise"
        "k_proj": "colwise"
        "v_proj": "colwise"
        "o_proj": "rowwise"
        # lm_head: "gather" = column split + all_gather (not "colwise" which
        # requires a LayerSubgraph and crashes for standalone unprocessed nodes)
        "lm_head": "gather"
        # replicating shared experts (keep them commented out)
        # "shared_expert_gate_proj": "colwise"
        # "shared_expert_up_proj": "colwise"
        # "shared_expert_down_proj": "rowwise"
        # gating layer should be replicated as well
        # "gate": "gather"
  multi_stream_moe:
    stage: compile
    enabled: true
  multi_stream_gemm:
    stage: compile
    enabled: true
  gather_logits_before_lm_head:
    enabled: true
  compile_model:
    piecewise_enabled: true
