runtime: trtllm
compile_backend: torch-cudagraph
attn_backend: trtllm
max_seq_len: 262144
max_num_tokens: 8192
max_batch_size: 32
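# capture CUDA graphs for these batch sizes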
cuda_graph_config:
  batch_sizes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
enable_chunked_prefill: true
model_factory: Qwen3_5MoeForConditionalGeneration
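# KV cache: block reuse disabled, 80% of free GPU memory, 32-token blocks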
kv_cache_config:
  enable_block_reuse: false
  free_gpu_memory_fraction: 0.8
  tokens_per_block: 32
model_kwargs:
  torch_dtype: bfloat16
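# per-transform settings for the AutoDeploy graph-transform pipeline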
transforms:
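  # initialize the M-RoPE (multimodal rotary position embedding) delta cache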
  initialize_mrope_delta_cache:
    enabled: true
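  # export the model to a torch.fx GraphModule; num_moe_experts_for_export limits how many experts per MoE layer are traced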
  export_to_gm:
    num_moe_experts_for_export: 2
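  # fuse compatible GEMMs into larger kernels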
  fuse_gemms_mixed_children:
    enabled: true
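  # sharding setup: SYMM_MEM selects the symmetric-memory allreduce;
  # 'tp', 'ep', and 'bmm' enable tensor-parallel, expert-parallel, and batched-matmul sharding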
  detect_sharding:
    allreduce_strategy: SYMM_MEM
    shard_all_unprocessed: true
    simple_shard_filter: "lm_head"
    sharding_dims: ['tp', 'ep', 'bmm']
    # use only the manual config below for TP sharding
    sharding_source: ['manual']
    manual_config:
      tp_plan:
        # GDN (gated DeltaNet) layer
        "in_proj_qkv": "delta"
        # attention layer
        "q_proj": "colwise"
        "k_proj": "colwise"
        "v_proj": "colwise"
        "o_proj": "rowwise"
        # shared experts stay replicated (keep their entries commented out)
        # "shared_expert_gate_proj": "colwise"
        # "shared_expert_up_proj": "colwise"
        # "shared_expert_down_proj": "rowwise"
        # the gating layer should stay replicated as well
        # "gate": "gather"
  multi_stream_moe:
    stage: compile
    enabled: true
  multi_stream_gemm:
    stage: compile
    enabled: true
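  # gather only the tokens whose logits are needed before the LM head projection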
  gather_logits_before_lm_head:
    enabled: true
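  # piecewise compilation: CUDA-graph the segments between attention ops rather than the whole model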
  compile_model:
    piecewise_enabled: true
