LLM Inference Customize

Source: https://github.com/NVIDIA/TensorRT-LLM/tree/main/examples/llm-api/llm_inference_customize.py.

### Generate text
import tempfile

from tensorrt_llm.hlapi import LLM, BuildConfig, KvCacheConfig, SamplingParams

# The end user can customize the build configuration with the BuildConfig class and other arguments borrowed from the lower-level APIs.
build_config = BuildConfig()
build_config.max_batch_size = 128
build_config.max_num_tokens = 2048

build_config.max_beam_width = 4

# The model argument accepts either a Hugging Face model name or a path to a local HF model.

llm = LLM(
    model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    build_config=build_config,
    kv_cache_config=KvCacheConfig(
        free_gpu_memory_fraction=0.8
    ),  # Similar to `build_config`, you can also customize the runtime configuration with
    # `kv_cache_config`, `runtime_config`, `peft_cache_config`, or other arguments
    # borrowed from the lower-level APIs.
)

# You can save the engine to disk and load it back later; the LLM class can accept either an HF model or a TRT-LLM engine (see the reload sketch after this example).
llm.save(tempfile.mkdtemp())

# Sample prompts.
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]

# With SamplingParams, you can customize the sampling strategy, such as beam search, temperature, and so on (see the beam-by-beam sketch after the sample output below).
sampling_params = SamplingParams(temperature=0.8, top_p=0.95, beam_width=4)

for output in llm.generate(prompts, sampling_params):
    print(
        f"Prompt: {output.prompt!r}, Generated text: {output.outputs[0].text!r}"
    )

# Got output like
# Prompt: 'Hello, my name is', Generated text: '\n\nJane Smith. I am a student pursuing my degree in Computer Science at [university]. I enjoy learning new things, especially technology and programming'
# Prompt: 'The president of the United States is', Generated text: 'likely to nominate a new Supreme Court justice to fill the seat vacated by the death of Antonin Scalia. The Senate should vote to confirm the'
# Prompt: 'The capital of France is', Generated text: 'Paris.'
# Prompt: 'The future of AI is', Generated text: 'an exciting time for us. We are constantly researching, developing, and improving our platform to create the most advanced and efficient model available. We are'
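
With max_beam_width in the build config and beam_width in SamplingParams both set to 4, each returned request can carry several candidate completions rather than only outputs[0]. Below is a minimal sketch, continuing the script above and assuming that, with beam search enabled, each entry in output.outputs corresponds to one beam:

# Print every beam for each prompt.
# Assumption: with beam search enabled, output.outputs holds one entry per beam.
for output in llm.generate(prompts, sampling_params):
    print(f"Prompt: {output.prompt!r}")
    for i, beam in enumerate(output.outputs):
        print(f"  Beam {i}: {beam.text!r}")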
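
Because the engine was saved with llm.save(), it can be reloaded later instead of being rebuilt from the Hugging Face checkpoint; as the comment above notes, the LLM class accepts a TRT-LLM engine as well as an HF model. A minimal reload sketch, continuing the script above and using a hypothetical engine_dir variable for the saved engine path:

# engine_dir is a hypothetical name for the directory passed to llm.save().
engine_dir = tempfile.mkdtemp()
llm.save(engine_dir)

# Reload the saved engine by pointing the LLM constructor at the engine directory.
llm_from_engine = LLM(model=engine_dir)
for output in llm_from_engine.generate(prompts, sampling_params):
    print(f"Prompt: {output.prompt!r}, Generated text: {output.outputs[0].text!r}")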