Generate text

Source NVIDIA/TensorRT-LLM.

### Generate text
import tempfile

from tensorrt_llm import SamplingParams
from tensorrt_llm._tensorrt_engine import LLM


def main():

    # The model can be an HF model name, a path to a local HF model,
    # or a TensorRT Model Optimizer quantized checkpoint such as
    # nvidia/Llama-3.1-8B-Instruct-FP8 on HF.
    llm = LLM(model="TinyLlama/TinyLlama-1.1B-Chat-v1.0")
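    # (Constructing the LLM from an HF checkpoint compiles it into a TRT-LLM
    # engine; build time and GPU memory use depend on the model.)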

    # You can save the engine to disk and load it back later; the LLM class
    # accepts either an HF model or a TRT-LLM engine.
    llm.save(tempfile.mkdtemp())
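    # Note: tempfile.mkdtemp() returns the directory path; keep a reference
    # to it (e.g. engine_dir = tempfile.mkdtemp()) if you want to reload the
    # engine later, as sketched after this listing.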

    # Sample prompts.
    prompts = [
        "Hello, my name is",
        "The president of the United States is",
        "The capital of France is",
        "The future of AI is",
    ]

    # Create the sampling parameters. temperature scales the next-token
    # distribution (lower means more deterministic); top_p=0.95 samples from
    # the smallest set of tokens whose cumulative probability exceeds 0.95.
    sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

    # Generate text for each prompt; each result carries the prompt and a
    # list of completions, of which outputs[0] is the first.
    for output in llm.generate(prompts, sampling_params):
        print(
            f"Prompt: {output.prompt!r}, Generated text: {output.outputs[0].text!r}"
        )

    # Example output:
    # Prompt: 'Hello, my name is', Generated text: '\n\nJane Smith. I am a student pursuing my degree in Computer Science at [university]. I enjoy learning new things, especially technology and programming'
    # Prompt: 'The president of the United States is', Generated text: 'likely to nominate a new Supreme Court justice to fill the seat vacated by the death of Antonin Scalia. The Senate should vote to confirm the'
    # Prompt: 'The capital of France is', Generated text: 'Paris.'
    # Prompt: 'The future of AI is', Generated text: 'an exciting time for us. We are constantly researching, developing, and improving our platform to create the most advanced and efficient model available. We are'


if __name__ == "__main__":
    main()
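
As the comment in the script notes, the LLM class accepts either an HF model or a saved TRT-LLM engine, so a later run can skip the build step by pointing at the saved directory. A minimal sketch, where /path/to/saved/engine is a hypothetical placeholder for the path that tempfile.mkdtemp() returned in the run above:

from tensorrt_llm import SamplingParams
from tensorrt_llm._tensorrt_engine import LLM

# Hypothetical placeholder: use the directory llm.save() actually wrote to.
engine_dir = "/path/to/saved/engine"

# Loads the prebuilt engine instead of building from an HF checkpoint.
llm = LLM(model=engine_dir)
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

for output in llm.generate(["The capital of France is"], sampling_params):
    print(f"Prompt: {output.prompt!r}, Generated text: {output.outputs[0].text!r}")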