Generate text in streaming mode#

Source NVIDIA/TensorRT-LLM.

 1import asyncio
 2
 3from tensorrt_llm import LLM, SamplingParams
 4
 5
 6def main():
 7
 8    # model could accept HF model name or a path to local HF model.
 9    llm = LLM(model="TinyLlama/TinyLlama-1.1B-Chat-v1.0")
10
11    # Sample prompts.
12    prompts = [
13        "Hello, my name is",
14        "The president of the United States is",
15        "The capital of France is",
16        "The future of AI is",
17    ]
18
19    # Create a sampling params.
20    sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
21
22    # Async based on Python coroutines
23    async def task(id: int, prompt: str):
24
25        # streaming=True is used to enable streaming generation.
26        async for output in llm.generate_async(prompt,
27                                               sampling_params,
28                                               streaming=True):
29            print(f"Generation for prompt-{id}: {output.outputs[0].text!r}")
30
31    async def main():
32        tasks = [task(id, prompt) for id, prompt in enumerate(prompts)]
33        await asyncio.gather(*tasks)
34
35    asyncio.run(main())
36
37    # Got output like follows:
38    # Generation for prompt-0: '\n'
39    # Generation for prompt-3: 'an'
40    # Generation for prompt-2: 'Paris'
41    # Generation for prompt-1: 'likely'
42    # Generation for prompt-0: '\n\n'
43    # Generation for prompt-3: 'an exc'
44    # Generation for prompt-2: 'Paris.'
45    # Generation for prompt-1: 'likely to'
46    # Generation for prompt-0: '\n\nJ'
47    # Generation for prompt-3: 'an exciting'
48    # Generation for prompt-2: 'Paris.'
49    # Generation for prompt-1: 'likely to nomin'
50    # Generation for prompt-0: '\n\nJane'
51    # Generation for prompt-3: 'an exciting time'
52    # Generation for prompt-1: 'likely to nominate'
53    # Generation for prompt-0: '\n\nJane Smith'
54    # Generation for prompt-3: 'an exciting time for'
55    # Generation for prompt-1: 'likely to nominate a'
56    # Generation for prompt-0: '\n\nJane Smith.'
57    # Generation for prompt-3: 'an exciting time for us'
58    # Generation for prompt-1: 'likely to nominate a new'
59
60
# Script entry point: only run generation when executed directly.
if __name__ == "__main__":
    main()