Automatic Parallelism with LLM
Source: https://github.com/NVIDIA/TensorRT-LLM/tree/main/examples/llm-api/llm_auto_parallel.py.

This example enables automatic parallelism in the LLM API: rather than specifying a tensor- or pipeline-parallel mapping by hand, `auto_parallel=True` asks TensorRT-LLM to find a parallelization strategy for the model across `auto_parallel_world_size` GPUs (two in this example).
```python
### Automatic Parallelism with LLM
from tensorrt_llm import LLM, SamplingParams


def main():
    llm = LLM(
        model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",

        # Enable auto parallelism
        auto_parallel=True,
        auto_parallel_world_size=2)

    prompts = [
        "Hello, my name is",
        "The president of the United States is",
        "The capital of France is",
        "The future of AI is",
    ]

    sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

    for output in llm.generate(prompts, sampling_params):
        print(
            f"Prompt: {output.prompt!r}, Generated text: {output.outputs[0].text!r}"
        )

    # Sample output:
    # Prompt: 'Hello, my name is', Generated text: '\n\nJane Smith. I am a student pursuing my degree in Computer Science at [university]. I enjoy learning new things, especially technology and programming'
    # Prompt: 'The president of the United States is', Generated text: 'likely to nominate a new Supreme Court justice to fill the seat vacated by the death of Antonin Scalia. The Senate should vote to confirm the'
    # Prompt: 'The capital of France is', Generated text: 'Paris.'
    # Prompt: 'The future of AI is', Generated text: 'an exciting time for us. We are constantly researching, developing, and improving our platform to create the most advanced and efficient model available. We are'


if __name__ == '__main__':
    main()
```
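If you would rather choose the parallelization strategy yourself instead of letting the automatic search decide, the same `LLM` constructor also accepts an explicit `tensor_parallel_size`. The sketch below is a minimal manual alternative under the same assumptions as the example above (the same TinyLlama checkpoint and a two-GPU setup); it is an illustration, not part of the original example script.

```python
### Manual tensor parallelism, for comparison with the auto-parallel example
from tensorrt_llm import LLM, SamplingParams


def main():
    # Shard the model across 2 GPUs with an explicitly chosen strategy
    # instead of auto_parallel=True.
    llm = LLM(model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
              tensor_parallel_size=2)

    sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

    # generate() keeps the same interface regardless of how the model
    # is parallelized.
    for output in llm.generate(["The capital of France is"], sampling_params):
        print(
            f"Prompt: {output.prompt!r}, Generated text: {output.outputs[0].text!r}"
        )


if __name__ == '__main__':
    main()
```

In both variants only the `LLM` constructor arguments change; `llm.generate` is called the same way. The LLM API manages its worker processes internally, so examples like these are typically launched with a plain `python llm_auto_parallel.py`.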