Basic usage example

# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
# Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# This example demonstrates how to integrate ``inprocess.Wrapper()`` into an
# existing PyTorch training codebase.
#
# In this case, the entire ``main()`` function is wrapped. While all features
# of ``inprocess.Wrapper()`` are available and active, the Wrapper is
# configured to restart the entire application upon any failure. Consequently,
# the application state is not preserved between restarts and the entire
# ``main()`` is relaunched, leading to less efficient recovery from failures.
#
# NOTE: ``inprocess.Wrapper`` is not fully compatible with modern
# ``torch.distributed.run``, because ``torch.distributed.run`` automatically
# terminates all local workers upon any local worker process failure; in this
# case ``inprocess.Wrapper`` can only recover from transient faults that don't
# terminate any of the training processes.
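#
# The example can still be launched with ``torchrun`` (i.e.
# ``torch.distributed.run``); restarts are then limited to such transient
# faults. A hypothetical single-node launch command (the script filename is
# only a placeholder):
#
#   torchrun --nproc_per_node=4 basic_example.py --device=cuda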

import argparse
import datetime
import logging
import os
import pathlib
import random
import time
from typing import Optional

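# Silence PyTorch's C++ log output; set before ``import torch`` so it takes
# effect when the library initializes.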
os.environ['TORCH_CPP_LOG_LEVEL'] = 'error'
import torch

import nvidia_resiliency_ext.inprocess as inprocess

raise_timestamp = None


def parse_args():
    parser = argparse.ArgumentParser(
        description='Inprocess Restart Basic Example',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument(
        '--size',
        default=64,
        type=int,
        help='model hidden size',
    )
    parser.add_argument(
        '--layers',
        default=4,
        type=int,
        help='number of layers',
    )
    parser.add_argument(
        '--log-interval',
        default=100,
        type=int,
        help='logging interval',
    )
    parser.add_argument(
        '--chkpt-interval',
        default=100,
        type=int,
        help='checkpointing interval',
    )
    parser.add_argument(
        '--total-iterations',
        default=1000000,
        type=int,
        help='total training iterations',
    )
    parser.add_argument(
        '--seed',
        default=None,
        type=int,
        help='random seed, time-based if None',
    )
    parser.add_argument(
        '--path',
        default='/tmp/',
        type=str,
        help='directory for the checkpoint file',
    )
    parser.add_argument(
        '--fault-prob',
        default=0.001,
        type=float,
        help='fault injection probability',
    )
    parser.add_argument(
        '--device',
        default='cpu',
        choices=['cpu', 'cuda'],
        help='device',
    )
    parser.add_argument(
        '--log-level',
        type=lambda s: logging._nameToLevel[s.upper()],
        default=logging.INFO,
        help='logging level',
    )

    return parser.parse_args()


# The Wrapper's internal TCPStore listens on ``(MASTER_PORT + 1)`` to avoid
# conflicts with the application's TCPStore listening on
# ``(MASTER_PORT + 2 + iteration)``, and with the TCPStore created by
# ``torch.distributed.run`` listening on ``MASTER_PORT``.
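# For example, with the default ``MASTER_PORT=29500``:
# ``torch.distributed.run`` listens on 29500, the Wrapper's TCPStore on 29501,
# and the application's TCPStore on 29502, 29503, ... across successive
# restart iterations.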
#
# An instance of ``inprocess.CallWrapper`` is automatically injected into the
# wrapped function's arguments when the Wrapper is invoked.
@inprocess.Wrapper(
    store_kwargs={'port': int(os.getenv('MASTER_PORT', 29500)) + 1},
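    # ``CudaHealthCheck`` verifies that this rank's CUDA context is still
    # healthy before the wrapped function is restarted.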
    health_check=inprocess.health_check.CudaHealthCheck(),
)
def main(call_wrapper: Optional[inprocess.CallWrapper] = None):
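    # ``raise_timestamp`` is a module-level global; restarts happen in the
    # same process, so its value survives relaunches of ``main()`` and can be
    # used to measure restart latency.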
    global raise_timestamp
    if raise_timestamp is not None:
        restart_latency = time.perf_counter() - raise_timestamp
        logging.info(f'restart latency: {restart_latency:.3f}s')
    raise_timestamp = None

    args = parse_args()
    logging.info(f'{args}')

    log_interval = args.log_interval
    chkpt_interval = args.chkpt_interval

    rank = int(os.environ['RANK'])
    local_rank = int(os.environ['LOCAL_RANK'])
    world_size = int(os.environ['WORLD_SIZE'])

    if args.device == 'cuda':
        torch.cuda.set_device(local_rank)
        device = torch.device('cuda')
        backend = 'nccl'
        timeout = datetime.timedelta(seconds=150)
    elif args.device == 'cpu':
        device = torch.device('cpu')
        backend = 'gloo'
        timeout = datetime.timedelta(seconds=10)
    else:
        raise RuntimeError(f'unsupported device: {args.device}')

    if args.seed is not None:
        torch.manual_seed(args.seed)
    model = torch.nn.Sequential(
        *[torch.nn.Linear(args.size, args.size) for _ in range(args.layers)]
    ).to(device)
    opt = torch.optim.Adam(model.parameters(), lr=1e-5)

    # The application's TCPStore uses ``(MASTER_PORT + 2 + iteration)`` to
    # avoid conflicts with the TCPStore created by ``torch.distributed.run``,
    # with the Wrapper's internal TCPStore, and with application TCPStores
    # created in previous restart iterations.
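    # ``call_wrapper.iteration`` is the Wrapper's restart counter; folding it
    # into the port number yields a fresh port on every restart iteration.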
    store = torch.distributed.TCPStore(
        host_name=os.environ['MASTER_ADDR'],
        port=int(os.environ['MASTER_PORT']) + 2 + call_wrapper.iteration,
        world_size=int(os.environ['WORLD_SIZE']),
        is_master=int(os.environ['RANK']) == 0,
        multi_tenant=True,
        wait_for_workers=True,
        use_libuv=True,
    )

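    # The distributed process group is created from scratch on every restart
    # iteration; a group that was active during a failure is not reused.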
    torch.distributed.init_process_group(
        backend=backend,
        store=store,
        rank=int(os.environ['RANK']),
        world_size=int(os.environ['WORLD_SIZE']),
        timeout=timeout,
    )
    model_ddp = torch.nn.parallel.DistributedDataParallel(model)

    iteration = 0
    loss = torch.tensor(float('nan'))
    checkpoint_path = pathlib.Path(args.path) / 'checkpoint.pt'

    # Application loads state from the latest checkpoint on every restart
    # iteration of the wrapped function.
    if checkpoint_path.exists():
        checkpoint = torch.load(checkpoint_path)
        model.load_state_dict(checkpoint['model'])
        opt.load_state_dict(checkpoint['opt'])
        torch.set_rng_state(checkpoint['rng'])
        iteration = checkpoint['iteration']

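    # Reseed the Python RNG so that fault injection below is decorrelated
    # across ranks and across restart iterations.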
    if args.seed is not None:
        random.seed(args.seed + iteration * world_size + rank)
    else:
        random.seed(time.perf_counter_ns())

    for iteration in range(iteration, args.total_iterations):

        # Application periodically saves a checkpoint. The checkpoint allows
        # the application to continue from previous state after a restart.
        if iteration % chkpt_interval == chkpt_interval - 1:
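            # Synchronize all ranks so that rank 0 saves a checkpoint only
            # after every rank has reached this iteration.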
            torch.distributed.barrier()
            if rank == 0:
                checkpoint = {
                    'model': model.state_dict(),
                    'opt': opt.state_dict(),
                    'rng': torch.get_rng_state(),
                    'iteration': iteration,
                }
                # Saving the checkpoint is performed within the ``atomic()``
                # context manager to ensure that the main thread won't execute
                # ``torch.save`` while a restart procedure is in progress.
                with call_wrapper.atomic():
                    torch.save(checkpoint, checkpoint_path)

        # Randomly trigger an example fault
        if random.random() < args.fault_prob:
            raise_timestamp = time.perf_counter()
            raise RuntimeError(f'example fault at {iteration=} from {rank=}')

        inp = torch.rand(args.size, args.size).to(device)
        model.zero_grad()
        out = model_ddp(inp)
        loss = out.square().mean()
        loss.backward()
        opt.step()
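        # On CUDA devices, ``loss.item()`` synchronizes with the GPU, so
        # asynchronous errors from this iteration surface here rather than at
        # some later point.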
        loss.item()

        if rank == 0 and iteration % log_interval == log_interval - 1:
            logging.info(f'{rank=} {iteration=} {loss.item()=}')


if __name__ == '__main__':
    # ``inprocess.Wrapper`` uses the ``logging`` module to output messages. In
    # this example the Wrapper is applied to ``main()``; therefore, logging
    # needs to be initialized and configured before the Wrapper is launched.
    args = parse_args()
    logging.basicConfig(
        format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        level=args.log_level,
    )
    main()