
import warnings

import torch
import torch.distributed.algorithms.model_averaging.averagers as averagers


class PostLocalSGDOptimizer(torch.optim.Optimizer):
    r"""
    Wraps an arbitrary :class:`torch.optim.Optimizer` and runs `post-local SGD <https://arxiv.org/abs/1808.07217>`_.
    This optimizer runs the local optimizer at every step.
    After the warm-up stage, it averages the parameters periodically after the local optimizer is applied.

    Args:
        optim: The local optimizer.
        averager: A model averager instance to run the post-localSGD algorithm.

    Example::

        >>> # xdoctest: +SKIP("undefined variables")
        >>> import torch
        >>> import torch.distributed as dist
        >>> import torch.distributed.algorithms.model_averaging.averagers as averagers
        >>> import torch.nn as nn
        >>> from torch.distributed.optim import PostLocalSGDOptimizer
        >>> from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import (
        >>>   PostLocalSGDState,
        >>>   post_localSGD_hook,
        >>> )
        >>>
        >>> model = nn.parallel.DistributedDataParallel(
        >>>    module, device_ids=[rank], output_device=rank
        >>> )
        >>>
        >>> # Register a post-localSGD communication hook.
        >>> state = PostLocalSGDState(process_group=None, subgroup=None, start_localSGD_iter=100)
        >>> model.register_comm_hook(state, post_localSGD_hook)
        >>>
        >>> # Create a post-localSGD optimizer that wraps a local optimizer.
        >>> # Note that ``warmup_steps`` used in ``PostLocalSGDOptimizer`` must be the same as
        >>> # ``start_localSGD_iter`` used in ``PostLocalSGDState``.
        >>> local_optim = torch.optim.SGD(params=model.parameters(), lr=0.01)
        >>> opt = PostLocalSGDOptimizer(
        >>>     optim=local_optim,
        >>>     averager=averagers.PeriodicModelAverager(period=4, warmup_steps=100)
        >>> )
        >>>
        >>> # In the first 100 steps, DDP runs global gradient averaging at every step.
        >>> # After 100 steps, DDP runs gradient averaging within each subgroup (intra-node by default),
        >>> # and the post-localSGD optimizer runs global model averaging every 4 steps after applying the local optimizer.
        >>> for step in range(0, 200):
        >>>    opt.zero_grad()
        >>>    loss = loss_fn(output, labels)
        >>>    loss.backward()
        >>>    opt.step()
    optimaveragerc                 V    || _         | j                   j                  | _        || _        y N)r   param_groupsr   )selfr   r   s      b/var/www/html/venv/lib/python3.12/site-packages/torch/distributed/optim/post_localSGD_optimizer.py__init__zPostLocalSGDOptimizer.__init__:   s"    
 JJ33     c                 .    | j                   j                  S r   )r   stater
   s    r   r   zPostLocalSGDOptimizer.state?   s    zzr   c                 6    | j                   j                         S r   )r   __repr__r   s    r   r   zPostLocalSGDOptimizer.__repr__C   s    zz""$$r   c                 l    | j                   j                         }| j                  j                  |d<   |S )z
        This is the same as :class:`torch.optim.Optimizer` :meth:`state_dict`,
        but adds an extra entry that records the model averager's step in the checkpoint,
        to ensure that reloading does not trigger an unnecessary warm-up again.
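
        A minimal sketch of the extra entry, assuming ``opt`` is the
        ``PostLocalSGDOptimizer`` constructed in the class-level example above::

            >>> # xdoctest: +SKIP("undefined variables")
            >>> checkpoint = opt.state_dict()
            >>> # The wrapped optimizer's entries are kept as-is; only "step" is added.
            >>> assert checkpoint["step"] == opt.averager.step
            >>> torch.save(checkpoint, "checkpoint.pt")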
        step)r   
state_dictr   r   )r
   optim_state_dicts     r   r   z PostLocalSGDOptimizer.state_dictF   s2      ::002#'==#5#5 r   c                     | j                   j                  |       d|v r|d   | j                  _        yt	        j
                  d       d| j                  _        y)aW  
        This is the same as :class:`torch.optim.Optimizer` :meth:`load_state_dict`,
        but also restores the model averager's step value to the one
        saved in the provided ``state_dict``.

        If there is no ``"step"`` entry in ``state_dict``,
        it will emit a warning and initialize the model averager's step to 0.
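
        A minimal usage sketch, assuming ``opt`` is the ``PostLocalSGDOptimizer``
        constructed in the class-level example and ``"checkpoint.pt"`` was written
        by :meth:`state_dict` as shown above::

            >>> # xdoctest: +SKIP("undefined variables")
            >>> opt.load_state_dict(torch.load("checkpoint.pt"))
            >>> # The averager resumes from the saved step, so the warm-up stage
            >>> # is not repeated after the restart.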
        r   z]Loaded state dict does not contain a step counter for an averager. Setting step counter to 0.r   N)r   load_state_dictr   r   warningswarn)r
   r   s     r   r   z%PostLocalSGDOptimizer.load_state_dictP   sN     	

"":.Z!+F!3DMMMM- "#DMMr   c                     | j                   j                          | j                  j                  | j                         y)zI
        Performs a single optimization step (parameter update).
        )paramsN)r   r   r   average_parametersr	   r   s    r   r   zPostLocalSGDOptimizer.stepc   s-     	

((0A0A(Br   set_to_nonec                 <    | j                   j                  |       y )N)r   )r   	zero_grad)r
   r   s     r   r    zPostLocalSGDOptimizer.zero_gradj   s    

5r   c                 :    | j                   j                  |       y r   )r   add_param_group)r
   param_groups     r   r"   z%PostLocalSGDOptimizer.add_param_groupm   s    

"";/r   N)T)__name__
__module____qualname____doc__torchr   	Optimizer	averagersModelAveragerr   propertyr   r   r   r   r   boolr    r"    r   r   r   r      se    /b!ekk33 !y?V?V !
    % #&C6T 60r   r   )
r   r(   6torch.distributed.algorithms.model_averaging.averagersdistributed
algorithmsmodel_averagingr*   r   r)   r   r.   r   r   <module>r3      s)      J Jf0EKK11 f0r   