"""
:mod:`torch.distributed.optim` exposes DistributedOptimizer, which takes a list
of remote parameters (:class:`~torch.distributed.rpc.RRef`) and runs the
optimizer locally on the workers where the parameters live.  The distributed
optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to
apply the gradients on each worker.
"""
import warnings

import torch
from torch import optim

from .apply_optimizer_in_backward import (
    _apply_optimizer_in_backward,
    _get_in_backward_optimizers,
)
from .functional_adadelta import _FunctionalAdadelta
from .functional_adagrad import _FunctionalAdagrad
from .functional_adam import _FunctionalAdam
from .functional_adamax import _FunctionalAdamax
from .functional_adamw import _FunctionalAdamW
from .functional_rmsprop import _FunctionalRMSprop
from .functional_rprop import _FunctionalRprop
from .functional_sgd import _FunctionalSGD
from .named_optimizer import _NamedOptimizer
from .utils import as_functional_optim


# Force the deprecation warning to be emitted at import time, even if
# DeprecationWarnings are filtered out by default.
with warnings.catch_warnings():
    warnings.simplefilter("always")
    warnings.warn(
        "`TorchScript` support for functional optimizers is deprecated "
        "and will be removed in a future PyTorch release. "
        "Consider using the `torch.compile` optimizer instead.",
        DeprecationWarning,
        stacklevel=2,
    )

# DistributedOptimizer depends on torch.distributed.rpc, which is not available
# in every build, so only import it when the C extension exposes _rpc_init.
if hasattr(torch._C, "_rpc_init"):
    from .optimizer import DistributedOptimizer

from .post_localSGD_optimizer import PostLocalSGDOptimizer
from .zero_redundancy_optimizer import ZeroRedundancyOptimizer


__all__ = [
    "as_functional_optim",
    "DistributedOptimizer",
    "PostLocalSGDOptimizer",
    "ZeroRedundancyOptimizer",
]
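
# A minimal usage sketch of DistributedOptimizer, kept as a comment so it does
# not run at import. Assumptions not made by this module: rpc.init_rpc has
# already been called on every process, and a worker named "worker1" exists;
# the tensors and learning rate are purely illustrative.
#
#   import torch
#   import torch.distributed.autograd as dist_autograd
#   import torch.distributed.rpc as rpc
#   from torch import optim
#   from torch.distributed.optim import DistributedOptimizer
#
#   with dist_autograd.context() as context_id:
#       # Forward pass: parameters owned by a remote worker, held via RRefs.
#       rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3))
#       rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
#       loss = rref1.to_here() + rref2.to_here()
#
#       # Backward pass over the distributed autograd graph.
#       dist_autograd.backward(context_id, [loss.sum()])
#
#       # Wrap a local optimizer class plus the parameter RRefs; step() runs
#       # the optimizer on each worker that owns the parameters.
#       dist_optim = DistributedOptimizer(optim.SGD, [rref1, rref2], lr=0.05)
#       dist_optim.step(context_id)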