
import functools
import inspect
import itertools
import warnings
from collections import OrderedDict
from typing import Any, List, Optional, Tuple

from typing_extensions import deprecated

import torch
import torch._C as _C
import torch._functorch as _functorch
import torch.utils.hooks as hooks
from torch._C import _functions
from torch._functorch.autograd_function import custom_function_call


__all__ = [
    "FunctionCtx",
    "BackwardCFunction",
    "FunctionMeta",
    "Function",
    "once_differentiable",
    "InplaceFunction",
    "NestedIOFunction",
]

# Unique id provider for every subclass of Function; FunctionMeta consumes it
# when it generates the corresponding backward node class.
AUTOGRAD_FUNCTION_COUNTER = itertools.count()


class FunctionCtx:
    def save_for_backward(self, *tensors: torch.Tensor):
        r"""Save given tensors for a future call to :func:`~Function.backward`.

        ``save_for_backward`` should be called at most once, in either the
        :func:`setup_context` or :func:`forward` methods, and only with tensors.

        All tensors intended to be used in the backward pass should be saved
        with ``save_for_backward`` (as opposed to directly on ``ctx``) to prevent
        incorrect gradients and memory leaks, and enable the application of saved
        tensor hooks. See :class:`torch.autograd.graph.saved_tensors_hooks`.

        Note that if intermediary tensors, tensors that are neither inputs
        nor outputs of :func:`forward`, are saved for backward, your custom Function
        may not support double backward.
        Custom Functions that do not support double backward should decorate their
        :func:`backward` method with ``@once_differentiable`` so that performing
        double backward raises an error. If you'd like to support double backward,
        you can either recompute intermediaries based on the inputs during backward
        or return the intermediaries as the outputs of the custom Function. See the
        `double backward tutorial <https://pytorch.org/tutorials/intermediate/custom_function_double_backward_tutorial.html>`_
        for more details.

        In :func:`backward`, saved tensors can be accessed through the :attr:`saved_tensors`
        attribute. Before returning them to the user, a check is made to ensure
        they weren't used in any in-place operation that modified their content.

        Arguments can also be ``None``. This is a no-op.

        See :ref:`extending-autograd` for more details on how to use this method.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
            >>> class Func(Function):
            >>>     @staticmethod
            >>>     def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
            >>>         w = x * z
            >>>         out = x * y + y * z + w * y
            >>>         ctx.save_for_backward(x, y, w, out)
            >>>         ctx.z = z  # z is not a tensor
            >>>         return out
            >>>
            >>>     @staticmethod
            >>>     @once_differentiable
            >>>     def backward(ctx, grad_out):
            >>>         x, y, w, out = ctx.saved_tensors
            >>>         z = ctx.z
            >>>         gx = grad_out * (y + y * z)
            >>>         gy = grad_out * (x + z + w)
            >>>         gz = None
            >>>         return gx, gy, gz
            >>>
            >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double)
            >>> b = torch.tensor(2., requires_grad=True, dtype=torch.double)
            >>> c = 4
            >>> d = Func.apply(a, b, c)
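            >>>
            >>> # Added illustration, not part of the original example: calling
            >>> # backward() consumes the saved tensors; with grad_out == 1,
            >>> # a.grad == b + b*c == 10 and b.grad == a + c + w == 9.
            >>> d.backward()
            >>> assert a.grad.item() == 10.0 and b.grad.item() == 9.0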

        N)to_save)selfr   s     ^/var/www/html/suriana-translation/venv/lib/python3.12/site-packages/torch/autograd/function.pysave_for_backwardzFunctionCtx.save_for_backward#   s    r     c                 h    |D ]&  }t        |t        j                        r|!J d        || _        y)a  Save given tensors for a future call to :func:`~Function.jvp`.

        ``save_for_forward`` should be called at most once, in either the
        :func:`setup_context` or :func:`forward` methods, and all arguments
        should be tensors.

        In :func:`jvp`, saved objects can be accessed through the :attr:`saved_tensors`
        attribute.

        Arguments can also be ``None``. This is a no-op.

        See :ref:`extending-autograd` for more details on how to use this method.

        Example::
            >>> # xdoctest: +SKIP
            >>> class Func(torch.autograd.Function):
            >>>     @staticmethod
            >>>     def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
            >>>         ctx.save_for_backward(x, y)
            >>>         ctx.save_for_forward(x, y)
            >>>         ctx.z = z
            >>>         return x * y * z
            >>>
            >>>     @staticmethod
            >>>     def jvp(ctx, x_t, y_t, _):
            >>>         x, y = ctx.saved_tensors
            >>>         z = ctx.z
            >>>         return z * (y * x_t + x * y_t)
            >>>
            >>>     @staticmethod
            >>>     def vjp(ctx, grad_out):
            >>>         x, y = ctx.saved_tensors
            >>>         z = ctx.z
            >>>         return z * grad_out * y, z * grad_out * x, None
            >>>
            >>>     a = torch.tensor(1., requires_grad=True, dtype=torch.double)
            >>>     t = torch.tensor(1., dtype=torch.double)
            >>>     b = torch.tensor(2., requires_grad=True, dtype=torch.double)
            >>>     c = 4
            >>>
            >>>     with fwAD.dual_level():
            >>>         a_dual = fwAD.make_dual(a, t)
            >>>         d = Func.apply(a_dual, b, c)
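            >>>         # Added illustration, not in the original example: the
            >>>         # tangent of ``d`` is what ``jvp`` computed for ``t``,
            >>>         # i.e. z * (y * x_t) == 4 * (2 * 1) == 8 here, since
            >>>         # ``b`` carries no tangent of its own.
            >>>         _, d_tangent = fwAD.unpack_dual(d)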

        """
        for tensor in tensors:
            assert isinstance(tensor, torch.Tensor) or tensor is None, (
                "save_for_forward expects all arguments to be tensors; you should "
                "save non-tensors as attributes on ctx."
            )

        self.saved_for_forward = tensors

    def mark_dirty(self, *args: torch.Tensor):
        r"""Mark given tensors as modified in an in-place operation.

        This should be called at most once, in either the :func:`setup_context`
        or :func:`forward` methods, and all arguments should be inputs.

        Every tensor that's been modified in-place in a call to :func:`forward`
        should be given to this function, to ensure correctness of our checks.
        It doesn't matter whether the function is called before or after
        modification.

        Examples::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
            >>> class Inplace(Function):
            >>>     @staticmethod
            >>>     def forward(ctx, x):
            >>>         x_npy = x.numpy() # x_npy shares storage with x
            >>>         x_npy += 1
            >>>         ctx.mark_dirty(x)
            >>>         return x
            >>>
            >>>     @staticmethod
            >>>     @once_differentiable
            >>>     def backward(ctx, grad_output):
            >>>         return grad_output
            >>>
            >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double).clone()
            >>> b = a * a
            >>> Inplace.apply(a)  # This would lead to wrong gradients!
            >>>                   # but the engine would not know unless we mark_dirty
            >>> # xdoctest: +SKIP
            >>> b.backward() # RuntimeError: one of the variables needed for gradient
            >>>              # computation has been modified by an inplace operation

        N)dirty_tensorsr   r!   s     r   
mark_dirtyzFunctionCtx.mark_dirty   s    F "r   z`mark_shared_storage` is deprecated. Tensors with shared storages are automatically tracked. Note that calls to `set_()` are not tracked)categoryc                      y N )r   pairss     r   mark_shared_storagezFunctionCtx.mark_shared_storage   s     	r   c                     || _         y)a  Mark outputs as non-differentiable.

        This should be called at most once, in either the :func:`setup_context`
        or :func:`forward` methods, and all arguments should be tensor outputs.

        This will mark outputs as not requiring gradients, increasing the
        efficiency of backward computation. You still need to accept a gradient
        for each output in :meth:`~Function.backward`, but it's always going to
        be a zero tensor with the same shape as the shape of a corresponding
        output.

        This is used e.g. for indices returned from a sort. See example::
            >>> class Func(Function):
            >>>     @staticmethod
            >>>     def forward(ctx, x):
            >>>         sorted, idx = x.sort()
            >>>         ctx.mark_non_differentiable(idx)
            >>>         ctx.save_for_backward(x, idx)
            >>>         return sorted, idx
            >>>
            >>>     @staticmethod
            >>>     @once_differentiable
            >>>     def backward(ctx, g1, g2):  # still need to accept g2
            >>>         x, idx = ctx.saved_tensors
            >>>         grad_input = torch.zeros_like(x)
            >>>         grad_input.index_add_(0, idx, g1)
            >>>         return grad_input

        N)non_differentiabler$   s     r   mark_non_differentiablez#FunctionCtx.mark_non_differentiable   s    < #'r   valuec                     || _         y)a  Set whether to materialize grad tensors. Default is ``True``.

        This should be called only from either the :func:`setup_context` or
        :func:`forward` methods.

        If ``True``, undefined grad tensors will be expanded to tensors full of zeros
        prior to calling the :func:`backward` and :func:`jvp` methods.

        Example::
            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
            >>> class SimpleFunc(Function):
            >>>     @staticmethod
            >>>     def forward(ctx, x):
            >>>         return x.clone(), x.clone()
            >>>
            >>>     @staticmethod
            >>>     @once_differentiable
            >>>     def backward(ctx, g1, g2):
            >>>         return g1 + g2  # No check for None necessary
            >>>
            >>> # We modify SimpleFunc to handle non-materialized grad outputs
            >>> class Func(Function):
            >>>     @staticmethod
            >>>     def forward(ctx, x):
            >>>         ctx.set_materialize_grads(False)
            >>>         ctx.save_for_backward(x)
            >>>         return x.clone(), x.clone()
            >>>
            >>>     @staticmethod
            >>>     @once_differentiable
            >>>     def backward(ctx, g1, g2):
            >>>         x, = ctx.saved_tensors
            >>>         grad_input = torch.zeros_like(x)
            >>>         if g1 is not None:  # We must check for None now
            >>>             grad_input += g1
            >>>         if g2 is not None:
            >>>             grad_input += g2
            >>>         return grad_input
            >>>
            >>> a = torch.tensor(1., requires_grad=True)
            >>> b, _ = Func.apply(a)  # induces g2 to be undefined

        N)materialize_grads)r   r/   s     r   set_materialize_gradsz!FunctionCtx.set_materialize_grads   s    X "'r   N)__name__
__module____qualname__r   r   r   r    r%   r   FutureWarningr+   r.   boolr2   r)   r   r   r   r   "   su    9%,, 9v4) 4)l#" #"J 	6 	'U\\ '@,'4 ,'r   r   c                       e Zd Zed        Zy)
_HookMixinc                 j    | 
t               } t        j                  |       }|| |j                  <   | |fS r(   )r   hooksRemovableHandleid)backward_hookshookhandles      r   _register_hookz_HookMixin._register_hook  s8    !(]N&&~6$(vyy!v%%r   N)r3   r4   r5   staticmethodrA   r)   r   r   r9   r9     s    & &r   r9   c                   "    e Zd ZdZd Zd Zd Zy)r   zD
    This class is used for internal autograd work. Do not use.
    c                     | j                   j                  }| j                   j                  }|t        j                  ur|t        j                  urt	        d      |t        j                  ur|n|} || g| S )zP
        Apply method used when executing this Node during the backward
        zsImplementing both 'backward' and 'vjp' for a custom Function is not allowed. You should only implement one of them.)_forward_clsbackwardvjpr   RuntimeError)r   r!   backward_fnvjp_fnuser_fns        r   applyzBackwardCFunction.apply$  su     ''00""&&h///F(,,4N 
 #(,,6&Kt#d##r   c                 <     | j                   j                  | g| S )zU
        Apply method used when executing forward mode AD during the forward
        )rE   jvpr$   s     r   	apply_jvpzBackwardCFunction.apply_jvp5  s"    
 %t  $$T1D11r   c                 8    | j                   j                  |       S r(   )rE   _compiled_autograd_key)r   s    r   rQ   z(BackwardCFunction._compiled_autograd_key<  s      77==r   N)r3   r4   r5   __doc__rL   rO   rQ   r)   r   r   r   r     s    $"2>r   r   c                   "     e Zd ZdZ fdZ xZS )r   a   Function metaclass.

    This metaclass sets up the following properties:
        _backward_cls: The Function class corresponding to the differentiated
            version of this function (which is generated on the fly by this
            metaclass).
    c                     t        |dz   t        fd| i      }t        t              |_        |j                  dd      |_        || _        t        | %  |||       y )NBackwardrE   _compiled_autograd_should_liftT)
typer   nextAUTOGRAD_FUNCTION_COUNTER_autograd_function_idgetrV   _backward_clssuper__init__)clsnamebasesattrsrI   	__class__s        r   r^   zFunctionMeta.__init__I  sc    : 13nc5J
 -11J,K)5:YY,d6
2 (ue,r   r3   r4   r5   rR   r^   __classcell__rc   s   @r   r   r   @  s    
- 
-r   r   c            	           e Zd Zedededefd       Zededeedf   dedefd	       Zeded
edefd       ZeZ	edededefd       Z
y)_SingleLevelFunctionr!   kwargsreturnc                      t        d      )a  Define the forward of the custom autograd Function.

        This function is to be overridden by all subclasses.
        There are two ways to define forward:

        Usage 1 (Combined forward and ctx)::

            @staticmethod
            def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
                pass

        - It must accept a context ctx as the first argument, followed by any
          number of arguments (tensors or other types).
        - See :ref:`combining-forward-context` for more details

        Usage 2 (Separate forward and ctx)::

            @staticmethod
            def forward(*args: Any, **kwargs: Any) -> Any:
                pass

            @staticmethod
            def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None:
                pass

        - The forward no longer accepts a ctx argument.
        - Instead, you must also override the :meth:`torch.autograd.Function.setup_context`
          staticmethod to handle setting up the ``ctx`` object.
          ``output`` is the output of the forward, ``inputs`` are a Tuple of inputs
          to the forward.
        - See :ref:`extending-autograd` for more details

        The context can be used to store arbitrary data that can be then
        retrieved during the backward pass. Tensors should not be stored
        directly on `ctx` (though this is not currently enforced for
        backward compatibility). Instead, tensors should be saved either with
        :func:`ctx.save_for_backward` if they are intended to be used in
        ``backward`` (equivalently, ``vjp``) or :func:`ctx.save_for_forward`
        if they are intended to be used for in ``jvp``.
        zEYou must implement the forward function for custom autograd.Function.NotImplementedError)r!   ri   s     r   forwardz_SingleLevelFunction.forwardY  s    T "S
 	
r   ctxinputs.outputc                     t        d      )a}  There are two ways to define the forward pass of an autograd.Function.

        Either:

        1. Override forward with the signature ``forward(ctx, *args, **kwargs)``.
           ``setup_context`` is not overridden. Setting up the ctx for backward
           happens inside the ``forward``.
        2. Override forward with the signature ``forward(*args, **kwargs)`` and
           override ``setup_context``. Setting up the ctx for backward happens
           inside ``setup_context`` (as opposed to inside the ``forward``)

        See :meth:`torch.autograd.Function.forward` and :ref:`extending-autograd` for more details.
        z!setup_context is not implemented.rl   )ro   rp   rq   s      r   setup_contextz"_SingleLevelFunction.setup_context  s     ""EFFr   grad_outputsc                     t        d      )a  Define a formula for differentiating the operation with backward mode automatic differentiation.

        This function is to be overridden by all subclasses.
        (Defining this function is equivalent to defining the ``vjp`` function.)

        It must accept a context :attr:`ctx` as the first argument, followed by
        as many outputs as the :func:`forward` returned (None will be passed in
        for non tensor outputs of the forward function),
        and it should return as many tensors, as there were inputs to
        :func:`forward`. Each argument is the gradient w.r.t the given output,
        and each returned value should be the gradient w.r.t. the
        corresponding input. If an input is not a Tensor or is a Tensor not
        requiring grads, you can just pass None as a gradient for that input.

        The context can be used to retrieve tensors saved during the forward
        pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple
        of booleans representing whether each input needs gradient. E.g.,
        :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the
        first input to :func:`forward` needs gradient computed w.r.t. the
        output.
        zwYou must implement either the backward or vjp method for your custom autograd.Function to use it with backward mode AD.rl   )ro   rt   s     r   rF   z_SingleLevelFunction.backward  s    . "
 	
r   grad_inputsc                     t        d      )a{  Define a formula for differentiating the operation with forward mode automatic differentiation.

        This function is to be overridden by all subclasses.
        It must accept a context :attr:`ctx` as the first argument, followed by
        as many inputs as the :func:`forward` got (None will be passed in
        for non tensor inputs of the forward function),
        and it should return as many tensors as there were outputs to
        :func:`forward`. Each argument is the gradient w.r.t the given input,
        and each returned value should be the gradient w.r.t. the
        corresponding output. If an output is not a Tensor or the function is not
        differentiable with respect to that output, you can just pass None as a
        gradient for that input.

        You can use the :attr:`ctx` object to pass any value from the forward to this
        functions.
        z`You must implement the jvp function for custom autograd.Function to use it with forward mode AD.rl   )ro   rv   s     r   rN   z_SingleLevelFunction.jvp  s    $ "@
 	
r   N)r3   r4   r5   rB   r   rn   r   rs   rF   rG   rN   r)   r   r   rh   rh   V  s     +
s +
c +
c +
 +
Z G3 Gc3h G G G G  
c 
# 
# 
 
: C
 
C 
C 
 
r   rh   )	metaclassc                   ^     e Zd ZdZd Zd Z	 dZed        Ze	 fd       Z
ed        Z xZS )r   a  Base class to create custom `autograd.Function`.

    To create a custom `autograd.Function`, subclass this class and implement
    the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom
    op in the forward pass, call the class method ``apply``. Do not call
    :meth:`forward` directly.

    To ensure correctness and best performance, make sure you are calling the
    correct methods on ``ctx`` and validating your backward function using
    :func:`torch.autograd.gradcheck`.

    See :ref:`extending-autograd` for more details on how to use this class.

    Examples::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
        >>> class Exp(Function):
        >>>     @staticmethod
        >>>     def forward(ctx, i):
        >>>         result = i.exp()
        >>>         ctx.save_for_backward(result)
        >>>         return result
        >>>
        >>>     @staticmethod
        >>>     def backward(ctx, grad_output):
        >>>         result, = ctx.saved_tensors
        >>>         return grad_output * result
        >>>
        >>> # Use it by calling the apply method:
        >>> # xdoctest: +SKIP
        >>> output = Exp.apply(input)
    c                 V    t        j                  | j                   dt        d       y )Nz should not be instantiated. Methods on autograd functions are all static, so you should invoke them on the class itself. Instantiating an autograd function will raise an error in a future version of PyTorch.   )
stacklevel)warningswarnrc   DeprecationWarningr   r!   ri   s      r   r^   zFunction.__init__  s*    ~~ 4 4 	
r   c                     t        d      )NzLegacy autograd function with non-static forward method is deprecated. Please use new-style autograd function with static forward method. (Example: https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function))rH   r   s      r   __call__zFunction.__call__  s    _
 	
r   Fc                     t        d      )a{  Define the behavior for this autograd.Function underneath :func:`torch.vmap`.

        For a :func:`torch.autograd.Function` to support
        :func:`torch.vmap`, you must either override this static method, or set
        ``generate_vmap_rule`` to ``True`` (you may not do both).

        If you choose to override this staticmethod: it must accept

        - an ``info`` object as the first argument. ``info.batch_size``
          specifies the size of the dimension being vmapped over,
          while ``info.randomness`` is the randomness option passed to
          :func:`torch.vmap`.
        - an ``in_dims`` tuple as the second argument.
          For each arg in ``args``, ``in_dims`` has a corresponding
          ``Optional[int]``. It is ``None`` if the arg is not a Tensor or if
          the arg is not being vmapped over, otherwise, it is an integer
          specifying what dimension of the Tensor is being vmapped over.
        - ``*args``, which is the same as the args to :meth:`~Function.forward`.

        The return of the vmap staticmethod is a tuple of ``(output, out_dims)``.
        Similar to ``in_dims``, ``out_dims`` should be of the same structure as
        ``output`` and contain one ``out_dim`` per output that specifies if the
        output has the vmapped dimension and what index it is in.

        Please see :ref:`func-autograd-function` for more details.
        zrTo use autograd.Function with vmap, you must either override the vmap staticmethod or set generate_vmap_rule=True.rl   )infoin_dimsr!   s      r   vmapzFunction.vmap  s    8 "@
 	
r   c                 4   d }t        | j                        }|r || j                  g|i |}t        j                  j                         s.t        j                  j                  |      }t        | (  |i |S |st        d      t        | g|i |S )Nc                     t        j                  |       } |j                  |i |}|j                          |j                  S r(   )inspect	signaturebindapply_defaultsr!   )funcr!   ri   r   
bound_argss        r   bind_default_argsz)Function.apply.<locals>.bind_default_args1  s=    ))$/I'88J%%'??"r   zIn order to use an autograd.Function with functorch transforms (vmap, grad, jvp, jacrev, ...), it must override the setup_context staticmethod. For more details, please see https://pytorch.org/docs/main/notes/extending.func.html)_is_setup_context_definedrs   rn   r   _C _are_functorch_transforms_active
_functorchutilsunwrap_dead_wrappersr]   rL   rH   r
   )r_   r!   ri   r   is_setup_ctx_definedrc   s        r   rL   zFunction.apply/  s    	#  99J9JK$S[[B4B6BDxx88:##88>D7=$1&11#J  $C9$9&99r   c                     | j                   fS r(   )rZ   )ro   s    r   rQ   zFunction._compiled_autograd_keyK  s    ))++r   )r3   r4   r5   rR   r^   r   generate_vmap_rulerB   r   classmethodrL   rQ   re   rf   s   @r   r   r     s[    B

 
 
@ : :6 , ,r   r   c                 (    | t         j                  k7  S r(   )rh   rs   )fns    r   r   r   P  s    %3333r   c                 B     t        j                          fd       }|S )Nc                 f   t        j                         5   | g| }d d d        t        j                         sS t        d |D              }|sS t	        t
              s|f}t        j                  dt        |            }d } ||D cg c]
  } ||       c} S # 1 sw Y   xY wc c}w )Nc              3   l   K   | ],  }t        |t        j                        xr |j                   . y wr(   )r   r   r   requires_grad).0args     r   	<genexpr>z7once_differentiable.<locals>.wrapper.<locals>.<genexpr>f  s.      
DGJsELL)?c.?.??
s   24sR   trying to differentiate twice a function that was marked with @once_differentiablec                 8    | | j                         } d| _        | S )NT)detachr   )vars    r   fake_requires_gradz@once_differentiable.<locals>.wrapper.<locals>.fake_requires_gradx  s    jjl$(!Jr   )	r   no_gradis_grad_enabledanyr   tupler	   DelayedErrorlen)ro   r!   outputsr   err_fnr   vr   s          r   wrapperz$once_differentiable.<locals>.wrapperU  s    ]]_ 	%ntnG	% $$&N  
KO
 
 N'5)jG(()L
	 w?!*1-?@@O	% 	%N @s   
B"B."B+)	functoolswraps)r   r   s   ` r   r   r   T  s(    __R(A (AT Nr   c                   $     e Zd ZdZd fd	Z xZS )r   
    This class is here only for backward compatibility reasons.
    Use :class:`Function` instead of this for any new use case.
    c                 0    t         |           || _        y r(   )r]   r^   inplace)r   r   rc   s     r   r^   zInplaceFunction.__init__  s    r   )Frd   rf   s   @r   r   r     s    
 r   r   c                       fdS )Nc                     |       r |       S | y t        | t        t        f      r6fd| D        }t        | d      r t	        |       | S  t	        |       |      S t        | t
              r| D ci c]  }| | |          c}S t        dt        j                  |       z   rdz   dz   z         dz         c c}w )Nc              3   .   K   | ]  } |        y wr(   r)   )r   x_maps     r   r   z,_nested_map.<locals>._map.<locals>.<genexpr>  s     +!d1g+s   _fieldsAAuto nesting doesn't know how to process an input object of type . Accepted types: , or lists/tuples of them )	r   listr   hasattrrW   dict
ValueErrorr   typename)objmappedr   r   	conditioncondition_msgr   s      r   r   z_nested_map.<locals>._map  s    S>c7N[dE]++s+FsI& tCy&))49V$$T"-01AtCF|O11+..%&
 % )=8;VV		 	 	 	 2s   5B?r)   )r   r   r   r   s   ```@r   _nested_mapr     s    2 Kr   c                 >    t        | d      r| j                         S | S )N_jit_unwrap)r   r   )r   s    r   _jit_unwrap_structuredr     s    sM"  Jr   c                 "      fdS )Nc              3     K    |       }  |       r|  y | y t        | t        t        f      r| D ]  } |      E d {     y t        | t              r&| j	                         D ]  } |      E d {     y r|  y t        dt        j                  |       z   rdz   dz   z         dz         7 x7 Dw)Nr   r   r   r   )r   r   r   r   valuesr   r   r   )r   o_iterallow_unknownr   r   
conversions     r   r   z_iter_filter.<locals>._iter  s     !S/CS>I[dE]+ $ 8##$T"ZZ\ $ 8##$I+..%&
 % )=8;VV		 	 	 	 $ $s%   ACB=5C:B?;AC?Cr)   )r   r   r   r   r   s   ````@r   _iter_filterr     s     8 Lr   c                 &    fd | |      d   S )Nc                    g }t        |d      r|j                  |       S t        |t        t        f      s
| d   | dd  fS |D ]3  }||j                  |        | |      \  }} |j                  |       5  t        |      |      | fS )N	_jit_wrapr      )r   r   r   r   r   appendrW   )inputprotoreseres_eunflatten_helpers        r   r   z$_unflatten.<locals>.unflatten_helper  s    ,.5+&??5))%$/8U12Y&& 	"Ay

1/q9u

5!	" tE{3&&r   r   r)   )r   r   r   s     @r   
_unflattenr     s    ' E5)!,,r   c                 V    | d u xs$ t        | t        j                  j                        S r(   )r   r   r   Valuer   s    r   <lambda>r     s    a4i8:a8 r   zjit's Values or None)r   c                 6    t        | t        j                        S r(   r   r   r   r   s    r   r   r         jELL) r   Tensors)r   r   c                 6    t        | t        j                        S r(   r   r   s    r   r   r     r   r   TzTensors (permissive))r   r   c                 B    | d u xs t        | t        j                        S r(   r   r   s    r   r   r     s    a4i6:a6 r   zTensors or Nonec                 6    t        | t        j                        S r(   r   r   s    r   r   r     r   r   c                     | j                   S r(   )datar   s    r   r   r     s
    QVV r   c                        e Zd ZdZ fdZ fdZdedefdZeZdedefdZ	dedd	fd
Z
e fd       Zdededd	fdZdededd	fdZdedd	fdZdedd	fdZ xZS )r   r   c                     || _         t        t        |            }t        |   | }| j
                  }t        || j
                        }|S r(   )_nested_inputr   _iter_tensorsr]   _do_forward_nested_outputr   )r   r   
flat_inputflat_outputnested_outputnested_tensorsrc   s         r   r   zNestedIOFunction._do_forward  sL    "=/0
g):6++#K1D1DEr   c                 B    || _         t        | 	  ||      }|s| `| `|S r(   )retain_variablesr]   _do_backwardr   _to_save_nested)r   	gradientsr   resultrc   s       r   r   zNestedIOFunction._do_backward  s1     0%i1AB#$r   r   rj   c                 t    t        || j                        } | j                  | }t        t	        |            S )z*
        Shared backward utility.
        )r   r   backward_extendedr   _iter_None_tensors)r   r   nested_gradientsr   s       r   rF   zNestedIOFunction.backward  s;     &i1D1DE''')9:'/00r   r!   c                     t        | j                        } | j                  | }| `|| _        t	        t        |            S )z)
        Shared forward utility.
        )_map_tensor_datar   forward_extendedr   r   r   )r   r!   r   r   s       r   rn   zNestedIOFunction.forward  sD     *$*<*<=&&&7$]6*++r   Nc                 D    t        t        |            | _        || _        y)z9
        See :meth:`Function.save_for_backward`.
        N)r   r   r   r   r$   s     r   r   z"NestedIOFunction.save_for_backward'  s     ]401#r   c                 D    t         |   }t        || j                        S )z5
        See :meth:`Function.saved_tensors`.
        )r]   saved_tensorsr   r   )r   flat_tensorsrc   s     r   r	  zNestedIOFunction.saved_tensors.  s"    
 w,,(<(<==r   ri   c                 :    t        t        ||f            | _        y)z2
        See :meth:`Function.mark_dirty`.
        N)r   r   r#   r   s      r   r%   zNestedIOFunction.mark_dirty6  s     #=$#@Ar   c                 :    t        t        ||f            | _        y)z?
        See :meth:`Function.mark_non_differentiable`.
        N)r   r   r-   r   s      r   r.   z(NestedIOFunction.mark_non_differentiable<  s     #(tVn(E"Fr   r   c                     t         )z'
        User defined forward.
        rl   )r   r   s     r   r  z!NestedIOFunction.forward_extendedB  
     "!r   grad_outputc                     t         )z(
        User defined backward.
        rl   )r   r  s     r   r  z"NestedIOFunction.backward_extendedH  r  r   )r3   r4   r5   rR   r   r   r   rF   r   rn   r   propertyr	  r%   r.   r  r  re   rf   s   @r   r   r     s    13 13 1 H,S ,S ,$s $t $ > >B Bs Bt BGS GC GD G"s "t ""c "d "r   r   r(   )FNN)1r   r   	itertoolsr}   collectionsr   typingr   r   r   r   typing_extensionsr   r   torch._Cr   torch._functorchr   torch.utils.hooksr   r;   r	   "torch._functorch.autograd_functionr
   __all__countrY   r   _ContextMethodMixinr9   _FunctionBaser   rW   r   rh   r   r   r   r   r   r   r   r   _iter_jit_valuesr   _iter_tensors_permissiver  r  r   r)   r   r   <module>r      sX       # - - (   % ! !  C ,IOO- l' l'` " & &>((+z >B-4 -,w
k:w
t},# },@4,^h :@-(  8(  )%
 ()( 
 "6FW  )+;9 
Q"x Q"r   