"""Adds docstrings to Tensor functions"""

import torch._C
from torch._C import _add_docstr as add_docstr
from torch._torch_docs import parse_kwargs, reproducibility_notes


def add_docstr_all(method, docstr):
    add_docstr(getattr(torch._C.TensorBase, method), docstr)


common_args = parse_kwargs(
    """
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.preserve_format``.
"""
)

new_common_args = parse_kwargs(
    """
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.
    dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
        Default: if None, same :class:`torch.dtype` as this tensor.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if None, same :class:`torch.device` as this tensor.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
    layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
        Default: ``torch.strided``.

"""
)

add_docstr_all(
    "new_tensor",
    r"""
new_tensor(data, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor

Returns a new Tensor with :attr:`data` as the tensor data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

.. warning::

    :func:`new_tensor` always copies :attr:`data`. If you have a Tensor
    ``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
    or :func:`torch.Tensor.detach`.
    If you have a numpy array and want to avoid a copy, use
    :func:`torch.from_numpy`.

.. warning::

    When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
    and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
    and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
    The equivalents using ``clone()`` and ``detach()`` are recommended.

Args:
    data (array_like): The returned Tensor copies :attr:`data`.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones((2,), dtype=torch.int8)
    >>> data = [[0, 1], [2, 3]]
    >>> tensor.new_tensor(data)
    tensor([[ 0,  1],
            [ 2,  3]], dtype=torch.int8)

""".format(**new_common_args),
)

add_docstr_all(
    "new_full",
    r"""
new_full(size, fill_value, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor

Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    fill_value (scalar): the number to fill the output tensor with.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones((2,), dtype=torch.float64)
    >>> tensor.new_full((3, 4), 3.141592)
    tensor([[ 3.1416,  3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416,  3.1416],
            [ 3.1416,  3.1416,  3.1416,  3.1416]], dtype=torch.float64)

""".format(**new_common_args),
)

add_docstr_all(
    "new_empty",
    r"""
new_empty(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor

Returns a Tensor of size :attr:`size` filled with uninitialized data.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones(())
    >>> tensor.new_empty((2, 3))
    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])

""".format(**new_common_args),
)

add_docstr_all(
    "new_empty_strided",
    r"""
new_empty_strided(size, stride, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor

Returns a Tensor of size :attr:`size` and strides :attr:`stride` filled with
uninitialized data. By default, the returned Tensor has the same
:class:`torch.dtype` and :class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.ones(())
    >>> tensor.new_empty_strided((2, 3), (3, 1))
    tensor([[ 5.8182e-18,  4.5765e-41, -1.0545e+30],
            [ 3.0949e-41,  4.4842e-44,  0.0000e+00]])

""".format(**new_common_args),
)

add_docstr_all(
    "new_ones",
    r"""
new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor

Returns a Tensor of size :attr:`size` filled with ``1``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.tensor((), dtype=torch.int32)
    >>> tensor.new_ones((2, 3))
    tensor([[ 1,  1,  1],
            [ 1,  1,  1]], dtype=torch.int32)

""".format(**new_common_args),
)

add_docstr_all(
    "new_zeros",
    r"""
new_zeros(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor

Returns a Tensor of size :attr:`size` filled with ``0``.
By default, the returned Tensor has the same :class:`torch.dtype` and
:class:`torch.device` as this tensor.

Args:
    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
        shape of the output tensor.

Keyword args:
    {dtype}
    {device}
    {requires_grad}
    {layout}
    {pin_memory}

Example::

    >>> tensor = torch.tensor((), dtype=torch.float64)
    >>> tensor.new_zeros((2, 3))
    tensor([[ 0.,  0.,  0.],
            [ 0.,  0.,  0.]], dtype=torch.float64)

""".format(**new_common_args),
)

add_docstr_all(
    "abs",
    r"""
abs() -> Tensor

See :func:`torch.abs`
""",
)

add_docstr_all(
    "abs_",
    r"""
abs_() -> Tensor

In-place version of :meth:`~Tensor.abs`
""",
)

add_docstr_all(
    "absolute",
    r"""
absolute() -> Tensor

Alias for :func:`abs`
""",
)

add_docstr_all(
    "absolute_",
    r"""
absolute_() -> Tensor

In-place version of :meth:`~Tensor.absolute`
Alias for :func:`abs_`
""",
)

add_docstr_all(
    "acos",
    r"""
acos() -> Tensor

See :func:`torch.acos`
""",
)

add_docstr_all(
    "acos_",
    r"""
acos_() -> Tensor

In-place version of :meth:`~Tensor.acos`
""",
)

add_docstr_all(
    "arccos",
    r"""
arccos() -> Tensor

See :func:`torch.arccos`
""",
)

add_docstr_all(
    "arccos_",
    r"""
arccos_() -> Tensor

In-place version of :meth:`~Tensor.arccos`
""",
)

add_docstr_all(
    "acosh",
    r"""
acosh() -> Tensor

See :func:`torch.acosh`
""",
)

add_docstr_all(
    "acosh_",
    r"""
acosh_() -> Tensor

In-place version of :meth:`~Tensor.acosh`
""",
)

add_docstr_all(
    "arccosh",
    r"""
arccosh() -> Tensor

See :func:`torch.arccosh`
""",
)

add_docstr_all(
    "arccosh_",
    r"""
arccosh_() -> Tensor

In-place version of :meth:`~Tensor.arccosh`
""",
)

add_docstr_all(
    "add",
    r"""
add(other, *, alpha=1) -> Tensor

Add a scalar or tensor to :attr:`self` tensor. If both :attr:`alpha`
and :attr:`other` are specified, each element of :attr:`other` is scaled by
:attr:`alpha` before being used.

When :attr:`other` is a tensor, the shape of :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor

See :func:`torch.add`
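
A small illustration of the ``alpha`` argument, which scales each element
of :attr:`other` before the addition::

    >>> t = torch.tensor([1., 2., 3.])
    >>> t.add(torch.tensor([10., 20., 30.]), alpha=2)
    tensor([21., 42., 63.])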
""",
)

add_docstr_all(
    "add_",
    r"""
add_(other, *, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.add`
""",
)

add_docstr_all(
    "addbmm",
    r"""
addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addbmm`
""",
)

add_docstr_all(
    "addbmm_",
    r"""
addbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addbmm`
""",
)

add_docstr_all(
    "addcdiv",
    r"""
addcdiv(tensor1, tensor2, *, value=1) -> Tensor

See :func:`torch.addcdiv`
""",
)

add_docstr_all(
    "addcdiv_",
    r"""
addcdiv_(tensor1, tensor2, *, value=1) -> Tensor

In-place version of :meth:`~Tensor.addcdiv`
""",
)

add_docstr_all(
    "addcmul",
    r"""
addcmul(tensor1, tensor2, *, value=1) -> Tensor

See :func:`torch.addcmul`
""",
)

add_docstr_all(
    "addcmul_",
    r"""
addcmul_(tensor1, tensor2, *, value=1) -> Tensor

In-place version of :meth:`~Tensor.addcmul`
""",
)

add_docstr_all(
    "addmm",
    r"""
addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addmm`
""",
)

add_docstr_all(
    "addmm_",
    r"""
addmm_(mat1, mat2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addmm`
""",
)

add_docstr_all(
    "addmv",
    r"""
addmv(mat, vec, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addmv`
""",
)

add_docstr_all(
    "addmv_",
    r"""
addmv_(mat, vec, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addmv`
""",
)

add_docstr_all(
    "sspaddmm",
    r"""
sspaddmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.sspaddmm`
""",
)

add_docstr_all(
    "smm",
    r"""
smm(mat) -> Tensor

See :func:`torch.smm`
""",
)

add_docstr_all(
    "addr",
    r"""
addr(vec1, vec2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.addr`
""",
)

add_docstr_all(
    "addr_",
    r"""
addr_(vec1, vec2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.addr`
""",
)

add_docstr_all(
    "align_as",
    r"""
align_as(other) -> Tensor

Permutes the dimensions of the :attr:`self` tensor to match the dimension order
in the :attr:`other` tensor, adding size-one dims for any new names.

This operation is useful for explicit broadcasting by names (see examples).

All of the dims of :attr:`self` must be named in order to use this method.
The resulting tensor is a view on the original tensor.

All dimension names of :attr:`self` must be present in ``other.names``.
:attr:`other` may contain named dimensions that are not in ``self.names``;
the output tensor has a size-one dimension for each of those new names.

To align a tensor to a specific order, use :meth:`~Tensor.align_to`.

Examples::

    # Example 1: Applying a mask
    >>> mask = torch.randint(2, [127, 128], dtype=torch.bool).refine_names('W', 'H')
    >>> imgs = torch.randn(32, 128, 127, 3, names=('N', 'H', 'W', 'C'))
    >>> imgs.masked_fill_(mask.align_as(imgs), 0)


    # Example 2: Applying a per-channel-scale
    >>> def scale_channels(input, scale):
    >>>    scale = scale.refine_names('C')
    >>>    return input * scale.align_as(input)

    >>> num_channels = 3
    >>> scale = torch.randn(num_channels, names=('C',))
    >>> imgs = torch.rand(32, 128, 128, num_channels, names=('N', 'H', 'W', 'C'))
    >>> more_imgs = torch.rand(32, num_channels, 128, 128, names=('N', 'C', 'H', 'W'))
    >>> videos = torch.randn(3, num_channels, 128, 128, 128, names=('N', 'C', 'H', 'W', 'D'))

    # scale_channels is agnostic to the dimension order of the input
    >>> scale_channels(imgs, scale)
    >>> scale_channels(more_imgs, scale)
    >>> scale_channels(videos, scale)

.. warning::
    The named tensor API is experimental and subject to change.

""",
)

add_docstr_all(
    "all",
    r"""
all(dim=None, keepdim=False) -> Tensor

See :func:`torch.all`
""",
)

add_docstr_all(
    "allclose",
    r"""
allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor

See :func:`torch.allclose`
""",
)

add_docstr_all(
    "angle",
    r"""
angle() -> Tensor

See :func:`torch.angle`
""",
)

add_docstr_all(
    "any",
    r"""
any(dim=None, keepdim=False) -> Tensor

See :func:`torch.any`
""",
)

add_docstr_all(
    "apply_",
    r"""
apply_(callable) -> Tensor

Applies the function :attr:`callable` to each element in the tensor, replacing
each element with the value returned by :attr:`callable`.

.. note::

    This function only works with CPU tensors and should not be used in code
    sections that require high performance.
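
Example::

    >>> # the callable receives and returns plain Python numbers
    >>> t = torch.tensor([1., 2., 3.])
    >>> t.apply_(lambda x: x * 2)
    tensor([2., 4., 6.])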
""",
)

add_docstr_all(
    "asin",
    r"""
asin() -> Tensor

See :func:`torch.asin`
""",
)

add_docstr_all(
    "asin_",
    r"""
asin_() -> Tensor

In-place version of :meth:`~Tensor.asin`
""",
)

add_docstr_all(
    "arcsin",
    r"""
arcsin() -> Tensor

See :func:`torch.arcsin`
""",
)

add_docstr_all(
    "arcsin_",
    r"""
arcsin_() -> Tensor

In-place version of :meth:`~Tensor.arcsin`
""",
)

add_docstr_all(
    "asinh",
    r"""
asinh() -> Tensor

See :func:`torch.asinh`
""",
)

add_docstr_all(
    "asinh_",
    r"""
asinh_() -> Tensor

In-place version of :meth:`~Tensor.asinh`
""",
)

add_docstr_all(
    "arcsinh",
    r"""
arcsinh() -> Tensor

See :func:`torch.arcsinh`
""",
)

add_docstr_all(
    "arcsinh_",
    r"""
arcsinh_() -> Tensor

In-place version of :meth:`~Tensor.arcsinh`

""",
)

add_docstr_all(
    "as_strided",
    r"""
as_strided(size, stride, storage_offset=None) -> Tensor

See :func:`torch.as_strided`
""",
)

add_docstr_all(
    "as_strided_",
    r"""
as_strided_(size, stride, storage_offset=None) -> Tensor

In-place version of :meth:`~Tensor.as_strided`
""",
)

add_docstr_all(
    "atan",
    r"""
atan() -> Tensor

See :func:`torch.atan`
""",
)

add_docstr_all(
    "atan_",
    r"""
atan_() -> Tensor

In-place version of :meth:`~Tensor.atan`
""",
)

add_docstr_all(
    "arctan",
    r"""
arctan() -> Tensor

See :func:`torch.arctan`
""",
)

add_docstr_all(
    "arctan_",
    r"""
arctan_() -> Tensor

In-place version of :meth:`~Tensor.arctan`
""",
)

add_docstr_all(
    "atan2",
    r"""
atan2(other) -> Tensor

See :func:`torch.atan2`
""",
)

add_docstr_all(
    "atan2_",
    r"""
atan2_(other) -> Tensor

In-place version of :meth:`~Tensor.atan2`
""",
)

add_docstr_all(
    "arctan2",
    r"""
arctan2(other) -> Tensor

See :func:`torch.arctan2`
""",
)

add_docstr_all(
    "arctan2_",
    r"""
arctan2_(other) -> Tensor

In-place version of :meth:`~Tensor.arctan2`
""",
)

add_docstr_all(
    "atanh",
    r"""
atanh() -> Tensor

See :func:`torch.atanh`
""",
)

add_docstr_all(
    "atanh_",
    r"""
atanh_() -> Tensor

In-place version of :meth:`~Tensor.atanh`
""",
)

add_docstr_all(
    "arctanh",
    r"""
arctanh() -> Tensor

See :func:`torch.arctanh`
""",
)

add_docstr_all(
    "arctanh_",
    r"""
arctanh_() -> Tensor

In-place version of :meth:`~Tensor.arctanh`
""",
)

add_docstr_all(
    "baddbmm",
    r"""
baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor

See :func:`torch.baddbmm`
""",
)

add_docstr_all(
    "baddbmm_",
    r"""
baddbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.baddbmm`
""",
)

add_docstr_all(
    "bernoulli",
    r"""
bernoulli(*, generator=None) -> Tensor

Returns a result tensor where each :math:`\texttt{result[i]}` is independently
sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have
floating point ``dtype``, and the result will have the same ``dtype``.

See :func:`torch.bernoulli`

""",
)

add_docstr_all(
    "bernoulli_",
    r"""
bernoulli_(p=0.5, *, generator=None) -> Tensor

Fills each location of :attr:`self` with an independent sample from
:math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral
``dtype``.

:attr:`p` should either be a scalar or tensor containing probabilities to be
used for drawing the binary random number.

If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor
will be set to a value sampled from
:math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have
floating point ``dtype``.

See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli`
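
Example (the degenerate probabilities ``0`` and ``1`` are deterministic)::

    >>> t = torch.ones(4)
    >>> t.bernoulli_(0.0)  # p = 0: every element becomes 0
    tensor([0., 0., 0., 0.])
    >>> t.bernoulli_(1.0)  # p = 1: every element becomes 1
    tensor([1., 1., 1., 1.])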
""",
)

add_docstr_all(
    "bincount",
    r"""
bincount(weights=None, minlength=0) -> Tensor

See :func:`torch.bincount`
""",
)

add_docstr_all(
    "bitwise_not",
    r"""
bitwise_not() -> Tensor

See :func:`torch.bitwise_not`
""",
)

add_docstr_all(
    "bitwise_not_",
    r"""
bitwise_not_() -> Tensor

In-place version of :meth:`~Tensor.bitwise_not`
""",
)

add_docstr_all(
    "bitwise_and",
    r"""
bitwise_and() -> Tensor

See :func:`torch.bitwise_and`
""",
)

add_docstr_all(
    "bitwise_and_",
    r"""
bitwise_and_() -> Tensor

In-place version of :meth:`~Tensor.bitwise_and`

""",
)

add_docstr_all(
    "bitwise_or",
    r"""
bitwise_or() -> Tensor

See :func:`torch.bitwise_or`
""",
)

add_docstr_all(
    "bitwise_or_",
    r"""
bitwise_or_() -> Tensor

In-place version of :meth:`~Tensor.bitwise_or`
""",
)

add_docstr_all(
    "bitwise_xor",
    r"""
bitwise_xor() -> Tensor

See :func:`torch.bitwise_xor`
""",
)

add_docstr_all(
    "bitwise_xor_",
    r"""
bitwise_xor_() -> Tensor

In-place version of :meth:`~Tensor.bitwise_xor`
""",
)

add_docstr_all(
    "bitwise_left_shift",
    r"""
bitwise_left_shift(other) -> Tensor

See :func:`torch.bitwise_left_shift`
""",
)

add_docstr_all(
    "bitwise_left_shift_",
    r"""
bitwise_left_shift_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_left_shift`
""",
)

add_docstr_all(
    "bitwise_right_shift",
    r"""
bitwise_right_shift(other) -> Tensor

See :func:`torch.bitwise_right_shift`
""",
)

add_docstr_all(
    "bitwise_right_shift_",
    r"""
bitwise_right_shift_(other) -> Tensor

In-place version of :meth:`~Tensor.bitwise_right_shift`
""",
)

add_docstr_all(
    "broadcast_to",
    r"""
broadcast_to(shape) -> Tensor

See :func:`torch.broadcast_to`.
""",
)

add_docstr_all(
    "logical_and",
    r"""
logical_and() -> Tensor

See :func:`torch.logical_and`
""",
)

add_docstr_all(
    "logical_and_",
    r"""
logical_and_() -> Tensor

In-place version of :meth:`~Tensor.logical_and`
""",
)

add_docstr_all(
    "logical_not",
    r"""
logical_not() -> Tensor

See :func:`torch.logical_not`
""",
)

add_docstr_all(
    "logical_not_",
    r"""
logical_not_() -> Tensor

In-place version of :meth:`~Tensor.logical_not`

""",
)

add_docstr_all(
    "logical_or",
    r"""
logical_or() -> Tensor

See :func:`torch.logical_or`
""",
)

add_docstr_all(
    "logical_or_",
    r"""
logical_or_() -> Tensor

In-place version of :meth:`~Tensor.logical_or`
""",
)

add_docstr_all(
    "logical_xor",
    r"""
logical_xor() -> Tensor

See :func:`torch.logical_xor`
""",
)

add_docstr_all(
    "logical_xor_",
    r"""
logical_xor_() -> Tensor

In-place version of :meth:`~Tensor.logical_xor`
""",
)

add_docstr_all(
    "bmm",
    r"""
bmm(batch2) -> Tensor

See :func:`torch.bmm`
""",
)

add_docstr_all(
    "cauchy_",
    r"""
cauchy_(median=0, sigma=1, *, generator=None) -> Tensor

Fills the tensor with numbers drawn from the Cauchy distribution:

.. math::

    f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2}

.. note::
  Sigma (:math:`\sigma`) is used to denote the scale parameter in Cauchy distribution.
""",
)

add_docstr_all(
    "ceil",
    r"""
ceil() -> Tensor

See :func:`torch.ceil`
""",
)

add_docstr_all(
    "ceil_",
    r"""
ceil_() -> Tensor

In-place version of :meth:`~Tensor.ceil`
""",
)

add_docstr_all(
    "cholesky",
    r"""
cholesky(upper=False) -> Tensor

See :func:`torch.cholesky`
""",
)

add_docstr_all(
    "cholesky_solve",
    r"""
cholesky_solve(input2, upper=False) -> Tensor

See :func:`torch.cholesky_solve`
""",
)

add_docstr_all(
    "cholesky_inverse",
    r"""
cholesky_inverse(upper=False) -> Tensor

See :func:`torch.cholesky_inverse`
""",
)

add_docstr_all(
    "clamp",
    r"""
clamp(min=None, max=None) -> Tensor

See :func:`torch.clamp`
""",
)

add_docstr_all(
    "clamp_",
    r"""
clamp_(min=None, max=None) -> Tensor

In-place version of :meth:`~Tensor.clamp`
""",
)

add_docstr_all(
    "clip",
    r"""
clip(min=None, max=None) -> Tensor

Alias for :meth:`~Tensor.clamp`.
""",
)

add_docstr_all(
    "clip_",
    r"""
clip_(min=None, max=None) -> Tensor

Alias for :meth:`~Tensor.clamp_`.
""",
)

add_docstr_all(
    "clone",
    r"""
clone(*, memory_format=torch.preserve_format) -> Tensor

See :func:`torch.clone`
""",
)

add_docstr_all(
    "coalesce",
    r"""
coalesce() -> Tensor

Returns a coalesced copy of :attr:`self` if :attr:`self` is an
:ref:`uncoalesced tensor <sparse-uncoalesced-coo-docs>`.

Returns :attr:`self` if :attr:`self` is a coalesced tensor.

.. warning::
  Throws an error if :attr:`self` is not a sparse COO tensor.

""",
)

add_docstr_all(
    "contiguous",
    r"""
contiguous(memory_format=torch.contiguous_format) -> Tensor

Returns a contiguous in memory tensor containing the same data as :attr:`self` tensor. If
:attr:`self` tensor is already in the specified memory format, this function returns the
:attr:`self` tensor.

Args:
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.contiguous_format``.
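
For example, a transpose returns a non-contiguous view, and ``contiguous()``
materializes it into a fresh row-major tensor::

    >>> x = torch.arange(6).reshape(2, 3)
    >>> y = x.t()
    >>> y.is_contiguous()
    False
    >>> y.contiguous().is_contiguous()
    True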
""",
)

add_docstr_all(
    "copy_",
    r"""
copy_(src, non_blocking=False) -> Tensor

Copies the elements from :attr:`src` into :attr:`self` tensor and returns
:attr:`self`.

The :attr:`src` tensor must be :ref:`broadcastable <broadcasting-semantics>`
with the :attr:`self` tensor. It may be of a different data type or reside on a
different device.

Args:
    src (Tensor): the source tensor to copy from
    non_blocking (bool): if ``True`` and this copy is between CPU and GPU,
        the copy may occur asynchronously with respect to the host. For other
        cases, this argument has no effect.
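
Example (``src`` is broadcast and converted to the dtype of :attr:`self`)::

    >>> x = torch.zeros(2, 3)
    >>> x.copy_(torch.tensor([1, 2, 3]))  # int64 source, float32 destination
    tensor([[1., 2., 3.],
            [1., 2., 3.]])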
""",
)

add_docstr_all(
    "conj",
    r"""
conj() -> Tensor

See :func:`torch.conj`
""",
)

add_docstr_all(
    "conj_physical",
    r"""
conj_physical() -> Tensor

See :func:`torch.conj_physical`
""",
)

add_docstr_all(
    "conj_physical_",
    r"""
conj_physical_() -> Tensor

In-place version of :meth:`~Tensor.conj_physical`
""",
)

add_docstr_all(
    "resolve_conj",
    r"""
resolve_conj() -> Tensor

See :func:`torch.resolve_conj`
""",
)

add_docstr_all(
    "resolve_neg",
    r"""
resolve_neg() -> Tensor

See :func:`torch.resolve_neg`
""",
)

add_docstr_all(
    "copysign",
    r"""
copysign(other) -> Tensor

See :func:`torch.copysign`
""",
)

add_docstr_all(
    "copysign_",
    r"""
copysign_(other) -> Tensor

In-place version of :meth:`~Tensor.copysign`
""",
)

add_docstr_all(
    "cos",
    r"""
cos() -> Tensor

See :func:`torch.cos`
""",
)

add_docstr_all(
    "cos_",
    r"""
cos_() -> Tensor

In-place version of :meth:`~Tensor.cos`
""",
)

add_docstr_all(
    "cosh",
    r"""
cosh() -> Tensor

See :func:`torch.cosh`
""",
)

add_docstr_all(
    "cosh_",
    r"""
cosh_() -> Tensor

In-place version of :meth:`~Tensor.cosh`
""",
)

add_docstr_all(
    "cpu",
    r"""
cpu(memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in CPU memory.

If this object is already in CPU memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    {memory_format}

""".format(**common_args),
)

add_docstr_all(
    "count_nonzero",
    r"""
count_nonzero(dim=None) -> Tensor

See :func:`torch.count_nonzero`
""",
)

add_docstr_all(
    "cov",
    r"""
cov(*, correction=1, fweights=None, aweights=None) -> Tensor

See :func:`torch.cov`
""",
)

add_docstr_all(
    "corrcoef",
    r"""
corrcoef() -> Tensor

See :func:`torch.corrcoef`
""",
)

add_docstr_all(
    "cross",
    r"""
cross(other, dim=None) -> Tensor

See :func:`torch.cross`
""",
)

add_docstr_all(
    "cuda",
    r"""
cuda(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in CUDA memory.

If this object is already in CUDA memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination GPU device.
        Defaults to the current CUDA device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "mtia",
    r"""
mtia(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in MTIA memory.

If this object is already in MTIA memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination MTIA device.
        Defaults to the current MTIA device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "ipu",
    r"""
ipu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in IPU memory.

If this object is already in IPU memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination IPU device.
        Defaults to the current IPU device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "xpu",
    r"""
xpu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor

Returns a copy of this object in XPU memory.

If this object is already in XPU memory and on the correct device,
then no copy is performed and the original object is returned.

Args:
    device (:class:`torch.device`): The destination XPU device.
        Defaults to the current XPU device.
    non_blocking (bool): If ``True`` and the source is in pinned memory,
        the copy will be asynchronous with respect to the host.
        Otherwise, the argument has no effect. Default: ``False``.
    {memory_format}
""".format(**common_args),
)

add_docstr_all(
    "logcumsumexp",
    r"""
logcumsumexp(dim) -> Tensor

See :func:`torch.logcumsumexp`
""",
)

add_docstr_all(
    "cummax",
    r"""
cummax(dim) -> (Tensor, Tensor)

See :func:`torch.cummax`
""",
)

add_docstr_all(
    "cummin",
    r"""
cummin(dim) -> (Tensor, Tensor)

See :func:`torch.cummin`
""",
)

add_docstr_all(
    "cumprod",
    r"""
cumprod(dim, dtype=None) -> Tensor

See :func:`torch.cumprod`
""",
)

add_docstr_all(
    "cumprod_",
    r"""
cumprod_(dim, dtype=None) -> Tensor

In-place version of :meth:`~Tensor.cumprod`
""",
)

add_docstr_all(
    "cumsum",
    r"""
cumsum(dim, dtype=None) -> Tensor

See :func:`torch.cumsum`
""",
)

add_docstr_all(
    "cumsum_",
    r"""
cumsum_(dim, dtype=None) -> Tensor

In-place version of :meth:`~Tensor.cumsum`
""",
)

add_docstr_all(
    "data_ptr",
    r"""
data_ptr() -> int

Returns the address of the first element of :attr:`self` tensor.

""",
)

add_docstr_all(
    "dequantize",
    r"""
dequantize() -> Tensor

Given a quantized Tensor, dequantize it and return the dequantized float Tensor.
""",
)

add_docstr_all(
    "dense_dim",
    r"""
dense_dim() -> int

Return the number of dense dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.

.. note::
  Returns ``len(self.shape)`` if :attr:`self` is not a sparse tensor.

See also :meth:`Tensor.sparse_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
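
Example (a hybrid tensor with one sparse and one dense dimension)::

    >>> i = torch.tensor([[0, 1]])
    >>> v = torch.ones(2, 3)
    >>> s = torch.sparse_coo_tensor(i, v, (4, 3))
    >>> s.dense_dim()
    1
    >>> torch.zeros(2, 3).dense_dim()  # strided tensor: len(shape)
    2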
""",
)

add_docstr_all(
    "diag",
    r"""
diag(diagonal=0) -> Tensor

See :func:`torch.diag`

""",
)

add_docstr_all(
    "diag_embed",
    r"""
diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor

See :func:`torch.diag_embed`
""",
)

add_docstr_all(
    "diagflat",
    r"""
diagflat(offset=0) -> Tensor

See :func:`torch.diagflat`
""",
)

add_docstr_all(
    "diagonal",
    r"""
diagonal(offset=0, dim1=0, dim2=1) -> Tensor

See :func:`torch.diagonal`
""",
)

add_docstr_all(
    "diagonal_scatter",
    r"""
diagonal_scatter(src, offset=0, dim1=0, dim2=1) -> Tensor

See :func:`torch.diagonal_scatter`
""",
)

add_docstr_all(
    "as_strided_scatter",
    r"""
as_strided_scatter(src, size, stride, storage_offset=None) -> Tensor

See :func:`torch.as_strided_scatter`
""",
)

add_docstr_all(
    "fill_diagonal_",
    r"""
fill_diagonal_(fill_value, wrap=False) -> Tensor

Fill the main diagonal of a tensor that has at least 2-dimensions.
When dims>2, all dimensions of input must be of equal length.
This function modifies the input tensor in-place, and returns the input tensor.

Arguments:
    fill_value (Scalar): the fill value
    wrap (bool): the diagonal 'wrapped' after N columns for tall matrices.

Example::

    >>> a = torch.zeros(3, 3)
    >>> a.fill_diagonal_(5)
    tensor([[5., 0., 0.],
            [0., 5., 0.],
            [0., 0., 5.]])
    >>> b = torch.zeros(7, 3)
    >>> b.fill_diagonal_(5)
    tensor([[5., 0., 0.],
            [0., 5., 0.],
            [0., 0., 5.],
            [0., 0., 0.],
            [0., 0., 0.],
            [0., 0., 0.],
            [0., 0., 0.]])
    >>> c = torch.zeros(7, 3)
    >>> c.fill_diagonal_(5, wrap=True)
    tensor([[5., 0., 0.],
            [0., 5., 0.],
            [0., 0., 5.],
            [0., 0., 0.],
            [5., 0., 0.],
            [0., 5., 0.],
            [0., 0., 5.]])

""",
)

add_docstr_all(
    "floor_divide",
    r"""
floor_divide(value) -> Tensor

See :func:`torch.floor_divide`
""",
)

add_docstr_all(
    "floor_divide_",
    r"""
floor_divide_(value) -> Tensor

In-place version of :meth:`~Tensor.floor_divide`
""",
)

add_docstr_all(
    "diff",
    r"""
diff(n=1, dim=-1, prepend=None, append=None) -> Tensor

See :func:`torch.diff`
""",
)

add_docstr_all(
    "digamma",
    r"""
digamma() -> Tensor

See :func:`torch.digamma`
""",
)

add_docstr_all(
    "digamma_",
    r"""
digamma_() -> Tensor

In-place version of :meth:`~Tensor.digamma`
""",
)

add_docstr_all(
    "dim",
    r"""
dim() -> int

Returns the number of dimensions of :attr:`self` tensor.
""",
)

add_docstr_all(
    "dist",
    r"""
dist(other, p=2) -> Tensor

See :func:`torch.dist`
""",
)

add_docstr_all(
    "div",
    r"""
div(value, *, rounding_mode=None) -> Tensor

See :func:`torch.div`
""",
)

add_docstr_all(
    "div_",
    r"""
div_(value, *, rounding_mode=None) -> Tensor

In-place version of :meth:`~Tensor.div`
""",
)

add_docstr_all(
    "divide",
    r"""
divide(value, *, rounding_mode=None) -> Tensor

See :func:`torch.divide`
""",
)

add_docstr_all(
    "divide_",
    r"""
divide_(value, *, rounding_mode=None) -> Tensor

In-place version of :meth:`~Tensor.divide`
""",
)

add_docstr_all(
    "dot",
    r"""
dot(other) -> Tensor

See :func:`torch.dot`
""",
)

add_docstr_all(
    "element_size",
    r"""
element_size() -> int

Returns the size in bytes of an individual element.

Example::

    >>> torch.tensor([]).element_size()
    4
    >>> torch.tensor([], dtype=torch.uint8).element_size()
    1

""",
)

add_docstr_all(
    "eq",
    r"""
eq(other) -> Tensor

See :func:`torch.eq`
""",
)

add_docstr_all(
    "eq_",
    r"""
eq_(other) -> Tensor

In-place version of :meth:`~Tensor.eq`
""",
)

add_docstr_all(
    "equal",
    r"""
equal(other) -> bool

See :func:`torch.equal`
""",
)

add_docstr_all(
    "erf",
    r"""
erf() -> Tensor

See :func:`torch.erf`
""",
)

add_docstr_all(
    "erf_",
    r"""
erf_() -> Tensor

In-place version of :meth:`~Tensor.erf`
""",
)

add_docstr_all(
    "erfc",
    r"""
erfc() -> Tensor

See :func:`torch.erfc`
""",
)

add_docstr_all(
    "erfc_",
    r"""
erfc_() -> Tensor

In-place version of :meth:`~Tensor.erfc`
""",
)

add_docstr_all(
    "erfinv",
    r"""
erfinv() -> Tensor

See :func:`torch.erfinv`
""",
)

add_docstr_all(
    "erfinv_",
    r"""
erfinv_() -> Tensor

In-place version of :meth:`~Tensor.erfinv`
""",
)

add_docstr_all(
    "exp",
    r"""
exp() -> Tensor

See :func:`torch.exp`
""",
)

add_docstr_all(
    "exp_",
    r"""
exp_() -> Tensor

In-place version of :meth:`~Tensor.exp`
""",
)

add_docstr_all(
    "exp2",
    r"""
exp2() -> Tensor

See :func:`torch.exp2`
""",
)

add_docstr_all(
    "exp2_",
    r"""
exp2_() -> Tensor

In-place version of :meth:`~Tensor.exp2`
""",
)

add_docstr_all(
    "expm1",
    r"""
expm1() -> Tensor

See :func:`torch.expm1`
""",
)

add_docstr_all(
    "expm1_",
    r"""
expm1_() -> Tensor

In-place version of :meth:`~Tensor.expm1`
""",
)

add_docstr_all(
    "exponential_",
    r"""
exponential_(lambd=1, *, generator=None) -> Tensor

Fills :attr:`self` tensor with elements drawn from the PDF (probability density function):

.. math::

    f(x) = \lambda e^{-\lambda x}, x > 0

.. note::
  In probability theory, exponential distribution is supported on interval [0, :math:`\inf`) (i.e., :math:`x >= 0`)
  implying that zero can be sampled from the exponential distribution.
  However, :func:`torch.Tensor.exponential_` does not sample zero,
  which means that its actual support is the interval (0, :math:`\inf`).

  Note that :func:`torch.distributions.exponential.Exponential` is supported on the interval [0, :math:`\inf`) and can sample zero.
""",
)

add_docstr_all(
    "fill_",
    r"""
fill_(value) -> Tensor

Fills :attr:`self` tensor with the specified value.
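
For example, filling an uninitialized tensor with a scalar::

    >>> torch.empty(2, 3).fill_(5)
    tensor([[5., 5., 5.],
            [5., 5., 5.]])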
""",
)

add_docstr_all(
    "floor",
    r"""
floor() -> Tensor

See :func:`torch.floor`
""",
)

add_docstr_all(
    "flip",
    r"""
flip(dims) -> Tensor

See :func:`torch.flip`
""",
)

add_docstr_all(
    "fliplr",
    r"""
fliplr() -> Tensor

See :func:`torch.fliplr`
""",
)

add_docstr_all(
    "flipud",
    r"""
flipud() -> Tensor

See :func:`torch.flipud`
""",
)

add_docstr_all(
    "roll",
    r"""
roll(shifts, dims) -> Tensor

See :func:`torch.roll`
""",
)

add_docstr_all(
    "floor_",
    r"""
floor_() -> Tensor

In-place version of :meth:`~Tensor.floor`
""",
)

add_docstr_all(
    "fmod",
    r"""
fmod(divisor) -> Tensor

See :func:`torch.fmod`
""",
)

add_docstr_all(
    "fmod_",
    r"""
fmod_(divisor) -> Tensor

In-place version of :meth:`~Tensor.fmod`
""",
)

add_docstr_all(
    "frac",
    r"""
frac() -> Tensor

See :func:`torch.frac`
""",
)

add_docstr_all(
    "frac_",
    r"""
frac_() -> Tensor

In-place version of :meth:`~Tensor.frac`
""",
)

add_docstr_all(
    "frexp",
    r"""
frexp(input) -> (Tensor mantissa, Tensor exponent)

See :func:`torch.frexp`
""",
)

add_docstr_all(
    "flatten",
    r"""
flatten(start_dim=0, end_dim=-1) -> Tensor

See :func:`torch.flatten`
""",
)

add_docstr_all(
    "gather",
    r"""
gather(dim, index) -> Tensor

See :func:`torch.gather`
""",
)

add_docstr_all(
    "gcd",
    r"""
gcd(other) -> Tensor

See :func:`torch.gcd`
""",
)

add_docstr_all(
    "gcd_",
    r"""
gcd_(other) -> Tensor

In-place version of :meth:`~Tensor.gcd`
""",
)

add_docstr_all(
    "ge",
    r"""
ge(other) -> Tensor

See :func:`torch.ge`.
""",
)

add_docstr_all(
    "ge_",
    r"""
ge_(other) -> Tensor

In-place version of :meth:`~Tensor.ge`.
""",
)

add_docstr_all(
    "greater_equal",
    r"""
greater_equal(other) -> Tensor

See :func:`torch.greater_equal`.
""",
)

add_docstr_all(
    "greater_equal_",
    r"""
greater_equal_(other) -> Tensor

In-place version of :meth:`~Tensor.greater_equal`.

""",
)

add_docstr_all(
    "geometric_",
    r"""
geometric_(p, *, generator=None) -> Tensor

Fills :attr:`self` tensor with elements drawn from the geometric distribution:

.. math::

    P(X=k) = (1 - p)^{k - 1} p, k = 1, 2, ...

.. note::
  :func:`torch.Tensor.geometric_` `k`-th trial is the first success hence draws samples in :math:`\{1, 2, \ldots\}`, whereas
  :func:`torch.distributions.geometric.Geometric` :math:`(k+1)`-th trial is the first success
  hence draws samples in :math:`\{0, 1, \ldots\}`.
""",
)

add_docstr_all(
    "geqrf",
    r"""
geqrf() -> (Tensor, Tensor)

See :func:`torch.geqrf`
""",
)

add_docstr_all(
    "ger",
    r"""
ger(vec2) -> Tensor

See :func:`torch.ger`
""",
)

add_docstr_all(
    "inner",
    r"""
inner(other) -> Tensor

See :func:`torch.inner`.
""",
)

add_docstr_all(
    "outer",
    r"""
outer(vec2) -> Tensor

See :func:`torch.outer`.
""",
)

add_docstr_all(
    "hypot",
    r"""
hypot(other) -> Tensor

See :func:`torch.hypot`
""",
)

add_docstr_all(
    "hypot_",
    r"""
hypot_(other) -> Tensor

In-place version of :meth:`~Tensor.hypot`
""",
)

add_docstr_all(
    "i0",
    r"""
i0() -> Tensor

See :func:`torch.i0`
""",
)

add_docstr_all(
    "i0_",
    r"""
i0_() -> Tensor

In-place version of :meth:`~Tensor.i0`
""",
)

add_docstr_all(
    "igamma",
    r"""
igamma(other) -> Tensor

See :func:`torch.igamma`
""",
)

add_docstr_all(
    "igamma_",
    r"""
igamma_(other) -> Tensor

In-place version of :meth:`~Tensor.igamma`
""",
)

add_docstr_all(
    "igammac",
    r"""
igammac(other) -> Tensor

See :func:`torch.igammac`
""",
)

add_docstr_all(
    "igammac_",
    r"""
igammac_(other) -> Tensor

In-place version of :meth:`~Tensor.igammac`
""",
)

add_docstr_all(
    "indices",
    r"""
indices() -> Tensor

Return the indices tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.

.. warning::
  Throws an error if :attr:`self` is not a sparse COO tensor.

See also :meth:`Tensor.values`.

.. note::
  This method can only be called on a coalesced sparse tensor. See
  :meth:`Tensor.coalesce` for details.

""",
)

add_docstr_all(
    "get_device",
    r"""
get_device() -> Device ordinal (Integer)

For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides.
For CPU tensors, this function returns `-1`.

Example::

    >>> x = torch.randn(3, 4, 5, device='cuda:0')
    >>> x.get_device()
    0
    >>> x.cpu().get_device()
    -1
""",
)

add_docstr_all(
    "values",
    r"""
values() -> Tensor

Return the values tensor of a :ref:`sparse COO tensor <sparse-coo-docs>`.

.. warning::
  Throws an error if :attr:`self` is not a sparse COO tensor.

See also :meth:`Tensor.indices`.

.. note::
  This method can only be called on a coalesced sparse tensor. See
  :meth:`Tensor.coalesce` for details.
""",
)

add_docstr_all(
    "gt",
    r"""
gt(other) -> Tensor

See :func:`torch.gt`.
""",
)

add_docstr_all(
    "gt_",
    r"""
gt_(other) -> Tensor

In-place version of :meth:`~Tensor.gt`.
""",
)

add_docstr_all(
    "greater",
    r"""
greater(other) -> Tensor

See :func:`torch.greater`.
""",
)

add_docstr_all(
    "greater_",
    r"""
greater_(other) -> Tensor

In-place version of :meth:`~Tensor.greater`.
""",
)

add_docstr_all(
    "has_names",
    r"""
Is ``True`` if any of this tensor's dimensions are named. Otherwise, is ``False``.

""",
)

add_docstr_all(
    "hardshrink",
    r"""
hardshrink(lambd=0.5) -> Tensor

See :func:`torch.nn.functional.hardshrink`
""",
)

add_docstr_all(
    "heaviside",
    r"""
heaviside(values) -> Tensor

See :func:`torch.heaviside`

""",
)

add_docstr_all(
    "heaviside_",
    r"""
heaviside_(values) -> Tensor

In-place version of :meth:`~Tensor.heaviside`
""",
)

add_docstr_all(
    "histc",
    r"""
histc(bins=100, min=0, max=0) -> Tensor

See :func:`torch.histc`
""",
)

add_docstr_all(
    "histogram",
    r"""
histogram(input, bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor)

See :func:`torch.histogram`

""",
)

add_docstr_all(
    "index_add_",
    r"""
index_add_(dim, index, source, *, alpha=1) -> Tensor

Accumulate the elements of :attr:`alpha` times ``source`` into the :attr:`self`
tensor by adding to the indices in the order given in :attr:`index`. For example,
if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of
``source`` is subtracted from the ``j``\ th row of :attr:`self`.

The :attr:`dim`\ th dimension of ``source`` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.

For a 3-D tensor the output is given as::

    self[index[i], :, :] += alpha * src[i, :, :]  # if dim == 0
    self[:, index[i], :] += alpha * src[:, i, :]  # if dim == 1
    self[:, :, index[i]] += alpha * src[:, :, i]  # if dim == 2

Note:
    {forward_reproducibility_note}

Args:
    dim (int): dimension along which to index
    index (Tensor): indices of ``source`` to select from,
            should have dtype either `torch.int64` or `torch.int32`
    source (Tensor): the tensor containing values to add

Keyword args:
    alpha (Number): the scalar multiplier for ``source``

Example::

    >>> x = torch.ones(5, 3)
    >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    >>> index = torch.tensor([0, 4, 2])
    >>> x.index_add_(0, index, t)
    tensor([[  2.,   3.,   4.],
            [  1.,   1.,   1.],
            [  8.,   9.,  10.],
            [  1.,   1.,   1.],
            [  5.,   6.,   7.]])
    >>> x.index_add_(0, index, t, alpha=-1)
    tensor([[  1.,   1.,   1.],
            [  1.,   1.,   1.],
            [  1.,   1.,   1.],
            [  1.,   1.,   1.],
            [  1.,   1.,   1.]])
""".format(**reproducibility_notes),
)

add_docstr_all(
    "index_copy_",
    r"""
index_copy_(dim, index, tensor) -> Tensor

Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting
the indices in the order given in :attr:`index`. For example, if ``dim == 0``
and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the
``j``\ th row of :attr:`self`.

The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.

.. note::
    If :attr:`index` contains duplicate entries, multiple elements from
    :attr:`tensor` will be copied to the same index of :attr:`self`. The result
    is nondeterministic since it depends on which copy occurs last.

Args:
    dim (int): dimension along which to index
    index (LongTensor): indices of :attr:`tensor` to select from
    tensor (Tensor): the tensor containing values to copy

Example::

    >>> x = torch.zeros(5, 3)
    >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    >>> index = torch.tensor([0, 4, 2])
    >>> x.index_copy_(0, index, t)
    tensor([[ 1.,  2.,  3.],
            [ 0.,  0.,  0.],
            [ 7.,  8.,  9.],
            [ 0.,  0.,  0.],
            [ 4.,  5.,  6.]])
""",
)

add_docstr_all(
    "index_fill_",
    r"""
index_fill_(dim, index, value) -> Tensor

Fills the elements of the :attr:`self` tensor with value :attr:`value` by
selecting the indices in the order given in :attr:`index`.

Args:
    dim (int): dimension along which to index
    index (LongTensor): indices of :attr:`self` tensor to fill in
    value (float): the value to fill with

Example::
    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float)
    >>> index = torch.tensor([0, 2])
    >>> x.index_fill_(1, index, -1)
    tensor([[-1.,  2., -1.],
            [-1.,  5., -1.],
            [-1.,  8., -1.]])

""",
)

add_docstr_all(
    "index_put_",
    r"""
index_put_(indices, values, accumulate=False) -> Tensor

Puts values from the tensor :attr:`values` into the tensor :attr:`self` using
the indices specified in :attr:`indices` (which is a tuple of Tensors). The
expression ``tensor.index_put_(indices, values)`` is equivalent to
``tensor[indices] = values``. Returns :attr:`self`.

If :attr:`accumulate` is ``True``, the elements in :attr:`values` are added to
:attr:`self`. If accumulate is ``False``, the behavior is undefined if indices
contain duplicate elements.

Args:
    indices (tuple of LongTensor): tensors used to index into `self`.
    values (Tensor): tensor of same dtype as `self`.
    accumulate (bool): whether to accumulate into self
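
Example (writes to the coordinates ``(0, 1)`` and ``(2, 3)``)::

    >>> x = torch.zeros(3, 4)
    >>> indices = (torch.tensor([0, 2]), torch.tensor([1, 3]))
    >>> x.index_put_(indices, torch.tensor([1., 2.]))
    tensor([[0., 1., 0., 0.],
            [0., 0., 0., 0.],
            [0., 0., 0., 2.]])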
""",
)

add_docstr_all(
    "index_put",
    r"""
index_put(indices, values, accumulate=False) -> Tensor

Out-place version of :meth:`~Tensor.index_put_`.
""",
)

add_docstr_all(
    "index_reduce_",
    r"""
index_reduce_(dim, index, source, reduce, *, include_self=True) -> Tensor

Accumulate the elements of ``source`` into the :attr:`self`
tensor by accumulating to the indices in the order given in :attr:`index`
using the reduction given by the ``reduce`` argument. For example, if ``dim == 0``,
``index[i] == j``, ``reduce == prod`` and ``include_self == True`` then the ``i``\ th
row of ``source`` is multiplied by the ``j``\ th row of :attr:`self`. If
:obj:`include_self="True"`, the values in the :attr:`self` tensor are included
in the reduction, otherwise, rows in the :attr:`self` tensor that are accumulated
to are treated as if they were filled with the reduction identities.

The :attr:`dim`\ th dimension of ``source`` must have the same size as the
length of :attr:`index` (which must be a vector), and all other dimensions must
match :attr:`self`, or an error will be raised.

For a 3-D tensor with :obj:`reduce="prod"` and :obj:`include_self=True` the
output is given as::

    self[index[i], :, :] *= src[i, :, :]  # if dim == 0
    self[:, index[i], :] *= src[:, i, :]  # if dim == 1
    self[:, :, index[i]] *= src[:, :, i]  # if dim == 2

Note:
    {forward_reproducibility_note}

.. note::

    This function only supports floating point tensors.

.. warning::

    This function is in beta and may change in the near future.

Args:
    dim (int): dimension along which to index
    index (Tensor): indices of ``source`` to select from,
        should have dtype either `torch.int64` or `torch.int32`
    source (FloatTensor): the tensor containing values to accumulate
    reduce (str): the reduction operation to apply
        (:obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)

Keyword args:
    include_self (bool): whether the elements from the ``self`` tensor are
        included in the reduction

Example::

    >>> x = torch.empty(5, 3).fill_(2)
    >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=torch.float)
    >>> index = torch.tensor([0, 4, 2, 0])
    >>> x.index_reduce_(0, index, t, 'prod')
    tensor([[20., 44., 72.],
            [ 2.,  2.,  2.],
            [14., 16., 18.],
            [ 2.,  2.,  2.],
            [ 8., 10., 12.]])
    >>> x = torch.empty(5, 3).fill_(2)
    >>> x.index_reduce_(0, index, t, 'prod', include_self=False)
    tensor([[10., 22., 36.],
            [ 2.,  2.,  2.],
            [ 7.,  8.,  9.],
            [ 2.,  2.,  2.],
            [ 4.,  5.,  6.]])
""".format(**reproducibility_notes),
)

add_docstr_all(
    "index_select",
    r"""
index_select(dim, index) -> Tensor

See :func:`torch.index_select`
""",
)

add_docstr_all(
    "sparse_mask",
    r"""
sparse_mask(mask) -> Tensor

Returns a new :ref:`sparse tensor <sparse-docs>` with values from a
strided tensor :attr:`self` filtered by the indices of the sparse
tensor :attr:`mask`. The values of :attr:`mask` sparse tensor are
ignored. :attr:`self` and :attr:`mask` tensors must have the same
shape.

.. note::

  The returned sparse tensor might contain duplicate values if :attr:`mask`
  is not coalesced. It is therefore advisable to pass ``mask.coalesce()``
  if such behavior is not desired.

.. note::

  The returned sparse tensor has the same indices as the sparse tensor
  :attr:`mask`, even when the corresponding values in :attr:`self` are
  zeros.

Args:
    mask (Tensor): a sparse tensor whose indices are used as a filter

Example::

    >>> nse = 5
    >>> dims = (5, 5, 2, 2)
    >>> I = torch.cat([torch.randint(0, dims[0], size=(nse,)),
    ...                torch.randint(0, dims[1], size=(nse,))], 0).reshape(2, nse)
    >>> V = torch.randn(nse, dims[2], dims[3])
    >>> S = torch.sparse_coo_tensor(I, V, dims).coalesce()
    >>> D = torch.randn(dims)
    >>> D.sparse_mask(S)
    tensor(indices=tensor([[0, 0, 0, 2],
                           [0, 1, 4, 3]]),
           values=tensor([[[ 1.6550,  0.2397],
                           [-0.1611, -0.0779]],

                          [[ 0.2326, -1.0558],
                           [ 1.4711,  1.9678]],

                          [[-0.5138, -0.0411],
                           [ 1.9417,  0.5158]],

                          [[ 0.0793,  0.0036],
                           [-0.2569, -0.1055]]]),
           size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo)
""",
)

add_docstr_all(
    "inverse",
    r"""
inverse() -> Tensor

See :func:`torch.inverse`
""",
)

add_docstr_all(
    "isnan",
    r"""
isnan() -> Tensor

See :func:`torch.isnan`
""",
)

add_docstr_all(
    "isinf",
    r"""
isinf() -> Tensor

See :func:`torch.isinf`
""",
)

add_docstr_all(
    "isposinf",
    r"""
isposinf() -> Tensor

See :func:`torch.isposinf`
""",
)

add_docstr_all(
    "isneginf",
    r"""
isneginf() -> Tensor

See :func:`torch.isneginf`
""",
)

add_docstr_all(
    "isfinite",
    r"""
isfinite() -> Tensor

See :func:`torch.isfinite`
""",
)

add_docstr_all(
    "isclose",
    r"""
isclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor

See :func:`torch.isclose`
""",
)

add_docstr_all(
    "isreal",
    r"""
isreal() -> Tensor

See :func:`torch.isreal`
""",
)

add_docstr_all(
    "is_coalesced",
    r"""
is_coalesced() -> bool

Returns ``True`` if :attr:`self` is a :ref:`sparse COO tensor
<sparse-coo-docs>` that is coalesced, ``False`` otherwise.

.. warning::
  Throws an error if :attr:`self` is not a sparse COO tensor.

See :meth:`coalesce` and :ref:`uncoalesced tensors <sparse-uncoalesced-coo-docs>`.
""",
)

add_docstr_all(
    "is_contiguous",
    r"""
is_contiguous(memory_format=torch.contiguous_format) -> bool

Returns True if :attr:`self` tensor is contiguous in memory in the order specified
by memory format.

Args:
    memory_format (:class:`torch.memory_format`, optional): Specifies memory allocation
        order. Default: ``torch.contiguous_format``.
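
Example::

    >>> x = torch.empty(2, 3, 4, 5)  # freshly allocated: row-major layout
    >>> x.is_contiguous()
    True
    >>> x.is_contiguous(memory_format=torch.channels_last)
    False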
""",
)

add_docstr_all(
    "is_pinned",
    r"""
Returns true if this tensor resides in pinned memory.
""",
)

add_docstr_all(
    "is_floating_point",
    r"""
is_floating_point() -> bool

Returns True if the data type of :attr:`self` is a floating point data type.

""",
)

add_docstr_all(
    "is_complex",
    r"""
is_complex() -> bool

Returns True if the data type of :attr:`self` is a complex data type.
""",
)

add_docstr_all(
    "is_inference",
    r"""
is_inference() -> bool

See :func:`torch.is_inference`
""",
)

add_docstr_all(
    "is_conj",
    r"""
is_conj() -> bool

Returns True if the conjugate bit of :attr:`self` is set to true.
""",
)

add_docstr_all(
    "is_neg",
    r"""
is_neg() -> bool

Returns True if the negative bit of :attr:`self` is set to true.
""",
)

add_docstr_all(
    "is_signed",
    r"""
is_signed() -> bool

Returns True if the data type of :attr:`self` is a signed data type.
""",
)

add_docstr_all(
    "is_set_to",
    r"""
is_set_to(tensor) -> bool

Returns True if both tensors are pointing to the exact same memory (same
storage, offset, size and stride).
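
Example::

    >>> a = torch.zeros(3)
    >>> a.is_set_to(a.view(3))   # same storage, offset, size and stride
    True
    >>> a.is_set_to(a.clone())   # equal values, but separate storage
    False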
""",
)

add_docstr_all(
    "item",
    r"""
item() -> number

Returns the value of this tensor as a standard Python number. This only works
for tensors with one element. For other cases, see :meth:`~Tensor.tolist`.

This operation is not differentiable.

Example::

    >>> x = torch.tensor([1.0])
    >>> x.item()
    1.0

""",
)

add_docstr_all(
    "kron",
    r"""
kron(other) -> Tensor

See :func:`torch.kron`
""",
)

add_docstr_all(
    "kthvalue",
    r"""
kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor)

See :func:`torch.kthvalue`
""",
)

add_docstr_all(
    "ldexp",
    r"""
ldexp(other) -> Tensor

See :func:`torch.ldexp`
""",
)

add_docstr_all(
    "ldexp_",
    r"""
ldexp_(other) -> Tensor

In-place version of :meth:`~Tensor.ldexp`
""",
)

add_docstr_all(
    "lcm",
    r"""
lcm(other) -> Tensor

See :func:`torch.lcm`
""",
)

add_docstr_all(
    "lcm_",
    r"""
lcm_(other) -> Tensor

In-place version of :meth:`~Tensor.lcm`
""",
)

add_docstr_all(
    "le",
    r"""
le(other) -> Tensor

See :func:`torch.le`.
""",
)

add_docstr_all(
    "le_",
    r"""
le_(other) -> Tensor

In-place version of :meth:`~Tensor.le`.

""",
)

add_docstr_all(
    "less_equal",
    r"""
less_equal(other) -> Tensor

See :func:`torch.less_equal`.
""",
)

add_docstr_all(
    "less_equal_",
    r"""
less_equal_(other) -> Tensor

In-place version of :meth:`~Tensor.less_equal`.
""",
)

add_docstr_all(
    "lerp",
    r"""
lerp(end, weight) -> Tensor

See :func:`torch.lerp`
""",
)

add_docstr_all(
    "lerp_",
    r"""
lerp_(end, weight) -> Tensor

In-place version of :meth:`~Tensor.lerp`
""",
)

add_docstr_all(
    "lgamma",
    r"""
lgamma() -> Tensor

See :func:`torch.lgamma`
""",
)

add_docstr_all(
    "lgamma_",
    r"""
lgamma_() -> Tensor

In-place version of :meth:`~Tensor.lgamma`
""",
)

add_docstr_all(
    "log",
    r"""
log() -> Tensor

See :func:`torch.log`
""",
)

add_docstr_all(
    "log_",
    r"""
log_() -> Tensor

In-place version of :meth:`~Tensor.log`
""",
)

add_docstr_all(
    "log10",
    r"""
log10() -> Tensor

See :func:`torch.log10`
""",
)

add_docstr_all(
    "log10_",
    r"""
log10_() -> Tensor

In-place version of :meth:`~Tensor.log10`
""",
)

add_docstr_all(
    "log1p",
    r"""
log1p() -> Tensor

See :func:`torch.log1p`
""",
)

add_docstr_all(
    "log1p_",
    r"""
log1p_() -> Tensor

In-place version of :meth:`~Tensor.log1p`
""",
)

add_docstr_all(
    "log2",
    r"""
log2() -> Tensor

See :func:`torch.log2`
""",
)

add_docstr_all(
    "log2_",
    r"""
log2_() -> Tensor

In-place version of :meth:`~Tensor.log2`
""",
)

add_docstr_all(
    "logaddexp",
    r"""
logaddexp(other) -> Tensor

See :func:`torch.logaddexp`

""",
)

add_docstr_all(
    "logaddexp2",
    r"""
logaddexp2(other) -> Tensor

See :func:`torch.logaddexp2`
""",
)

add_docstr_all(
    "log_normal_",
    r"""
log_normal_(mean=1, std=2, *, generator=None)

Fills :attr:`self` tensor with numbers sampled from the log-normal distribution
parameterized by the given mean :math:`\mu` and standard deviation
:math:`\sigma`. Note that :attr:`mean` and :attr:`std` are the mean and
standard deviation of the underlying normal distribution, and not of the
returned distribution:

.. math::

    f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}}
""",
)

add_docstr_all(
    "logsumexp",
    r"""
logsumexp(dim, keepdim=False) -> Tensor

See :func:`torch.logsumexp`
""",
)

add_docstr_all(
    "lt",
    r"""
lt(other) -> Tensor

See :func:`torch.lt`.
""",
)

add_docstr_all(
    "lt_",
    r"""
lt_(other) -> Tensor

In-place version of :meth:`~Tensor.lt`.
""",
)

add_docstr_all(
    "less",
    r"""
less(other) -> Tensor

See :func:`torch.less`.
""",
)

add_docstr_all(
    "less_",
    r"""
less_(other) -> Tensor

In-place version of :meth:`~Tensor.less`.
""",
)

add_docstr_all(
    "lu_solve",
    r"""
lu_solve(LU_data, LU_pivots) -> Tensor

See :func:`torch.lu_solve`
""",
)

add_docstr_all(
    "map_",
    r"""
map_(tensor, callable)

Applies :attr:`callable` for each element in :attr:`self` tensor and the given
:attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and
the given :attr:`tensor` must be :ref:`broadcastable <broadcasting-semantics>`.

The :attr:`callable` should have the signature::

    def callable(a, b) -> number
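
For example, an element-wise combination of two CPU tensors::

    >>> a = torch.tensor([1., 2., 3.])
    >>> b = torch.tensor([10., 20., 30.])
    >>> a.map_(b, lambda x, y: x + y)
    tensor([11., 22., 33.])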
""",
)

add_docstr_all(
    "masked_scatter_",
    r"""
masked_scatter_(mask, source)

Copies elements from :attr:`source` into :attr:`self` tensor at positions where
the :attr:`mask` is True. Elements from :attr:`source` are copied into :attr:`self`
starting at position 0 of :attr:`source` and continuing in order one-by-one for each
occurrence of :attr:`mask` being True.
The shape of :attr:`mask` must be :ref:`broadcastable <broadcasting-semantics>`
with the shape of the underlying tensor. The :attr:`source` should have at least
as many elements as the number of ones in :attr:`mask`.

Args:
    mask (BoolTensor): the boolean mask
    source (Tensor): the tensor to copy from

.. note::

    The :attr:`mask` operates on the :attr:`self` tensor, not on the given
    :attr:`source` tensor.

Example::

    >>> self = torch.tensor([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
    >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=torch.bool)
    >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
    >>> self.masked_scatter_(mask, source)
    tensor([[0, 0, 0, 0, 1],
            [2, 3, 0, 4, 5]])

masked_fill_
masked_fill_(mask, value)

Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is
True. The shape of :attr:`mask` must be
:ref:`broadcastable <broadcasting-semantics>` with the shape of the underlying
tensor.

Args:
    mask (BoolTensor): the boolean mask
    value (float): the value to fill in with
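
Example (a minimal sketch)::

    >>> x = torch.zeros(2, 3)
    >>> mask = torch.tensor([[True, False, True], [False, True, False]])
    >>> x.masked_fill_(mask, 3.5)
    tensor([[3.5000, 0.0000, 3.5000],
            [0.0000, 3.5000, 0.0000]])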
masked_select
masked_select(mask) -> Tensor

See :func:`torch.masked_select`
matrix_power
matrix_power(n) -> Tensor

.. note:: :meth:`~Tensor.matrix_power` is deprecated, use :func:`torch.linalg.matrix_power` instead.

Alias for :func:`torch.linalg.matrix_power`

matrix_exp
matrix_exp() -> Tensor

See :func:`torch.matrix_exp`
max
max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)

See :func:`torch.max`
amax
amax(dim=None, keepdim=False) -> Tensor

See :func:`torch.amax`
maximum
maximum(other) -> Tensor

See :func:`torch.maximum`
fmax
fmax(other) -> Tensor

See :func:`torch.fmax`
argmax
argmax(dim=None, keepdim=False) -> LongTensor

See :func:`torch.argmax`
argwhere
argwhere() -> Tensor

See :func:`torch.argwhere`
mean
mean(dim=None, keepdim=False, *, dtype=None) -> Tensor

See :func:`torch.mean`
nanmean
nanmean(dim=None, keepdim=False, *, dtype=None) -> Tensor

See :func:`torch.nanmean`
median
median(dim=None, keepdim=False) -> (Tensor, LongTensor)

See :func:`torch.median`
nanmedian
nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor)

See :func:`torch.nanmedian`
min
min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor)

See :func:`torch.min`
amin
amin(dim=None, keepdim=False) -> Tensor

See :func:`torch.amin`
minimum
minimum(other) -> Tensor

See :func:`torch.minimum`
aminmax
aminmax(*, dim=None, keepdim=False) -> (Tensor min, Tensor max)

See :func:`torch.aminmax`
fmin
fmin(other) -> Tensor

See :func:`torch.fmin`
argmin
argmin(dim=None, keepdim=False) -> LongTensor

See :func:`torch.argmin`
mm
mm(mat2) -> Tensor

See :func:`torch.mm`
mode
mode(dim=None, keepdim=False) -> (Tensor, LongTensor)

See :func:`torch.mode`
movedim
movedim(source, destination) -> Tensor

See :func:`torch.movedim`
moveaxis
moveaxis(source, destination) -> Tensor

See :func:`torch.moveaxis`
mul
mul(value) -> Tensor

See :func:`torch.mul`.
mul_
mul_(value) -> Tensor

In-place version of :meth:`~Tensor.mul`.
multiply
multiply(value) -> Tensor

See :func:`torch.multiply`.
multiply_
multiply_(value) -> Tensor

In-place version of :meth:`~Tensor.multiply`.
multinomial
multinomial(num_samples, replacement=False, *, generator=None) -> Tensor

See :func:`torch.multinomial`
mv
mv(vec) -> Tensor

See :func:`torch.mv`
mvlgamma
mvlgamma(p) -> Tensor

See :func:`torch.mvlgamma`
mvlgamma_
mvlgamma_(p) -> Tensor

In-place version of :meth:`~Tensor.mvlgamma`
narrow
narrow(dimension, start, length) -> Tensor

See :func:`torch.narrow`.
narrow_copy
narrow_copy(dimension, start, length) -> Tensor

See :func:`torch.narrow_copy`.

ndimension
ndimension() -> int

Alias for :meth:`~Tensor.dim`

nan_to_num
nan_to_num(nan=0.0, posinf=None, neginf=None) -> Tensor

See :func:`torch.nan_to_num`.
nan_to_num_
nan_to_num_(nan=0.0, posinf=None, neginf=None) -> Tensor

In-place version of :meth:`~Tensor.nan_to_num`.
ne
ne(other) -> Tensor

See :func:`torch.ne`.
ne_
ne_(other) -> Tensor

In-place version of :meth:`~Tensor.ne`.
not_equal
not_equal(other) -> Tensor

See :func:`torch.not_equal`.

not_equal_
not_equal_(other) -> Tensor

In-place version of :meth:`~Tensor.not_equal`.
neg
neg() -> Tensor

See :func:`torch.neg`
negative
negative() -> Tensor

See :func:`torch.negative`
neg_
neg_() -> Tensor

In-place version of :meth:`~Tensor.neg`
negative_
negative_() -> Tensor

In-place version of :meth:`~Tensor.negative`
nelement
nelement() -> int

Alias for :meth:`~Tensor.numel`
nextafter
nextafter(other) -> Tensor

See :func:`torch.nextafter`

nextafter_
nextafter_(other) -> Tensor

In-place version of :meth:`~Tensor.nextafter`
nonzero
nonzero() -> LongTensor

See :func:`torch.nonzero`
nonzero_static
nonzero_static(input, *, size, fill_value=-1) -> Tensor

Returns a 2-D tensor where each row is the index for a non-zero value.
The returned Tensor has the same `torch.dtype` as `torch.nonzero()`.

Args:
    input (Tensor): the input tensor to count non-zero elements.

Keyword args:
    size (int): the number of non-zero elements expected to be included in the
        output tensor. The output tensor is padded with `fill_value` if `size`
        is larger than the total number of non-zero elements, and truncated if
        `size` is smaller. The size must be a non-negative integer.
    fill_value (int): the value to fill the output tensor with when `size` is larger
        than the total number of non-zero elements. Default is `-1` to represent
        invalid index.

Example::

    # Example 1: Padding
    >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
    >>> static_size = 4
    >>> torch.nonzero_static(input_tensor, size=static_size)
    tensor([[ 0,  0],
            [ 1,  0],
            [ 1,  1],
            [-1, -1]], dtype=torch.int64)

    # Example 2: Truncating
    >>> input_tensor = torch.tensor([[1, 0], [3, 2]])
    >>> static_size = 2
    >>> torch.nonzero_static(input_tensor, size=static_size)
    tensor([[0, 0],
            [1, 0]], dtype=torch.int64)

    # Example 3: 0 size
    >>> input_tensor = torch.tensor([10])
    >>> static_size = 0
    >>> torch.nonzero_static(input_tensor, size=static_size)
    tensor([], size=(0, 1), dtype=torch.int64)

    # Example 4: 0 rank input
    >>> input_tensor = torch.tensor(10)
    >>> static_size = 2
    >>> torch.nonzero_static(input_tensor, size=static_size)
    tensor([], size=(2, 0), dtype=torch.int64)
norm
norm(p=2, dim=None, keepdim=False) -> Tensor

See :func:`torch.norm`
normal_
normal_(mean=0, std=1, *, generator=None) -> Tensor

Fills :attr:`self` tensor with elements sampled from the normal distribution
parameterized by :attr:`mean` and :attr:`std`.
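
A minimal illustration; the values shown are only representative, since the
output is random (pass a seeded :class:`torch.Generator` for reproducibility)::

    >>> g = torch.Generator().manual_seed(0)
    >>> torch.empty(3).normal_(mean=0.0, std=1.0, generator=g)
    tensor([ 1.5410, -0.2934, -2.1788])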
numel
numel() -> int

See :func:`torch.numel`
numpy
numpy(*, force=False) -> numpy.ndarray

Returns the tensor as a NumPy :class:`ndarray`.

If :attr:`force` is ``False`` (the default), the conversion
is performed only if the tensor is on the CPU, does not require grad,
does not have its conjugate bit set, and is a dtype and layout that
NumPy supports. The returned ndarray and the tensor will share their
storage, so changes to the tensor will be reflected in the ndarray
and vice versa.

If :attr:`force` is ``True`` this is equivalent to
calling ``t.detach().cpu().resolve_conj().resolve_neg().numpy()``.
If the tensor isn't on the CPU or the conjugate or negative bit is set,
the tensor won't share its storage with the returned ndarray.
Setting :attr:`force` to ``True`` can be a useful shorthand.

Args:
    force (bool): if ``True``, the ndarray may be a copy of the tensor
               instead of always sharing memory, defaults to ``False``.
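
Because the storage is shared in the default case, in-place NumPy writes
are visible in the tensor::

    >>> t = torch.arange(3)
    >>> a = t.numpy()
    >>> a[0] = 10
    >>> t
    tensor([10,  1,  2])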
orgqr
orgqr(input2) -> Tensor

See :func:`torch.orgqr`
ormqr
ormqr(input2, input3, left=True, transpose=False) -> Tensor

See :func:`torch.ormqr`
permute
permute(*dims) -> Tensor

See :func:`torch.permute`
polygamma
polygamma(n) -> Tensor

See :func:`torch.polygamma`

polygamma_
polygamma_(n) -> Tensor

In-place version of :meth:`~Tensor.polygamma`
positive
positive() -> Tensor

See :func:`torch.positive`
pow
pow(exponent) -> Tensor

See :func:`torch.pow`
pow_
pow_(exponent) -> Tensor

In-place version of :meth:`~Tensor.pow`
float_power
float_power(exponent) -> Tensor

See :func:`torch.float_power`
float_power_
float_power_(exponent) -> Tensor

In-place version of :meth:`~Tensor.float_power`
prod
prod(dim=None, keepdim=False, dtype=None) -> Tensor

See :func:`torch.prod`
put_
put_(index, source, accumulate=False) -> Tensor

Copies the elements from :attr:`source` into the positions specified by
:attr:`index`. For the purpose of indexing, the :attr:`self` tensor is treated as if
it were a 1-D tensor.

:attr:`index` and :attr:`source` need to have the same number of elements, but not necessarily
the same shape.

If :attr:`accumulate` is ``True``, the elements in :attr:`source` are added to
:attr:`self`. If :attr:`accumulate` is ``False``, the behavior is undefined if :attr:`index`
contains duplicate elements.

Args:
    index (LongTensor): the indices into self
    source (Tensor): the tensor containing values to copy from
    accumulate (bool): whether to accumulate into self

Example::

    >>> src = torch.tensor([[4, 3, 5],
    ...                     [6, 7, 8]])
    >>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10]))
    tensor([[  4,   9,   5],
            [ 10,   7,   8]])
put
put(input, index, source, accumulate=False) -> Tensor

Out-of-place version of :meth:`torch.Tensor.put_`.
`input` corresponds to `self` in :meth:`torch.Tensor.put_`.
qr
qr(some=True) -> (Tensor, Tensor)

See :func:`torch.qr`
qscheme
qscheme() -> torch.qscheme

Returns the quantization scheme of a given QTensor.
quantile
quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor

See :func:`torch.quantile`
nanquantile
nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor

See :func:`torch.nanquantile`
q_scale
q_scale() -> float

Given a Tensor quantized by linear(affine) quantization,
returns the scale of the underlying quantizer().
q_zero_point
q_zero_point() -> int

Given a Tensor quantized by linear(affine) quantization,
returns the zero_point of the underlying quantizer().
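
Example (a minimal sketch using :func:`torch.quantize_per_tensor`)::

    >>> q = torch.quantize_per_tensor(torch.tensor([1.0, 2.0]),
    ...                               scale=0.5, zero_point=1, dtype=torch.qint8)
    >>> q.q_scale()
    0.5
    >>> q.q_zero_point()
    1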
q_per_channel_scales
q_per_channel_scales() -> Tensor

Given a Tensor quantized by linear (affine) per-channel quantization,
returns a Tensor of scales of the underlying quantizer. It has the number of
elements that matches the corresponding dimensions (from q_per_channel_axis) of
the tensor.
q_per_channel_zero_points
q_per_channel_zero_points() -> Tensor

Given a Tensor quantized by linear (affine) per-channel quantization,
returns a tensor of zero_points of the underlying quantizer. It has the number of
elements that matches the corresponding dimensions (from q_per_channel_axis) of
the tensor.
q_per_channel_axis
q_per_channel_axis() -> int

Given a Tensor quantized by linear (affine) per-channel quantization,
returns the index of dimension on which per-channel quantization is applied.
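
Example (a small weight quantized along dimension 0)::

    >>> w = torch.quantize_per_channel(torch.randn(2, 3),
    ...         scales=torch.tensor([0.1, 0.2]),
    ...         zero_points=torch.tensor([0, 0]),
    ...         axis=0, dtype=torch.qint8)
    >>> w.q_per_channel_axis()
    0
    >>> w.q_per_channel_scales()
    tensor([0.1000, 0.2000], dtype=torch.float64)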
random_
random_(from=0, to=None, *, generator=None) -> Tensor

Fills :attr:`self` tensor with numbers sampled from the discrete uniform
distribution over ``[from, to - 1]``. If not specified, the values are usually
only bounded by :attr:`self` tensor's data type. However, for floating point
types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every
value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()`
will be uniform in ``[0, 2^53]``.
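
A minimal illustration; the sampled values shown are only representative,
since the output is random::

    >>> torch.empty(5, dtype=torch.int64).random_(0, 10)
    tensor([4, 0, 9, 7, 3])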
rad2deg
rad2deg() -> Tensor

See :func:`torch.rad2deg`
rad2deg_
rad2deg_() -> Tensor

In-place version of :meth:`~Tensor.rad2deg`
deg2rad
deg2rad() -> Tensor

See :func:`torch.deg2rad`
deg2rad_
deg2rad_() -> Tensor

In-place version of :meth:`~Tensor.deg2rad`
ravel
ravel() -> Tensor

See :func:`torch.ravel`

reciprocal
reciprocal() -> Tensor

See :func:`torch.reciprocal`
reciprocal_
reciprocal_() -> Tensor

In-place version of :meth:`~Tensor.reciprocal`
record_stream
record_stream(stream)

Marks the tensor as having been used by this stream. When the tensor
is deallocated, this ensures that the tensor memory is not reused for another
tensor until all work queued on :attr:`stream` at the time of deallocation is
complete.

.. note::

    The caching allocator is aware of only the stream on which a tensor was
    allocated, so it already manages the life cycle of tensors correctly when
    they are used on that one stream. But if a tensor is used on a stream
    different from the stream of origin, the allocator might reuse the memory
    unexpectedly. Calling this method lets the allocator know which streams
    have used the tensor.

.. warning::

    This method is most suitable for use cases where you are providing a
    function that created a tensor on a side stream, and want users to be able
    to make use of the tensor without having to think carefully about stream
    safety when making use of them.  These safety guarantees come at some
    performance and predictability cost (analogous to the tradeoff between GC
    and manual memory management), so if you are in a situation where
    you manage the full lifetime of your tensors, you may consider instead
    manually managing CUDA events so that calling this method is not necessary.
    In particular, when you call this method, on later allocations the
    allocator will poll the recorded stream to see if all operations have
    completed yet; you can potentially race with side stream computation and
    non-deterministically reuse or fail to reuse memory for an allocation.

    You can safely use tensors allocated on side streams without
    :meth:`~Tensor.record_stream`; you must manually ensure that
    any non-creation stream uses of a tensor are synced back to the creation
    stream before you deallocate the tensor.  As the CUDA caching allocator
    guarantees that the memory will only be reused with the same creation stream,
    this is sufficient to ensure that writes to future reallocations of the
    memory will be delayed until non-creation stream uses are done.
    (Counterintuitively, you may observe that on the CPU side we have already
    reallocated the tensor, even though CUDA kernels on the old tensor are
    still in progress.  This is fine, because CUDA operations on the new
    tensor will appropriately wait for the old operations to complete, as they
    are all on the same stream.)

    Concretely, this looks like this::

        with torch.cuda.stream(s0):
            x = torch.zeros(N)

        s1.wait_stream(s0)
        with torch.cuda.stream(s1):
            y = some_comm_op(x)

        ... some compute on s0 ...

        # synchronize creation stream s0 to side stream s1
        # before deallocating x
        s0.wait_stream(s1)
        del x

    Note that some discretion is required when deciding when to perform
    ``s0.wait_stream(s1)``.  In particular, if we were to wait immediately
    after ``some_comm_op``, there wouldn't be any point in having the side
    stream; it would be equivalent to have run ``some_comm_op`` on ``s0``.
    Instead, the synchronization must be placed at some appropriate, later
    point in time where you expect the side stream ``s1`` to have finished
    work.  This location is typically identified via profiling, e.g., using
    Chrome traces produced
    :meth:`torch.autograd.profiler.profile.export_chrome_trace`.  If you
    place the wait too early, work on ``s0`` will block until ``s1`` has finished,
    preventing further overlapping of communication and computation.  If you
    place the wait too late, you will use more memory than is strictly
    necessary (as you are keeping ``x`` live for longer).  For a concrete
    example of how this guidance can be applied in practice, see this post:
    `FSDP and CUDACachingAllocator
    <https://dev-discuss.pytorch.org/t/fsdp-cudacachingallocator-an-outsider-newb-perspective/1486>`_.
remainder
remainder(divisor) -> Tensor

See :func:`torch.remainder`

remainder_
remainder_(divisor) -> Tensor

In-place version of :meth:`~Tensor.remainder`
renorm
renorm(p, dim, maxnorm) -> Tensor

See :func:`torch.renorm`
renorm_
renorm_(p, dim, maxnorm) -> Tensor

In-place version of :meth:`~Tensor.renorm`
repeat
repeat(*repeats) -> Tensor

Repeats this tensor along the specified dimensions.

Unlike :meth:`~Tensor.expand`, this function copies the tensor's data.

.. warning::

    :meth:`~Tensor.repeat` behaves differently from
    `numpy.repeat <https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html>`_,
    but is more similar to
    `numpy.tile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.tile.html>`_.
    For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`.

Args:
    repeats (torch.Size, int..., tuple of int or list of int): The number of times to repeat this tensor along each dimension

Example::

    >>> x = torch.tensor([1, 2, 3])
    >>> x.repeat(4, 2)
    tensor([[ 1,  2,  3,  1,  2,  3],
            [ 1,  2,  3,  1,  2,  3],
            [ 1,  2,  3,  1,  2,  3],
            [ 1,  2,  3,  1,  2,  3]])
    >>> x.repeat(4, 2, 1).size()
    torch.Size([4, 2, 3])
repeat_interleave
repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor

See :func:`torch.repeat_interleave`.
requires_grad_
requires_grad_(requires_grad=True) -> Tensor

Change if autograd should record operations on this tensor: sets this tensor's
:attr:`requires_grad` attribute in-place. Returns this tensor.

:func:`requires_grad_`'s main use case is to tell autograd to begin recording
operations on a Tensor ``tensor``. If ``tensor`` has ``requires_grad=False``
(because it was obtained through a DataLoader, or required preprocessing or
initialization), ``tensor.requires_grad_()`` makes it so that autograd will
begin to record operations on ``tensor``.

Args:
    requires_grad (bool): If autograd should record operations on this tensor.
        Default: ``True``.

Example::

    >>> # Let's say we want to preprocess some saved weights and use
    >>> # the result as new weights.
    >>> saved_weights = [0.1, 0.2, 0.3, 0.25]
    >>> loaded_weights = torch.tensor(saved_weights)
    >>> weights = preprocess(loaded_weights)  # some function
    >>> weights
    tensor([-0.5503,  0.4926, -2.1158, -0.8303])

    >>> # Now, start to record operations done to weights
    >>> weights.requires_grad_()
    >>> out = weights.pow(2).sum()
    >>> out.backward()
    >>> weights.grad
    tensor([-1.1007,  0.9853, -4.2316, -1.6606])

reshape
reshape(*shape) -> Tensor

Returns a tensor with the same data and number of elements as :attr:`self`
but with the specified shape. This method returns a view if :attr:`shape` is
compatible with the current shape. See :meth:`torch.Tensor.view` on when it is
possible to return a view.

See :func:`torch.reshape`

Args:
    shape (tuple of ints or int...): the desired shape


reshape_as
reshape_as(other) -> Tensor

Returns this tensor as the same shape as :attr:`other`.
``self.reshape_as(other)`` is equivalent to ``self.reshape(other.sizes())``.
This method returns a view if ``other.sizes()`` is compatible with the current
shape. See :meth:`torch.Tensor.view` on when it is possible to return a view.

Please see :meth:`reshape` for more information about ``reshape``.

Args:
    other (:class:`torch.Tensor`): The result tensor has the same shape
        as :attr:`other`.
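
For example, reshaping a flat tensor to match a ``2 x 3`` tensor::

    >>> a = torch.arange(6)
    >>> b = torch.empty(2, 3)
    >>> a.reshape_as(b)
    tensor([[0, 1, 2],
            [3, 4, 5]])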
resize_
resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor

Resizes :attr:`self` tensor to the specified size. If the number of elements is
larger than the current storage size, then the underlying storage is resized
to fit the new number of elements. If the number of elements is smaller, the
underlying storage is not changed. Existing elements are preserved but any new
memory is uninitialized.

.. warning::

    This is a low-level method. The storage is reinterpreted as C-contiguous,
    ignoring the current strides (unless the target size equals the current
    size, in which case the tensor is left unchanged). For most purposes, you
    will instead want to use :meth:`~Tensor.view()`, which checks for
    contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To
    change the size in-place with custom strides, see :meth:`~Tensor.set_()`.

.. note::

    If :func:`torch.use_deterministic_algorithms()` and
    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
    ``True``, new elements are initialized to prevent nondeterministic behavior
    from using the result as an input to an operation. Floating point and
    complex values are set to NaN, and integer values are set to the maximum
    value.

Args:
    sizes (torch.Size or int...): the desired size
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        Tensor. Default: ``torch.contiguous_format``. Note that memory format of
        :attr:`self` is going to be unaffected if ``self.size()`` matches ``sizes``.

Example::

    >>> x = torch.tensor([[1, 2], [3, 4], [5, 6]])
    >>> x.resize_(2, 2)
    tensor([[ 1,  2],
            [ 3,  4]])

resize_as_
resize_as_(tensor, memory_format=torch.contiguous_format) -> Tensor

Resizes the :attr:`self` tensor to be the same size as the specified
:attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``.

Args:
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        Tensor. Default: ``torch.contiguous_format``. Note that memory format of
        :attr:`self` is going to be unaffected if ``self.size()`` matches ``tensor.size()``.

rot90
rot90(k, dims) -> Tensor

See :func:`torch.rot90`
round
round(decimals=0) -> Tensor

See :func:`torch.round`
round_
round_(decimals=0) -> Tensor

In-place version of :meth:`~Tensor.round`
rsqrt
rsqrt() -> Tensor

See :func:`torch.rsqrt`
rsqrt_
rsqrt_() -> Tensor

In-place version of :meth:`~Tensor.rsqrt`
scatter_
scatter_(dim, index, src, *, reduce=None) -> Tensor

Writes all values from the tensor :attr:`src` into :attr:`self` at the indices
specified in the :attr:`index` tensor. For each value in :attr:`src`, its output
index is specified by its index in :attr:`src` for ``dimension != dim`` and by
the corresponding value in :attr:`index` for ``dimension = dim``.

For a 3-D tensor, :attr:`self` is updated as::

    self[index[i][j][k]][j][k] = src[i][j][k]  # if dim == 0
    self[i][index[i][j][k]][k] = src[i][j][k]  # if dim == 1
    self[i][j][index[i][j][k]] = src[i][j][k]  # if dim == 2

This is the reverse operation of the manner described in :meth:`~Tensor.gather`.

:attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have
the same number of dimensions. It is also required that
``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
Note that ``index`` and ``src`` do not broadcast.

Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be
between ``0`` and ``self.size(dim) - 1`` inclusive.

.. warning::

    When indices are not unique, the behavior is non-deterministic (one of the
    values from ``src`` will be picked arbitrarily) and the gradient will be
    incorrect (it will be propagated to all locations in the source that
    correspond to the same index)!

.. note::

    The backward pass is implemented only for ``src.shape == index.shape``.

Additionally accepts an optional :attr:`reduce` argument that allows
specification of an optional reduction operation, which is applied to all
values in the tensor :attr:`src` into :attr:`self` at the indices
specified in the :attr:`index`. For each value in :attr:`src`, the reduction
operation is applied to an index in :attr:`self` which is specified by
its index in :attr:`src` for ``dimension != dim`` and by the corresponding
value in :attr:`index` for ``dimension = dim``.

Given a 3-D tensor and reduction using the multiplication operation, :attr:`self`
is updated as::

    self[index[i][j][k]][j][k] *= src[i][j][k]  # if dim == 0
    self[i][index[i][j][k]][k] *= src[i][j][k]  # if dim == 1
    self[i][j][index[i][j][k]] *= src[i][j][k]  # if dim == 2

Reducing with the addition operation is the same as using
:meth:`~torch.Tensor.scatter_add_`.

.. warning::
    The reduce argument with Tensor ``src`` is deprecated and will be removed in
    a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_`
    instead for more reduction options.

Args:
    dim (int): the axis along which to index
    index (LongTensor): the indices of elements to scatter, can be either empty
        or of the same dimensionality as ``src``. When empty, the operation
        returns ``self`` unchanged.
    src (Tensor): the source element(s) to scatter.

Keyword args:
    reduce (str, optional): reduction operation to apply, can be either
        ``'add'`` or ``'multiply'``.

Example::

    >>> src = torch.arange(1, 11).reshape((2, 5))
    >>> src
    tensor([[ 1,  2,  3,  4,  5],
            [ 6,  7,  8,  9, 10]])
    >>> index = torch.tensor([[0, 1, 2, 0]])
    >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
    tensor([[1, 0, 0, 4, 0],
            [0, 2, 0, 0, 0],
            [0, 0, 3, 0, 0]])
    >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
    >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
    tensor([[1, 2, 3, 0, 0],
            [6, 7, 0, 0, 8],
            [0, 0, 0, 0, 0]])

    >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
    ...            1.23, reduce='multiply')
    tensor([[2.0000, 2.0000, 2.4600, 2.0000],
            [2.0000, 2.0000, 2.0000, 2.4600]])
    >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
    ...            1.23, reduce='add')
    tensor([[2.0000, 2.0000, 3.2300, 2.0000],
            [2.0000, 2.0000, 2.0000, 3.2300]])

.. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor
   :noindex:

Writes the scalar :attr:`value` into :attr:`self` at the indices
specified in the :attr:`index` tensor. This operation is equivalent to the
previous overload, with the :attr:`src` tensor filled entirely with :attr:`value`.

Args:
    dim (int): the axis along which to index
    index (LongTensor): the indices of elements to scatter, can be either empty
        or of the same dimensionality as ``src``. When empty, the operation
        returns ``self`` unchanged.
    value (Scalar): the value to scatter.

Keyword args:
    reduce (str, optional): reduction operation to apply, can be either
        ``'add'`` or ``'multiply'``.

Example::

    >>> index = torch.tensor([[0, 1]])
    >>> value = 2
    >>> torch.zeros(3, 5).scatter_(0, index, value)
    tensor([[2., 0., 0., 0., 0.],
            [0., 2., 0., 0., 0.],
            [0., 0., 0., 0., 0.]])
scatter_add_
scatter_add_(dim, index, src) -> Tensor

Adds all values from the tensor :attr:`src` into :attr:`self` at the indices
specified in the :attr:`index` tensor in a similar fashion as
:meth:`~torch.Tensor.scatter_`. For each value in :attr:`src`, it is added to
an index in :attr:`self` which is specified by its index in :attr:`src`
for ``dimension != dim`` and by the corresponding value in :attr:`index` for
``dimension = dim``.

For a 3-D tensor, :attr:`self` is updated as::

    self[index[i][j][k]][j][k] += src[i][j][k]  # if dim == 0
    self[i][index[i][j][k]][k] += src[i][j][k]  # if dim == 1
    self[i][j][index[i][j][k]] += src[i][j][k]  # if dim == 2

:attr:`self`, :attr:`index` and :attr:`src` should have the same number of
dimensions. It is also required that ``index.size(d) <= src.size(d)`` for all
dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions
``d != dim``. Note that ``index`` and ``src`` do not broadcast.

Note:
    {forward_reproducibility_note}

.. note::

    The backward pass is implemented only for ``src.shape == index.shape``.

Args:
    dim (int): the axis along which to index
    index (LongTensor): the indices of elements to scatter and add, can be
        either empty or of the same dimensionality as ``src``. When empty, the
        operation returns ``self`` unchanged.
    src (Tensor): the source elements to scatter and add

Example::

    >>> src = torch.ones((2, 5))
    >>> index = torch.tensor([[0, 1, 2, 0, 0]])
    >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
    tensor([[1., 0., 0., 1., 1.],
            [0., 1., 0., 0., 0.],
            [0., 0., 1., 0., 0.]])
    >>> index = torch.tensor([[0, 1, 2, 0, 0], [0, 1, 2, 2, 2]])
    >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src)
    tensor([[2., 0., 0., 1., 1.],
            [0., 2., 0., 0., 0.],
            [0., 0., 2., 1., 1.]])

scatter_reduce_
scatter_reduce_(dim, index, src, reduce, *, include_self=True) -> Tensor

Reduces all values from the :attr:`src` tensor to the indices specified in
the :attr:`index` tensor in the :attr:`self` tensor, using the reduction
operation defined via the :attr:`reduce` argument (:obj:`"sum"`, :obj:`"prod"`,
:obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`). For each value in :attr:`src`, it
is reduced to an index in :attr:`self` which is specified by its index in
:attr:`src` for ``dimension != dim`` and by the corresponding value in
:attr:`index` for ``dimension = dim``. If :obj:`include_self=True`, the values
in the :attr:`self` tensor are included in the reduction.

:attr:`self`, :attr:`index` and :attr:`src` should all have
the same number of dimensions. It is also required that
``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that
``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``.
Note that ``index`` and ``src`` do not broadcast.

For a 3-D tensor with :obj:`reduce="sum"` and :obj:`include_self=True` the
output is given as::

    self[index[i][j][k]][j][k] += src[i][j][k]  # if dim == 0
    self[i][index[i][j][k]][k] += src[i][j][k]  # if dim == 1
    self[i][j][index[i][j][k]] += src[i][j][k]  # if dim == 2

Note:
    {forward_reproducibility_note}

.. note::

    The backward pass is implemented only for ``src.shape == index.shape``.

.. warning::

    This function is in beta and may change in the near future.

Args:
    dim (int): the axis along which to index
    index (LongTensor): the indices of elements to scatter and reduce.
    src (Tensor): the source elements to scatter and reduce
    reduce (str): the reduction operation to apply for non-unique indices
        (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`)
    include_self (bool): whether elements from the :attr:`self` tensor are
        included in the reduction

Example::

    >>> src = torch.tensor([1., 2., 3., 4., 5., 6.])
    >>> index = torch.tensor([0, 1, 0, 1, 2, 1])
    >>> input = torch.tensor([1., 2., 3., 4.])
    >>> input.scatter_reduce(0, index, src, reduce="sum")
    tensor([5., 14., 8., 4.])
    >>> input.scatter_reduce(0, index, src, reduce="sum", include_self=False)
    tensor([4., 12., 5., 4.])
    >>> input2 = torch.tensor([5., 4., 3., 2.])
    >>> input2.scatter_reduce(0, index, src, reduce="amax")
    tensor([5., 6., 5., 2.])
    >>> input2.scatter_reduce(0, index, src, reduce="amax", include_self=False)
    tensor([3., 6., 5., 2.])


select
select(dim, index) -> Tensor

See :func:`torch.select`
select_scatter
select_scatter(src, dim, index) -> Tensor

See :func:`torch.select_scatter`
slice_scatter
slice_scatter(src, dim=0, start=None, end=None, step=1) -> Tensor

See :func:`torch.slice_scatter`
set_
set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor

Sets the underlying storage, size, and strides. If :attr:`source` is a tensor,
:attr:`self` tensor will share the same storage and have the same size and
strides as :attr:`source`. Changes to elements in one tensor will be reflected
in the other.

If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying
storage, offset, size, and stride.

Args:
    source (Tensor or Storage): the tensor or storage to use
    storage_offset (int, optional): the offset in the storage
    size (torch.Size, optional): the desired size. Defaults to the size of the source.
    stride (tuple, optional): the desired stride. Defaults to C-contiguous strides.
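
Example (a minimal sketch of the tensor-source form)::

    >>> src = torch.arange(6.).reshape(2, 3)
    >>> t = torch.tensor([])
    >>> t.set_(src)  # t now shares src's storage, size, and strides
    tensor([[0., 1., 2.],
            [3., 4., 5.]])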
sigmoid
sigmoid() -> Tensor

See :func:`torch.sigmoid`
sigmoid_
sigmoid_() -> Tensor

In-place version of :meth:`~Tensor.sigmoid`
logit
logit() -> Tensor

See :func:`torch.logit`
logit_
logit_() -> Tensor

In-place version of :meth:`~Tensor.logit`
sign
sign() -> Tensor

See :func:`torch.sign`
sign_
sign_() -> Tensor

In-place version of :meth:`~Tensor.sign`
signbit
signbit() -> Tensor

See :func:`torch.signbit`
sgn
sgn() -> Tensor

See :func:`torch.sgn`
sgn_
sgn_() -> Tensor

In-place version of :meth:`~Tensor.sgn`
sin
sin() -> Tensor

See :func:`torch.sin`
sin_
sin_() -> Tensor

In-place version of :meth:`~Tensor.sin`
sinc
sinc() -> Tensor

See :func:`torch.sinc`
sinc_
sinc_() -> Tensor

In-place version of :meth:`~Tensor.sinc`
sinh
sinh() -> Tensor

See :func:`torch.sinh`
sinh_
sinh_() -> Tensor

In-place version of :meth:`~Tensor.sinh`
size
size(dim=None) -> torch.Size or int

Returns the size of the :attr:`self` tensor. If ``dim`` is not specified,
the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`.
If ``dim`` is specified, returns an int holding the size of that dimension.

Args:
    dim (int, optional): The dimension for which to retrieve the size.

Example::

    >>> t = torch.empty(3, 4, 5)
    >>> t.size()
    torch.Size([3, 4, 5])
    >>> t.size(dim=1)
    4

shape
shape -> torch.Size

Returns the size of the :attr:`self` tensor. Alias for :attr:`size`.

See also :meth:`Tensor.size`.

Example::

    >>> t = torch.empty(3, 4, 5)
    >>> t.size()
    torch.Size([3, 4, 5])
    >>> t.shape
    torch.Size([3, 4, 5])

sort
sort(dim=-1, descending=False) -> (Tensor, LongTensor)

See :func:`torch.sort`
msort
msort() -> Tensor

See :func:`torch.msort`
argsort
argsort(dim=-1, descending=False) -> LongTensor

See :func:`torch.argsort`

sparse_dim
sparse_dim() -> int

Returns the number of sparse dimensions in a :ref:`sparse tensor <sparse-docs>` :attr:`self`.

.. note::
  Returns ``0`` if :attr:`self` is not a sparse tensor.

See also :meth:`Tensor.dense_dim` and :ref:`hybrid tensors <sparse-hybrid-coo-docs>`.
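
Example (a COO tensor with one sparse dimension)::

    >>> s = torch.sparse_coo_tensor(
    ...         torch.tensor([[0, 2]]), torch.tensor([1., 2.]), size=(3,))
    >>> s.sparse_dim()
    1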
sparse_resize_
sparse_resize_(size, sparse_dim, dense_dim) -> Tensor

Resizes :attr:`self` :ref:`sparse tensor <sparse-docs>` to the desired
size and the number of sparse and dense dimensions.

.. note::
  If the number of specified elements in :attr:`self` is zero, then
  :attr:`size`, :attr:`sparse_dim`, and :attr:`dense_dim` can be any
  size and positive integers such that ``len(size) == sparse_dim +
  dense_dim``.

  If :attr:`self` specifies one or more elements, however, then each
  dimension in :attr:`size` must not be smaller than the corresponding
  dimension of :attr:`self`, :attr:`sparse_dim` must equal the number
  of sparse dimensions in :attr:`self`, and :attr:`dense_dim` must
  equal the number of dense dimensions in :attr:`self`.

.. warning::
  Throws an error if :attr:`self` is not a sparse tensor.

Args:
    size (torch.Size): the desired size. If :attr:`self` is non-empty
      sparse tensor, the desired size cannot be smaller than the
      original size.
    sparse_dim (int): the number of sparse dimensions
    dense_dim (int): the number of dense dimensions
sparse_resize_and_clear_
sparse_resize_and_clear_(size, sparse_dim, dense_dim) -> Tensor

Removes all specified elements from a :ref:`sparse tensor
<sparse-docs>` :attr:`self` and resizes :attr:`self` to the desired
size and the number of sparse and dense dimensions.

.. warning::
  Throws an error if :attr:`self` is not a sparse tensor.

Args:
    size (torch.Size): the desired size.
    sparse_dim (int): the number of sparse dimensions
    dense_dim (int): the number of dense dimensions
sqrt
sqrt() -> Tensor

See :func:`torch.sqrt`
sqrt_
sqrt_() -> Tensor

In-place version of :meth:`~Tensor.sqrt`
square
square() -> Tensor

See :func:`torch.square`
square_
square_() -> Tensor

In-place version of :meth:`~Tensor.square`
squeeze
squeeze(dim=None) -> Tensor

See :func:`torch.squeeze`
squeeze_
squeeze_(dim=None) -> Tensor

In-place version of :meth:`~Tensor.squeeze`
std
std(dim=None, *, correction=1, keepdim=False) -> Tensor

See :func:`torch.std`
storage_offset
storage_offset() -> int

Returns :attr:`self` tensor's offset in the underlying storage in terms of
number of storage elements (not bytes).

Example::

    >>> x = torch.tensor([1, 2, 3, 4, 5])
    >>> x.storage_offset()
    0
    >>> x[3:].storage_offset()
    3

untyped_storage
untyped_storage() -> torch.UntypedStorage

Returns the underlying :class:`UntypedStorage`.
stride
stride(dim) -> tuple or int

Returns the stride of :attr:`self` tensor.

Stride is the jump necessary to go from one element to the next one in the
specified dimension :attr:`dim`. A tuple of all strides is returned when no
argument is passed in. Otherwise, an integer value is returned as the stride in
the particular dimension :attr:`dim`.

Args:
    dim (int, optional): the desired dimension in which stride is required

Example::

    >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
    >>> x.stride()
    (5, 1)
    >>> x.stride(0)
    5
    >>> x.stride(-1)
    1

sub
sub(other, *, alpha=1) -> Tensor

See :func:`torch.sub`.
sub_
sub_(other, *, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.sub`
subtract
subtract(other, *, alpha=1) -> Tensor

See :func:`torch.subtract`.
subtract_
subtract_(other, *, alpha=1) -> Tensor

In-place version of :meth:`~Tensor.subtract`.
sum
sum(dim=None, keepdim=False, dtype=None) -> Tensor

See :func:`torch.sum`
nansum
nansum(dim=None, keepdim=False, dtype=None) -> Tensor

See :func:`torch.nansum`
svd
svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor)

See :func:`torch.svd`
swapdims
swapdims(dim0, dim1) -> Tensor

See :func:`torch.swapdims`
swapdims_
swapdims_(dim0, dim1) -> Tensor

In-place version of :meth:`~Tensor.swapdims`
swapaxes
swapaxes(axis0, axis1) -> Tensor

See :func:`torch.swapaxes`
swapaxes_
swapaxes_(axis0, axis1) -> Tensor

In-place version of :meth:`~Tensor.swapaxes`
t
t() -> Tensor

See :func:`torch.t`
t_
t_() -> Tensor

In-place version of :meth:`~Tensor.t`
tile
tile(dims) -> Tensor

See :func:`torch.tile`
to
to(*args, **kwargs) -> Tensor

Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are
inferred from the arguments of ``self.to(*args, **kwargs)``.

.. note::

    If the ``self`` Tensor already
    has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned.
    Otherwise, the returned tensor is a copy of ``self`` with the desired
    :class:`torch.dtype` and :class:`torch.device`.

Here are the ways to call ``to``:

.. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
   :noindex:

    Returns a Tensor with the specified :attr:`dtype`

    Args:
        {memory_format}

.. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor
   :noindex:

    Returns a Tensor with the specified :attr:`device` and (optional)
    :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``.
    When :attr:`non_blocking`, tries to convert asynchronously with respect to
    the host if possible, e.g., converting a CPU Tensor with pinned memory to a
    CUDA Tensor.
    When :attr:`copy` is set, a new Tensor is created even when the Tensor
    already matches the desired conversion.

    Args:
        {memory_format}

.. method:: to(other, non_blocking=False, copy=False) -> Tensor
   :noindex:

    Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as
    the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert
    asynchronously with respect to the host if possible, e.g., converting a CPU
    Tensor with pinned memory to a CUDA Tensor.
    When :attr:`copy` is set, a new Tensor is created even when the Tensor
    already matches the desired conversion.

Example::

    >>> tensor = torch.randn(2, 2)  # Initially dtype=float32, device=cpu
    >>> tensor.to(torch.float64)
    tensor([[-0.5044,  0.0005],
            [ 0.3310, -0.0584]], dtype=torch.float64)

    >>> cuda0 = torch.device('cuda:0')
    >>> tensor.to(cuda0)
    tensor([[-0.5044,  0.0005],
            [ 0.3310, -0.0584]], device='cuda:0')

    >>> tensor.to(cuda0, dtype=torch.float64)
    tensor([[-0.5044,  0.0005],
            [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')

    >>> other = torch.randn((), dtype=torch.float64, device=cuda0)
    >>> tensor.to(other, non_blocking=True)
    tensor([[-0.5044,  0.0005],
            [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0')
byte
byte(memory_format=torch.preserve_format) -> Tensor

``self.byte()`` is equivalent to ``self.to(torch.uint8)``. See :func:`to`.

Args:
    {memory_format}
bool
bool(memory_format=torch.preserve_format) -> Tensor

``self.bool()`` is equivalent to ``self.to(torch.bool)``. See :func:`to`.

Args:
    {memory_format}
char
char(memory_format=torch.preserve_format) -> Tensor

``self.char()`` is equivalent to ``self.to(torch.int8)``. See :func:`to`.

Args:
    {memory_format}
bfloat16
bfloat16(memory_format=torch.preserve_format) -> Tensor

``self.bfloat16()`` is equivalent to ``self.to(torch.bfloat16)``. See :func:`to`.

Args:
    {memory_format}
double
double(memory_format=torch.preserve_format) -> Tensor

``self.double()`` is equivalent to ``self.to(torch.float64)``. See :func:`to`.

Args:
    {memory_format}
float
float(memory_format=torch.preserve_format) -> Tensor

``self.float()`` is equivalent to ``self.to(torch.float32)``. See :func:`to`.

Args:
    {memory_format}
cdouble
cdouble(memory_format=torch.preserve_format) -> Tensor

``self.cdouble()`` is equivalent to ``self.to(torch.complex128)``. See :func:`to`.

Args:
    {memory_format}
cfloat
cfloat(memory_format=torch.preserve_format) -> Tensor

``self.cfloat()`` is equivalent to ``self.to(torch.complex64)``. See :func:`to`.

Args:
    {memory_format}
chalf
chalf(memory_format=torch.preserve_format) -> Tensor

``self.chalf()`` is equivalent to ``self.to(torch.complex32)``. See :func:`to`.

Args:
    {memory_format}
half
half(memory_format=torch.preserve_format) -> Tensor

``self.half()`` is equivalent to ``self.to(torch.float16)``. See :func:`to`.

Args:
    {memory_format}
int
int(memory_format=torch.preserve_format) -> Tensor

``self.int()`` is equivalent to ``self.to(torch.int32)``. See :func:`to`.

Args:
    {memory_format}
int_repr
int_repr() -> Tensor

Given a quantized Tensor,
``self.int_repr()`` returns a CPU Tensor of ``uint8_t`` data type that stores
the underlying integer values of the given Tensor.
long
long(memory_format=torch.preserve_format) -> Tensor

``self.long()`` is equivalent to ``self.to(torch.int64)``. See :func:`to`.

Args:
    {memory_format}
short
short(memory_format=torch.preserve_format) -> Tensor

``self.short()`` is equivalent to ``self.to(torch.int16)``. See :func:`to`.

Args:
    {memory_format}
take
take(indices) -> Tensor

See :func:`torch.take`
take_along_dim
take_along_dim(indices, dim) -> Tensor

See :func:`torch.take_along_dim`
tan
tan() -> Tensor

See :func:`torch.tan`
tan_
tan_() -> Tensor

In-place version of :meth:`~Tensor.tan`
tanh
tanh() -> Tensor

See :func:`torch.tanh`
softmax
softmax(dim) -> Tensor

Alias for :func:`torch.nn.functional.softmax`.
tanh_
tanh_() -> Tensor

In-place version of :meth:`~Tensor.tanh`
tolist
tolist() -> list or number

Returns the tensor as a (nested) list. For scalars, a standard
Python number is returned, just like with :meth:`~Tensor.item`.
Tensors are automatically moved to the CPU first if necessary.

This operation is not differentiable.

Examples::

    >>> a = torch.randn(2, 2)
    >>> a.tolist()
    [[0.012766935862600803, 0.5415473580360413],
     [-0.08909505605697632, 0.7729271650314331]]
    >>> a[0,0].tolist()
    0.012766935862600803
topk
topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor)

See :func:`torch.topk`
to_dense
to_dense(dtype=None, *, masked_grad=True) -> Tensor

Creates a strided copy of :attr:`self` if :attr:`self` is not a strided tensor, otherwise returns :attr:`self`.

Keyword args:
    {dtype}
    masked_grad (bool, optional): If set to ``True`` (default) and
      :attr:`self` has a sparse layout then the backward of
      :meth:`to_dense` returns ``grad.sparse_mask(self)``.

Example::

    >>> s = torch.sparse_coo_tensor(
    ...        torch.tensor([[1, 1],
    ...                      [0, 2]]),
    ...        torch.tensor([9, 10]),
    ...        size=(3, 3))
    >>> s.to_dense()
    tensor([[ 0,  0,  0],
            [ 9,  0, 10],
            [ 0,  0,  0]])
to_sparse
to_sparse(sparseDims) -> Tensor

Returns a sparse copy of the tensor.  PyTorch supports sparse tensors in
:ref:`coordinate format <sparse-coo-docs>`.

Args:
    sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor

Example::

    >>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]])
    >>> d
    tensor([[ 0,  0,  0],
            [ 9,  0, 10],
            [ 0,  0,  0]])
    >>> d.to_sparse()
    tensor(indices=tensor([[1, 1],
                           [0, 2]]),
           values=tensor([ 9, 10]),
           size=(3, 3), nnz=2, layout=torch.sparse_coo)
    >>> d.to_sparse(1)
    tensor(indices=tensor([[1]]),
           values=tensor([[ 9,  0, 10]]),
           size=(3, 3), nnz=1, layout=torch.sparse_coo)

.. method:: to_sparse(*, layout=None, blocksize=None, dense_dim=None) -> Tensor
   :noindex:

Returns a sparse tensor with the specified layout and blocksize.  If
the :attr:`self` is strided, the number of dense dimensions can be
specified, and a hybrid sparse tensor will be created, with
`dense_dim` dense dimensions and `self.dim() - 2 - dense_dim` batch
dimensions.

.. note:: If the :attr:`self` layout and blocksize parameters match
          with the specified layout and blocksize, return
          :attr:`self`. Otherwise, return a sparse tensor copy of
          :attr:`self`.

Args:

    layout (:class:`torch.layout`, optional): The desired sparse
      layout. One of ``torch.sparse_coo``, ``torch.sparse_csr``,
      ``torch.sparse_csc``, ``torch.sparse_bsr``, or
      ``torch.sparse_bsc``. Default: if ``None``,
      ``torch.sparse_coo``.

    blocksize (list, tuple, :class:`torch.Size`, optional): Block size
      of the resulting BSR or BSC tensor. For other layouts,
      specifying the block size that is not ``None`` will result in a
      RuntimeError exception.  A block size must be a tuple of length
      two such that its items evenly divide the two sparse dimensions.

    dense_dim (int, optional): Number of dense dimensions of the
      resulting CSR, CSC, BSR or BSC tensor.  This argument should be
      used only if :attr:`self` is a strided tensor, and must be a
      value between 0 and dimension of :attr:`self` tensor minus two.

Example::

    >>> x = torch.tensor([[1, 0], [0, 0], [2, 3]])
    >>> x.to_sparse(layout=torch.sparse_coo)
    tensor(indices=tensor([[0, 2, 2],
                           [0, 0, 1]]),
           values=tensor([1, 2, 3]),
           size=(3, 2), nnz=3, layout=torch.sparse_coo)
    >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(1, 2))
    tensor(crow_indices=tensor([0, 1, 1, 2]),
           col_indices=tensor([0, 0]),
           values=tensor([[[1, 0]],
                          [[2, 3]]]), size=(3, 2), nnz=2, layout=torch.sparse_bsr)
    >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 1))
    RuntimeError: Tensor size(-2) 3 needs to be divisible by blocksize[0] 2
    >>> x.to_sparse(layout=torch.sparse_csr, blocksize=(3, 1))
    RuntimeError: to_sparse for Strided to SparseCsr conversion does not use specified blocksize

    >>> x = torch.tensor([[[1], [0]], [[0], [0]], [[2], [3]]])
    >>> x.to_sparse(layout=torch.sparse_csr, dense_dim=1)
    tensor(crow_indices=tensor([0, 1, 1, 3]),
           col_indices=tensor([0, 0, 1]),
           values=tensor([[1],
                          [2],
                          [3]]), size=(3, 2, 1), nnz=3, layout=torch.sparse_csr)

to_sparse_csr
to_sparse_csr(dense_dim=None) -> Tensor

Convert a tensor to compressed row storage format (CSR).  Except for
strided tensors, only works with 2D tensors.  If the :attr:`self` is
strided, then the number of dense dimensions can be specified, and a
hybrid CSR tensor will be created, with `dense_dim` dense dimensions
and `self.dim() - 2 - dense_dim` batch dimensions.

Args:

    dense_dim (int, optional): Number of dense dimensions of the
      resulting CSR tensor.  This argument should be used only if
      :attr:`self` is a strided tensor, and must be a value between 0
      and dimension of :attr:`self` tensor minus two.

Example::

    >>> dense = torch.randn(5, 5)
    >>> sparse = dense.to_sparse_csr()
    >>> sparse._nnz()
    25

    >>> dense = torch.zeros(3, 3, 1, 1)
    >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
    >>> dense.to_sparse_csr(dense_dim=2)
    tensor(crow_indices=tensor([0, 1, 2, 3]),
           col_indices=tensor([0, 2, 1]),
           values=tensor([[[1.]],

                          [[1.]],

                          [[1.]]]), size=(3, 3, 1, 1), nnz=3,
           layout=torch.sparse_csr)

to_sparse_csc
to_sparse_csc() -> Tensor

Convert a tensor to compressed column storage (CSC) format.  Except
for strided tensors, only works with 2D tensors.  If the :attr:`self`
is strided, then the number of dense dimensions can be specified,
and a hybrid CSC tensor will be created, with `dense_dim` dense
dimensions and `self.dim() - 2 - dense_dim` batch dimensions.

Args:

    dense_dim (int, optional): Number of dense dimensions of the
      resulting CSC tensor.  This argument should be used only if
      :attr:`self` is a strided tensor, and must be a value between 0
      and dimension of :attr:`self` tensor minus two.

Example::

    >>> dense = torch.randn(5, 5)
    >>> sparse = dense.to_sparse_csc()
    >>> sparse._nnz()
    25

    >>> dense = torch.zeros(3, 3, 1, 1)
    >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1
    >>> dense.to_sparse_csc(dense_dim=2)
    tensor(ccol_indices=tensor([0, 1, 2, 3]),
           row_indices=tensor([0, 2, 1]),
           values=tensor([[[1.]],

                          [[1.]],

                          [[1.]]]), size=(3, 3, 1, 1), nnz=3,
           layout=torch.sparse_csc)

to_sparse_bsr
to_sparse_bsr(blocksize, dense_dim) -> Tensor

Convert a tensor to a block sparse row (BSR) storage format of given
blocksize.  If the :attr:`self` is strided, then the number of dense
dimensions can be specified, and a hybrid BSR tensor will be
created, with `dense_dim` dense dimensions and `self.dim() - 2 -
dense_dim` batch dimensions.

Args:

    blocksize (list, tuple, :class:`torch.Size`, optional): Block size
      of the resulting BSR tensor. A block size must be a tuple of
      length two such that its items evenly divide the two sparse
      dimensions.

    dense_dim (int, optional): Number of dense dimensions of the
      resulting BSR tensor.  This argument should be used only if
      :attr:`self` is a strided tensor, and must be a value between 0
      and dimension of :attr:`self` tensor minus two.

Example::

    >>> dense = torch.randn(10, 10)
    >>> sparse = dense.to_sparse_csr()
    >>> sparse_bsr = sparse.to_sparse_bsr((5, 5))
    >>> sparse_bsr.col_indices()
    tensor([0, 1, 0, 1])

    >>> dense = torch.zeros(4, 3, 1)
    >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
    >>> dense.to_sparse_bsr((2, 1), 1)
    tensor(crow_indices=tensor([0, 2, 3]),
           col_indices=tensor([0, 2, 1]),
           values=tensor([[[[1.]],

                           [[1.]]],


                          [[[1.]],

                           [[1.]]],


                          [[[1.]],

                           [[1.]]]]), size=(4, 3, 1), nnz=3,
           layout=torch.sparse_bsr)

to_sparse_bsc
to_sparse_bsc(blocksize, dense_dim) -> Tensor

Convert a tensor to a block sparse column (BSC) storage format of
given blocksize.  If the :attr:`self` is strided, then the number of
dense dimensions can be specified, and a hybrid BSC tensor will be
created, with `dense_dim` dense dimensions and `self.dim() - 2 -
dense_dim` batch dimensions.

Args:

    blocksize (list, tuple, :class:`torch.Size`, optional): Block size
      of the resulting BSC tensor. A block size must be a tuple of
      length two such that its items evenly divide the two sparse
      dimensions.

    dense_dim (int, optional): Number of dense dimensions of the
      resulting BSC tensor.  This argument should be used only if
      :attr:`self` is a strided tensor, and must be a value between 0
      and dimension of :attr:`self` tensor minus two.

Example::

    >>> dense = torch.randn(10, 10)
    >>> sparse = dense.to_sparse_csr()
    >>> sparse_bsc = sparse.to_sparse_bsc((5, 5))
    >>> sparse_bsc.row_indices()
    tensor([0, 1, 0, 1])

    >>> dense = torch.zeros(4, 3, 1)
    >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1
    >>> dense.to_sparse_bsc((2, 1), 1)
    tensor(ccol_indices=tensor([0, 1, 2, 3]),
           row_indices=tensor([0, 1, 0]),
           values=tensor([[[[1.]],

                           [[1.]]],


                          [[[1.]],

                           [[1.]]],


                          [[[1.]],

                           [[1.]]]]), size=(4, 3, 1), nnz=3,
           layout=torch.sparse_bsc)

to_mkldnn
to_mkldnn() -> Tensor

Returns a copy of the tensor in ``torch.mkldnn`` layout.

trace
trace() -> Tensor

See :func:`torch.trace`
transpose
transpose(dim0, dim1) -> Tensor

See :func:`torch.transpose`

transpose_
transpose_(dim0, dim1) -> Tensor

In-place version of :meth:`~Tensor.transpose`
triangular_solve
triangular_solve(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)

See :func:`torch.triangular_solve`
tril
tril(diagonal=0) -> Tensor

See :func:`torch.tril`
tril_
tril_(diagonal=0) -> Tensor

In-place version of :meth:`~Tensor.tril`
triu
triu(diagonal=0) -> Tensor

See :func:`torch.triu`
triu_
triu_(diagonal=0) -> Tensor

In-place version of :meth:`~Tensor.triu`
true_divide
true_divide(value) -> Tensor

See :func:`torch.true_divide`
true_divide_
true_divide_(value) -> Tensor

In-place version of :meth:`~Tensor.true_divide`
trunc
trunc() -> Tensor

See :func:`torch.trunc`
fix
fix() -> Tensor

See :func:`torch.fix`.
trunc_
trunc_() -> Tensor

In-place version of :meth:`~Tensor.trunc`
fix_
fix_() -> Tensor

In-place version of :meth:`~Tensor.fix`
type
type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor

Returns the type if `dtype` is not provided, else casts this object to
the specified type.

If this is already of the correct type, no copy is performed and the
original object is returned.

Args:
    dtype (dtype or string): The desired type
    non_blocking (bool): If ``True``, and the source is in pinned memory
        and destination is on the GPU or vice versa, the copy is performed
        asynchronously with respect to the host. Otherwise, the argument
        has no effect.
    **kwargs: For compatibility, may contain the key ``async`` in place of
        the ``non_blocking`` argument. The ``async`` arg is deprecated.
type_as
type_as(tensor) -> Tensor

Returns this tensor cast to the type of the given tensor.

This is a no-op if the tensor is already of the correct type. This is
equivalent to ``self.type(tensor.type())``.

Args:
    tensor (Tensor): the tensor which has the desired type
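
Example (an integer tensor adopts the dtype of a float tensor)::

    >>> ints = torch.arange(3)     # torch.int64
    >>> floats = torch.randn(3)    # torch.float32
    >>> ints.type_as(floats).dtype
    torch.float32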
unfold
unfold(dimension, size, step) -> Tensor

Returns a view of the original tensor which contains all slices of size :attr:`size` from
:attr:`self` tensor in the dimension :attr:`dimension`.

Step between two slices is given by :attr:`step`.

If `sizedim` is the size of dimension :attr:`dimension` for :attr:`self`, the size of
dimension :attr:`dimension` in the returned tensor will be
`(sizedim - size) / step + 1`.

An additional dimension of size :attr:`size` is appended in the returned tensor.

Args:
    dimension (int): dimension in which unfolding happens
    size (int): the size of each slice that is unfolded
    step (int): the step between each slice

Example::

    >>> x = torch.arange(1., 8)
    >>> x
    tensor([ 1.,  2.,  3.,  4.,  5.,  6.,  7.])
    >>> x.unfold(0, 2, 1)
    tensor([[ 1.,  2.],
            [ 2.,  3.],
            [ 3.,  4.],
            [ 4.,  5.],
            [ 5.,  6.],
            [ 6.,  7.]])
    >>> x.unfold(0, 2, 2)
    tensor([[ 1.,  2.],
            [ 3.,  4.],
            [ 5.,  6.]])
uniform_
uniform_(from=0, to=1, *, generator=None) -> Tensor

Fills :attr:`self` tensor with numbers sampled from the continuous uniform
distribution:

.. math::
    f(x) = \dfrac{1}{\text{to} - \text{from}}
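
The sampled values are random, so the example below checks a property of the
result rather than printing a fixed output::

    >>> t = torch.empty(3).uniform_(0, 1)   # samples lie in [0, 1)
    >>> bool(((t >= 0) & (t < 1)).all())
    True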
unsqueeze
unsqueeze(dim) -> Tensor

See :func:`torch.unsqueeze`

unsqueeze_
unsqueeze_(dim) -> Tensor

In-place version of :meth:`~Tensor.unsqueeze`
var
var(dim=None, *, correction=1, keepdim=False) -> Tensor

See :func:`torch.var`
vdot
vdot(other) -> Tensor

See :func:`torch.vdot`
view
view(*shape) -> Tensor

Returns a new tensor with the same data as the :attr:`self` tensor but of a
different :attr:`shape`.

The returned tensor shares the same data and must have the same number
of elements, but may have a different size. For a tensor to be viewed, the new
view size must be compatible with its original size and stride, i.e., each new
view dimension must either be a subspace of an original dimension, or only span
across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following
contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`,

.. math::

  \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1]

Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape`
without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a
:meth:`view` can be performed, it is advisable to use :meth:`reshape`, which
returns a view if the shapes are compatible, and copies (equivalent to calling
:meth:`contiguous`) otherwise.

Args:
    shape (torch.Size or int...): the desired size

Example::

    >>> x = torch.randn(4, 4)
    >>> x.size()
    torch.Size([4, 4])
    >>> y = x.view(16)
    >>> y.size()
    torch.Size([16])
    >>> z = x.view(-1, 8)  # the size -1 is inferred from other dimensions
    >>> z.size()
    torch.Size([2, 8])

    >>> a = torch.randn(1, 2, 3, 4)
    >>> a.size()
    torch.Size([1, 2, 3, 4])
    >>> b = a.transpose(1, 2)  # Swaps 2nd and 3rd dimension
    >>> b.size()
    torch.Size([1, 3, 2, 4])
    >>> c = a.view(1, 3, 2, 4)  # Does not change tensor layout in memory
    >>> c.size()
    torch.Size([1, 3, 2, 4])
    >>> torch.equal(b, c)
    False


.. method:: view(dtype) -> Tensor
   :noindex:

Returns a new tensor with the same data as the :attr:`self` tensor but of a
different :attr:`dtype`.

If the element size of :attr:`dtype` is different than that of ``self.dtype``,
then the size of the last dimension of the output will be scaled
proportionally.  For instance, if :attr:`dtype` element size is twice that of
``self.dtype``, then each pair of elements in the last dimension of
:attr:`self` will be combined, and the size of the last dimension of the output
will be half that of :attr:`self`. If :attr:`dtype` element size is half that
of ``self.dtype``, then each element in the last dimension of :attr:`self` will
be split in two, and the size of the last dimension of the output will be
double that of :attr:`self`. For this to be possible, the following conditions
must be true:

    * ``self.dim()`` must be greater than 0.
    * ``self.stride(-1)`` must be 1.

Additionally, if the element size of :attr:`dtype` is greater than that of
``self.dtype``, the following conditions must be true as well:

    * ``self.size(-1)`` must be divisible by the ratio between the element
      sizes of the dtypes.
    * ``self.storage_offset()`` must be divisible by the ratio between the
      element sizes of the dtypes.
    * The strides of all dimensions, except the last dimension, must be
      divisible by the ratio between the element sizes of the dtypes.

If any of the above conditions are not met, an error is thrown.

.. warning::

    This overload is not supported by TorchScript, and using it in a TorchScript
    program will cause undefined behavior.


Args:
    dtype (:class:`torch.dtype`): the desired dtype

Example::

    >>> x = torch.randn(4, 4)
    >>> x
    tensor([[ 0.9482, -0.0310,  1.4999, -0.5316],
            [-0.1520,  0.7472,  0.5617, -0.8649],
            [-2.4724, -0.0334, -0.2976, -0.8499],
            [-0.2109,  1.9913, -0.9607, -0.6123]])
    >>> x.dtype
    torch.float32

    >>> y = x.view(torch.int32)
    >>> y
    tensor([[ 1064483442, -1124191867,  1069546515, -1089989247],
            [-1105482831,  1061112040,  1057999968, -1084397505],
            [-1071760287, -1123489973, -1097310419, -1084649136],
            [-1101533110,  1073668768, -1082790149, -1088634448]],
        dtype=torch.int32)
    >>> y[0, 0] = 1000000000
    >>> x
    tensor([[ 0.0047, -0.0310,  1.4999, -0.5316],
            [-0.1520,  0.7472,  0.5617, -0.8649],
            [-2.4724, -0.0334, -0.2976, -0.8499],
            [-0.2109,  1.9913, -0.9607, -0.6123]])

    >>> x.view(torch.cfloat)
    tensor([[ 0.0047-0.0310j,  1.4999-0.5316j],
            [-0.1520+0.7472j,  0.5617-0.8649j],
            [-2.4724-0.0334j, -0.2976-0.8499j],
            [-0.2109+1.9913j, -0.9607-0.6123j]])
    >>> x.view(torch.cfloat).size()
    torch.Size([4, 2])

    >>> x.view(torch.uint8)
    tensor([[  0, 202, 154,  59, 182, 243, 253, 188, 185, 252, 191,  63, 240,  22,
               8, 191],
            [227, 165,  27, 190, 128,  72,  63,  63, 146, 203,  15,  63,  22, 106,
              93, 191],
            [205,  59,  30, 192, 112, 206,   8, 189,   7,  95, 152, 190,  12, 147,
              89, 191],
            [ 43, 246,  87, 190, 235, 226, 254,  63, 111, 240, 117, 191, 177, 191,
              28, 191]], dtype=torch.uint8)
    >>> x.view(torch.uint8).size()
    torch.Size([4, 16])
view_as
view_as(other) -> Tensor

View this tensor as the same size as :attr:`other`.
``self.view_as(other)`` is equivalent to ``self.view(other.size())``.

Please see :meth:`~Tensor.view` for more information about ``view``.

Args:
    other (:class:`torch.Tensor`): The result tensor has the same size
        as :attr:`other`.
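
A short example, viewing a 1-D tensor as the shape of another tensor::

    >>> x = torch.arange(6)
    >>> y = torch.empty(2, 3)
    >>> x.view_as(y).size()
    torch.Size([2, 3])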
expand
expand(*sizes) -> Tensor

Returns a new view of the :attr:`self` tensor with singleton dimensions expanded
to a larger size.

Passing -1 as the size for a dimension means not changing the size of
that dimension.

A tensor can also be expanded to a larger number of dimensions, and the
new ones will be appended at the front. For the new dimensions, the
size cannot be set to -1.

Expanding a tensor does not allocate new memory, but only creates a
new view on the existing tensor where a dimension of size one is
expanded to a larger size by setting the ``stride`` to 0. Any dimension
of size 1 can be expanded to an arbitrary value without allocating new
memory.

Args:
    *sizes (torch.Size or int...): the desired expanded size

.. warning::

    More than one element of an expanded tensor may refer to a single
    memory location. As a result, in-place operations (especially ones that
    are vectorized) may result in incorrect behavior. If you need to write
    to the tensors, please clone them first.

Example::

    >>> x = torch.tensor([[1], [2], [3]])
    >>> x.size()
    torch.Size([3, 1])
    >>> x.expand(3, 4)
    tensor([[ 1,  1,  1,  1],
            [ 2,  2,  2,  2],
            [ 3,  3,  3,  3]])
    >>> x.expand(-1, 4)   # -1 means not changing the size of that dimension
    tensor([[ 1,  1,  1,  1],
            [ 2,  2,  2,  2],
            [ 3,  3,  3,  3]])
expand_as
expand_as(other) -> Tensor

Expand this tensor to the same size as :attr:`other`.
``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.

Please see :meth:`~Tensor.expand` for more information about ``expand``.

Args:
    other (:class:`torch.Tensor`): The result tensor has the same size
        as :attr:`other`.
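
A short example, expanding a column vector to the shape of another tensor::

    >>> x = torch.tensor([[1], [2]])
    >>> y = torch.zeros(2, 3)
    >>> x.expand_as(y)
    tensor([[1, 1, 1],
            [2, 2, 2]])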
sum_to_size
sum_to_size(*size) -> Tensor

Sum :attr:`self` tensor to :attr:`size`.
:attr:`size` must be broadcastable to the :attr:`self` tensor size.

Args:
    size (int...): a sequence of integers defining the shape of the output tensor.
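
For example, summing a ``(2, 3)`` tensor down to the broadcastable shape ``(1, 3)``::

    >>> x = torch.ones(2, 3)
    >>> x.sum_to_size(1, 3)   # sums over dim 0, keeping it as size 1
    tensor([[2., 2., 2.]])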
zero_
zero_() -> Tensor

Fills :attr:`self` tensor with zeros.
matmul
matmul(tensor2) -> Tensor

See :func:`torch.matmul`
chunk
chunk(chunks, dim=0) -> List of Tensors

See :func:`torch.chunk`
unsafe_chunk
unsafe_chunk(chunks, dim=0) -> List of Tensors

See :func:`torch.unsafe_chunk`
unsafe_split
unsafe_split(split_size, dim=0) -> List of Tensors

See :func:`torch.unsafe_split`
tensor_split
tensor_split(indices_or_sections, dim=0) -> List of Tensors

See :func:`torch.tensor_split`
hsplit
hsplit(split_size_or_sections) -> List of Tensors

See :func:`torch.hsplit`
vsplit
vsplit(split_size_or_sections) -> List of Tensors

See :func:`torch.vsplit`
dsplit
dsplit(split_size_or_sections) -> List of Tensors

See :func:`torch.dsplit`
stft
stft(n_fft, hop_length=None, win_length=None, window=None, center=True, pad_mode='reflect', normalized=False, onesided=None, return_complex=None) -> Tensor

See :func:`torch.stft`
istft
istft(n_fft, hop_length=None, win_length=None, window=None,
      center=True, normalized=False, onesided=True, length=None) -> Tensor

See :func:`torch.istft`
det
det() -> Tensor

See :func:`torch.det`
where
where(condition, y) -> Tensor

``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``.
See :func:`torch.where`
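
For example, keeping positive entries of :attr:`self` and replacing the rest::

    >>> x = torch.tensor([-1., 2., -3.])
    >>> x.where(x > 0, torch.zeros_like(x))
    tensor([0., 2., 0.])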
logdet
logdet() -> Tensor

See :func:`torch.logdet`
slogdet
slogdet() -> (Tensor, Tensor)

See :func:`torch.slogdet`
unbind
unbind(dim=0) -> seq

See :func:`torch.unbind`

pin_memory
pin_memory() -> Tensor

Copies the tensor to pinned memory, if it's not already pinned.
pinverse
pinverse() -> Tensor

See :func:`torch.pinverse`
index_add
index_add(dim, index, source, *, alpha=1) -> Tensor

Out-of-place version of :meth:`torch.Tensor.index_add_`.

index_copy
index_copy(dim, index, tensor2) -> Tensor

Out-of-place version of :meth:`torch.Tensor.index_copy_`.

index_fill
index_fill(dim, index, value) -> Tensor

Out-of-place version of :meth:`torch.Tensor.index_fill_`.
scatter
scatter(dim, index, src) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_`
scatter_add
scatter_add(dim, index, src) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_add_`
scatter_reduce
scatter_reduce(dim, index, src, reduce, *, include_self=True) -> Tensor

Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
masked_scatter
masked_scatter(mask, tensor) -> Tensor

Out-of-place version of :meth:`torch.Tensor.masked_scatter_`

.. note::

    The inputs :attr:`self` and :attr:`mask`
    :ref:`broadcast <broadcasting-semantics>`.

Example::

    >>> self = torch.tensor([0, 0, 0, 0, 0])
    >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=torch.bool)
    >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
    >>> self.masked_scatter(mask, source)
    tensor([[0, 0, 0, 0, 1],
            [2, 3, 0, 4, 5]])

xlogy
xlogy(other) -> Tensor

See :func:`torch.xlogy`
xlogy_
xlogy_(other) -> Tensor

In-place version of :meth:`~Tensor.xlogy`
masked_fill
masked_fill(mask, value) -> Tensor

Out-of-place version of :meth:`torch.Tensor.masked_fill_`
grad
This attribute is ``None`` by default and becomes a Tensor the first time a call to
:func:`backward` computes gradients for ``self``.
The attribute will then contain the gradients computed and future calls to
:func:`backward` will accumulate (add) gradients into it.
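
A minimal sketch of the accumulation behavior::

    >>> x = torch.tensor([1.], requires_grad=True)
    >>> x.grad is None            # no gradient before the first backward
    True
    >>> (2 * x).sum().backward()
    >>> x.grad
    tensor([2.])
    >>> (3 * x).sum().backward()
    >>> x.grad                    # gradients accumulate across calls
    tensor([5.])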
retain_grad
retain_grad() -> None

Enables this Tensor to have its :attr:`grad` populated during
:func:`backward`. This is a no-op for leaf tensors.
retains_grad
Is ``True`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be
populated during :func:`backward`, ``False`` otherwise.
requires_grad
Is ``True`` if gradients need to be computed for this Tensor, ``False`` otherwise.

.. note::

    The fact that gradients need to be computed for a Tensor does not mean that the :attr:`grad`
    attribute will be populated, see :attr:`is_leaf` for more details.

is_leaf
All Tensors that have :attr:`requires_grad` which is ``False`` will be leaf Tensors by convention.

For Tensors that have :attr:`requires_grad` which is ``True``, they will be leaf Tensors if they were
created by the user. This means that they are not the result of an operation and so
:attr:`grad_fn` is None.

Only leaf Tensors will have their :attr:`grad` populated during a call to :func:`backward`.
To get :attr:`grad` populated for non-leaf Tensors, you can use :func:`retain_grad`.

Example::

    >>> a = torch.rand(10, requires_grad=True)
    >>> a.is_leaf
    True
    >>> b = torch.rand(10, requires_grad=True).cuda()
    >>> b.is_leaf
    False
    # b was created by the operation that cast a cpu Tensor into a cuda Tensor
    >>> c = torch.rand(10, requires_grad=True) + 2
    >>> c.is_leaf
    False
    # c was created by the addition operation
    >>> d = torch.rand(10).cuda()
    >>> d.is_leaf
    True
    # d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)
    >>> e = torch.rand(10).cuda().requires_grad_()
    >>> e.is_leaf
    True
    # e requires gradients and has no operations creating it
    >>> f = torch.rand(10, requires_grad=True, device="cuda")
    >>> f.is_leaf
    True
    # f requires grad, has no operation creating it


names
Stores names for each of this tensor's dimensions.

``names[idx]`` corresponds to the name of tensor dimension ``idx``.
Names are either a string if the dimension is named or ``None`` if the
dimension is unnamed.

Dimension names may contain only alphanumeric characters and underscores.
Furthermore, a dimension name must be a valid Python variable name that
does not start with an underscore.

Tensors may not have two named dimensions with the same name.

.. warning::
    The named tensor API is experimental and subject to change.
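
For example, naming the first two dimensions and leaving the last unnamed::

    >>> imgs = torch.randn(1, 2, 3, names=('N', 'C', None))
    >>> imgs.names
    ('N', 'C', None)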

is_cuda
Is ``True`` if the Tensor is stored on the GPU, ``False`` otherwise.
is_cpu
Is ``True`` if the Tensor is stored on the CPU, ``False`` otherwise.
is_xla
Is ``True`` if the Tensor is stored on an XLA device, ``False`` otherwise.
is_ipu
Is ``True`` if the Tensor is stored on the IPU, ``False`` otherwise.
is_xpu
Is ``True`` if the Tensor is stored on the XPU, ``False`` otherwise.
is_quantized
Is ``True`` if the Tensor is quantized, ``False`` otherwise.
is_meta
Is ``True`` if the Tensor is a meta tensor, ``False`` otherwise.  Meta tensors
are like normal tensors, but they carry no data.
is_mps
Is ``True`` if the Tensor is stored on the MPS device, ``False`` otherwise.
is_sparse
Is ``True`` if the Tensor uses sparse COO storage layout, ``False`` otherwise.
is_sparse_csr
Is ``True`` if the Tensor uses sparse CSR storage layout, ``False`` otherwise.
device
Is the :class:`torch.device` where this Tensor is.
ndim
Alias for :meth:`~Tensor.dim()`
itemsize
Alias for :meth:`~Tensor.element_size()`
nbytes
Returns the number of bytes consumed by the "view" of elements of the Tensor
if the Tensor does not use sparse storage layout.
Defined to be :meth:`~Tensor.numel()` * :meth:`~Tensor.element_size()`
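
For example, four ``float32`` elements at 4 bytes each::

    >>> t = torch.zeros(4, dtype=torch.float32)
    >>> t.nbytes
    16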
T
Returns a view of this tensor with its dimensions reversed.

If ``n`` is the number of dimensions in ``x``,
``x.T`` is equivalent to ``x.permute(n-1, n-2, ..., 0)``.

.. warning::
    The use of :func:`Tensor.T` on tensors of dimension other than 2 to reverse their shape
    is deprecated and will throw an error in a future release. Consider :attr:`~.Tensor.mT`
    to transpose batches of matrices or `x.permute(*torch.arange(x.ndim - 1, -1, -1))` to reverse
    the dimensions of a tensor.
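
For example, on a 2-D tensor (where ``T`` remains supported)::

    >>> x = torch.randn(2, 3)
    >>> x.T.shape
    torch.Size([3, 2])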
H
Returns a view of a matrix (2-D tensor) conjugated and transposed.

``x.H`` is equivalent to ``x.transpose(0, 1).conj()`` for complex matrices and
``x.transpose(0, 1)`` for real matrices.

.. seealso::

        :attr:`~.Tensor.mH`: An attribute that also works on batches of matrices.
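
For example, checking the stated equivalence on a complex matrix::

    >>> x = torch.randn(2, 3, dtype=torch.cfloat)
    >>> torch.equal(x.H, x.transpose(0, 1).conj())
    True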
mT
Returns a view of this tensor with the last two dimensions transposed.

``x.mT`` is equivalent to ``x.transpose(-2, -1)``.
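
For example, on a batch of matrices only the last two dimensions are swapped::

    >>> x = torch.randn(2, 3, 4)
    >>> x.mT.shape
    torch.Size([2, 4, 3])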
mH
Accessing this property is equivalent to calling :func:`adjoint`.
adjoint
adjoint() -> Tensor

Alias for :func:`torch.adjoint`
real
Returns a new tensor containing real values of the :attr:`self` tensor for a complex-valued input tensor.
The returned tensor and :attr:`self` share the same underlying storage.

Returns :attr:`self` if :attr:`self` is a real-valued tensor.

Example::
    >>> x = torch.randn(4, dtype=torch.cfloat)
    >>> x
    tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
    >>> x.real
    tensor([ 0.3100, -0.5445, -1.6492, -0.0638])

imag
Returns a new tensor containing imaginary values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.

.. warning::
    :func:`imag` is only supported for tensors with complex dtypes.

Example::
    >>> x = torch.randn(4, dtype=torch.cfloat)
    >>> x
    tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
    >>> x.imag
    tensor([ 0.3553, -0.7896, -0.0633, -0.8119])

as_subclass
as_subclass(cls) -> Tensor

Makes a ``cls`` instance with the same data pointer as ``self``. Changes
in the output mirror changes in ``self``, and the output stays attached
to the autograd graph. ``cls`` must be a subclass of ``Tensor``.
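
For example, with a trivial illustrative subclass::

    >>> class MyTensor(torch.Tensor):
    ...     pass
    >>> t = torch.ones(2)
    >>> m = t.as_subclass(MyTensor)
    >>> type(m).__name__
    'MyTensor'
    >>> m.data_ptr() == t.data_ptr()   # shares the same storage
    True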
crow_indices
crow_indices() -> IntTensor

Returns the tensor containing the compressed row indices of the :attr:`self`
tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
The ``crow_indices`` tensor is strictly of shape (:attr:`self`.size(0) + 1)
and of type ``int32`` or ``int64``. When using MKL routines such as sparse
matrix multiplication, it is necessary to use ``int32`` indexing in order
to avoid downcasting and potentially losing information.

Example::
    >>> csr = torch.eye(5,5).to_sparse_csr()
    >>> csr.crow_indices()
    tensor([0, 1, 2, 3, 4, 5], dtype=torch.int32)

col_indices
col_indices() -> IntTensor

Returns the tensor containing the column indices of the :attr:`self`
tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``.
The ``col_indices`` tensor is strictly of shape (:attr:`self`.nnz())
and of type ``int32`` or ``int64``.  When using MKL routines such as sparse
matrix multiplication, it is necessary to use ``int32`` indexing in order
to avoid downcasting and potentially losing information.

Example::
    >>> csr = torch.eye(5,5).to_sparse_csr()
    >>> csr.col_indices()
    tensor([0, 1, 2, 3, 4], dtype=torch.int32)

to_padded_tensor
to_padded_tensor(padding, output_size=None) -> Tensor

See :func:`torch.nested.to_padded_tensor`
 )__doc__torch._Cr	   r   r   torch._torch_docsr   r   r   common_argsnew_common_argsformatr4  r   r   <module>r;     s_*   )  . A=  " ' 'N FO'N O'	+.`  0 F10 1	B  0 F10 1	B  0 F10 1	B  0 F10 1	B  0 F10 1	B 	 
   
        	" 
            	 
  ,/b 	   	 
 
          
               *                       	   
       
    F	 	 
 
 ( 
       	 
 
  	  F    	   
  F ( 
  F ( 	  F ( 	  F (          	 
      %(T   
   	 
 	 
   	 "  	  	 
 
    	 
 
    *   
   
  
  
     	 
  	   $  	      	     $ $ $  	         / /^ F_/$^ #_/$2h !$L . *  @ @@ FA@$@ #A@$CJ  03j         
 	         
& 
    	 
  	   
    	 
     
    "   	 
   

  D    	  	 
  
   
    	 
   
   
   	 
             	   	  
      /2h 
   4       	 
   
 
> 	       
 
  	        MPd     B  !$L $ $ '*X        z}~ 1 1b Fc1$b #c1$4l = =z F{=$z #{=$@D    
*     
   	 
 	 
 
  
  
. ( 
   	 @ & 
      	 &  8 	 
   	  	       
 C CF FGCF GCFP 
  F 
 
  F 
 
  F 
   F 	   F 
   F 
   F 
   F 
   V 
 
  F 
 	  F 
 	 
  F 
   F 
 
  	 
 
   , 
 6 UXt #&P #&P 14l 14l      
  
     	  
 
* 
 #&P    	 
 
HKZ   *-^             
  	             0    
    %(T (            
     	    
$ 
& 	 ( ( r   