九、Reduction Ops(规约/简化操作)
torch.argmax(input, dim=None, keepdim=False)
torch.argmin(input, dim=None, keepdim=False)
torch.cumprod(input, dim, dtype=None) → Tensor
torch.cumsum(input, dim, out=None, dtype=None) → Tensor
torch.dist(input, other, p=2) → Tensor
torch.logsumexp(input, dim, keepdim=False, out=None)
torch.mean(input, dim, keepdim=False, out=None) → Tensor
torch.median()
torch.median(input) → Tensor
torch.mode(input, dim=-1, keepdim=False, values=None, indices=None) -> (Tensor, LongTensor)
torch.norm(input, p='fro', dim=None, keepdim=False, out=None)
torch.prod(input, dim, keepdim=False, dtype=None) → Tensor
torch.std()
torch.std(input, unbiased=True) → Tensor
torch.std(input, dim, keepdim=False, unbiased=True, out=None) → Tensor
torch.sum()
torch.sum(input, dtype=None) → Tensor
torch.sum(input, dim, keepdim=False, dtype=None) → Tensor
torch.unique(input, sorted=False, return_inverse=False, dim=None)
torch.var()
torch.var(input, unbiased=True) → Tensor
torch.var(input, dim, keepdim=False, unbiased=True, out=None) → Tensor
十、Comparison Ops(比较操作)
torch.allclose(self, other, rtol=1e-05, atol=1e-08, equal_nan=False) → bool
torch.argsort(input, dim=None, descending=False)
torch.eq(input, other, out=None) → Tensor
torch.equal(tensor1, tensor2) → bool
torch.ge(input, other, out=None) → Tensor
torch.gt(input, other, out=None) → Tensor
torch.isfinite(tensor)
torch.isinf(tensor)
torch.isnan(tensor)
torch.kthvalue(input, k, dim=None, keepdim=False, out=None) -> (Tensor, LongTensor)
torch.le(input, other, out=None) → Tensor
torch.lt(input, other, out=None) → Tensor
torch.max()
torch.max(input) → Tensor
torch.max(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)
torch.max(input, other, out=None) → Tensor
torch.min()
torch.min(input) → Tensor
torch.min(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)
torch.min(input, other, out=None) → Tensor
torch.ne(input, other, out=None) → Tensor
torch.sort(input, dim=None, descending=False, out=None) -> (Tensor, LongTensor)
torch.topk(input, k, dim=None, largest=True, sorted=True, out=None) -> (Tensor, LongTensor)
十一、Spectral Ops(信号处理相关的谱运算)
torch.fft(input, signal_ndim, normalized=False) → Tensor
torch.ifft(input, signal_ndim, normalized=False) → Tensor
torch.rfft(input, signal_ndim, normalized=False, onesided=True) → Tensor
torch.irfft(input, signal_ndim, normalized=False, onesided=True, signal_sizes=None) → Tensor
torch.stft(input, n_fft, hop_length=None, win_length=None, window=None, center=True, pad_mode='reflect', normalized=False, onesided=True)
torch.bartlett_window(window_length, periodic=True, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
torch.blackman_window(window_length, periodic=True, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
torch.hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
torch.hann_window(window_length, periodic=True, dtype=None, layout=torch.strided, device=None, requires_grad=False) → Tensor
十二、Other Operations(其他操作)
torch.bincount(self, weights=None, minlength=0) → Tensor
torch.broadcast_tensors(*tensors) → List of Tensors
torch.cross(input, other, dim=-1, out=None) → Tensor
torch.diag(input, diagonal=0, out=None) → Tensor
torch.diagonal() always returns the diagonal of its input.
torch.diagflat() always constructs a tensor with diagonal elements specified by the input.
torch.diag_embed(input, offset=0, dim1=-2, dim2=-1) → Tensor
torch.diagflat(input, diagonal=0) → Tensor
torch.diagonal(input, offset=0, dim1=0, dim2=1) → Tensor
torch.einsum(equation, *operands) → Tensor
torch.flatten(input, start_dim=0, end_dim=-1) → Tensor
torch.flip(input, dims) → Tensor
torch.histc(input, bins=100, min=0, max=0, out=None) → Tensor
torch.meshgrid(*tensors, **kwargs)
torch.renorm(input, p, dim, maxnorm, out=None) → Tensor
torch.roll(input, shifts, dims=None) → Tensor
torch.tensordot(a, b, dims=2)
torch.trace(input) → Tensor
torch.tril(input, diagonal=0, out=None) → Tensor
torch.triu(input, diagonal=0, out=None) → Tensor
十三、BLAS and LAPACK Operations(线性代数相关的运算)
BLAS即 basic linear algebra subprogram
LAPACK即 Linear Algebra PACKage
torch.addbmm(beta=1, mat, alpha=1, batch1, batch2, out=None) → Tensor
torch.addmm(beta=1, mat, alpha=1, mat1, mat2, out=None) → Tensor
torch.addmv(beta=1, tensor, alpha=1, mat, vec, out=None) → Tensor
torch.addr(beta=1, mat, alpha=1, vec1, vec2, out=None) → Tensor
torch.baddbmm(beta=1, mat, alpha=1, batch1, batch2, out=None) → Tensor
torch.bmm(batch1, batch2, out=None) → Tensor
torch.btrifact(A, info=None, pivot=True)
torch.btrifact_with_info(A, pivot=True) -> (Tensor, IntTensor, IntTensor)
torch.btrisolve(b, LU_data, LU_pivots) → Tensor
torch.btriunpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True)
torch.chain_matmul(*matrices)
torch.cholesky(A, upper=False, out=None) → Tensor
torch.dot(tensor1, tensor2) → Tensor
torch.eig(a, eigenvectors=False, out=None) -> (Tensor, Tensor)
torch.gels(B, A, out=None) → Tensor
torch.geqrf(input, out=None) -> (Tensor, Tensor)
torch.ger(vec1, vec2, out=None) → Tensor
torch.gesv(B, A) -> (Tensor, Tensor)
torch.inverse(input, out=None) → Tensor
torch.det(A) → Tensor
torch.logdet(A) → Tensor
torch.slogdet(A) -> (Tensor, Tensor)
torch.matmul(tensor1, tensor2, out=None) → Tensor
torch.matrix_power(input, n) → Tensor
torch.matrix_rank(input, tol=None, symmetric=False) → Tensor
torch.mm(mat1, mat2, out=None) → Tensor
torch.mv(mat, vec, out=None) → Tensor
torch.orgqr(a, tau) → Tensor
torch.pinverse(input, rcond=1e-15) → Tensor
torch.potrf(a, upper=True, out=None)
torch.potrs(b, u, upper=True, out=None) → Tensor
torch.pstrf(a, upper=True, out=None) -> (Tensor, Tensor)
torch.qr(input, out=None) -> (Tensor, Tensor)
torch.svd(input, some=True, compute_uv=True, out=None) -> (Tensor, Tensor, Tensor)
torch.symeig(input, eigenvectors=False, upper=True, out=None) -> (Tensor, Tensor)