IRs

PyTorch 2.0 offers two sets of IRs for backends to interface with: Core Aten IR and Prims IR.
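
As a quick illustration of how a backend interfaces with these IRs, the sketch below registers a toy torch.compile backend that asks AOTAutograd to hand it graphs lowered to the Core Aten opset. It is a minimal sketch based on the helpers described in the custom-backends documentation (aot_autograd, core_aten_decompositions, make_boxed_func); exact entry points and defaults may vary across releases.

    import torch
    from torch._dynamo.backends.common import aot_autograd
    from torch._decomp import core_aten_decompositions
    from functorch.compile import make_boxed_func

    def inspect_graph(gm: torch.fx.GraphModule, example_inputs):
        # The FX graph handed to the compiler contains only ops from the
        # requested decomposition table (here: the Core Aten opset).
        gm.graph.print_tabular()
        return make_boxed_func(gm.forward)  # run the captured graph as-is

    toy_backend = aot_autograd(
        fw_compiler=inspect_graph,
        decompositions=core_aten_decompositions(),
    )

    @torch.compile(backend=toy_backend)
    def f(x, y):
        return torch.nn.functional.silu(x) + y

    f(torch.randn(8), torch.randn(8))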

Core Aten IR

Core Aten IR is the core subset of aten operators that can be used to compose other operators. Core Aten IR is fully functional: there are no inplace or _out variants in this opset. In contrast to Prims IR, Core Aten ops reuse the existing aten ops defined in "native_functions.yaml" and are not further decomposed into explicit type promotion and broadcasting ops. This opset is designed to serve as the functional IR for interfacing with backends.
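
For example, a graph captured with torch.export can be lowered to this opset via run_decompositions. This is a minimal sketch, assuming a recent PyTorch 2.x release; the exact ops that appear in the printed graph depend on the version.

    import torch

    class MLP(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(16, 4)

        def forward(self, x):
            return torch.nn.functional.relu(self.linear(x))

    # Export to an ATen-level graph, then decompose to the Core Aten opset
    # (run_decompositions uses the Core Aten decomposition table by default).
    ep = torch.export.export(MLP(), (torch.randn(2, 16),))
    core = ep.run_decompositions()

    # The resulting graph is functional and contains Core Aten ops only,
    # e.g. aten.addmm.default and aten.relu.default.
    print(core.graph_module.graph)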

Warning

This opset is still under active development; more ops will be added in the future.

Each entry below gives the operator, followed by its schema.

aten._adaptive_avg_pool2d

_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor

aten._adaptive_avg_pool2d_backward

_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor

aten._log_softmax

_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor

aten._native_batch_norm_legit.no_stats

_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)

aten._softmax

_softmax(Tensor self, int dim, bool half_to_float) -> Tensor

aten._to_copy

_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor

aten.abs

abs(Tensor self) -> Tensor

aten.acos

acos(Tensor self) -> Tensor

aten.acosh

acosh(Tensor self) -> Tensor

aten.add.Scalar

add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor

aten.add.Tensor

add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor

aten.addmm

addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor

aten.alias

alias(Tensor(a) self) -> Tensor(a)

aten.amax

amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor

aten.amin

amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor

aten.arange.start_step

arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

aten.argmax

argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor

aten.argmin

argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor

aten.as_strided

as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a)

aten.asin

asin(Tensor self) -> Tensor

aten.asinh

asinh(Tensor self) -> Tensor

aten.atan

atan(Tensor self) -> Tensor

aten.atanh

atanh(Tensor self) -> Tensor

aten.avg_pool2d

avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor

aten.avg_pool2d_backward

avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor

aten.bitwise_and.Tensor

bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor

aten.bitwise_not

bitwise_not(Tensor self) -> Tensor

aten.bitwise_or.Tensor

bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor

aten.bitwise_xor.Tensor

bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor

aten.bmm

bmm(Tensor self, Tensor mat2) -> Tensor

aten.cat

cat(Tensor[] tensors, int dim=0) -> Tensor

aten.clamp

clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor

aten.clone

clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor

aten.col2im

col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor

aten.constant_pad_nd

constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor

aten.convolution

convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups) -> Tensor

aten.convolution_backward

convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, int[] stride, SymInt[] padding, int[] dilation, bool transposed, SymInt[] output_padding, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)

aten.cos

cos(Tensor self) -> Tensor

aten.cosh

cosh(Tensor self) -> Tensor

aten.div.Scalar

div.Scalar(Tensor self, Scalar other) -> Tensor

aten.div.Tensor

div.Tensor(Tensor self, Tensor other) -> Tensor

aten.embedding_dense_backward

embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor

aten.empty.memory_format

empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor

aten.empty_strided

empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

aten.eq.Scalar

eq.Scalar(Tensor self, Scalar other) -> Tensor

aten.eq.Tensor

eq.Tensor(Tensor self, Tensor other) -> Tensor

aten.erf

erf(Tensor self) -> Tensor

aten.exp

exp(Tensor self) -> Tensor

aten.expand

expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)

aten.fill.Scalar

fill.Scalar(Tensor self, Scalar value) -> Tensor

aten.flip

flip(Tensor self, int[] dims) -> Tensor

aten.floor

floor(Tensor self) -> Tensor

aten.fmod.Tensor

fmod.Tensor(Tensor self, Tensor other) -> Tensor

aten.full

full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

aten.gather

gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor

aten.ge.Scalar

ge.Scalar(Tensor self, Scalar other) -> Tensor

aten.ge.Tensor

ge.Tensor(Tensor self, Tensor other) -> Tensor

aten.gelu

gelu(Tensor self, *, str approximate='none') -> Tensor

aten.grid_sampler_2d

grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor

aten.gt.Scalar

gt.Scalar(Tensor self, Scalar other) -> Tensor

aten.gt.Tensor

gt.Tensor(Tensor self, Tensor other) -> Tensor

aten.hardtanh

hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor

aten.index_select

index_select(Tensor self, int dim, Tensor index) -> Tensor

aten.isinf

isinf(Tensor self) -> Tensor

aten.isnan

isnan(Tensor self) -> Tensor

aten.le.Scalar

le.Scalar(Tensor self, Scalar other) -> Tensor

aten.le.Tensor

le.Tensor(Tensor self, Tensor other) -> Tensor

aten.leaky_relu

leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor

aten.log

log(Tensor self) -> Tensor

aten.logical_and

logical_and(Tensor self, Tensor other) -> Tensor

aten.logical_not

logical_not(Tensor self) -> Tensor

aten.logical_or

logical_or(Tensor self, Tensor other) -> Tensor

aten.lt.Scalar

lt.Scalar(Tensor self, Scalar other) -> Tensor

aten.lt.Tensor

lt.Tensor(Tensor self, Tensor other) -> Tensor

aten.max.dim

max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)

aten.max_pool2d_with_indices

max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)

aten.max_pool2d_with_indices_backward

max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor

aten.max_pool3d_with_indices

max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)

aten.maximum

maximum(Tensor self, Tensor other) -> Tensor

aten.mean.dim

mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor

aten.min.dim

min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)

aten.minimum

minimum(Tensor self, Tensor other) -> Tensor

aten.mm

mm(Tensor self, Tensor mat2) -> Tensor

aten.mul.Scalar

mul.Scalar(Tensor self, Scalar other) -> Tensor

aten.mul.Tensor

mul.Tensor(Tensor self, Tensor other) -> Tensor

aten.native_batch_norm

native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)

aten.native_dropout

native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor)

aten.native_group_norm

native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor)

aten.native_group_norm_backward

native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor)

aten.native_layer_norm

native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)

aten.native_layer_norm_backward

native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor)

aten.ne.Scalar

ne.Scalar(Tensor self, Scalar other) -> Tensor

aten.ne.Tensor

ne.Tensor(Tensor self, Tensor other) -> Tensor

aten.neg

neg(Tensor self) -> Tensor

aten.nonzero

nonzero(Tensor self) -> Tensor

aten.permute

permute(Tensor(a) self, int[] dims) -> Tensor(a)

aten.pow.Tensor_Scalar

pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor

aten.pow.Tensor_Tensor

pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor

aten.rand

rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

aten.randn

randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

aten.reciprocal

reciprocal(Tensor self) -> Tensor

aten.reflection_pad2d

reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor

aten.relu

relu(Tensor self) -> Tensor

aten.remainder.Tensor

remainder.Tensor(Tensor self, Tensor other) -> Tensor

aten.repeat

repeat(Tensor self, SymInt[] repeats) -> Tensor

aten.replication_pad2d

replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor

aten.replication_pad3d

replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor

aten.rsqrt

rsqrt(Tensor self) -> Tensor

aten.scalar_tensor

scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor

aten.scatter_add

scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor

aten.scatter_reduce.two

scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor

aten.select.int

select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)

aten.sigmoid

sigmoid(Tensor self) -> Tensor

aten.sign

sign(Tensor self) -> Tensor

aten.sin

sin(Tensor self) -> Tensor

aten.sinh

sinh(Tensor self) -> Tensor

aten.slice.Tensor

slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)

aten.slice_scatter

slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor

aten.sqrt

sqrt(Tensor self) -> Tensor

aten.squeeze.dim

squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)

aten.squeeze.dims

squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)

aten.sub.Scalar

sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor

aten.sub.Tensor

sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor

aten.sum.dim_IntList

sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor

aten.tanh

tanh(Tensor self) -> Tensor

aten.topk

topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)

aten.unsqueeze

unsqueeze(Tensor(a) self, int dim) -> Tensor(a)

aten.upsample_bilinear2d.vec

upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor

aten.upsample_nearest2d.vec

upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor

aten.var.dim

var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor

aten.view

view(Tensor(a) self, SymInt[] size) -> Tensor(a)

aten.where.self

where.self(Tensor condition, Tensor self, Tensor other) -> Tensor

Prims IR

Prims IR is a set of primitive operators that can be used to compose other operators. It is a lower-level opset than Core Aten IR, and it further decomposes ops so that type promotion and broadcasting become explicit operations: prims.convert_element_type and prims.broadcast_in_dim. This opset is designed to interface with compiler backends.
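
For example, an aten-level add that promotes and broadcasts its inputs implicitly corresponds to three explicit prims calls. The sketch below is illustrative only; it assumes the prims ops can be invoked eagerly through the torch.ops.prims namespace.

    import torch

    a = torch.ones(4, dtype=torch.float32)
    b = torch.ones(2, 4, dtype=torch.int64)

    # At the aten level, a single add performs type promotion and
    # broadcasting implicitly.
    expected = torch.add(a, b)

    # In Prims IR the same computation is spelled out with explicit ops
    # from the table below.
    a_bc = torch.ops.prims.broadcast_in_dim(a, [2, 4], [1])        # (4,) -> (2, 4)
    b_fp = torch.ops.prims.convert_element_type(b, torch.float32)  # int64 -> float32
    out = torch.ops.prims.add(a_bc, b_fp)                          # same shape, same dtype

    assert torch.equal(out, expected)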

Warning

This opset is still under active development; more ops will be added in the future.

Each entry below gives the operator, followed by its schema.

prims.abs

abs(Tensor self) -> Tensor

prims.acos

acos(Tensor self) -> Tensor

prims.acosh

acosh(Tensor self) -> Tensor

prims.asin

asin(Tensor self) -> Tensor

prims.asinh

asinh(Tensor self) -> Tensor

prims.atan

atan(Tensor self) -> Tensor

prims.atanh

atanh(Tensor self) -> Tensor

prims.cos

cos(Tensor self) -> Tensor

prims.cosh

cosh(Tensor self) -> Tensor

prims.bessel_i0

bessel_i0(Tensor self) -> Tensor

prims.bessel_i0e

bessel_i0e(Tensor self) -> Tensor

prims.bessel_i1

bessel_i1(Tensor self) -> Tensor

prims.bessel_i1e

bessel_i1e(Tensor self) -> Tensor

prims.bessel_j0

bessel_j0(Tensor self) -> Tensor

prims.bessel_j1

bessel_j1(Tensor self) -> Tensor

prims.bitwise_not

bitwise_not(Tensor self) -> Tensor

prims.cbrt

cbrt(Tensor self) -> Tensor

prims.ceil

ceil(Tensor self) -> Tensor

prims.conj_physical

conj_physical(Tensor self) -> Tensor

prims.digamma

digamma(Tensor self) -> Tensor

prims.erf

erf(Tensor self) -> Tensor

prims.erf_inv

erf_inv(Tensor self) -> Tensor

prims.erfc

erfc(Tensor self) -> Tensor

prims.erfcx

erfcx(Tensor self) -> Tensor

prims.exp

exp(Tensor self) -> Tensor

prims.expm1

expm1(Tensor self) -> Tensor

prims.exp2

exp2(Tensor self) -> Tensor

prims.fill

fill(Tensor self, Scalar value) -> Tensor

prims.floor

floor(Tensor self) -> Tensor

prims.imag

imag(Tensor self) -> Tensor

prims.isfinite

isfinite(Tensor self) -> Tensor

prims.lgamma

lgamma(Tensor self) -> Tensor

prims.log

log(Tensor self) -> Tensor

prims.log1p

log1p(Tensor self) -> Tensor

prims.log2

log2(Tensor self) -> Tensor

prims.log10

log10(Tensor self) -> Tensor

prims.ndtri

ndtri(Tensor self) -> Tensor

prims.neg

neg(Tensor self) -> Tensor

prims.real

real(Tensor self) -> Tensor

prims.reciprocal

reciprocal(Tensor self) -> Tensor

prims.round

round(Tensor self) -> Tensor

prims.sign

sign(Tensor self) -> Tensor

prims.signbit

signbit(Tensor self) -> Tensor

prims.sin

sin(Tensor self) -> Tensor

prims.sinh

sinh(Tensor self) -> Tensor

prims.spherical_bessel_j0

spherical_bessel_j0(Tensor self) -> Tensor

prims.sqrt

sqrt(Tensor self) -> Tensor

prims.tan

tan(Tensor self) -> Tensor

prims.tanh

tanh(Tensor self) -> Tensor

prims.trunc

trunc(Tensor self) -> Tensor

prims.add

add(Tensor self, Tensor other) -> Tensor

prims.atan2

atan2(Tensor self, Tensor other) -> Tensor

prims.bitwise_and

bitwise_and(Tensor self, Tensor other) -> Tensor

prims.bitwise_or

bitwise_or(Tensor self, Tensor other) -> Tensor

prims.bitwise_xor

bitwise_xor(Tensor self, Tensor other) -> Tensor

prims.div

div(Tensor self, Tensor other) -> Tensor

prims.eq

eq(Tensor self, Tensor other) -> Tensor

prims.fmax

fmax(Tensor self, Tensor other) -> Tensor

prims.fmin

fmin(Tensor self, Tensor other) -> Tensor

prims.fmod

fmod(Tensor self, Tensor other) -> Tensor

prims.gcd

gcd(Tensor self, Tensor other) -> Tensor

prims.ge

ge(Tensor self, Tensor other) -> Tensor

prims.gt

gt(Tensor self, Tensor other) -> Tensor

prims.hypot

hypot(Tensor self, Tensor other) -> Tensor

prims.igamma

igamma(Tensor self, Tensor other) -> Tensor

prims.igammac

igammac(Tensor self, Tensor other) -> Tensor

prims.le

le(Tensor self, Tensor other) -> Tensor

prims.lt

lt(Tensor self, Tensor other) -> Tensor

prims.maximum

maximum(Tensor self, Tensor other) -> Tensor

prims.minimum

minimum(Tensor self, Tensor other) -> Tensor

prims.mul

mul(Tensor self, Tensor other) -> Tensor

prims.ne

ne(Tensor self, Tensor other) -> Tensor

prims.nextafter

nextafter(Tensor self, Tensor other) -> Tensor

prims.pow

pow(Tensor self, Tensor other) -> Tensor

prims.remainder

remainder(Tensor self, Tensor other) -> Tensor

prims.rsqrt

rsqrt(Tensor self) -> Tensor

prims.shift_left

shift_left(Tensor self, Tensor other) -> Tensor

prims.shift_right_arithmetic

shift_right_arithmetic(Tensor self, Tensor other) -> Tensor

prims.sub

sub(Tensor self, Tensor other) -> Tensor

prims.zeta

zeta(Tensor self, Tensor other) -> Tensor

prims.as_strided

as_strided(Tensor(a!) a, SymInt[] size, SymInt[] stride, SymInt storage_offset) -> Tensor(a!)

prims.broadcast_in_dim

broadcast_in_dim(Tensor(a) a, SymInt[] shape, int[] broadcast_dimensions) -> Tensor(a)

prims.collapse_view

collapse_view(Tensor(a) a, int start, int end) -> Tensor(a)

prims.conj

conj(Tensor(a) a) -> Tensor(a)

prims.slice

slice(Tensor(a) a, SymInt[] start_indices, SymInt[] limit_indices, SymInt[]? strides=None) -> Tensor(a)

prims.slice_in_dim

slice_in_dim(Tensor(a) a, SymInt start_index, SymInt limit_index, int stride=1, int axis=0) -> Tensor(a)

prims.split_dim

split_dim(Tensor(a) a, int dim, SymInt outer_length) -> Tensor(a)

prims.squeeze

squeeze(Tensor(a) a, int[] dimensions) -> Tensor(a)

prims.transpose

transpose(Tensor(a) a, int[] permutation) -> Tensor(a)

prims.view_of

view_of(Tensor(a) a) -> Tensor

prims.as_strided_scatter

as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt storage_offset) -> Tensor

prims.collapse

collapse(Tensor a, int start, int end) -> Tensor

prims.cat

cat(Tensor[] tensors, int dim) -> Tensor

prims.reshape

reshape(Tensor a, SymInt[] shape) -> Tensor

prims.rev

rev(Tensor a, int[] dims) -> Tensor

prims.where

where(Tensor pred, Tensor a, Tensor b) -> Tensor

prims.clone

clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor

prims.convert_element_type

convert_element_type(Tensor a, ScalarType dtype) -> Tensor

prims.device_put

device_put(Tensor a, Device device) -> Tensor

prims.item

item(Tensor a) -> Scalar

prims.maximum_value

maximum_value(ScalarType dtype) -> Scalar

prims.minimum_value

minimum_value(ScalarType dtype) -> Scalar

prims.copy_strided

copy_strided(Tensor a, SymInt[] stride) -> Tensor

prims.copy_to

copy_to(Tensor(a!) a, Tensor b) -> Tensor(a!)

prims.resize

resize(Tensor(a!) a, SymInt[] shape) -> Tensor(a!)

prims.amax

amax(Tensor inp, int[]? dims, *, ScalarType? output_dtype=None) -> Tensor

prims.amin

amin(Tensor inp, int[]? dims, *, ScalarType? output_dtype=None) -> Tensor

prims.prod

prod(Tensor inp, int[]? dims, *, ScalarType? output_dtype=None) -> Tensor

prims.sum

sum(Tensor inp, int[]? dims, *, ScalarType? output_dtype=None) -> Tensor

prims.var

var(Tensor inp, int[]? dims, *, float correction, ScalarType? output_dtype=None) -> Tensor

prims.empty_strided

empty_strided(SymInt[] shape, SymInt[] strides, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor

prims.empty_permuted

empty_permuted(SymInt[] shape, int[] physical_layout, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor

prims.scalar_tensor

scalar_tensor(Scalar s, *, ScalarType? dtype=None, Device? device=None) -> Tensor

prims.iota

iota(SymInt length, *, SymInt start, SymInt step, ScalarType dtype, Device device, bool requires_grad) -> Tensor

prims.svd

svd(Tensor A, *, bool full_matrices) -> (Tensor U, Tensor S, Tensor Vh)

prims.normal

normal(SymInt[] shape, *, Scalar mean, Scalar std, ScalarType dtype, Device device, bool requires_grad) -> Tensor

prims.uniform

uniform(SymInt[] shape, *, Scalar low, Scalar high, ScalarType dtype, Device device) -> Tensor

prims.fft_r2c

fft_r2c(Tensor self, *, int[] dim, bool onesided) -> Tensor

prims.fft_c2c

fft_c2c(Tensor self, *, int[] dim, bool forward) -> Tensor

prims.fft_c2r

fft_c2r(Tensor self, *, int[] dim, SymInt last_dim_size) -> Tensor
