#pragma once

// @generated by torchgen/gen.py from Function.h

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <optional>

#include <ATen/ops/new_empty_ops.h>

namespace at {

namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor new_empty(const at::Tensor & self, at::IntArrayRef size, at::TensorOptions options={}) {
    return at::_ops::new_empty::call(self, c10::fromIntArrayRefSlow(size), c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
  }
}

namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor new_empty(const at::Tensor & self, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    return at::_ops::new_empty::call(self, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory);
  }
}

namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor new_empty(const at::Tensor & self, c10::SymIntArrayRef size, at::TensorOptions options={}) {
    return at::_ops::new_empty::call(self, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
  }
}

namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor new_empty(const at::Tensor & self, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
    return at::_ops::new_empty::call(self, size, dtype, layout, device, pin_memory);
  }
}

// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & new_empty_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
    return at::_ops::new_empty_out::call(self, c10::fromIntArrayRefSlow(size), out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & new_empty_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef size) {
    return at::_ops::new_empty_out::call(self, c10::fromIntArrayRefSlow(size), out);
  }
}

// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & new_empty_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
    return at::_ops::new_empty_out::call(self, c10::fromIntArrayRefSlow(size), out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor & new_empty_outf(const at::Tensor & self, at::IntArrayRef size, at::Tensor & out) {
    return at::_ops::new_empty_out::call(self, c10::fromIntArrayRefSlow(size), out);
  }
}

// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & new_empty_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
    return at::_ops::new_empty_out::call(self, size, out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & new_empty_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef size) {
    return at::_ops::new_empty_out::call(self, size, out);
  }
}

// aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & new_empty_symint_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    return at::_ops::new_empty_out::call(self, size, out);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor & new_empty_outf(const at::Tensor & self, c10::SymIntArrayRef size, at::Tensor & out) {
    return at::_ops::new_empty_out::call(self, size, out);
  }
}

}
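// ---------------------------------------------------------------------------
// Usage sketch: how the wrappers declared above are typically reached from
// user code. This is an illustrative example only; the function `example`
// and the tensor names are placeholders, not part of the generated header,
// while the ATen calls themselves (Tensor::new_empty, at::new_empty_out,
// at::ones, at::empty) are existing public entry points.
//
//   #include <ATen/ATen.h>
//
//   void example() {
//     at::Tensor base = at::ones({2, 3}, at::kFloat);
//
//     // Functional form: an uninitialized 4x4 tensor that inherits dtype and
//     // device from `base`; routes through at::_ops::new_empty::call.
//     at::Tensor fresh = base.new_empty({4, 4});
//
//     // Out variant defined in this header: writes into a preallocated
//     // tensor via at::_ops::new_empty_out::call.
//     at::Tensor out = at::empty({4, 4}, base.options());
//     at::new_empty_out(out, base, {4, 4});
//   }
// ---------------------------------------------------------------------------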