#pragma once

// @generated by torchgen/gen.py from Operator.h

#include <tuple>
#include <vector>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {
namespace _ops {


struct TORCH_API set__source_Storage {
  using schema = at::Tensor & (at::Tensor &, at::Storage);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::set_")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "source_Storage")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!)")
  static at::Tensor & call(at::Tensor & self, at::Storage source);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source);
};

struct TORCH_API set__source_Storage_storage_offset {
  using schema = at::Tensor & (at::Tensor &, at::Storage, c10::SymInt, c10::SymIntArrayRef, c10::SymIntArrayRef);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::set_")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "source_Storage_storage_offset")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)")
  static at::Tensor & call(at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride);
};

struct TORCH_API set__source_Tensor_storage_offset {
  using schema = at::Tensor & (at::Tensor &, const at::Tensor &, c10::SymInt, c10::SymIntArrayRef, c10::SymIntArrayRef);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::set_")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "source_Tensor_storage_offset")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "set_.source_Tensor_storage_offset(Tensor(a!) self, Tensor source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor(a!)")
  static at::Tensor & call(at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride);
};

struct TORCH_API set__source_Tensor {
  using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::set_")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "source_Tensor")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!)")
  static at::Tensor & call(at::Tensor & self, const at::Tensor & source);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & source);
};

struct TORCH_API set_ {
  using schema = at::Tensor & (at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::set_")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "set_(Tensor(a!) self) -> Tensor(a!)")
  static at::Tensor & call(at::Tensor & self);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
};

struct TORCH_API set_source_Storage_out {
  using schema = at::Tensor & (const at::Tensor &, at::Storage, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::set")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "source_Storage_out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "set.source_Storage_out(Tensor self, Storage source, *, Tensor(a!) out) -> Tensor(a!)")
  static at::Tensor & call(const at::Tensor & self, at::Storage source, at::Tensor & out);
  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, at::Tensor & out);
};

struct TORCH_API set_source_Storage {
  using schema = at::Tensor (const at::Tensor &, at::Storage);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::set")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "source_Storage")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "set.source_Storage(Tensor self, Storage source) -> Tensor")
  static at::Tensor call(const at::Tensor & self, at::Storage source);
  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source);
};

struct TORCH_API set_source_Storage_storage_offset_out {
  using schema = at::Tensor & (const at::Tensor &, at::Storage, c10::SymInt, c10::SymIntArrayRef, c10::SymIntArrayRef, at::Tensor &);
  using ptr_schema = schema*;
  // See Note [static constexpr char* members for windows NVCC]
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::set")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "source_Storage_storage_offset_out")
  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "set.source_Storage_storage_offset_out(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[], *, Tensor(a!) out) -> Tensor(a!)")
out) -> Tensor(a!)") static at::Tensor & call(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out); static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::Tensor & out); }; struct TORCH_API set_source_Storage_storage_offset { using schema = at::Tensor (const at::Tensor &, at::Storage, c10::SymInt, c10::SymIntArrayRef, c10::SymIntArrayRef); using ptr_schema = schema*; // See Note [static constexpr char* members for windows NVCC] STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::set") STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "source_Storage_storage_offset") STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "set.source_Storage_storage_offset(Tensor self, Storage source, SymInt storage_offset, SymInt[] size, SymInt[] stride=[]) -> Tensor") static at::Tensor call(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride); }; struct TORCH_API set_source_Tensor_out { using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); using ptr_schema = schema*; // See Note [static constexpr char* members for windows NVCC] STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::set") STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "source_Tensor_out") STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "set.source_Tensor_out(Tensor self, Tensor source, *, Tensor(a!) out) -> Tensor(a!)") static at::Tensor & call(const at::Tensor & self, const at::Tensor & source, at::Tensor & out); static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & source, at::Tensor & out); }; struct TORCH_API set_source_Tensor { using schema = at::Tensor (const at::Tensor &, const at::Tensor &); using ptr_schema = schema*; // See Note [static constexpr char* members for windows NVCC] STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::set") STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "source_Tensor") STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "set.source_Tensor(Tensor self, Tensor source) -> Tensor") static at::Tensor call(const at::Tensor & self, const at::Tensor & source); static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & source); }; struct TORCH_API set_out { using schema = at::Tensor & (const at::Tensor &, at::Tensor &); using ptr_schema = schema*; // See Note [static constexpr char* members for windows NVCC] STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::set") STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "set.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)") static at::Tensor & call(const at::Tensor & self, at::Tensor & out); static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); }; struct TORCH_API set { using schema = at::Tensor (const at::Tensor &); using ptr_schema = schema*; // See Note [static constexpr char* members for windows NVCC] STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::set") STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "set(Tensor self) -> Tensor") static at::Tensor call(const at::Tensor & self); static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); }; }} // namespace at::_ops