Struct MultiheadAttentionForwardFuncOptions
Defined in File activation.h
Struct Documentation
- struct MultiheadAttentionForwardFuncOptions

Options for torch::nn::functional::multi_head_attention_forward.
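The options bundle the required constructor arguments (packed input projection, output projection, biases, and attention settings) with chainable setters for the remaining fields, and are then passed to torch::nn::functional::multi_head_attention_forward, which returns a tuple of (attention output, attention weights). The following is a minimal usage sketch, not an excerpt from this page; the tensor shapes, the random initialization, and the chosen option values are illustrative assumptions.

#include <iostream>
#include <torch/torch.h>

namespace F = torch::nn::functional;

int main() {
  // Illustrative sizes (assumptions, not prescribed by this page).
  const int64_t embed_dim = 8, num_heads = 2, seq_len = 4, batch = 1;

  // Inputs in (seq_len, batch, embed_dim) layout.
  auto query = torch::randn({seq_len, batch, embed_dim});
  auto key   = torch::randn({seq_len, batch, embed_dim});
  auto value = torch::randn({seq_len, batch, embed_dim});

  // Packed q/k/v input projection and the output projection,
  // randomly initialized here purely for demonstration.
  auto in_proj_weight  = torch::randn({3 * embed_dim, embed_dim});
  auto in_proj_bias    = torch::zeros({3 * embed_dim});
  auto out_proj_weight = torch::randn({embed_dim, embed_dim});
  auto out_proj_bias   = torch::zeros({embed_dim});

  // Required constructor arguments first; optional fields via chained setters.
  // Undefined tensors are passed for bias_k / bias_v to leave the extra
  // key/value bias terms unset (assumption mirroring the Python API).
  auto options =
      F::MultiheadAttentionForwardFuncOptions(
          /*embed_dim_to_check=*/embed_dim,
          /*num_heads=*/num_heads,
          in_proj_weight, in_proj_bias,
          /*bias_k=*/torch::Tensor(), /*bias_v=*/torch::Tensor(),
          /*add_zero_attn=*/false,
          /*dropout_p=*/0.0,
          out_proj_weight, out_proj_bias)
          .training(false)
          .need_weights(true)
          .average_attn_weights(true);

  // Returns (attention output, attention weights).
  auto [attn_output, attn_weights] =
      F::multi_head_attention_forward(query, key, value, options);
  std::cout << attn_output.sizes() << " " << attn_weights.sizes() << std::endl;
}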
Public Functions
- MultiheadAttentionForwardFuncOptions(int64_t embed_dim_to_check, int64_t num_heads, Tensor in_proj_weight, Tensor in_proj_bias, Tensor bias_k, Tensor bias_v, bool add_zero_attn, double dropout_p, Tensor out_proj_weight, Tensor out_proj_bias)
- inline auto embed_dim_to_check(const int64_t &new_embed_dim_to_check) -> decltype(*this)
- inline auto embed_dim_to_check(int64_t &&new_embed_dim_to_check) -> decltype(*this)
- inline const int64_t &embed_dim_to_check() const noexcept
- inline int64_t &embed_dim_to_check() noexcept
- inline auto num_heads(const int64_t &new_num_heads) -> decltype(*this)
- inline auto num_heads(int64_t &&new_num_heads) -> decltype(*this)
- inline const int64_t &num_heads() const noexcept
- inline int64_t &num_heads() noexcept
- inline auto in_proj_weight(const Tensor &new_in_proj_weight) -> decltype(*this)
- inline auto in_proj_weight(Tensor &&new_in_proj_weight) -> decltype(*this)
- inline const Tensor &in_proj_weight() const noexcept
- inline Tensor &in_proj_weight() noexcept
- inline auto in_proj_bias(const Tensor &new_in_proj_bias) -> decltype(*this)
- inline auto in_proj_bias(Tensor &&new_in_proj_bias) -> decltype(*this)
- inline const Tensor &in_proj_bias() const noexcept
- inline Tensor &in_proj_bias() noexcept
- inline auto bias_k(const Tensor &new_bias_k) -> decltype(*this)
- inline auto bias_k(Tensor &&new_bias_k) -> decltype(*this)
- inline const Tensor &bias_k() const noexcept
- inline Tensor &bias_k() noexcept
- inline auto bias_v(const Tensor &new_bias_v) -> decltype(*this)
- inline auto bias_v(Tensor &&new_bias_v) -> decltype(*this)
- inline const Tensor &bias_v() const noexcept
- inline Tensor &bias_v() noexcept
- inline auto add_zero_attn(const bool &new_add_zero_attn) -> decltype(*this)
- inline auto add_zero_attn(bool &&new_add_zero_attn) -> decltype(*this)
- inline const bool &add_zero_attn() const noexcept
- inline bool &add_zero_attn() noexcept
- inline auto dropout_p(const double &new_dropout_p) -> decltype(*this)
- inline auto dropout_p(double &&new_dropout_p) -> decltype(*this)
- inline const double &dropout_p() const noexcept
- inline double &dropout_p() noexcept
- inline auto out_proj_weight(const Tensor &new_out_proj_weight) -> decltype(*this)
- inline auto out_proj_weight(Tensor &&new_out_proj_weight) -> decltype(*this)
- inline const Tensor &out_proj_weight() const noexcept
- inline Tensor &out_proj_weight() noexcept
- inline auto out_proj_bias(const Tensor &new_out_proj_bias) -> decltype(*this)
- inline auto out_proj_bias(Tensor &&new_out_proj_bias) -> decltype(*this)
- inline const Tensor &out_proj_bias() const noexcept
- inline Tensor &out_proj_bias() noexcept
- inline auto training(const bool &new_training) -> decltype(*this)
- inline auto training(bool &&new_training) -> decltype(*this)
- inline const bool &training() const noexcept
- inline bool &training() noexcept
- inline auto key_padding_mask(const Tensor &new_key_padding_mask) -> decltype(*this)
- inline auto key_padding_mask(Tensor &&new_key_padding_mask) -> decltype(*this)
- inline const Tensor &key_padding_mask() const noexcept
- inline Tensor &key_padding_mask() noexcept
- inline auto need_weights(const bool &new_need_weights) -> decltype(*this)
- inline auto need_weights(bool &&new_need_weights) -> decltype(*this)
- inline const bool &need_weights() const noexcept
- inline bool &need_weights() noexcept
- inline auto attn_mask(const Tensor &new_attn_mask) -> decltype(*this)
- inline auto attn_mask(Tensor &&new_attn_mask) -> decltype(*this)
- inline const Tensor &attn_mask() const noexcept
- inline Tensor &attn_mask() noexcept
- inline auto use_separate_proj_weight(const bool &new_use_separate_proj_weight) -> decltype(*this)
- inline auto use_separate_proj_weight(bool &&new_use_separate_proj_weight) -> decltype(*this)
- inline const bool &use_separate_proj_weight() const noexcept
- inline bool &use_separate_proj_weight() noexcept
- inline auto q_proj_weight(const Tensor &new_q_proj_weight) -> decltype(*this)
- inline auto q_proj_weight(Tensor &&new_q_proj_weight) -> decltype(*this)
- inline const Tensor &q_proj_weight() const noexcept
- inline Tensor &q_proj_weight() noexcept
- inline auto k_proj_weight(const Tensor &new_k_proj_weight) -> decltype(*this)
- inline auto k_proj_weight(Tensor &&new_k_proj_weight) -> decltype(*this)
- inline const Tensor &k_proj_weight() const noexcept
- inline Tensor &k_proj_weight() noexcept
- inline auto v_proj_weight(const Tensor &new_v_proj_weight) -> decltype(*this)
- inline auto v_proj_weight(Tensor &&new_v_proj_weight) -> decltype(*this)
- inline const Tensor &v_proj_weight() const noexcept
- inline Tensor &v_proj_weight() noexcept
- inline auto static_k(const Tensor &new_static_k) -> decltype(*this)
- inline auto static_k(Tensor &&new_static_k) -> decltype(*this)
- inline const Tensor &static_k() const noexcept
- inline Tensor &static_k() noexcept
- inline auto static_v(const Tensor &new_static_v) -> decltype(*this)
- inline auto static_v(Tensor &&new_static_v) -> decltype(*this)
- inline const Tensor &static_v() const noexcept
- inline Tensor &static_v() noexcept
- inline auto average_attn_weights(const bool &new_average_attn_weights) -> decltype(*this)
- inline auto average_attn_weights(bool &&new_average_attn_weights) -> decltype(*this)
- inline const bool &average_attn_weights() const noexcept
- inline bool &average_attn_weights() noexcept