BlackCat_Tensors
A GPU-supported autograd and linear algebra library, designed for neural network construction
Public Types | Public Member Functions | Public Attributes | List of all members
bc::nn::Adam::Optimizer< Tensor > Struct Template Reference

#include <adam.h>

Inheritance diagram for bc::nn::Adam::Optimizer< Tensor >:
Inheritance graph
[legend]
Collaboration diagram for bc::nn::Adam::Optimizer< Tensor >:
Collaboration graph
[legend]

Public Types

using value_type = typename Tensor::value_type
 
using system_tag = typename Tensor::system_tag
 

Public Member Functions

template<class... Args>
 Optimizer (Args &&... args)
 
template<class TensorX , class Gradients >
void update (TensorX &tensor, Gradients &&delta)
 
void set_learning_rate (value_type lr)
 
void save (Layer_Loader &loader, std::string name) const
 
void load (Layer_Loader &loader, std::string name)
 
- Public Member Functions inherited from bc::nn::Optimizer_Base
void save (Layer_Loader &loader, std::string name) const
 
void load (Layer_Loader &loader, std::string name)
 

Public Attributes

value_type alpha = bc::nn::default_learning_rate
 
value_type beta_1 = 0.9
 
value_type beta_2 = 0.999
 
value_type epsilon = 1e-8
 
value_type time_stamp = 0
 
Tensor m_t
 
Tensor v_t
 

Member Typedef Documentation

◆ system_tag

template<class Tensor >
using bc::nn::Adam::Optimizer< Tensor >::system_tag = typename Tensor::system_tag

◆ value_type

template<class Tensor >
using bc::nn::Adam::Optimizer< Tensor >::value_type = typename Tensor::value_type

Constructor & Destructor Documentation

◆ Optimizer()

template<class Tensor >
template<class... Args>
bc::nn::Adam::Optimizer< Tensor >::Optimizer ( Args &&...  args)
inline

Member Function Documentation

◆ load()

template<class Tensor >
void bc::nn::Adam::Optimizer< Tensor >::load ( Layer_Loader &  loader,
std::string  name 
)
inline

◆ save()

template<class Tensor >
void bc::nn::Adam::Optimizer< Tensor >::save ( Layer_Loader &  loader,
std::string  name 
) const
inline

◆ set_learning_rate()

template<class Tensor >
void bc::nn::Adam::Optimizer< Tensor >::set_learning_rate ( value_type  lr)
inline

◆ update()

template<class Tensor >
template<class TensorX , class Gradients >
void bc::nn::Adam::Optimizer< Tensor >::update ( TensorX &  tensor,
Gradients &&  delta 
)
inline

Member Data Documentation

◆ alpha

template<class Tensor >
value_type bc::nn::Adam::Optimizer< Tensor >::alpha = bc::nn::default_learning_rate

◆ beta_1

template<class Tensor >
value_type bc::nn::Adam::Optimizer< Tensor >::beta_1 = 0.9

◆ beta_2

template<class Tensor >
value_type bc::nn::Adam::Optimizer< Tensor >::beta_2 = 0.999

◆ epsilon

template<class Tensor >
value_type bc::nn::Adam::Optimizer< Tensor >::epsilon = 1e-8

◆ m_t

template<class Tensor >
Tensor bc::nn::Adam::Optimizer< Tensor >::m_t

◆ time_stamp

template<class Tensor >
value_type bc::nn::Adam::Optimizer< Tensor >::time_stamp = 0

◆ v_t

template<class Tensor >
Tensor bc::nn::Adam::Optimizer< Tensor >::v_t

The documentation for this struct was generated from the following file: