# nn.optimizers

## fn adam_optimizer

```v
fn adam_optimizer[T](config AdamOptimizerConfig) &AdamOptimizer[T]
```
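
Returns a new heap-allocated Adam optimizer built from `config`. A minimal usage sketch, assuming the module lives at `vtl.nn.optimizers` (the import path is an assumption) and `f64` as the element type:

```v
import vtl.nn.optimizers

fn main() {
	// Fields of AdamOptimizerConfig become optional named arguments
	// because of @[params]; anything omitted keeps its default.
	opt := optimizers.adam_optimizer[f64](
		learning_rate: 0.001
		beta1:         0.9
	)
	println(opt)
}
```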

## fn sgd

```v
fn sgd[T](config SgdOptimizerConfig) &SgdOptimizer[T]
```
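
The plain stochastic-gradient-descent counterpart. Analogous sketch, under the same assumed import path:

```v
import vtl.nn.optimizers

fn main() {
	// SgdOptimizerConfig has a single field; this overrides its
	// default learning rate of 0.001.
	opt := optimizers.sgd[f64](learning_rate: 0.01)
	println(opt)
}
```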

## fn (AdamOptimizer[T]) build_params

```v
fn (mut o AdamOptimizer[T]) build_params(layers []types.Layer[T])
```
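
Judging from the fields of `AdamOptimizer` below, this walks the given layers and collects each trainable `autograd.Variable` into `params`, presumably also allocating the matching `first_moments`/`second_moments` buffers; it should be called once before the first `update`.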

## fn (AdamOptimizer[T]) update

```v
fn (mut o AdamOptimizer[T]) update() !
```
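
Applies one Adam step to every registered parameter and propagates any error. A hedged sketch of the usual call order; the `train_step` function is illustrative only, and the caller is assumed to build the `[]types.Layer[T]` with the rest of `vtl.nn`:

```v
import vtl.nn.optimizers
import vtl.nn.types

// Illustrative training step: the caller runs the forward and backward
// passes so that gradients exist before update() is called.
fn train_step[T](mut opt optimizers.AdamOptimizer[T], layers []types.Layer[T]) ! {
	opt.build_params(layers) // normally done once, before the training loop
	// ... forward pass, loss, backward pass ...
	opt.update()! // apply one Adam step to every registered parameter
}
```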

## fn (SgdOptimizer[T]) build_params

```v
fn (mut o SgdOptimizer[T]) build_params(layers []types.Layer[T])
```

## fn (SgdOptimizer[T]) update

```v
fn (mut o SgdOptimizer[T]) update() !
```
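
`SgdOptimizer` keeps no per-parameter state beyond the variables themselves, so its update is, stated here in the standard form rather than read from the implementation, plain gradient descent:

$$\theta \leftarrow \theta - \eta \, \nabla_\theta L, \qquad \eta = \text{learning\_rate}$$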

## struct AdamOptimizer

```v
struct AdamOptimizer[T] {
	learning_rate f64
	epsilon       f64
pub mut:
	beta1          f64
	beta2          f64
	beta1_t        f64
	beta2_t        f64
	params         []&autograd.Variable[T]
	first_moments  []&vtl.Tensor[T]
	second_moments []&vtl.Tensor[T]
}
```
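
The mutable fields track the standard Adam state (Kingma & Ba, 2015), which the implementation presumably follows: `first_moments` and `second_moments` are the per-parameter $m_t$ and $v_t$, while `beta1_t` and `beta2_t` hold the running powers $\beta_1^t$ and $\beta_2^t$ used for bias correction:

$$m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t, \qquad v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2$$

$$\hat{m}_t = \frac{m_t}{1 - \beta_1^t}, \quad \hat{v}_t = \frac{v_t}{1 - \beta_2^t}, \qquad \theta_t = \theta_{t-1} - \frac{\eta \, \hat{m}_t}{\sqrt{\hat{v}_t} + \epsilon}$$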

## struct AdamOptimizerConfig

```v
@[params]
struct AdamOptimizerConfig {
	learning_rate f64 = 0.001
	beta1         f64 = 0.9
	beta2         f64 = 0.999
	epsilon       f64 = 1e-8
}
```
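
These defaults are the values recommended in the original Adam paper. Because of `@[params]`, the struct never needs to be constructed explicitly; its fields become optional named arguments at the call site (import path assumed as above):

```v
import vtl.nn.optimizers

fn main() {
	a := optimizers.adam_optimizer[f64]() // all defaults
	b := optimizers.adam_optimizer[f64](epsilon: 1e-7) // override one field
	println(a)
	println(b)
}
```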

## struct SgdOptimizer

```v
struct SgdOptimizer[T] {
	learning_rate f64
pub mut:
	params []&autograd.Variable[T]
}
```

## struct SgdOptimizerConfig

```v
@[params]
struct SgdOptimizerConfig {
pub:
	learning_rate f64 = 0.001
}
```