nn.internal #
fn compute_fans #
fn compute_fans(shape []int) (int, int)
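A minimal sketch of how `compute_fans` might be called, assuming the module is importable as `vtl.nn.internal` and that, by the usual convention, the first two shape dimensions map to fan-out and fan-in with any remaining dimensions treated as the receptive field:

```v
import vtl.nn.internal

// hypothetical conv-style weight shape: [out_channels, in_channels, k_h, k_w]
fan_in, fan_out := internal.compute_fans([64, 32, 3, 3])
println('fan_in: ${fan_in}, fan_out: ${fan_out}')
```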
fn deriv_elu #
fn deriv_elu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T], alpha T) !&vtl.Tensor[T]
deriv_elu computes the derivative of elu
fn deriv_leaky_relu #
fn deriv_leaky_relu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T], alpha T) !&vtl.Tensor[T]
deriv_leaky_relu computes the derivative of leaky_relu
fn deriv_relu #
fn deriv_relu[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T]) !&vtl.Tensor[T]
deriv_relu computes the derivative of relu
fn deriv_sigmoid #
fn deriv_sigmoid[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T]) !&vtl.Tensor[T]
deriv_sigmoid computes the derivative of sigmoid
fn deriv_tanh #
fn deriv_tanh[T](gradient &vtl.Tensor[T], cached &vtl.Tensor[T]) !&vtl.Tensor[T]
deriv_tanh computes the derivative of tanh
fn dropout #
fn dropout[T](input &vtl.Tensor[T], mask &vtl.Tensor[T], prob f64) !&vtl.Tensor[T]
fn dropout_backwards #
fn dropout_backwards[T](gradient &vtl.Tensor[T], mask &vtl.Tensor[T], prob f64) !&vtl.Tensor[T]
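A usage sketch for the dropout pair, assuming `mask` is a caller-supplied tensor of the same shape as the input (here a trivial keep-everything mask built with `vtl.ones`, which is assumed available; real training code would sample it randomly each step) and that `prob` is the drop probability used for rescaling:

```v
import vtl
import vtl.nn.internal

x := vtl.from_array([1.0, 2.0, 3.0, 4.0], [2, 2])!
mask := vtl.ones[f64]([2, 2]) // keep-all mask; normally sampled per step
y := internal.dropout(x, mask, 0.5)!
// the backward pass reuses the same mask so dropped units get zero gradient
grad := internal.dropout_backwards(vtl.ones[f64]([2, 2]), mask, 0.5)!
println(grad)
```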
fn elu #
fn elu[T](x &vtl.Tensor[T], alpha T) &vtl.Tensor[T]
elu activation function
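A forward/backward sketch pairing `elu` with `deriv_elu` above, assuming `cached` is the input tensor saved from the forward pass and the upstream gradient is seeded with ones:

```v
import vtl
import vtl.nn.internal

x := vtl.from_array([-1.0, 0.0, 2.0], [3])!
y := internal.elu(x, 1.0) // alpha = 1.0
grad := internal.deriv_elu(vtl.ones[f64]([3]), x, 1.0)!
println(grad)
```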
fn kaiming_normal #
fn kaiming_normal[T](shape []int) &vtl.Tensor[T]
fn kaiming_uniform #
fn kaiming_uniform[T](shape []int) &vtl.Tensor[T]
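Both initializers only need a target shape; the element type must be given explicitly since it cannot be inferred from the arguments. A sketch, assuming a `[fan_out, fan_in]` dense-layer shape:

```v
import vtl.nn.internal

w1 := internal.kaiming_normal[f64]([64, 32])
w2 := internal.kaiming_uniform[f64]([64, 32])
println(w1.shape) // [64, 32]
```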
fn leaky_relu #
fn leaky_relu[T](x &vtl.Tensor[T], alpha T) &vtl.Tensor[T]
leaky_relu activation function
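A short sketch for `leaky_relu`; `deriv_leaky_relu` above takes the same `alpha` plus the cached forward tensor, assumed here to be the input:

```v
import vtl
import vtl.nn.internal

x := vtl.from_array([-2.0, 3.0], [2])!
y := internal.leaky_relu(x, 0.01)
grad := internal.deriv_leaky_relu(vtl.ones[f64]([2]), x, 0.01)!
```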
fn maxpool2d #
fn maxpool2d[T](input &vtl.Tensor[T], kernel []int, padding []int, stride []int) (&vtl.Tensor[int], &vtl.Tensor[T])
fn maxpool2d_backward #
fn maxpool2d_backward[T](shape []int, max_indices &vtl.Tensor[int], grad_output &vtl.Tensor[T]) &vtl.Tensor[T]
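A round-trip sketch, assuming NCHW input layout: the forward pass returns the argmax indices alongside the pooled output, and the backward pass scatters the incoming gradient back to those recorded positions:

```v
import vtl
import vtl.nn.internal

x := vtl.from_array([]f64{len: 16, init: f64(index)}, [1, 1, 4, 4])!
max_indices, pooled := internal.maxpool2d(x, [2, 2], [0, 0], [2, 2])
println(pooled.shape) // expected [1, 1, 2, 2]
grad := internal.maxpool2d_backward(x.shape, max_indices, vtl.ones[f64]([1, 1, 2, 2]))
println(grad.shape) // back to [1, 1, 4, 4]
```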
fn mse #
fn mse[T](input &vtl.Tensor[T], target &vtl.Tensor[T]) !&vtl.Tensor[T]
mse computes the mean squared error between the labels and the predictions
fn mse_backward #
fn mse_backward[T](gradient &vtl.Tensor[T], cache &vtl.Tensor[T], target &vtl.Tensor[T]) ![]&vtl.Tensor[T]
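A loss/gradient sketch, assuming `cache` in `mse_backward` is the prediction tensor saved from the forward pass and the upstream `gradient` seeds the chain with ones:

```v
import vtl
import vtl.nn.internal

pred := vtl.from_array([0.9, 0.2, 0.8], [3])!
target := vtl.from_array([1.0, 0.0, 1.0], [3])!
loss := internal.mse(pred, target)!
grads := internal.mse_backward(vtl.ones[f64]([3]), pred, target)!
println(loss)
```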
fn relu #
fn relu[T](x &vtl.Tensor[T]) &vtl.Tensor[T]
relu activation function
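The same pattern applies to `relu` and `deriv_relu` above, with the forward input assumed to be the cached tensor:

```v
import vtl
import vtl.nn.internal

x := vtl.from_array([-1.0, 0.0, 2.0], [3])!
y := internal.relu(x)
grad := internal.deriv_relu(vtl.ones[f64]([3]), x)!
```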
fn sgd_optimize #
fn sgd_optimize[T](mut value vtl.Tensor[T], gradient &vtl.Tensor[T], learning_rate f64) !
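A sketch of one vanilla SGD step, assuming the update is applied in place to the mutable value tensor:

```v
import vtl
import vtl.nn.internal

mut w := vtl.from_array([0.5, -0.3], [2])!
grad := vtl.from_array([0.1, -0.2], [2])!
internal.sgd_optimize(mut w, grad, 0.01)! // roughly w = w - 0.01 * grad
```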
fn sigmoid #
fn sigmoid[T](x &vtl.Tensor[T]) &vtl.Tensor[T]
sigmoid takes a real-valued number and squashes it to the range [0, 1]
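For `sigmoid` and `deriv_sigmoid` above, the cached tensor is assumed to be the forward output, since the sigmoid derivative s * (1 - s) is cheapest to compute from it:

```v
import vtl
import vtl.nn.internal

x := vtl.from_array([-2.0, 0.0, 2.0], [3])!
s := internal.sigmoid(x)
grad := internal.deriv_sigmoid(vtl.ones[f64]([3]), s)!
```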
fn sigmoid_cross_entropy #
fn sigmoid_cross_entropy[T](input &vtl.Tensor[T], target &vtl.Tensor[T]) !&vtl.Tensor[T]
sigmoid_cross_entropy computes the sigmoid cross entropy between the labels and the predictions
fn sigmoid_cross_entropy_backward #
fn sigmoid_cross_entropy_backward[T](gradient &vtl.Tensor[T], cache &vtl.Tensor[T], target &vtl.Tensor[T]) ![]&vtl.Tensor[T]
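A sketch pairing the loss with its backward function, assuming `cache` is the logits tensor from the forward pass:

```v
import vtl
import vtl.nn.internal

logits := vtl.from_array([0.2, -1.5, 3.0], [3])!
target := vtl.from_array([0.0, 0.0, 1.0], [3])!
loss := internal.sigmoid_cross_entropy(logits, target)!
grads := internal.sigmoid_cross_entropy_backward(vtl.ones[f64]([3]), logits, target)!
```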
fn softmax_cross_entropy_backward #
fn softmax_cross_entropy_backward[T](gradient &vtl.Tensor[T], cache &vtl.Tensor[T], target &vtl.Tensor[T]) ![]&vtl.Tensor[T]
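No matching forward function is exposed in this module, but the backward call mirrors the sigmoid variant; `cache` is again assumed to hold the forward logits:

```v
import vtl
import vtl.nn.internal

logits := vtl.from_array([2.0, 0.5, -1.0], [1, 3])!
target := vtl.from_array([1.0, 0.0, 0.0], [1, 3])!
grads := internal.softmax_cross_entropy_backward(vtl.ones[f64]([1, 3]), logits, target)!
```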
fn tanh #
fn tanh[T](x &vtl.Tensor[T]) &vtl.Tensor[T]
tanh squashes a real-valued number to the range [-1, 1]
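For `tanh` and `deriv_tanh` above, the cached tensor is assumed to be the forward output, matching the identity d/dx tanh(x) = 1 - tanh(x)^2:

```v
import vtl
import vtl.nn.internal

x := vtl.from_array([-1.0, 0.0, 1.0], [3])!
t := internal.tanh(x)
grad := internal.deriv_tanh(vtl.ones[f64]([3]), t)!
```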
fn variance_scaled #
fn variance_scaled[T](shape []int, scale T, fan_mode FanMode, distribution Distribution) &vtl.Tensor[T]
variance_scaled draws tensor values with variance scaled by the chosen fan mode and distribution; see the sketch after the enum definitions below
enum Distribution #
enum Distribution {
uniform
normal
}
enum FanMode #
enum FanMode {
fan_avg
fan_in
fan_out
}
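`variance_scaled` generalizes the Kaiming initializers via the two enums above. A sketch drawing fan-in scaled normal weights with scale 2.0 (the ReLU-friendly choice), again with the element type given explicitly:

```v
import vtl.nn.internal

w := internal.variance_scaled[f64]([128, 64], 2.0, .fan_in, .normal)
println(w.shape) // [128, 64]
```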