nn.layers #

fn dropout_layer #

fn dropout_layer[T](ctx &autograd.Context[T], output_shape []int, data DropoutLayerConfig) types.Layer[T]

fn elu_layer #

fn elu_layer[T](ctx &autograd.Context[T], output_shape []int, data EluLayerConfig) types.Layer[T]

fn flatten_layer #

fn flatten_layer[T](ctx &autograd.Context[T], shape []int) types.Layer[T]

fn input_layer #

fn input_layer[T](ctx &autograd.Context[T], shape []int) types.Layer[T]

fn leaky_relu_layer #

fn leaky_relu_layer[T](ctx &autograd.Context[T], output_shape []int) types.Layer[T]

fn linear_layer #

fn linear_layer[T](ctx &autograd.Context[T], input_dim int, output_dim int) types.Layer[T]

fn maxpool2d_layer #

fn maxpool2d_layer[T](ctx &autograd.Context[T], input_shape []int, kernel []int, padding []int, stride []int) types.Layer[T]

fn relu_layer #

fn relu_layer[T](ctx &autograd.Context[T], output_shape []int) types.Layer[T]

fn sigmoid_layer #

fn sigmoid_layer[T](ctx &autograd.Context[T], output_shape []int) types.Layer[T]

fn (DropoutLayer[T]) output_shape #

fn (layer &DropoutLayer[T]) output_shape() []int

fn (DropoutLayer[T]) variables #

fn (_ &DropoutLayer[T]) variables() []&autograd.Variable[T]

fn (DropoutLayer[T]) forward #

fn (layer &DropoutLayer[T]) forward(mut input autograd.Variable[T]) !&autograd.Variable[T]

fn (EluLayer[T]) output_shape #

fn (layer &EluLayer[T]) output_shape() []int

fn (EluLayer[T]) variables #

fn (_ &EluLayer[T]) variables() []&autograd.Variable[T]

fn (EluLayer[T]) forward #

fn (layer &EluLayer[T]) forward(mut input autograd.Variable[T]) !&autograd.Variable[T]

fn (FlattenLayer[T]) output_shape #

fn (layer &FlattenLayer[T]) output_shape() []int

fn (FlattenLayer[T]) variables #

fn (_ &FlattenLayer[T]) variables() []&autograd.Variable[T]

fn (FlattenLayer[T]) forward #

fn (layer &FlattenLayer[T]) forward(mut input autograd.Variable[T]) !&autograd.Variable[T]

fn (InputLayer[T]) output_shape #

fn (layer &InputLayer[T]) output_shape() []int

fn (InputLayer[T]) variables #

fn (_ &InputLayer[T]) variables() []&autograd.Variable[T]

fn (InputLayer[T]) forward #

fn (layer &InputLayer[T]) forward(mut input autograd.Variable[T]) !&autograd.Variable[T]

fn (LeakyReluLayer[T]) output_shape #

fn (layer &LeakyReluLayer[T]) output_shape() []int

fn (LeakyReluLayer[T]) variables #

fn (_ &LeakyReluLayer[T]) variables() []&autograd.Variable[T]

fn (LeakyReluLayer[T]) forward #

fn (layer &LeakyReluLayer[T]) forward(mut input autograd.Variable[T]) !&autograd.Variable[T]

fn (LinearLayer[T]) output_shape #

fn (layer &LinearLayer[T]) output_shape() []int

fn (LinearLayer[T]) variables #

fn (layer &LinearLayer[T]) variables() []&autograd.Variable[T]

fn (LinearLayer[T]) forward #

fn (layer &LinearLayer[T]) forward(mut input autograd.Variable[T]) !&autograd.Variable[T]

fn (MaxPool2DLayer[T]) output_shape #

fn (layer &MaxPool2DLayer[T]) output_shape() []int

fn (MaxPool2DLayer[T]) variables #

fn (layer &MaxPool2DLayer[T]) variables() []&autograd.Variable[T]

fn (MaxPool2DLayer[T]) forward #

fn (layer &MaxPool2DLayer[T]) forward(mut input autograd.Variable[T]) !&autograd.Variable[T]

fn (ReLULayer[T]) output_shape #

fn (layer &ReLULayer[T]) output_shape() []int

fn (ReLULayer[T]) variables #

fn (_ &ReLULayer[T]) variables() []&autograd.Variable[T]

fn (ReLULayer[T]) forward #

fn (layer &ReLULayer[T]) forward(mut input autograd.Variable[T]) !&autograd.Variable[T]

fn (SigmoidLayer[T]) output_shape #

fn (layer &SigmoidLayer[T]) output_shape() []int

fn (SigmoidLayer[T]) variables #

fn (_ &SigmoidLayer[T]) variables() []&autograd.Variable[T]

fn (SigmoidLayer[T]) forward #

fn (layer &SigmoidLayer[T]) forward(mut input autograd.Variable[T]) !&autograd.Variable[T]

struct DropoutLayer #

struct DropoutLayer[T] {
	output_shape []int
	prob         f64
}

DropoutLayer is a layer that randomly drops elements of its input with probability prob.

struct DropoutLayerConfig #

@[params]
struct DropoutLayerConfig {
	prob f64 = 0.5
}
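
A minimal construction sketch. The import paths (`vtl.autograd`, `vtl.nn.layers`) and the context constructor `autograd.ctx[f64]()` are assumptions drawn from the library's examples, not from this page. Because `DropoutLayerConfig` is marked `@[params]`, `prob` can be passed as a trailing named argument:

```v
import vtl.autograd
import vtl.nn.layers

fn main() {
	ctx := autograd.ctx[f64]()
	// Hypothetical 10-element feature shape; drop each element with
	// probability 0.2 (prob defaults to 0.5).
	layer := layers.dropout_layer[f64](ctx, [10], prob: 0.2)
	assert layer.output_shape() == [10]
}
```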

struct EluLayer #

struct EluLayer[T] {
	output_shape []int
	alpha        f64
}

EluLayer is an activation layer that applies the element-wise function f(x) = x > 0 ? x : alpha * (exp(x) - 1).

struct EluLayerConfig #

@[params]
struct EluLayerConfig {
	alpha f64 = 0.01
}
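
A sketch with a custom alpha, under the same import and `autograd.ctx` assumptions as the dropout example above; `EluLayerConfig` is likewise `@[params]`:

```v
import vtl.autograd
import vtl.nn.layers

fn main() {
	ctx := autograd.ctx[f64]()
	// alpha sets the negative saturation: f(x) = x > 0 ? x : alpha * (exp(x) - 1).
	layer := layers.elu_layer[f64](ctx, [10], alpha: 0.1)
	assert layer.output_shape() == [10]
}
```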

struct FlattenLayer #

struct FlattenLayer[T] {
	shape []int
}

FlattenLayer is a layer that flattens its input tensor.
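
A construction sketch under the same assumptions; the resulting output shape is not documented on this page, so it is only printed:

```v
import vtl.autograd
import vtl.nn.layers

fn main() {
	ctx := autograd.ctx[f64]()
	// Hypothetical incoming shape: 4 channels of 5x5 feature maps.
	layer := layers.flatten_layer[f64](ctx, [4, 5, 5])
	// Presumably the product of the input dims (here 100), but that is
	// an assumption, not something this page states.
	println(layer.output_shape())
}
```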

struct InputLayer #

struct InputLayer[T] {
	shape []int
}

InputLayer is a layer that takes a single input tensor and returns the same tensor.

This layer is used as the first layer in a model.
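
Since InputLayer is the identity on its tensor, it mainly pins down the shape at the head of a stack. A sketch of how the constructors compose (same import and `autograd.ctx` assumptions; the array-of-layers stack is illustrative, not a library API):

```v
import vtl.autograd
import vtl.nn.layers

fn main() {
	ctx := autograd.ctx[f64]()
	// A hypothetical three-layer stack: identity input, affine map, activation.
	stack := [
		layers.input_layer[f64](ctx, [2]),
		layers.linear_layer[f64](ctx, 2, 3),
		layers.relu_layer[f64](ctx, [3]),
	]
	for layer in stack {
		println(layer.output_shape())
	}
}
```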

struct LeakyReluLayer #

struct LeakyReluLayer[T] {
	output_shape []int
}

LeakyReluLayer is an activation layer that applies the leaky ReLU function to the input.
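
In the usual formulation, leaky ReLU is f(x) = x > 0 ? x : slope * x for a small fixed slope; the constructor here takes no slope parameter, so the slope is internal to the layer.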

struct LinearLayer #

struct LinearLayer[T] {
	weights &autograd.Variable[T] = unsafe { nil }
	bias    &autograd.Variable[T] = unsafe { nil }
}

LinearLayer is a layer that applies a linear transformation to its input.
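
A sketch (same assumptions as above). The linear layer is the only layer on this page with trainable state, so its variables() is the one that should be non-empty:

```v
import vtl.autograd
import vtl.nn.layers

fn main() {
	ctx := autograd.ctx[f64]()
	// Maps 4-dimensional inputs to 2-dimensional outputs.
	layer := layers.linear_layer[f64](ctx, 4, 2)
	// Expected to hold the weight matrix and the bias vector, i.e. 2 entries.
	println(layer.variables().len)
}
```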

struct MaxPool2DLayer #

struct MaxPool2DLayer[T] {
	input_shape []int
	kernel      []int
	padding     []int
	stride      []int
}

MaxPool2DLayer is a layer that implements the 2D max-pooling operation.
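
A construction sketch (same assumptions). With the standard pooling arithmetic, each spatial output dimension is (in + 2*padding - kernel) / stride + 1:

```v
import vtl.autograd
import vtl.nn.layers

fn main() {
	ctx := autograd.ctx[f64]()
	// Hypothetical input: 3 channels of 28x28, pooled by a 2x2 window
	// with no padding and stride 2: (28 + 2*0 - 2) / 2 + 1 = 14.
	layer := layers.maxpool2d_layer[f64](ctx, [3, 28, 28], [2, 2], [0, 0], [2, 2])
	println(layer.output_shape()) // spatial dims expected to be 14x14
}
```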

struct ReLULayer #

struct ReLULayer[T] {
	output_shape []int
}

ReLULayer is a layer that applies the rectified linear unit function element-wise.
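
The activation layers here are stateless and share one construction pattern: pass the context and the shape the activation will see. A sketch under the same assumptions (sigmoid_layer and leaky_relu_layer take the same arguments):

```v
import vtl.autograd
import vtl.nn.layers

fn main() {
	ctx := autograd.ctx[f64]()
	layer := layers.relu_layer[f64](ctx, [8])
	assert layer.output_shape() == [8]
	// The discarded receiver in variables() suggests no trainable state.
	println(layer.variables().len) // expected 0
}
```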

struct SigmoidLayer #

struct SigmoidLayer[T] {
	output_shape []int
}

SigmoidLayer is a layer that applies the sigmoid function to its input.
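
In the usual definition, sigmoid is f(x) = 1 / (1 + exp(-x)), squashing each element into (0, 1); like the other activation layers on this page, it carries no trainable variables.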