nn.gates.layers
fn dropout_gate
fn dropout_gate[T](mask &vtl.Tensor[T], prob f64) &DropoutGate[T]
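A minimal construction sketch, for illustration only. The `vtl.from_array` helper and `!` error propagation are assumptions based on vtl's general tensor API, not part of this module:

```v
import vtl
import vtl.nn.gates.layers

fn main() {
	// The mask is the keep/drop pattern sampled during the forward
	// pass; backward reuses the same mask and prob to scale gradients.
	// Assumption: vtl.from_array(data, shape) builds a tensor.
	mask := vtl.from_array([1.0, 0.0, 1.0, 1.0], [2, 2])!
	gate := layers.dropout_gate[f64](mask, 0.5)
	_ = gate
}
```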
fn flatten_gate
fn flatten_gate[T](input &autograd.Variable[T], cached_shape []int) &FlattenGate[T]
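A hedged sketch of building a flatten gate; `autograd.ctx`, `ctx.variable`, and `vtl.from_array` follow vtl's autograd examples and are assumptions here:

```v
import vtl
import vtl.autograd
import vtl.nn.gates.layers

fn main() {
	ctx := autograd.ctx[f64]()
	// cached_shape records the pre-flatten shape so backward can
	// reshape the incoming gradient back to it.
	x := ctx.variable(vtl.from_array([]f64{len: 6}, [2, 3])!)
	gate := layers.flatten_gate[f64](x, [2, 3])
	_ = gate
}
```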
fn input_gate
fn input_gate[T]() &InputGate[T]
fn linear_gate
fn linear_gate[T](input &autograd.Variable[T], weight &autograd.Variable[T], bias &autograd.Variable[T]) &LinearGate[T]
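A minimal sketch of wiring up a linear (dense) gate. Shapes follow the usual dense-layer convention; the variable helpers and the weight dimension order are assumptions about the surrounding vtl API:

```v
import vtl
import vtl.autograd
import vtl.nn.gates.layers

fn main() {
	ctx := autograd.ctx[f64]()
	// batch = 2, in_features = 3, out_features = 4 (assumed layout).
	input := ctx.variable(vtl.from_array([]f64{len: 6}, [2, 3])!)
	weight := ctx.variable(vtl.from_array([]f64{len: 12}, [4, 3])!)
	bias := ctx.variable(vtl.from_array([]f64{len: 4}, [4])!)
	gate := layers.linear_gate[f64](input, weight, bias)
	_ = gate
}
```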
fn lstm_gate
fn lstm_gate[T](input_ &vtl.Tensor[T],
hidden_ &vtl.Tensor[T],
cell_ &vtl.Tensor[T],
w_ih &vtl.Tensor[T],
w_hh &vtl.Tensor[T],
b_ih &vtl.Tensor[T],
b_hh &vtl.Tensor[T]) &LSTMGate[T]
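A shape-oriented sketch of constructing an LSTM gate. The shapes follow the common (PyTorch-style) convention of stacking the four internal gates along the first weight dimension; `vtl.zeros` and the exact shape expectations are assumptions:

```v
import vtl
import vtl.nn.gates.layers

fn main() {
	batch, input_size, hidden_size := 2, 3, 4
	// Assumption: vtl.zeros[T](shape) creates a zero-filled tensor.
	input := vtl.zeros[f64]([batch, input_size])
	hidden := vtl.zeros[f64]([batch, hidden_size])
	cell := vtl.zeros[f64]([batch, hidden_size])
	// 4 * hidden_size: input, forget, cell and output gates stacked.
	w_ih := vtl.zeros[f64]([4 * hidden_size, input_size])
	w_hh := vtl.zeros[f64]([4 * hidden_size, hidden_size])
	b_ih := vtl.zeros[f64]([4 * hidden_size])
	b_hh := vtl.zeros[f64]([4 * hidden_size])
	gate := layers.lstm_gate[f64](input, hidden, cell, w_ih, w_hh, b_ih, b_hh)
	_ = gate
}
```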
fn maxpool2d_gate
fn maxpool2d_gate[T](max_indices &vtl.Tensor[int], kernel []int, shape []int, stride []int, padding []int) &MaxPool2DGate[T]
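A minimal sketch: the gate only needs the argmax indices recorded in the forward pass plus the pooling geometry, so backward can route each gradient to the position that won the max. The concrete shapes below (NCHW input, 2x2 kernel and stride, no padding) and `vtl.zeros[int]` are assumptions:

```v
import vtl
import vtl.nn.gates.layers

fn main() {
	// Placeholder indices for a [1, 1, 4, 4] input pooled to [1, 1, 2, 2].
	max_indices := vtl.zeros[int]([1, 1, 2, 2])
	gate := layers.maxpool2d_gate[f64](max_indices, [2, 2], [1, 1, 4, 4], [2, 2], [0, 0])
	_ = gate
}
```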
fn (DropoutGate[T]) backward
fn (g &DropoutGate[T]) backward[T](payload &autograd.Payload[T]) ![]&vtl.Tensor[T]
fn (DropoutGate[T]) cache
fn (g &DropoutGate[T]) cache[T](mut result autograd.Variable[T], args ...autograd.CacheParam) !
fn (FlattenGate[T]) backward
fn (g &FlattenGate[T]) backward[T](payload &autograd.Payload[T]) ![]&vtl.Tensor[T]
fn (FlattenGate[T]) cache
fn (g &FlattenGate[T]) cache[T](mut result autograd.Variable[T], args ...autograd.CacheParam) !
fn (InputGate[T]) backward
fn (g &InputGate[T]) backward[T](payload &autograd.Payload[T]) ![]&vtl.Tensor[T]
fn (InputGate[T]) cache
fn (g &InputGate[T]) cache[T](mut result autograd.Variable[T], args ...autograd.CacheParam) !
fn (LSTMGate[T]) backward
fn (g &LSTMGate[T]) backward[T](payload &autograd.Payload[T]) ![]&vtl.Tensor[T]
fn (LSTMGate[T]) cache
fn (g &LSTMGate[T]) cache[T](mut result autograd.Variable[T], args ...autograd.CacheParam) !
fn (LinearGate[T]) backward
fn (g &LinearGate[T]) backward[T](payload &autograd.Payload[T]) ![]&vtl.Tensor[T]
fn (LinearGate[T]) cache
fn (g &LinearGate[T]) cache[T](mut result autograd.Variable[T], args ...autograd.CacheParam) !
fn (MaxPool2DGate[T]) backward
fn (g &MaxPool2DGate[T]) backward[T](payload &autograd.Payload[T]) ![]&vtl.Tensor[T]
fn (MaxPool2DGate[T]) cache
fn (g &MaxPool2DGate[T]) cache[T](mut result autograd.Variable[T], args ...autograd.CacheParam) !
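All six gates share the same two-method contract: `cache` attaches the gate and its saved state to the freshly computed result variable so the autograd engine can find it, and `backward` consumes the engine-supplied `Payload` and returns one gradient tensor per forward input. A hedged lifecycle sketch using the stateless InputGate; the context and variable helpers are assumptions drawn from vtl's autograd module:

```v
import vtl
import vtl.autograd
import vtl.nn.gates.layers

fn main() {
	ctx := autograd.ctx[f64]()
	mut x := ctx.variable(vtl.from_array([1.0, 2.0], [2])!)
	gate := layers.input_gate[f64]()
	// Register the gate on its result; during backprop the engine
	// wraps the result in a Payload and calls gate.backward with it,
	// collecting one gradient tensor per forward input.
	gate.cache(mut x)!
}
```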
struct DropoutGate
struct DropoutGate[T] {
pub:
prob f64
mask &vtl.Tensor[T] = unsafe { nil }
}
struct FlattenGate
struct FlattenGate[T] {
pub:
input &autograd.Variable[T] = unsafe { nil }
cached_shape []int
}
struct InputGate
struct InputGate[T] {}
struct LSTMGate
struct LSTMGate[T] {
input_ &vtl.Tensor[T] = unsafe { nil }
hidden_ &vtl.Tensor[T] = unsafe { nil }
cell_ &vtl.Tensor[T] = unsafe { nil }
w_ih &vtl.Tensor[T] = unsafe { nil }
w_hh &vtl.Tensor[T] = unsafe { nil }
b_ih &vtl.Tensor[T] = unsafe { nil }
b_hh &vtl.Tensor[T] = unsafe { nil }
}
LSTMGate implements the LSTM forward pass with full gradient support. This is a basic reference implementation; for production use, prefer an optimized version.
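For reference, the step such a gate differentiates, assuming vtl follows the standard LSTM formulation (sigma is the logistic sigmoid, \(\odot\) is the elementwise product, and \(W_{ih}\), \(W_{hh}\), \(b_{ih}\), \(b_{hh}\) correspond to the struct fields above):

```latex
\begin{aligned}
i_t &= \sigma(W_{ih}^{(i)} x_t + b_{ih}^{(i)} + W_{hh}^{(i)} h_{t-1} + b_{hh}^{(i)}) \\
f_t &= \sigma(W_{ih}^{(f)} x_t + b_{ih}^{(f)} + W_{hh}^{(f)} h_{t-1} + b_{hh}^{(f)}) \\
g_t &= \tanh(W_{ih}^{(g)} x_t + b_{ih}^{(g)} + W_{hh}^{(g)} h_{t-1} + b_{hh}^{(g)}) \\
o_t &= \sigma(W_{ih}^{(o)} x_t + b_{ih}^{(o)} + W_{hh}^{(o)} h_{t-1} + b_{hh}^{(o)}) \\
c_t &= f_t \odot c_{t-1} + i_t \odot g_t \\
h_t &= o_t \odot \tanh(c_t)
\end{aligned}
```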
struct LinearGate
struct LinearGate[T] {
pub:
input &autograd.Variable[T] = unsafe { nil }
weight &autograd.Variable[T] = unsafe { nil }
bias &autograd.Variable[T] = unsafe { nil }
}
struct MaxPool2DGate
struct MaxPool2DGate[T] {
pub:
max_indices &vtl.Tensor[int] = unsafe { nil }
kernel []int
shape []int
stride []int
padding []int
}