nn.loss #
Re-export all loss structs and constructors from their respective files:
- mse.v: MSELoss, mse_loss
- sigmoid_cross_entropy.v: SigmoidCrossEntropyLoss, sigmoid_cross_entropy_loss
- softmax_cross_entropy.v: SoftmaxCrossEntropyLoss, softmax_cross_entropy_loss
- bce.v: BCELoss, BCELossConfig, bce_loss, BCELossGate, bce_loss_gate
- cross_entropy.v: CrossEntropyLoss, cross_entropy_loss, CrossEntropyLossGate, cross_entropy_loss_gate
- huber.v: HuberLoss, HuberLossConfig, huber_loss, HuberLossGate, huber_loss_gate
- nll.v: NLLLoss, nll_loss, NLLLossGate, nll_loss_gate
- kl.v: KLDivLoss, kl_div_loss, KLDivLossGate, kl_div_loss_gate
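All of these are therefore available through a single import. Example (a minimal sketch; the f64 instantiation is illustrative):
import vtl.nn.loss
mse := loss.mse_loss[f64]() // regression
ce := loss.cross_entropy_loss[f64]() // multi-class classification
bce := loss.bce_loss[f64]() // binary classification (from_logits: true by default)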
fn bce_loss #
fn bce_loss[T](config BCELossConfig) &BCELoss[T]
bce_loss creates a new BCELoss instance.
fn cross_entropy_loss #
fn cross_entropy_loss[T]() &CrossEntropyLoss[T]
cross_entropy_loss creates a new CrossEntropyLoss instance.
fn cross_entropy_loss_gate #
fn cross_entropy_loss_gate[T](input &vtl.Tensor[T], target &vtl.Tensor[T]) &CrossEntropyLossGate[T]
cross_entropy_loss_gate creates a new CrossEntropyLossGate instance for the given input and target tensors.
fn huber_loss #
fn huber_loss[T](config HuberLossConfig) &HuberLoss[T]
huber_loss creates a new HuberLoss instance.
fn kl_div_loss #
fn kl_div_loss[T]() &KLDivLoss[T]
kl_div_loss creates a new KLDivLoss instance with reduction = 'mean'.
fn mse_loss #
fn mse_loss[T]() &MSELoss[T]
mse_loss creates a new MSELoss instance.
fn nll_loss #
fn nll_loss[T](weight &vtl.Tensor[T]) &NLLLoss[T]
nll_loss creates a new NLLLoss instance.
weight — optional per-class weight tensor; pass unsafe { nil } for uniform weights.
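Example (a sketch of both call styles; w is assumed to be a pre-built &vtl.Tensor[f64] of shape [n_classes]):
l := loss.nll_loss[f64](unsafe { nil }) // uniform class weights
lw := loss.nll_loss[f64](w) // per-class weights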
fn sigmoid_cross_entropy_loss #
fn sigmoid_cross_entropy_loss[T]() &SigmoidCrossEntropyLoss[T]
sigmoid_cross_entropy_loss creates a new SigmoidCrossEntropyLoss instance.
fn softmax_cross_entropy_loss #
fn softmax_cross_entropy_loss[T]() &SoftmaxCrossEntropyLoss[T]
softmax_cross_entropy_loss creates a new SoftmaxCrossEntropyLoss instance.
fn (BCELoss[T]) loss #
fn (l &BCELoss[T]) loss(input &autograd.Variable[T], target &vtl.Tensor[T]) !&autograd.Variable[T]
fn (CrossEntropyLossGate[T]) backward #
fn (g &CrossEntropyLossGate[T]) backward[T](payload &autograd.Payload[T]) ![]&vtl.Tensor[T]
fn (CrossEntropyLossGate[T]) cache #
fn (g &CrossEntropyLossGate[T]) cache[T](mut result autograd.Variable[T], args ...autograd.CacheParam) !
fn (CrossEntropyLoss[T]) loss #
fn (_ &CrossEntropyLoss[T]) loss(input &autograd.Variable[T], target &vtl.Tensor[T]) !&autograd.Variable[T]
fn (HuberLoss[T]) loss #
fn (l &HuberLoss[T]) loss(input &autograd.Variable[T], target &vtl.Tensor[T]) !&autograd.Variable[T]
fn (KLDivLoss[T]) loss #
fn (_ &KLDivLoss[T]) loss(input &autograd.Variable[T], target &vtl.Tensor[T]) !&autograd.Variable[T]
fn (MSELoss[T]) loss #
fn (_ &MSELoss[T]) loss(input &autograd.Variable[T], target &vtl.Tensor[T]) !&autograd.Variable[T]
fn (NLLLoss[T]) loss #
fn (_ &NLLLoss[T]) loss(input &autograd.Variable[T], target &vtl.Tensor[T]) !&autograd.Variable[T]
fn (SigmoidCrossEntropyLoss[T]) loss #
fn (_ &SigmoidCrossEntropyLoss[T]) loss(input &autograd.Variable[T], target &vtl.Tensor[T]) !&autograd.Variable[T]
fn (SoftmaxCrossEntropyLoss[T]) loss #
fn (_ &SoftmaxCrossEntropyLoss[T]) loss(input &autograd.Variable[T], target &vtl.Tensor[T]) !&autograd.Variable[T]
struct BCELoss #
struct BCELoss[T] {
from_logits bool
}
BCELoss computes Binary Cross-Entropy loss per element and returns the mean.
Formula (when from_logits = false): L = -mean(y·log(p) + (1-y)·log(1-p))
When from_logits = true (default), a sigmoid is applied to input first, which is numerically more stable than computing sigmoid outside the loss.
Shape: input and target are both [batch, ...]; target values lie in (0, 1).
Example
import vtl.nn.loss
l := loss.bce_loss[f64]() // from_logits: true
out := l.loss(logits, labels)!
struct BCELossConfig #
struct BCELossConfig {
from_logits bool = true // apply sigmoid to input (recommended for numerical stability)
}
BCELossConfig configures BCELoss.
Fields:
- from_logits — when true (default) a sigmoid is applied to the raw model output before computing the loss. Recommended for numerical stability.
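If the model already outputs probabilities, disable from_logits using the config-struct call syntax. Example (a sketch; probs and labels are placeholders):
l := loss.bce_loss[f64](from_logits: false)
out := l.loss(probs, labels)!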
struct CrossEntropyLoss #
struct CrossEntropyLoss[T] {
pub:
weight &vtl.Tensor[T] = unsafe { nil }
ignore_index int = -1
reduction string = 'mean' // 'mean' | 'sum' | 'none'
}
CrossEntropyLoss combines LogSoftmax + NLLLoss in one forward pass. This is more numerically stable than applying softmax then log separately.
Input: [batch_size, n_classes] — raw logits
Target: [batch_size, n_classes] one-hot targets OR [batch_size] class indices
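Example (a minimal sketch; logits and one_hot_target are placeholders):
import vtl.nn.loss
l := loss.cross_entropy_loss[f64]()
out := l.loss(logits, one_hot_target)!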
struct CrossEntropyLossGate #
struct CrossEntropyLossGate[T] {
pub:
target &vtl.Tensor[T] = unsafe { nil }
}
CrossEntropyLossGate is the autograd gate that caches the target tensor so the backward pass of CrossEntropyLoss can compute gradients with respect to the input.
struct HuberLoss #
struct HuberLoss[T] {
delta T
}
HuberLoss (also known as Smooth L1 Loss) combines L1 and L2 loss, controlled by delta.
Formula:
L = mean(0.5·(x-y)²) if |x-y| ≤ delta
L = mean(delta·(|x-y| - 0.5·delta)) if |x-y| > delta
Behaves like MSE for small errors and like MAE for large errors, making it more robust to outliers than pure MSE.
Example
import vtl.nn.loss
l := loss.huber_loss[f64](delta: 1.0)
out := l.loss(prediction, target)!
struct HuberLossConfig #
struct HuberLossConfig {
delta f64 = 1.0
}
HuberLossConfig configures HuberLoss.
Fields:
- delta — transition threshold between L2 (below) and L1 (above) behaviour. Default: 1.0.
struct KLDivLoss #
struct KLDivLoss[T] {
pub:
reduction string = 'mean'
}
KLDivLoss computes the Kullback-Leibler divergence loss.
Formula: D_KL(P ‖ Q) = sum(P · log(P / Q))
Input: [batch, n_classes] — Q log-probabilities (e.g. log-softmax output)
Target: [batch, n_classes] — P probabilities (non-negative, sum to 1 per sample)
Fields:
- reduction — 'mean' | 'sum' | 'none' (default: 'mean')
Example
import vtl.nn.loss
l := loss.kl_div_loss[f64]()
out := l.loss(log_q, p_target)!
struct MSELoss #
struct MSELoss[T] {}
MSELoss computes the Mean Squared Error between the model output and the target.
Formula: L = mean((input - target)²)
Typically used for regression tasks.
Example
import vtl.nn.loss
l := loss.mse_loss[f64]()
out := l.loss(prediction, target)!
struct NLLLoss #
struct NLLLoss[T] {
pub:
weight &vtl.Tensor[T] = unsafe { nil }
ignore_index int = -1
reduction string = 'mean'
}
NLLLoss computes the Negative Log Likelihood loss.
Intended to be used after a log-softmax layer for multi-class classification.
Input shape: [batch, n_classes] — log-probabilities (log-softmax output)
Target shape: [batch, n_classes] — one-hot class labels
Fields:
- weight — optional per-class weight tensor (nil = uniform weights)
- ignore_index — class index to ignore in the loss computation (default: -1 = none)
- reduction — 'mean' | 'sum' | 'none' (default: 'mean')
Example
import vtl.nn.loss
l := loss.nll_loss[f64](unsafe { nil })
log_probs := log_softmax_output // shape [batch, n_classes]
out := l.loss(log_probs, one_hot_target)!
struct SigmoidCrossEntropyLoss #
struct SigmoidCrossEntropyLoss[T] {}
SigmoidCrossEntropyLoss applies a sigmoid to the input and computes the cross-entropy against the target in a single, numerically stable step.
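Example (a minimal sketch; logits and labels are placeholders):
import vtl.nn.loss
l := loss.sigmoid_cross_entropy_loss[f64]()
out := l.loss(logits, labels)!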
struct SoftmaxCrossEntropyLoss #
struct SoftmaxCrossEntropyLoss[T] {}
SoftmaxCrossEntropyLoss applies a softmax to the input and computes the cross-entropy against the target in a single, numerically stable step.
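Example (a minimal sketch; logits and one_hot_target are placeholders):
import vtl.nn.loss
l := loss.softmax_cross_entropy_loss[f64]()
out := l.loss(logits, one_hot_target)!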