Table Of Contents

Source code for gluonts.distribution.neg_binomial

# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

# Standard library imports
from typing import Dict, Optional, Tuple

# First-party imports
from gluonts.model.common import Tensor

# Relative imports
from .distribution import Distribution, _sample_multiple, getF, softplus
from .distribution_output import DistributionOutput

class NegativeBinomial(Distribution):
    r"""
    Negative binomial distribution, i.e. the distribution of the number of
    successes in a sequence of independent Bernoulli trials.

    Parameters
    ----------
    mu
        Tensor containing the means, of shape `(*batch_shape, *event_shape)`.
    alpha
        Ratio between the success probability `p` of a single experiment,
        and the maximum number of failures allowed.
    F
        Backend array namespace (e.g. mx.nd or mx.sym); inferred from `mu`
        via `getF` when not given.
    """

    # Sampling goes through discrete poisson draws, so gradients cannot
    # flow through samples.
    is_reparameterizable = False

    def __init__(self, mu: Tensor, alpha: Tensor, F=None) -> None:
        self.mu = mu
        self.alpha = alpha
        self.F = F if F else getF(mu)

    @property
    def batch_shape(self) -> Tuple:
        # Parameters are broadcast together, so `mu` carries the batch shape.
        return self.mu.shape

    @property
    def event_shape(self) -> Tuple:
        # Univariate distribution: scalar events.
        return ()

    @property
    def event_dim(self) -> int:
        return 0

    def log_prob(self, x: Tensor) -> Tensor:
        """
        Compute the log-density of the negative binomial at `x`,
        parameterized by mean `mu` and dispersion `alpha`.
        """
        alphaInv = 1.0 / self.alpha
        alpha_times_mu = self.alpha * self.mu
        F = self.F
        # log NB(x | mu, alpha) with r = 1/alpha and
        # p = alpha*mu / (1 + alpha*mu), written via gammaln for
        # non-integer-safe binomial coefficients.
        ll = (
            x * F.log(alpha_times_mu / (1.0 + alpha_times_mu))
            - alphaInv * F.log1p(alpha_times_mu)
            + F.gammaln(x + alphaInv)
            - F.gammaln(x + 1.0)
            - F.gammaln(alphaInv)
        )
        return ll

    @property
    def mean(self) -> Tensor:
        return self.mu

    @property
    def stddev(self) -> Tensor:
        # Var[X] = mu * (1 + mu * alpha) for the (mu, alpha)
        # parameterization.
        return self.F.sqrt(self.mu * (1.0 + self.mu * self.alpha))

    def sample(self, num_samples: Optional[int] = None) -> Tensor:
        """
        Draw samples as a gamma-poisson mixture: lambda ~ Gamma(r, theta),
        X ~ Poisson(lambda), which marginalizes to the negative binomial.
        """

        def s(mu: Tensor, alpha: Tensor) -> Tensor:
            F = self.F
            tol = 1e-5
            r = 1.0 / alpha
            theta = alpha * mu
            # Clamp the gamma parameters and the sampled rate to keep the
            # backend samplers numerically stable for extreme parameters.
            r = F.minimum(F.maximum(tol, r), 1e10)
            theta = F.minimum(F.maximum(tol, theta), 1e10)
            x = F.minimum(F.random.gamma(r, theta), 1e6)
            return F.random.poisson(lam=x)

        return _sample_multiple(
            s, mu=self.mu, alpha=self.alpha, num_samples=num_samples
        )
class NegativeBinomialOutput(DistributionOutput):
    """
    Distribution-output layer producing `NegativeBinomial` distributions
    from two projected arguments, `mu` and `alpha`.
    """

    args_dim: Dict[str, int] = {"mu": 1, "alpha": 1}
    distr_cls: type = NegativeBinomial

    @classmethod
    def domain_map(cls, F, mu, alpha):
        """
        Map unconstrained network outputs to valid parameters: softplus
        keeps both `mu` and `alpha` strictly positive, and the trailing
        singleton argument axis is squeezed away.
        """
        mu = softplus(F, mu)
        alpha = softplus(F, alpha)
        return mu.squeeze(axis=-1), alpha.squeeze(axis=-1)

    # Overwrites the parent class method.
    # We cannot scale using the affine transformation since negative
    # binomial should return integers. Instead we scale the parameters.
    def distribution(self, distr_args, scale=None) -> NegativeBinomial:
        """
        Build a `NegativeBinomial` from `distr_args = (mu, alpha)`,
        optionally rescaling the parameters by `scale` so that the mean
        scales linearly while overdispersion is adjusted accordingly.
        """
        mu, alpha = distr_args
        if scale is None:
            return NegativeBinomial(mu, alpha)
        else:
            F = getF(mu)
            mu = F.broadcast_mul(mu, scale)
            # sqrt(scale + 1) keeps the scaled distribution's variance
            # consistent with the scaled mean.
            alpha = F.broadcast_mul(alpha, F.sqrt(scale + 1.0))
            return NegativeBinomial(mu, alpha, F)

    @property
    def event_shape(self) -> Tuple:
        # Univariate output: scalar events.
        return ()