diff --git a/docs/build/html/.buildinfo b/docs/build/html/.buildinfo
index 299fd67..78a50b8 100644
--- a/docs/build/html/.buildinfo
+++ b/docs/build/html/.buildinfo
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: e4be416fe1fade80beef75b5aa42bfc9
+config: e005a9a01c1ee86b9ab7af5d4e430bec
 tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.BBQ.html b/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.BBQ.html
index 7bc505e..fc3ae89 100644
--- a/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.BBQ.html
+++ b/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.BBQ.html
@@ -4,7 +4,7 @@
- netcal.binning.BBQ — calibration-framework 1.2.0 documentation
+ netcal.binning.BBQ — calibration-framework 1.2.1 documentation

diff --git a/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.ENIR.html b/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.ENIR.html
index 81be203..f69f595 100644
--- a/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.ENIR.html
+++ b/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.ENIR.html
@@ -4,7 +4,7 @@
- netcal.binning.ENIR — calibration-framework 1.2.0 documentation
+ netcal.binning.ENIR — calibration-framework 1.2.1 documentation
diff --git a/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.HistogramBinning.html b/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.HistogramBinning.html
index 27933c8..74993a5 100644
--- a/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.HistogramBinning.html
+++ b/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.HistogramBinning.html
@@ -4,7 +4,7 @@
- netcal.binning.HistogramBinning — calibration-framework 1.2.0 documentation
+ netcal.binning.HistogramBinning — calibration-framework 1.2.1 documentation
diff --git a/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.IsotonicRegression.html b/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.IsotonicRegression.html
index 2d71ac8..2730ce3 100644
--- a/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.IsotonicRegression.html
+++ b/docs/build/html/_autosummary/_autosummary_binning/netcal.binning.IsotonicRegression.html
@@ -4,7 +4,7 @@
- netcal.binning.IsotonicRegression — calibration-framework 1.2.0 documentation
+ netcal.binning.IsotonicRegression — calibration-framework 1.2.1 documentation
diff --git a/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.ACE.html b/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.ACE.html
index 01c9cb9..88a705b 100644
--- a/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.ACE.html
+++ b/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.ACE.html
@@ -4,7 +4,7 @@
- netcal.metrics.ACE — calibration-framework 1.2.0 documentation
+ netcal.metrics.ACE — calibration-framework 1.2.1 documentation
diff --git a/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.ECE.html b/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.ECE.html
index 25747c1..32e59fb 100644
--- a/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.ECE.html
+++ b/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.ECE.html
@@ -4,7 +4,7 @@
- netcal.metrics.ECE — calibration-framework 1.2.0 documentation
+ netcal.metrics.ECE — calibration-framework 1.2.1 documentation
diff --git a/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.MCE.html b/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.MCE.html
index 871eac3..d50d65d 100644
--- a/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.MCE.html
+++ b/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.MCE.html
@@ -4,7 +4,7 @@
- netcal.metrics.MCE — calibration-framework 1.2.0 documentation
+ netcal.metrics.MCE — calibration-framework 1.2.1 documentation
diff --git a/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.MMCE.html b/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.MMCE.html
index 4fa4754..83d6898 100644
--- a/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.MMCE.html
+++ b/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.MMCE.html
@@ -4,7 +4,7 @@
- netcal.metrics.MMCE — calibration-framework 1.2.0 documentation
+ netcal.metrics.MMCE — calibration-framework 1.2.1 documentation
diff --git a/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.PICP.html b/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.PICP.html
index de55d2c..1fed1d5 100644
--- a/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.PICP.html
+++ b/docs/build/html/_autosummary/_autosummary_metric/netcal.metrics.PICP.html
@@ -4,7 +4,7 @@
- netcal.metrics.PICP — calibration-framework 1.2.0 documentation
+ netcal.metrics.PICP — calibration-framework 1.2.1 documentation
diff --git a/docs/build/html/_autosummary/_autosummary_presentation/netcal.presentation.ReliabilityDiagram.html b/docs/build/html/_autosummary/_autosummary_presentation/netcal.presentation.ReliabilityDiagram.html
index 3ef42de..dddbfbd 100644
--- a/docs/build/html/_autosummary/_autosummary_presentation/netcal.presentation.ReliabilityDiagram.html
+++ b/docs/build/html/_autosummary/_autosummary_presentation/netcal.presentation.ReliabilityDiagram.html
@@ -4,7 +4,7 @@
- netcal.presentation.ReliabilityDiagram — calibration-framework 1.2.0 documentation
+ netcal.presentation.ReliabilityDiagram — calibration-framework 1.2.1 documentation
diff --git a/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.ConfidencePenalty.html b/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.ConfidencePenalty.html
index a420c81..22e890e 100644
--- a/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.ConfidencePenalty.html
+++ b/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.ConfidencePenalty.html
@@ -4,7 +4,7 @@
- netcal.regularization.ConfidencePenalty — calibration-framework 1.2.0 documentation
+ netcal.regularization.ConfidencePenalty — calibration-framework 1.2.1 documentation
@@ -118,7 +118,7 @@

netcal.regularization.ConfidencePenalty
 float()    Casts all floating point parameters and buffers to float datatype.
 forward(input)    Forward call.
@@ -138,7 +138,7 @@
 named_children()    Returns an iterator over immediate children modules, yielding both the name of the module as well as the module itself.
-named_modules([memo, prefix])    Returns an iterator over all modules in the network, yielding both the name of the module as well as the module itself.
+named_modules([memo, prefix, remove_duplicate])    Returns an iterator over all modules in the network, yielding both the name of the module as well as the module itself.
 named_parameters([prefix, recurse])
@@ -150,8 +150,8 @@
 register_backward_hook(hook)    Registers a backward hook on the module.
-register_buffer(name, tensor)    Adds a persistent buffer to the module.
+register_buffer(name, tensor[, persistent])    Adds a buffer to the module.
 register_forward_hook(hook)    Registers a forward hook on the module.
@@ -166,7 +166,7 @@
-share_memory()
+share_memory()    See torch.Tensor.share_memory_()
 state_dict([destination, prefix, keep_vars])    Returns a dictionary containing a whole state of the module.
@@ -180,7 +180,7 @@
 type(dst_type)    Casts all parameters and buffers to dst_type.
-zero_grad()    Sets gradients of all model parameters to zero.
+zero_grad([set_to_none])    Sets gradients of all model parameters to zero.
@@ -200,6 +200,14 @@
+_get_backward_hooks()    Returns the backward hooks for use in the call function. It returns two lists, one with the full backward hooks and one with the non-full backward hooks.
 _load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
    @@ -286,7 +294,7 @@

    netcal.regularization.ConfidencePenalty
    -add_module(name, module)
    +add_module(name: str, module: Optional[Module]) → None

    Adds a child module to the current module.

    The module can be accessed as an attribute using the given name.

    @@ -302,7 +310,7 @@

    netcal.regularization.ConfidencePenalty
    -apply(fn)
    +apply(fn: Callable[Module, None]) → T

    Applies fn recursively to every submodule (as returned by .children()) as well as self. Typical use includes initializing the parameters of a model (see also nn-init-doc).
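A minimal sketch of the typical initialization pattern (the toy layers below are invented for illustration):

import torch.nn as nn

def init_weights(m):
    # fn is called on every submodule and finally on the root module itself
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
        nn.init.zeros_(m.bias)

net = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2))
net.apply(init_weights)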

    @@ -348,8 +356,12 @@

    netcal.regularization.ConfidencePenalty
    -bfloat16()
    +bfloat16() → T

    Casts all floating point parameters and buffers to bfloat16 datatype.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Returns

    self

    @@ -362,7 +374,7 @@

    netcal.regularization.ConfidencePenalty
    -buffers(recurse=True)
    +buffers(recurse: bool = True) → Iterator[torch.Tensor]

    Returns an iterator over module buffers.

    Parameters
    @@ -385,7 +397,7 @@

    netcal.regularization.ConfidencePenalty
    -children()
    +children() → Iterator[torch.nn.modules.module.Module]

    Returns an iterator over immediate children modules.

    Yields
    @@ -396,8 +408,12 @@

    netcal.regularization.ConfidencePenalty
    -cpu()
    +cpu() → T

    Moves all model parameters and buffers to the CPU.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Returns

    self

    @@ -410,11 +426,15 @@

    netcal.regularization.ConfidencePenalty
    -cuda(device=None)
    +cuda(device: Union[int, torch.device, None] = None) → T

    Moves all model parameters and buffers to the GPU.

    This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Parameters

    device (int, optional) – if specified, all parameters will be @@ -431,8 +451,12 @@

    netcal.regularization.ConfidencePenalty
    -double()
    +double() → T

    Casts all floating point parameters and buffers to double datatype.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Returns

    self

    @@ -445,13 +469,15 @@

    netcal.regularization.ConfidencePenalty
    -eval()
    +eval() → T

    Sets the module in evaluation mode.

    This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. Dropout, BatchNorm, etc.

    This is equivalent with self.train(False).

    +

    See locally-disable-grad-doc for a comparison between +.eval() and several similar mechanisms that may be confused with it.

    Returns

    self
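A short sketch, assuming a Dropout layer, of what eval() changes:

import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(4, 4), nn.Dropout(p=0.5))
net.eval()                           # same as net.train(False)
x = torch.ones(1, 4)
print(torch.equal(net(x), net(x)))   # True: dropout is inactive in eval mode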

    @@ -464,17 +490,21 @@

    netcal.regularization.ConfidencePenalty
    -extra_repr()
    +extra_repr() → str

    Set the extra representation of the module

    -

    To print customized extra information, you should reimplement +

    To print customized extra information, you should re-implement this method in your own modules. Both single-line and multi-line strings are acceptable.

    -float()
    -

    Casts all floating point parameters and buffers to float datatype.

    +float() → T +

    Casts all floating point parameters and buffers to float datatype.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Returns

    self

    @@ -491,10 +521,110 @@

    netcal.regularization.ConfidencePenalty +
    +get_buffer(target: str) → torch.Tensor
    +

    Returns the buffer given by target if it exists, +otherwise throws an error.

    +

    See the docstring for get_submodule for a more detailed +explanation of this method’s functionality as well as how to +correctly specify target.

    +
    +
    Parameters
    +

    target – The fully-qualified string name of the buffer +to look for. (See get_submodule for how to specify a +fully-qualified string.)

    +
    +
    Returns
    +

    The buffer referenced by target

    +
    +
    Return type
    +

    torch.Tensor

    +
    +
    Raises
    +

    AttributeError – If the target string references an invalid + path or resolves to something that is not a + buffer

    +
    +
    +

    + +
    +
    +get_parameter(target: str) → torch.nn.parameter.Parameter
    +

    Returns the parameter given by target if it exists, +otherwise throws an error.

    +

    See the docstring for get_submodule for a more detailed +explanation of this method’s functionality as well as how to +correctly specify target.

    +
    +
    Parameters
    +

    target – The fully-qualified string name of the Parameter +to look for. (See get_submodule for how to specify a +fully-qualified string.)

    +
    +
    Returns
    +

    The Parameter referenced by target

    +
    +
    Return type
    +

    torch.nn.Parameter

    +
    +
    Raises
    +

    AttributeError – If the target string references an invalid + path or resolves to something that is not an + nn.Parameter

    +
    +
    +
    + +
    +
    +get_submodule(target: str) → torch.nn.modules.module.Module
    +

    Returns the submodule given by target if it exists, +otherwise throws an error.

    +

    For example, let’s say you have an nn.Module A that +looks like this:

    +

    (The diagram shows an nn.Module A. A has a nested +submodule net_b, which itself has two submodules net_c +and linear. net_c then has a submodule conv.)

    +

    To check whether or not we have the linear submodule, we +would call get_submodule("net_b.linear"). To check whether +we have the conv submodule, we would call +get_submodule("net_b.net_c.conv").

    +

    The runtime of get_submodule is bounded by the degree +of module nesting in target. A query against +named_modules achieves the same result, but it is O(N) in +the number of transitive modules. So, for a simple check to see +if some submodule exists, get_submodule should always be +used.

    +
    +
    Parameters
    +

    target – The fully-qualified string name of the submodule +to look for. (See above example for how to specify a +fully-qualified string.)

    +
    +
    Returns
    +

    The submodule referenced by target

    +
    +
    Return type
    +

    torch.nn.Module

    +
    +
    Raises
    +

    AttributeError – If the target string references an invalid + path or resolves to something that is not an + nn.Module
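A sketch reconstructing the docstring's example layout (the layer shapes are made up):

import torch.nn as nn

# mirror the layout above: A -> net_b -> (net_c -> conv, linear)
net_c = nn.Sequential()
net_c.add_module("conv", nn.Conv2d(16, 33, 3))
net_b = nn.Sequential()
net_b.add_module("net_c", net_c)
net_b.add_module("linear", nn.Linear(100, 200))
a = nn.Sequential()
a.add_module("net_b", net_b)

conv = a.get_submodule("net_b.net_c.conv")  # cost grows with nesting depth only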

    +
    +
    +
    +
    -half()
    +half() → T

    Casts all floating point parameters and buffers to half datatype.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Returns

    self

    @@ -507,7 +637,7 @@

    netcal.regularization.ConfidencePenalty
    -load_state_dict(state_dict, strict=True)
    +load_state_dict(state_dict: OrderedDict[str, Tensor], strict: bool = True)

    Copies parameters and buffers from state_dict into this module and its descendants. If strict is True, then the keys of state_dict must exactly match the keys returned @@ -537,7 +667,7 @@

    netcal.regularization.ConfidencePenalty
    -modules()
    +modules() → Iterator[torch.nn.modules.module.Module]

    Returns an iterator over all modules in the network.

    Yields
    @@ -566,7 +696,7 @@

    netcal.regularization.ConfidencePenalty
    -named_buffers(prefix='', recurse=True)
    +named_buffers(prefix: str = '', recurse: bool = True) → Iterator[Tuple[str, torch.Tensor]]

    Returns an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

    @@ -592,7 +722,7 @@

    netcal.regularization.ConfidencePenalty
    -named_children()
    +named_children() → Iterator[Tuple[str, torch.nn.modules.module.Module]]

    Returns an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

    @@ -610,12 +740,20 @@

    netcal.regularization.ConfidencePenalty
    -named_modules(memo=None, prefix='')
    +named_modules(memo: Optional[Set[Module]] = None, prefix: str = '', remove_duplicate: bool = True)

    Returns an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

    -
    Yields
    -

    (string, Module) – Tuple of name and module

    +
    Parameters
    +
      +
    • memo – a memo to store the set of modules already added to the result

    • +
    • prefix – a prefix that will be added to the name of the module

    • +
• remove_duplicate – whether to remove the duplicated module instances in the result or not
    +
    +
    Yields
    +

    (string, Module) – Tuple of name and module
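A small usage sketch (toy container invented for illustration):

import torch.nn as nn

net = nn.Sequential(nn.Linear(2, 2), nn.ReLU())
for name, module in net.named_modules():
    print(name or "<root>", type(module).__name__)
# prints: <root> Sequential / 0 Linear / 1 ReLU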

    @@ -640,7 +778,7 @@

    netcal.regularization.ConfidencePenalty
    -named_parameters(prefix='', recurse=True)
    +named_parameters(prefix: str = '', recurse: bool = True) → Iterator[Tuple[str, torch.nn.parameter.Parameter]]

    Returns an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

    @@ -666,7 +804,7 @@

    netcal.regularization.ConfidencePenalty
    -parameters(recurse=True)
    +parameters(recurse: bool = True) → Iterator[torch.nn.parameter.Parameter]

    Returns an iterator over module parameters.

    This is typically passed to an optimizer.
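For example (hypothetical one-layer module):

import torch
import torch.nn as nn

net = nn.Linear(4, 2)
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)  # optimizer consumes the iterator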

    @@ -690,18 +828,10 @@

    netcal.regularization.ConfidencePenalty
    -register_backward_hook(hook)
    +register_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, torch.Tensor]]) → torch.utils.hooks.RemovableHandle

    Registers a backward hook on the module.

    -

    The hook will be called every time the gradients with respect to module -inputs are computed. The hook should have the following signature:

    -
    hook(module, grad_input, grad_output) -> Tensor or None
    -
    -
    -

    The grad_input and grad_output may be tuples if the -module has multiple inputs or outputs. The hook should not modify its -arguments, but it can optionally return a new gradient with respect to -input that will be used in place of grad_input in subsequent -computations.

    +

    This function is deprecated in favor of nn.Module.register_full_backward_hook() and +the behavior of this function will change in future versions.

    Returns

    a handle that can be used to remove the added hook by calling @@ -711,24 +841,20 @@

    netcal.regularization.ConfidencePenalty

    torch.utils.hooks.RemovableHandle

    -
    -

    Warning

    -

    The current implementation will not have the presented behavior -for complex Module that perform many operations. -In some failure cases, grad_input and grad_output will only -contain the gradients for a subset of the inputs and outputs. -For such Module, you should use torch.Tensor.register_hook() -directly on a specific input or output to get the required gradients.

    -

    -register_buffer(name, tensor)
    -

    Adds a persistent buffer to the module.

    +register_buffer(name: str, tensor: Optional[torch.Tensor], persistent: bool = True) → None +

    Adds a buffer to the module.

This is typically used to register a buffer that should not be considered a model parameter. For example, BatchNorm’s running_mean -is not a parameter, but is part of the persistent state.

    +is not a parameter, but is part of the module’s state. Buffers, by +default, are persistent and will be saved alongside parameters. This +behavior can be changed by setting persistent to False. The +only difference between a persistent buffer and a non-persistent buffer +is that the latter will not be a part of this module’s +state_dict.

    Buffers can be accessed as attributes using given names.

    Parameters
    @@ -736,6 +862,8 @@

netcal.regularization.ConfidencePenalty
+persistent (bool) – whether the buffer is part of this module's
+state_dict.
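A sketch of persistent vs. non-persistent buffers (the RunningStat module is a made-up example):

import torch
import torch.nn as nn

class RunningStat(nn.Module):
    def __init__(self):
        super().__init__()
        self.register_buffer("mean", torch.zeros(1))                       # persistent: saved
        self.register_buffer("scratch", torch.zeros(1), persistent=False)  # excluded from state_dict

print(list(RunningStat().state_dict().keys()))  # ['mean']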

    @@ -747,14 +875,16 @@

    netcal.regularization.ConfidencePenalty
    -register_forward_hook(hook)
    +register_forward_hook(hook: Callable[..., None]) → torch.utils.hooks.RemovableHandle

    Registers a forward hook on the module.

    The hook will be called every time after forward() has computed an output. It should have the following signature:

    hook(module, input, output) -> None or modified output
     
    -

    The hook can modify the output. It can modify the input inplace but +

    The input contains only the positional arguments given to the module. +Keyword arguments won’t be passed to the hooks and only to the forward. +The hook can modify the output. It can modify the input inplace but it will not have effect on forward since this is called after forward() is called.
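A sketch of the usual activation-capture pattern (toy network invented for illustration):

import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
captured = {}

def save_activation(module, inputs, output):
    captured["relu"] = output.detach()  # inputs holds only the positional args

handle = net[1].register_forward_hook(save_activation)
net(torch.randn(1, 4))
handle.remove()  # remove hooks you no longer need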

    @@ -770,14 +900,16 @@

    netcal.regularization.ConfidencePenalty
    -register_forward_pre_hook(hook)
    +register_forward_pre_hook(hook: Callable[..., None]) → torch.utils.hooks.RemovableHandle

    Registers a forward pre-hook on the module.

    The hook will be called every time before forward() is invoked. It should have the following signature:

    hook(module, input) -> None or modified input
     
    -

    The hook can modify the input. User can either return a tuple or a +

    The input contains only the positional arguments given to the module. +Keyword arguments won’t be passed to the hooks and only to the forward. +The hook can modify the input. User can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if a single value is returned(unless that value is already a tuple).
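A sketch of input rewriting with a pre-hook (the normalization is chosen arbitrarily):

import torch
import torch.nn as nn

net = nn.Linear(4, 2)

def normalize_input(module, inputs):
    x, = inputs                  # tuple of positional arguments
    return (x / x.norm(),)       # returning a tuple replaces the input

handle = net.register_forward_pre_hook(normalize_input)
out = net(torch.randn(1, 4))
handle.remove()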

    @@ -791,9 +923,42 @@

    netcal.regularization.ConfidencePenalty +
    +register_full_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, torch.Tensor]]) → torch.utils.hooks.RemovableHandle
    +

    Registers a backward hook on the module.

    +

    The hook will be called every time the gradients with respect to module +inputs are computed. The hook should have the following signature:

    +
    hook(module, grad_input, grad_output) -> tuple(Tensor) or None
    +
    +
    +

    The grad_input and grad_output are tuples that contain the gradients +with respect to the inputs and outputs respectively. The hook should +not modify its arguments, but it can optionally return a new gradient with +respect to the input that will be used in place of grad_input in +subsequent computations. grad_input will only correspond to the inputs given +as positional arguments and all kwarg arguments are ignored. Entries +in grad_input and grad_output will be None for all non-Tensor +arguments.

    +
    +

    Warning

    +

    Modifying inputs or outputs inplace is not allowed when using backward hooks and +will raise an error.

    +
    +
    +
    Returns
    +

    a handle that can be used to remove the added hook by calling +handle.remove()

    +
    +
    Return type
    +

    torch.utils.hooks.RemovableHandle
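A sketch of a gradient-logging hook (toy layer invented for illustration):

import torch
import torch.nn as nn

net = nn.Linear(4, 2)

def log_grad(module, grad_input, grad_output):
    # both arguments are tuples; entries can be None for non-Tensor arguments
    print("output grad norm:", grad_output[0].norm().item())

handle = net.register_full_backward_hook(log_grad)
net(torch.randn(3, 4)).sum().backward()
handle.remove()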

    +
    +
    +

    +
    -register_parameter(name, param)
    +register_parameter(name: str, param: Optional[torch.nn.parameter.Parameter]) → None

    Adds a parameter to the module.

    The parameter can be accessed as an attribute using given name.

    @@ -809,13 +974,15 @@

    netcal.regularization.ConfidencePenalty
    -requires_grad_(requires_grad=True)
    +requires_grad_(requires_grad: bool = True) → T

    Change if autograd should record operations on parameters in this module.

    This method sets the parameters’ requires_grad attributes in-place.

    This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training).

    +

    See locally-disable-grad-doc for a comparison between +.requires_grad_() and several similar mechanisms that may be confused with it.
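A freezing sketch (the backbone module is hypothetical):

import torch.nn as nn

backbone = nn.Linear(16, 8)
backbone.requires_grad_(False)  # freeze these parameters, e.g. for fine-tuning
assert all(not p.requires_grad for p in backbone.parameters())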

    Parameters

    requires_grad (bool) – whether autograd should record operations on @@ -830,6 +997,12 @@

    netcal.regularization.ConfidencePenalty +
    +share_memory() → T
    +

    See torch.Tensor.share_memory_()

    +

    +
    state_dict(destination=None, prefix='', keep_vars=False)
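A round-trip sketch using state_dict() together with load_state_dict() above:

import torch
import torch.nn as nn

src = nn.Linear(4, 2)
dst = nn.Linear(4, 2)
dst.load_state_dict(src.state_dict())       # strict=True: key sets must match
print(torch.equal(src.weight, dst.weight))  # True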
    @@ -877,8 +1050,8 @@

netcal.regularization.ConfidencePenalty
torch.Tensor.to(), but only accepts
-floating point desired dtypes. In addition, this method will
-only cast the floating point parameters and buffers to dtype
+floating point or complex dtypes. In addition, this method will
+only cast the floating point or complex parameters and buffers to dtype
 (if given). The integral parameters and buffers will be moved to
 device, if that is given, but with dtypes unchanged. When
 non_blocking is set, it tries to convert/move asynchronously
@@ -894,8 +1067,8 @@

    netcal.regularization.ConfidencePenalty

    -

    Example:

    +

    Examples:

    +
    +
    +to_empty(*, device: Union[str, torch.device]) → T
    +

    Moves the parameters and buffers to the specified device without copying storage.

    +
    +
    Parameters
    +

    device (torch.device) – The desired device of the parameters +and buffers in this module.

    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module

    +
    +
    +
    +
    -train(mode=True)
    +train(mode: bool = True) → T

    Sets the module in training mode.

    This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation @@ -964,8 +1165,12 @@

    netcal.regularization.ConfidencePenalty
    -type(dst_type)
    +type(dst_type: Union[torch.dtype, str]) → T

    Casts all parameters and buffers to dst_type.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Parameters

    dst_type (type or string) – the desired type

    @@ -979,10 +1184,42 @@

    netcal.regularization.ConfidencePenalty +
    +xpu(device: Union[int, torch.device, None] = None) → T
    +

    Moves all model parameters and buffers to the XPU.

    +

    This also makes associated parameters and buffers different objects. So +it should be called before constructing optimizer if the module will +live on XPU while being optimized.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    +
    +
    Parameters
    +

    device (int, optional) – if specified, all parameters will be +copied to that device

    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module

    +
    +
    +

    +
    -zero_grad()
    -

    Sets gradients of all model parameters to zero.

    +zero_grad(set_to_none: bool = False) → None +

    Sets gradients of all model parameters to zero. See similar function +under torch.optim.Optimizer for more context.

    +
    +
    Parameters
    +

    set_to_none (bool) – instead of setting to zero, set the grads to None. +See torch.optim.Optimizer.zero_grad() for details.
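A sketch of the set_to_none behavior:

import torch
import torch.nn as nn

net = nn.Linear(4, 2)
net(torch.randn(1, 4)).sum().backward()
net.zero_grad(set_to_none=True)  # grads become None instead of zero tensors
print(net.weight.grad)           # None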

    +
    +

diff --git a/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.DCAPenalty.html b/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.DCAPenalty.html
index 1fa3904..0538992 100644
--- a/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.DCAPenalty.html
+++ b/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.DCAPenalty.html
@@ -4,7 +4,7 @@
- netcal.regularization.DCAPenalty — calibration-framework 1.2.0 documentation
+ netcal.regularization.DCAPenalty — calibration-framework 1.2.1 documentation
@@ -113,7 +113,7 @@

netcal.regularization.DCAPenalty
 float()    Casts all floating point parameters and buffers to float datatype.
 forward(input, target)    Forward call of module.
@@ -133,7 +133,7 @@
 named_children()    Returns an iterator over immediate children modules, yielding both the name of the module as well as the module itself.
-named_modules([memo, prefix])    Returns an iterator over all modules in the network, yielding both the name of the module as well as the module itself.
+named_modules([memo, prefix, remove_duplicate])    Returns an iterator over all modules in the network, yielding both the name of the module as well as the module itself.
 named_parameters([prefix, recurse])
@@ -145,8 +145,8 @@
 register_backward_hook(hook)    Registers a backward hook on the module.
-register_buffer(name, tensor)    Adds a persistent buffer to the module.
+register_buffer(name, tensor[, persistent])    Adds a buffer to the module.
 register_forward_hook(hook)    Registers a forward hook on the module.
@@ -161,7 +161,7 @@
-share_memory()
+share_memory()    See torch.Tensor.share_memory_()
 state_dict([destination, prefix, keep_vars])    Returns a dictionary containing a whole state of the module.
@@ -175,7 +175,7 @@
 type(dst_type)    Casts all parameters and buffers to dst_type.
-zero_grad()    Sets gradients of all model parameters to zero.
+zero_grad([set_to_none])    Sets gradients of all model parameters to zero.
@@ -192,6 +192,14 @@
+_get_backward_hooks()    Returns the backward hooks for use in the call function. It returns two lists, one with the full backward hooks and one with the non-full backward hooks.
 _load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
    @@ -278,7 +286,7 @@

    netcal.regularization.DCAPenalty
    -add_module(name, module)
    +add_module(name: str, module: Optional[Module]) → None

    Adds a child module to the current module.

    The module can be accessed as an attribute using the given name.

    @@ -294,7 +302,7 @@

    netcal.regularization.DCAPenalty
    -apply(fn)
    +apply(fn: Callable[Module, None]) → T

    Applies fn recursively to every submodule (as returned by .children()) as well as self. Typical use includes initializing the parameters of a model (see also nn-init-doc).

    @@ -340,8 +348,12 @@

    netcal.regularization.DCAPenalty
    -bfloat16()
    +bfloat16() → T

    Casts all floating point parameters and buffers to bfloat16 datatype.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Returns

    self

    @@ -354,7 +366,7 @@

    netcal.regularization.DCAPenalty
    -buffers(recurse=True)
    +buffers(recurse: bool = True) → Iterator[torch.Tensor]

    Returns an iterator over module buffers.

    Parameters
    @@ -377,7 +389,7 @@

    netcal.regularization.DCAPenalty
    -children()
    +children() → Iterator[torch.nn.modules.module.Module]

    Returns an iterator over immediate children modules.

    Yields
    @@ -388,8 +400,12 @@

    netcal.regularization.DCAPenalty
    -cpu()
    +cpu() → T

    Moves all model parameters and buffers to the CPU.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Returns

    self

    @@ -402,11 +418,15 @@

    netcal.regularization.DCAPenalty
    -cuda(device=None)
    +cuda(device: Union[int, torch.device, None] = None) → T

    Moves all model parameters and buffers to the GPU.

    This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Parameters

    device (int, optional) – if specified, all parameters will be @@ -423,8 +443,12 @@

    netcal.regularization.DCAPenalty
    -double()
    +double() → T

    Casts all floating point parameters and buffers to double datatype.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Returns

    self

    @@ -437,13 +461,15 @@

    netcal.regularization.DCAPenalty
    -eval()
    +eval() → T

    Sets the module in evaluation mode.

    This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. Dropout, BatchNorm, etc.

    This is equivalent with self.train(False).

    +

    See locally-disable-grad-doc for a comparison between +.eval() and several similar mechanisms that may be confused with it.

    Returns

    self

    @@ -456,17 +482,21 @@

    netcal.regularization.DCAPenalty
    -extra_repr()
    +extra_repr() → str

    Set the extra representation of the module

    -

    To print customized extra information, you should reimplement +

    To print customized extra information, you should re-implement this method in your own modules. Both single-line and multi-line strings are acceptable.

    -float()
    -

    Casts all floating point parameters and buffers to float datatype.

    +float() → T +

    Casts all floating point parameters and buffers to float datatype.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Returns

    self

    @@ -483,10 +513,110 @@

    netcal.regularization.DCAPenalty +
    +get_buffer(target: str) → torch.Tensor
    +

    Returns the buffer given by target if it exists, +otherwise throws an error.

    +

    See the docstring for get_submodule for a more detailed +explanation of this method’s functionality as well as how to +correctly specify target.

    +
    +
    Parameters
    +

    target – The fully-qualified string name of the buffer +to look for. (See get_submodule for how to specify a +fully-qualified string.)

    +
    +
    Returns
    +

    The buffer referenced by target

    +
    +
    Return type
    +

    torch.Tensor

    +
    +
    Raises
    +

    AttributeError – If the target string references an invalid + path or resolves to something that is not a + buffer

    +
    +
    +

    + +
    +
    +get_parameter(target: str) → torch.nn.parameter.Parameter
    +

    Returns the parameter given by target if it exists, +otherwise throws an error.

    +

    See the docstring for get_submodule for a more detailed +explanation of this method’s functionality as well as how to +correctly specify target.

    +
    +
    Parameters
    +

    target – The fully-qualified string name of the Parameter +to look for. (See get_submodule for how to specify a +fully-qualified string.)

    +
    +
    Returns
    +

    The Parameter referenced by target

    +
    +
    Return type
    +

    torch.nn.Parameter

    +
    +
    Raises
    +

    AttributeError – If the target string references an invalid + path or resolves to something that is not an + nn.Parameter

    +
    +
    +
    + +
    +
    +get_submodule(target: str) → torch.nn.modules.module.Module
    +

    Returns the submodule given by target if it exists, +otherwise throws an error.

    +

    For example, let’s say you have an nn.Module A that +looks like this:

    +

    (The diagram shows an nn.Module A. A has a nested +submodule net_b, which itself has two submodules net_c +and linear. net_c then has a submodule conv.)

    +

    To check whether or not we have the linear submodule, we +would call get_submodule("net_b.linear"). To check whether +we have the conv submodule, we would call +get_submodule("net_b.net_c.conv").

    +

    The runtime of get_submodule is bounded by the degree +of module nesting in target. A query against +named_modules achieves the same result, but it is O(N) in +the number of transitive modules. So, for a simple check to see +if some submodule exists, get_submodule should always be +used.

    +
    +
    Parameters
    +

    target – The fully-qualified string name of the submodule +to look for. (See above example for how to specify a +fully-qualified string.)

    +
    +
    Returns
    +

    The submodule referenced by target

    +
    +
    Return type
    +

    torch.nn.Module

    +
    +
    Raises
    +

    AttributeError – If the target string references an invalid + path or resolves to something that is not an + nn.Module

    +
    +
    +
    +
    -half()
    +half() → T

    Casts all floating point parameters and buffers to half datatype.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Returns

    self

    @@ -499,7 +629,7 @@

    netcal.regularization.DCAPenalty
    -load_state_dict(state_dict, strict=True)
    +load_state_dict(state_dict: OrderedDict[str, Tensor], strict: bool = True)

Copies parameters and buffers from state_dict into this module and its descendants. If strict is True, then the keys of state_dict must exactly match the keys returned by this module's state_dict() function.
@@ -529,7 +659,7 @@

    netcal.regularization.DCAPenalty
    -modules()
    +modules() → Iterator[torch.nn.modules.module.Module]

    Returns an iterator over all modules in the network.

    Yields
    @@ -558,7 +688,7 @@

    netcal.regularization.DCAPenalty
    -named_buffers(prefix='', recurse=True)
    +named_buffers(prefix: str = '', recurse: bool = True) → Iterator[Tuple[str, torch.Tensor]]

    Returns an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

    @@ -584,7 +714,7 @@

    netcal.regularization.DCAPenalty
    -named_children()
    +named_children() → Iterator[Tuple[str, torch.nn.modules.module.Module]]

    Returns an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

    @@ -602,12 +732,20 @@

    netcal.regularization.DCAPenalty
    -named_modules(memo=None, prefix='')
    +named_modules(memo: Optional[Set[Module]] = None, prefix: str = '', remove_duplicate: bool = True)

    Returns an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

    -
    Yields
    -

    (string, Module) – Tuple of name and module

    +
    Parameters
    +
      +
    • memo – a memo to store the set of modules already added to the result

    • +
    • prefix – a prefix that will be added to the name of the module

    • +
• remove_duplicate – whether to remove the duplicated module instances in the result or not
    +
    +
    Yields
    +

    (string, Module) – Tuple of name and module

    @@ -632,7 +770,7 @@

    netcal.regularization.DCAPenalty
    -named_parameters(prefix='', recurse=True)
    +named_parameters(prefix: str = '', recurse: bool = True) → Iterator[Tuple[str, torch.nn.parameter.Parameter]]

    Returns an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

    @@ -658,7 +796,7 @@

    netcal.regularization.DCAPenalty
    -parameters(recurse=True)
    +parameters(recurse: bool = True) → Iterator[torch.nn.parameter.Parameter]

    Returns an iterator over module parameters.

    This is typically passed to an optimizer.

    @@ -682,18 +820,10 @@

    netcal.regularization.DCAPenalty
    -register_backward_hook(hook)
    +register_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, torch.Tensor]]) → torch.utils.hooks.RemovableHandle

    Registers a backward hook on the module.

    -

    The hook will be called every time the gradients with respect to module -inputs are computed. The hook should have the following signature:

    -
    hook(module, grad_input, grad_output) -> Tensor or None
    -
    -
    -

    The grad_input and grad_output may be tuples if the -module has multiple inputs or outputs. The hook should not modify its -arguments, but it can optionally return a new gradient with respect to -input that will be used in place of grad_input in subsequent -computations.

    +

    This function is deprecated in favor of nn.Module.register_full_backward_hook() and +the behavior of this function will change in future versions.

    Returns

    a handle that can be used to remove the added hook by calling @@ -703,24 +833,20 @@

    netcal.regularization.DCAPenalty

    torch.utils.hooks.RemovableHandle

    -
    -

    Warning

    -

    The current implementation will not have the presented behavior -for complex Module that perform many operations. -In some failure cases, grad_input and grad_output will only -contain the gradients for a subset of the inputs and outputs. -For such Module, you should use torch.Tensor.register_hook() -directly on a specific input or output to get the required gradients.

    -

    -register_buffer(name, tensor)
    -

    Adds a persistent buffer to the module.

    +register_buffer(name: str, tensor: Optional[torch.Tensor], persistent: bool = True) → None +

    Adds a buffer to the module.

This is typically used to register a buffer that should not be considered a model parameter. For example, BatchNorm’s running_mean -is not a parameter, but is part of the persistent state.

    +is not a parameter, but is part of the module’s state. Buffers, by +default, are persistent and will be saved alongside parameters. This +behavior can be changed by setting persistent to False. The +only difference between a persistent buffer and a non-persistent buffer +is that the latter will not be a part of this module’s +state_dict.

    Buffers can be accessed as attributes using given names.

    Parameters
    @@ -728,6 +854,8 @@

netcal.regularization.DCAPenalty
+persistent (bool) – whether the buffer is part of this module's
+state_dict.

    @@ -739,14 +867,16 @@

    netcal.regularization.DCAPenalty
    -register_forward_hook(hook)
    +register_forward_hook(hook: Callable[..., None]) → torch.utils.hooks.RemovableHandle

    Registers a forward hook on the module.

    The hook will be called every time after forward() has computed an output. It should have the following signature:

    hook(module, input, output) -> None or modified output
     
    -

    The hook can modify the output. It can modify the input inplace but +

    The input contains only the positional arguments given to the module. +Keyword arguments won’t be passed to the hooks and only to the forward. +The hook can modify the output. It can modify the input inplace but it will not have effect on forward since this is called after forward() is called.

    @@ -762,14 +892,16 @@

    netcal.regularization.DCAPenalty
    -register_forward_pre_hook(hook)
    +register_forward_pre_hook(hook: Callable[..., None]) → torch.utils.hooks.RemovableHandle

    Registers a forward pre-hook on the module.

    The hook will be called every time before forward() is invoked. It should have the following signature:

    hook(module, input) -> None or modified input
     
    -

    The hook can modify the input. User can either return a tuple or a +

    The input contains only the positional arguments given to the module. +Keyword arguments won’t be passed to the hooks and only to the forward. +The hook can modify the input. User can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if a single value is returned(unless that value is already a tuple).

    @@ -783,9 +915,42 @@

    netcal.regularization.DCAPenalty +
    +register_full_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, torch.Tensor]]) → torch.utils.hooks.RemovableHandle
    +

    Registers a backward hook on the module.

    +

    The hook will be called every time the gradients with respect to module +inputs are computed. The hook should have the following signature:

    +
    hook(module, grad_input, grad_output) -> tuple(Tensor) or None
    +
    +
    +

    The grad_input and grad_output are tuples that contain the gradients +with respect to the inputs and outputs respectively. The hook should +not modify its arguments, but it can optionally return a new gradient with +respect to the input that will be used in place of grad_input in +subsequent computations. grad_input will only correspond to the inputs given +as positional arguments and all kwarg arguments are ignored. Entries +in grad_input and grad_output will be None for all non-Tensor +arguments.

    +
    +

    Warning

    +

    Modifying inputs or outputs inplace is not allowed when using backward hooks and +will raise an error.

    +
    +
    +
    Returns
    +

    a handle that can be used to remove the added hook by calling +handle.remove()

    +
    +
    Return type
    +

    torch.utils.hooks.RemovableHandle

    +
    +
    +

    +
    -register_parameter(name, param)
    +register_parameter(name: str, param: Optional[torch.nn.parameter.Parameter]) → None

    Adds a parameter to the module.

    The parameter can be accessed as an attribute using given name.

    @@ -801,13 +966,15 @@

    netcal.regularization.DCAPenalty
    -requires_grad_(requires_grad=True)
    +requires_grad_(requires_grad: bool = True) → T

    Change if autograd should record operations on parameters in this module.

    This method sets the parameters’ requires_grad attributes in-place.

    This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training).

    +

    See locally-disable-grad-doc for a comparison between +.requires_grad_() and several similar mechanisms that may be confused with it.

    Parameters

    requires_grad (bool) – whether autograd should record operations on @@ -822,6 +989,12 @@

    netcal.regularization.DCAPenalty +
    +share_memory() → T
    +

    See torch.Tensor.share_memory_()

    +

    +
    state_dict(destination=None, prefix='', keep_vars=False)
    @@ -869,8 +1042,8 @@

netcal.regularization.DCAPenalty
torch.Tensor.to(), but only accepts
-floating point desired dtypes. In addition, this method will
-only cast the floating point parameters and buffers to dtype
+floating point or complex dtypes. In addition, this method will
+only cast the floating point or complex parameters and buffers to dtype
 (if given). The integral parameters and buffers will be moved to
 device, if that is given, but with dtypes unchanged. When
 non_blocking is set, it tries to convert/move asynchronously
@@ -886,8 +1059,8 @@

    netcal.regularization.DCAPenalty

    -

    Example:

    +

    Examples:

    +
    +
    +to_empty(*, device: Union[str, torch.device]) → T
    +

    Moves the parameters and buffers to the specified device without copying storage.

    +
    +
    Parameters
    +

    device (torch.device) – The desired device of the parameters +and buffers in this module.

    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module

    +
    +
    +
    +
    -train(mode=True)
    +train(mode: bool = True) → T

    Sets the module in training mode.

    This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation @@ -956,8 +1157,12 @@

    netcal.regularization.DCAPenalty
    -type(dst_type)
    +type(dst_type: Union[torch.dtype, str]) → T

    Casts all parameters and buffers to dst_type.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Parameters

    dst_type (type or string) – the desired type

    @@ -971,10 +1176,42 @@

    netcal.regularization.DCAPenalty +
    +xpu(device: Union[int, torch.device, None] = None) → T
    +

    Moves all model parameters and buffers to the XPU.

    +

    This also makes associated parameters and buffers different objects. So +it should be called before constructing optimizer if the module will +live on XPU while being optimized.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    +
    +
    Parameters
    +

    device (int, optional) – if specified, all parameters will be +copied to that device

    +
    +
    Returns
    +

    self

    +
    +
    Return type
    +

    Module

    +
    +
    +

    +
    -zero_grad()
    -

    Sets gradients of all model parameters to zero.

    +zero_grad(set_to_none: bool = False) → None +

    Sets gradients of all model parameters to zero. See similar function +under torch.optim.Optimizer for more context.

    +
    +
    Parameters
    +

    set_to_none (bool) – instead of setting to zero, set the grads to None. +See torch.optim.Optimizer.zero_grad() for details.

    +
    +

diff --git a/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.MMCEPenalty.html b/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.MMCEPenalty.html
index e0fea94..cb8657e 100644
--- a/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.MMCEPenalty.html
+++ b/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.MMCEPenalty.html
@@ -4,7 +4,7 @@
- netcal.regularization.MMCEPenalty — calibration-framework 1.2.0 documentation
+ netcal.regularization.MMCEPenalty — calibration-framework 1.2.1 documentation
@@ -114,7 +114,7 @@

netcal.regularization.MMCEPenalty
 float()    Casts all floating point parameters and buffers to float datatype.
 forward(input, target)    Forward call of module.
@@ -137,7 +137,7 @@
 named_children()    Returns an iterator over immediate children modules, yielding both the name of the module as well as the module itself.
-named_modules([memo, prefix])    Returns an iterator over all modules in the network, yielding both the name of the module as well as the module itself.
+named_modules([memo, prefix, remove_duplicate])    Returns an iterator over all modules in the network, yielding both the name of the module as well as the module itself.
 named_parameters([prefix, recurse])
@@ -149,8 +149,8 @@
 register_backward_hook(hook)    Registers a backward hook on the module.
-register_buffer(name, tensor)    Adds a persistent buffer to the module.
+register_buffer(name, tensor[, persistent])    Adds a buffer to the module.
 register_forward_hook(hook)    Registers a forward hook on the module.
@@ -165,7 +165,7 @@
-share_memory()
+share_memory()    See torch.Tensor.share_memory_()
 state_dict([destination, prefix, keep_vars])    Returns a dictionary containing a whole state of the module.
@@ -179,7 +179,7 @@
 type(dst_type)    Casts all parameters and buffers to dst_type.
-zero_grad()    Sets gradients of all model parameters to zero.
+zero_grad([set_to_none])    Sets gradients of all model parameters to zero.
@@ -199,6 +199,14 @@
+_get_backward_hooks()    Returns the backward hooks for use in the call function. It returns two lists, one with the full backward hooks and one with the non-full backward hooks.
 _load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
    @@ -285,7 +293,7 @@

    netcal.regularization.MMCEPenalty
    -add_module(name, module)
    +add_module(name: str, module: Optional[Module]) → None

    Adds a child module to the current module.

    The module can be accessed as an attribute using the given name.

    @@ -301,7 +309,7 @@

    netcal.regularization.MMCEPenalty
    -apply(fn)
    +apply(fn: Callable[Module, None]) → T

    Applies fn recursively to every submodule (as returned by .children()) as well as self. Typical use includes initializing the parameters of a model (see also nn-init-doc).

    @@ -347,8 +355,12 @@

    netcal.regularization.MMCEPenalty
    -bfloat16()
    +bfloat16() → T

    Casts all floating point parameters and buffers to bfloat16 datatype.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Returns

    self

    @@ -361,7 +373,7 @@

    netcal.regularization.MMCEPenalty
    -buffers(recurse=True)
    +buffers(recurse: bool = True) → Iterator[torch.Tensor]

    Returns an iterator over module buffers.

    Parameters
    @@ -384,7 +396,7 @@

    netcal.regularization.MMCEPenalty
    -children()
    +children() → Iterator[torch.nn.modules.module.Module]

    Returns an iterator over immediate children modules.

    Yields
    @@ -395,8 +407,12 @@

    netcal.regularization.MMCEPenalty
    -cpu()
    +cpu() → T

    Moves all model parameters and buffers to the CPU.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Returns

    self

    @@ -409,11 +425,15 @@

    netcal.regularization.MMCEPenalty
    -cuda(device=None)
    +cuda(device: Union[int, torch.device, None] = None) → T

    Moves all model parameters and buffers to the GPU.

    This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Parameters

    device (int, optional) – if specified, all parameters will be @@ -430,8 +450,12 @@

    netcal.regularization.MMCEPenalty
    -double()
    +double() → T

    Casts all floating point parameters and buffers to double datatype.

    +
    +

    Note

    +

    This method modifies the module in-place.

    +
    Returns

    self

    @@ -444,13 +468,15 @@

    netcal.regularization.MMCEPenalty
    -eval()
    +eval() → T

    Sets the module in evaluation mode.

    This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. Dropout, BatchNorm, etc.

    This is equivalent with self.train(False).

    +

    See locally-disable-grad-doc for a comparison between +.eval() and several similar mechanisms that may be confused with it.

    Returns

    self

    @@ -463,17 +489,21 @@

    netcal.regularization.MMCEPenalty
    -extra_repr()
    +extra_repr() → str

Sets the extra representation of the module.

To print customized extra information, you should re-implement this method in your own modules. Both single-line and multi-line strings are acceptable.

-float()
+float() → T

Casts all floating point parameters and buffers to float datatype.

Note

This method modifies the module in-place.

Returns

self

    @@ -490,10 +520,110 @@

netcal.regularization.MMCEPenalty

+get_buffer(target: str) → torch.Tensor

Returns the buffer given by target if it exists, otherwise throws an error.

See the docstring for get_submodule for a more detailed explanation of this method’s functionality as well as how to correctly specify target.

Parameters

target – The fully-qualified string name of the buffer to look for. (See get_submodule for how to specify a fully-qualified string.)

Returns

The buffer referenced by target

Return type

torch.Tensor

Raises

AttributeError – If the target string references an invalid path or resolves to something that is not a buffer

+get_parameter(target: str) → torch.nn.parameter.Parameter

Returns the parameter given by target if it exists, otherwise throws an error.

See the docstring for get_submodule for a more detailed explanation of this method’s functionality as well as how to correctly specify target.

Parameters

target – The fully-qualified string name of the Parameter to look for. (See get_submodule for how to specify a fully-qualified string.)

Returns

The Parameter referenced by target

Return type

torch.nn.Parameter

Raises

AttributeError – If the target string references an invalid path or resolves to something that is not an nn.Parameter
+get_submodule(target: str) → torch.nn.modules.module.Module

Returns the submodule given by target if it exists, otherwise throws an error.

For example, let’s say you have an nn.Module A that looks like this: (the diagram shows an nn.Module A with a nested submodule net_b, which itself has two submodules net_c and linear; net_c in turn has a submodule conv).

To check whether we have the linear submodule, we would call get_submodule("net_b.linear"). To check whether we have the conv submodule, we would call get_submodule("net_b.net_c.conv").

The runtime of get_submodule is bounded by the degree of module nesting in target. A query against named_modules achieves the same result, but it is O(N) in the number of transitive modules. So, for a simple check to see whether some submodule exists, get_submodule should always be used.
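The nesting described above can be sketched as follows (the class names mirror the diagram and are illustrative only):

import torch.nn as nn

class NetC(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(16, 33, 3)

class NetB(nn.Module):
    def __init__(self):
        super().__init__()
        self.net_c = NetC()
        self.linear = nn.Linear(100, 200)

class A(nn.Module):
    def __init__(self):
        super().__init__()
        self.net_b = NetB()

a = A()
conv = a.get_submodule("net_b.net_c.conv")       # the nn.Conv2d above
weight = a.get_parameter("net_b.linear.weight")  # get_parameter and get_buffer use the same addressing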

Parameters

target – The fully-qualified string name of the submodule to look for. (See above example for how to specify a fully-qualified string.)

Returns

The submodule referenced by target

Return type

torch.nn.Module

Raises

AttributeError – If the target string references an invalid path or resolves to something that is not an nn.Module
    -half()
    +half() → T

    Casts all floating point parameters and buffers to half datatype.

Note

This method modifies the module in-place.
    Returns

    self

    @@ -512,7 +642,7 @@

    netcal.regularization.MMCEPenalty
    -load_state_dict(state_dict, strict=True)
    +load_state_dict(state_dict: OrderedDict[str, Tensor], strict: bool = True)

Copies parameters and buffers from state_dict into this module and its descendants. If strict is True, then the keys of state_dict must exactly match the keys returned by this module’s state_dict() function.
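A short sketch of strict versus non-strict loading (the two Linear modules are placeholders):

import torch.nn as nn

src, dst = nn.Linear(4, 2), nn.Linear(4, 2)
dst.load_state_dict(src.state_dict())  # strict=True: keys must match exactly
# With strict=False, mismatches are reported instead of raising:
missing, unexpected = dst.load_state_dict(src.state_dict(), strict=False)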

    netcal.regularization.MMCEPenalty
    -modules()
    +modules() → Iterator[torch.nn.modules.module.Module]

    Returns an iterator over all modules in the network.

    Yields
    @@ -571,7 +701,7 @@

    netcal.regularization.MMCEPenalty
    -named_buffers(prefix='', recurse=True)
    +named_buffers(prefix: str = '', recurse: bool = True) → Iterator[Tuple[str, torch.Tensor]]

    Returns an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

    @@ -597,7 +727,7 @@

    netcal.regularization.MMCEPenalty
    -named_children()
    +named_children() → Iterator[Tuple[str, torch.nn.modules.module.Module]]

    Returns an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

    @@ -615,12 +745,20 @@

    netcal.regularization.MMCEPenalty
    -named_modules(memo=None, prefix='')
    +named_modules(memo: Optional[Set[Module]] = None, prefix: str = '', remove_duplicate: bool = True)

    Returns an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

Parameters

• memo – a memo to store the set of modules already added to the result

• prefix – a prefix that will be added to the name of the module

• remove_duplicate – whether or not to remove the duplicated module instances in the result

Yields

(string, Module) – Tuple of name and module
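A small sketch of the iteration (the Sequential is illustrative; note that the root module is yielded under the empty name):

import torch.nn as nn

net = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
for name, module in net.named_modules():
    print(name or "<root>", type(module).__name__)
# prints: <root> Sequential, 0 Linear, 1 ReLU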

    @@ -645,7 +783,7 @@

    netcal.regularization.MMCEPenalty
    -named_parameters(prefix='', recurse=True)
    +named_parameters(prefix: str = '', recurse: bool = True) → Iterator[Tuple[str, torch.nn.parameter.Parameter]]

    Returns an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

    @@ -671,7 +809,7 @@

    netcal.regularization.MMCEPenalty
    -parameters(recurse=True)
    +parameters(recurse: bool = True) → Iterator[torch.nn.parameter.Parameter]

    Returns an iterator over module parameters.

    This is typically passed to an optimizer.

    @@ -695,18 +833,10 @@

    netcal.regularization.MMCEPenalty
    -register_backward_hook(hook)
    +register_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, torch.Tensor]]) → torch.utils.hooks.RemovableHandle

    Registers a backward hook on the module.

-The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature:

-hook(module, grad_input, grad_output) -> Tensor or None

-The grad_input and grad_output may be tuples if the module has multiple inputs or outputs. The hook should not modify its arguments, but it can optionally return a new gradient with respect to input that will be used in place of grad_input in subsequent computations.

+This function is deprecated in favor of nn.Module.register_full_backward_hook() and the behavior of this function will change in future versions.

    Returns

a handle that can be used to remove the added hook by calling handle.remove()

    netcal.regularization.MMCEPenalty

    torch.utils.hooks.RemovableHandle

-Warning

-The current implementation will not have the presented behavior for complex Modules that perform many operations. In some failure cases, grad_input and grad_output will only contain the gradients for a subset of the inputs and outputs. For such Modules, you should use torch.Tensor.register_hook() directly on a specific input or output to get the required gradients.

-register_buffer(name, tensor)
+register_buffer(name: str, tensor: Optional[torch.Tensor], persistent: bool = True) → None

-Adds a persistent buffer to the module.
+Adds a buffer to the module.

This is typically used to register a buffer that should not be considered a model parameter. For example, BatchNorm’s running_mean is not a parameter, but is part of the module’s state. Buffers, by default, are persistent and will be saved alongside parameters. This behavior can be changed by setting persistent to False. The only difference between a persistent buffer and a non-persistent buffer is that the latter will not be a part of this module’s state_dict.

    Buffers can be accessed as attributes using given names.
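A sketch contrasting persistent and non-persistent buffers (the module is invented for illustration):

import torch
import torch.nn as nn

class RunningStat(nn.Module):
    def __init__(self):
        super().__init__()
        # Saved in state_dict, but never yielded by .parameters():
        self.register_buffer("running_mean", torch.zeros(10))
        # Excluded from state_dict entirely:
        self.register_buffer("scratch", torch.zeros(10), persistent=False)

m = RunningStat()
assert "running_mean" in m.state_dict()
assert "scratch" not in m.state_dict()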

    Parameters
    @@ -741,6 +867,8 @@

netcal.regularization.MMCEPenalty

    @@ -752,14 +880,16 @@

    netcal.regularization.MMCEPenalty
    -register_forward_hook(hook)
    +register_forward_hook(hook: Callable[..., None]) → torch.utils.hooks.RemovableHandle

    Registers a forward hook on the module.

    The hook will be called every time after forward() has computed an output. It should have the following signature:

    hook(module, input, output) -> None or modified output
     
The input contains only the positional arguments given to the module. Keyword arguments won’t be passed to the hooks and only to the forward. The hook can modify the output. It can modify the input in-place, but that will have no effect on forward, since this is called after forward() is called.
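A minimal sketch that records a layer’s output (the hook and dictionary key are illustrative):

import torch
import torch.nn as nn

activations = {}

def save_output(module, inputs, output):
    # Returning None leaves the output unchanged.
    activations["linear"] = output.detach()

net = nn.Linear(4, 2)
handle = net.register_forward_hook(save_output)
net(torch.randn(1, 4))
handle.remove()  # detach the hook when it is no longer needed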

    @@ -775,14 +905,16 @@

    netcal.regularization.MMCEPenalty
    -register_forward_pre_hook(hook)
    +register_forward_pre_hook(hook: Callable[..., None]) → torch.utils.hooks.RemovableHandle

    Registers a forward pre-hook on the module.

    The hook will be called every time before forward() is invoked. It should have the following signature:

    hook(module, input) -> None or modified input
     
The input contains only the positional arguments given to the module. Keyword arguments won’t be passed to the hooks and only to the forward. The hook can modify the input. The user can either return a tuple or a single modified value in the hook. The value will be wrapped into a tuple if a single value is returned (unless that value is already a tuple).

    @@ -796,9 +928,42 @@

netcal.regularization.MMCEPenalty

+register_full_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, torch.Tensor]]) → torch.utils.hooks.RemovableHandle

    Registers a backward hook on the module.

The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature:

hook(module, grad_input, grad_output) -> tuple(Tensor) or None

The grad_input and grad_output are tuples that contain the gradients with respect to the inputs and outputs respectively. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of grad_input in subsequent computations. grad_input will only correspond to the inputs given as positional arguments and all kwarg arguments are ignored. Entries in grad_input and grad_output will be None for all non-Tensor arguments.

Warning

Modifying inputs or outputs in-place is not allowed when using backward hooks and will raise an error.
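A sketch of a gradient-clipping hook under these rules (the clipping range is arbitrary):

import torch
import torch.nn as nn

def clip_grad_input(module, grad_input, grad_output):
    # Return a replacement for grad_input; entries may be None for
    # non-Tensor arguments or inputs that do not require gradients.
    return tuple(g.clamp(-1.0, 1.0) if g is not None else None
                 for g in grad_input)

net = nn.Linear(4, 2)
handle = net.register_full_backward_hook(clip_grad_input)
net(torch.randn(3, 4)).sum().backward()
handle.remove()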

Returns

a handle that can be used to remove the added hook by calling handle.remove()

Return type

torch.utils.hooks.RemovableHandle
    -register_parameter(name, param)
    +register_parameter(name: str, param: Optional[torch.nn.parameter.Parameter]) → None

    Adds a parameter to the module.

    The parameter can be accessed as an attribute using given name.

    @@ -814,13 +979,15 @@

    netcal.regularization.MMCEPenalty
    -requires_grad_(requires_grad=True)
    +requires_grad_(requires_grad: bool = True) → T

    Change if autograd should record operations on parameters in this module.

    This method sets the parameters’ requires_grad attributes in-place.

    This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training).
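A sketch of the freezing pattern (backbone and head are placeholder names):

import torch.nn as nn

backbone, head = nn.Linear(8, 8), nn.Linear(8, 2)
backbone.requires_grad_(False)  # freeze: autograd stops recording here
assert all(not p.requires_grad for p in backbone.parameters())
assert all(p.requires_grad for p in head.parameters())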

See the PyTorch documentation on locally disabling gradient computation for a comparison between .requires_grad_() and several similar mechanisms that may be confused with it.

    Parameters

requires_grad (bool) – whether autograd should record operations on parameters in this module. Default: True.

netcal.regularization.MMCEPenalty

+share_memory() → T

See torch.Tensor.share_memory_()
    state_dict(destination=None, prefix='', keep_vars=False)
    @@ -882,8 +1055,8 @@

netcal.regularization.MMCEPenalty

Its signature is similar to torch.Tensor.to(), but only accepts floating point or complex dtypes. In addition, this method will only cast the floating point or complex parameters and buffers to dtype (if given). The integral parameters and buffers will be moved to device, if that is given, but with dtypes unchanged. When non_blocking is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., converting a CPU Tensor with pinned memory to a CUDA Tensor.

    netcal.regularization.MMCEPenalty

-Example:
+Examples:

+to_empty(*, device: Union[str, torch.device]) → T

Moves the parameters and buffers to the specified device without copying storage.

Parameters

device (torch.device) – The desired device of the parameters and buffers in this module.

Returns

self

Return type

Module
    -train(mode=True)
    +train(mode: bool = True) → T

    Sets the module in training mode.

This has an effect only on certain modules. See the documentation of particular modules for details of their behavior in training/evaluation mode, if they are affected, e.g. Dropout, BatchNorm, etc.

    netcal.regularization.MMCEPenalty
    -type(dst_type)
    +type(dst_type: Union[torch.dtype, str]) → T

    Casts all parameters and buffers to dst_type.

Note

This method modifies the module in-place.
    Parameters

    dst_type (type or string) – the desired type

    @@ -984,10 +1189,42 @@

netcal.regularization.MMCEPenalty

+xpu(device: Union[int, torch.device, None] = None) → T

Moves all model parameters and buffers to the XPU.

This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on XPU while being optimized.

Note

This method modifies the module in-place.

Parameters

device (int, optional) – if specified, all parameters will be copied to that device

Returns

self

Return type

Module

-zero_grad()
+zero_grad(set_to_none: bool = False) → None

Sets gradients of all model parameters to zero. See the similar function under torch.optim.Optimizer for more context.

Parameters

set_to_none (bool) – instead of setting to zero, set the grads to None. See torch.optim.Optimizer.zero_grad() for details.
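A short sketch of both behaviors (the model is a placeholder):

import torch
import torch.nn as nn

net = nn.Linear(4, 2)
net(torch.randn(1, 4)).sum().backward()
net.zero_grad()                  # gradients become zero-filled tensors
net.zero_grad(set_to_none=True)  # gradients are reset to None instead
assert net.weight.grad is None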


diff --git a/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.confidence_penalty.html b/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.confidence_penalty.html
index ad78aa5..8872658 100644
--- a/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.confidence_penalty.html
+++ b/docs/build/html/_autosummary/_autosummary_regularization_func/netcal.regularization.confidence_penalty.html
@@ -4,7 +4,7 @@
-    netcal.regularization.confidence_penalty — calibration-framework 1.2.0 documentation
+    netcal.regularization.confidence_penalty — calibration-framework 1.2.1 documentation

diff --git a/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.AbstractLogisticRegression.html b/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.AbstractLogisticRegression.html
index 44ed2e8..d2c5bd0 100644
--- a/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.AbstractLogisticRegression.html
+++ b/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.AbstractLogisticRegression.html
@@ -4,7 +4,7 @@
-    netcal.scaling.AbstractLogisticRegression — calibration-framework 1.2.0 documentation
+    netcal.scaling.AbstractLogisticRegression — calibration-framework 1.2.1 documentation

diff --git a/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.BetaCalibration.html b/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.BetaCalibration.html
index bf315ad..355d921 100644
--- a/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.BetaCalibration.html
+++ b/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.BetaCalibration.html
@@ -4,7 +4,7 @@
-    netcal.scaling.BetaCalibration — calibration-framework 1.2.0 documentation
+    netcal.scaling.BetaCalibration — calibration-framework 1.2.1 documentation

diff --git a/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.BetaCalibrationDependent.html b/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.BetaCalibrationDependent.html
index 7cb48d9..8c53ca1 100644
--- a/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.BetaCalibrationDependent.html
+++ b/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.BetaCalibrationDependent.html
@@ -4,7 +4,7 @@
-    netcal.scaling.BetaCalibrationDependent — calibration-framework 1.2.0 documentation
+    netcal.scaling.BetaCalibrationDependent — calibration-framework 1.2.1 documentation

diff --git a/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.LogisticCalibration.html b/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.LogisticCalibration.html
index 2ad5b50..1eb14d9 100644
--- a/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.LogisticCalibration.html
+++ b/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.LogisticCalibration.html
@@ -4,7 +4,7 @@
-    netcal.scaling.LogisticCalibration — calibration-framework 1.2.0 documentation
+    netcal.scaling.LogisticCalibration — calibration-framework 1.2.1 documentation

diff --git a/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.LogisticCalibrationDependent.html b/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.LogisticCalibrationDependent.html
index ab31934..b08f4f4 100644
--- a/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.LogisticCalibrationDependent.html
+++ b/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.LogisticCalibrationDependent.html
@@ -4,7 +4,7 @@
-    netcal.scaling.LogisticCalibrationDependent — calibration-framework 1.2.0 documentation
+    netcal.scaling.LogisticCalibrationDependent — calibration-framework 1.2.1 documentation

diff --git a/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.TemperatureScaling.html b/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.TemperatureScaling.html
index c11a866..3c5e506 100644
--- a/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.TemperatureScaling.html
+++ b/docs/build/html/_autosummary/_autosummary_scaling/netcal.scaling.TemperatureScaling.html
@@ -4,7 +4,7 @@
-    netcal.scaling.TemperatureScaling — calibration-framework 1.2.0 documentation
+    netcal.scaling.TemperatureScaling — calibration-framework 1.2.1 documentation

diff --git a/docs/build/html/_autosummary/netcal.binning.html b/docs/build/html/_autosummary/netcal.binning.html
index ec4acf8..51a69ad 100644
--- a/docs/build/html/_autosummary/netcal.binning.html
+++ b/docs/build/html/_autosummary/netcal.binning.html
@@ -4,7 +4,7 @@
-    netcal.binning — calibration-framework 1.2.0 documentation
+    netcal.binning — calibration-framework 1.2.1 documentation

  • - + @@ -61,6 +61,7 @@

    Index

    | T | V | W + | X | Z @@ -73,6 +74,14 @@

    _

  • (netcal.regularization.DCAPenalty method)
  • (netcal.regularization.MMCEPenalty method) +
  • + +
  • _get_backward_hooks() (netcal.regularization.ConfidencePenalty method) + +
  • _load_from_state_dict() (netcal.regularization.ConfidencePenalty method) @@ -399,8 +408,24 @@

    F

    G

    +

    X

    + + +
    +

    Z
