diff --git a/docs/source/sparsification/flow-sparsification-model_recipe_sparsezoo-sparseml_transfer_learning.svg b/docs/source/sparsification/flow-sparsification-model_recipe_sparsezoo-sparseml_transfer_learning.svg new file mode 100644 index 00000000000..2381378d5f5 --- /dev/null +++ b/docs/source/sparsification/flow-sparsification-model_recipe_sparsezoo-sparseml_transfer_learning.svg @@ -0,0 +1,255 @@ [new SVG flow diagram "flow-sparsification-model_sparsezoo-recipe_sparsezoo-sparsifcation_sparseml_training-deployment_deepsparse"; node labels: Model (SparseZoo, Your Model), Recipe (SparseZoo, Sparsify, SparseML), Sparsification (SparseML training, SparseML one shot, Sparsify one shot), Deployment (ONNX Conversion, DeepSparse Engine, Your Current Engine); SVG markup omitted] \ No newline at end of file diff --git a/sparseml/_modules/index.html b/sparseml/_modules/index.html index e49298be4f4..234feb41f55 100644 --- a/sparseml/_modules/index.html +++ b/sparseml/_modules/index.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/keras/optim/manager.html b/sparseml/_modules/sparseml/keras/optim/manager.html index a01a65e4944..a826c7ddfe5 100644 --- a/sparseml/_modules/sparseml/keras/optim/manager.html +++ b/sparseml/_modules/sparseml/keras/optim/manager.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

@@ -202,6 +203,7 @@

Source code for sparseml.keras.optim.manager

from sparseml.keras.utils.logger import KerasLogger
 from sparseml.optim import BaseManager
 from sparseml.utils import load_recipe_yaml_str
+from sparsezoo.objects import OptimizationRecipe
 
 
 __all__ = ["ScheduledModifierManager"]
@@ -213,15 +215,23 @@ 

Source code for sparseml.keras.optim.manager

    """
 
 
[docs] @staticmethod - def from_yaml(file_path: str, add_modifiers: List[Modifier] = None): + def from_yaml( + file_path: Union[str, OptimizationRecipe], + add_modifiers: List[Modifier] = None, + ): """ - Convenience function used to create the manager of multiple modifiers - from a yaml file. - - :param file_path: the path to the yaml file to load the modifier from + Convenience function used to create the manager of multiple modifiers from a + recipe file. + + :param file_path: the path to the recipe file to load the modifier from, or + a SparseZoo model stub to load a recipe for a model stored in SparseZoo. + SparseZoo stubs should be preceded by 'zoo:', and can contain an optional + '?recipe_type=<type>' parameter. Can also be a SparseZoo OptimizationRecipe + object. e.g. '/path/to/local/recipe.yaml', 'zoo:model/stub/path', + 'zoo:model/stub/path?recipe_type=transfer' :param add_modifiers: additional modifiers that should be added to the - returned manager alongside the ones loaded from the yaml file - :return: ScheduledModifierManager() created from the yaml file + returned manager alongside the ones loaded from the recipe file + :return: ScheduledModifierManager() created from the recipe file """ yaml_str = load_recipe_yaml_str(file_path) modifiers = Modifier.load_list(yaml_str) diff --git a/sparseml/_modules/sparseml/keras/optim/mask_pruning.html b/sparseml/_modules/sparseml/keras/optim/mask_pruning.html index 9d412a9ab74..c944425cb77 100644 --- a/sparseml/_modules/sparseml/keras/optim/mask_pruning.html +++ b/sparseml/_modules/sparseml/keras/optim/mask_pruning.html @@ -98,16 +98,17 @@
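A minimal usage sketch of the updated Keras from_yaml entry point; the recipe path and SparseZoo stub below are placeholders, and ScheduledModifierManager is assumed to be re-exported from sparseml.keras.optim:

from sparseml.keras.optim import ScheduledModifierManager

# load from a local recipe file (placeholder path)
manager = ScheduledModifierManager.from_yaml("/path/to/local/recipe.yaml")

# or load the transfer-learning recipe attached to a SparseZoo model (placeholder stub)
manager = ScheduledModifierManager.from_yaml("zoo:model/stub/path?recipe_type=transfer")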

API

-

Help and Support

+

Help

@@ -191,14 +192,21 @@

Source code for sparseml.keras.optim.mask_pruning

import abc import collections import inspect -from typing import List +from typing import List, Union import tensorflow as tf -from sparseml.keras.optim.mask_pruning_creator import PruningMaskCreator +from sparseml.keras.optim.mask_pruning_creator import ( + PruningMaskCreator, + load_mask_creator, +) -__all__ = ["MaskedLayer", "PruningScheduler", "remove_pruning_masks"] +__all__ = [ + "MaskedLayer", + "PruningScheduler", + "remove_pruning_masks", +]
[docs]class PruningScheduler(abc.ABC): @@ -206,6 +214,12 @@

Source code for sparseml.keras.optim.mask_pruning

Abstract pruning scheduler """ + _REGISTRY = {} + + def __init_subclass__(cls): + super().__init_subclass__() + PruningScheduler._register_class(cls) +
[docs] @abc.abstractmethod def should_prune(self, step: int) -> bool: """ @@ -225,7 +239,33 @@

Source code for sparseml.keras.optim.mask_pruning

:param kwargs: optional keyword params that a specific scheduler might need :return: target sparsity """ - raise NotImplementedError("Not implemented")
+ raise NotImplementedError("Not implemented")
+ +
[docs] @abc.abstractmethod + def get_config(self): + raise NotImplementedError("Not implemented")
+ +
[docs] @classmethod + def deserialize(cls, config): + """ + Deserialize a pruning scheduler from config returned by scheduler's + get_config method + + :param config: a pruning scheduler's config + :return: a pruning scheduler instance + """ + if "class_name" not in config: + raise ValueError("The 'class_name' not found in config: {}".format(config)) + class_name = config["class_name"] + return tf.keras.utils.deserialize_keras_object( + config, + module_objects=globals(), + custom_objects={class_name: PruningScheduler._REGISTRY[class_name]}, + )
+ + @classmethod + def _register_class(cls, target_cls): + PruningScheduler._REGISTRY[target_cls.__name__] = target_cls
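A rough sketch of how the new registry and deserialize hook fit together. StepScheduler is a made-up subclass (not part of the library), the target_sparsity name is assumed from the abstract interface above, and reconstruction relies on tf.keras.utils.deserialize_keras_object calling the constructor with the nested config:

class StepScheduler(PruningScheduler):
    # registered in PruningScheduler._REGISTRY automatically via __init_subclass__
    def __init__(self, start_step=0, target=0.9):
        self._start_step = start_step
        self._target = target

    def should_prune(self, step: int) -> bool:
        return step >= self._start_step

    def target_sparsity(self, step: int, **kwargs) -> float:
        return self._target if step >= self._start_step else 0.0

    def get_config(self):
        return {
            "class_name": self.__class__.__name__,
            "config": {"start_step": self._start_step, "target": self._target},
        }

config = StepScheduler(start_step=100).get_config()
restored = PruningScheduler.deserialize(config)  # looks StepScheduler up in the registry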
MaskedParamInfo = collections.namedtuple( @@ -368,7 +408,7 @@

Source code for sparseml.keras.optim.mask_pruning

self, layer: tf.keras.layers.Layer, pruning_scheduler: PruningScheduler, - mask_creator: PruningMaskCreator, + mask_type: Union[str, List[int]] = "unstructured", **kwargs, ): if not isinstance(layer, MaskedLayer) and not isinstance( @@ -381,7 +421,16 @@

Source code for sparseml.keras.optim.mask_pruning

super(MaskedLayer, self).__init__(layer, **kwargs) self._layer = layer self._pruning_scheduler = pruning_scheduler - self._mask_creator = mask_creator + self._mask_type = mask_type + self._mask_creator = None + self._pruning_vars = [] + self._global_step = None + self._mask_updater = None + +
[docs] def build(self, input_shape): + super(MaskedLayer, self).build(input_shape) + self._mask_creator = load_mask_creator(self._mask_type) + self._pruning_vars = self._reuse_or_create_pruning_vars() self._global_step = self.add_weight( "global_step", shape=[], @@ -389,13 +438,12 @@

Source code for sparseml.keras.optim.mask_pruning

dtype=tf.int64, trainable=False, ) - self._pruning_vars = self._reuse_or_create_pruning_vars() self._mask_updater = MaskAndWeightUpdater( self._pruning_vars, self._pruning_scheduler, self._mask_creator, self._global_step, - ) + )
def _reuse_or_create_pruning_vars( self, @@ -452,6 +500,44 @@

Source code for sparseml.keras.optim.mask_pruning

else: return self._layer.call(inputs)
+
[docs] def get_config(self): + """ + Get layer config + Serialization and deserialization should be done using + tf.keras.layers.serialize/deserialize, which add and consume the "class_name" + field automatically. + The resulting config below therefore does not contain that field. + """ + config = super(MaskedLayer, self).get_config() + if "layer" not in config: + raise RuntimeError("Expected 'layer' field not found in config") + config.update( + { + "pruning_scheduler": self._pruning_scheduler.get_config(), + "mask_type": self._mask_type, + } + ) + return config
+ +
[docs] @classmethod + def from_config(cls, config): + config = config.copy() + layer = tf.keras.layers.deserialize( + config.pop("layer"), custom_objects={"MaskedLayer": MaskedLayer} + ) + if not isinstance(layer, MaskedLayer) and not isinstance( + layer, tf.keras.layers.Layer + ): + raise RuntimeError("Unexpected layer created from config") + pruning_scheduler = PruningScheduler.deserialize( + config.pop("pruning_scheduler") + ) + if not isinstance(pruning_scheduler, PruningScheduler): + raise RuntimeError("Unexpected pruning scheduler type created from config") + mask_type = config.pop("mask_type") + masked_layer = MaskedLayer(layer, pruning_scheduler, mask_type, **config) + return masked_layer
+
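A short sketch of the intended config round trip for MaskedLayer, assuming scheduler is a concrete PruningScheduler that implements get_config (all names here are illustrative):

dense = tf.keras.layers.Dense(10)
masked = MaskedLayer(dense, scheduler, mask_type="unstructured")

config = masked.get_config()                # carries "layer", "pruning_scheduler", "mask_type"
restored = MaskedLayer.from_config(config)  # rebuilds the wrapped layer and the scheduler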
[docs] def compute_output_shape(self, input_shape): return self._layer.compute_output_shape(input_shape)
@@ -478,7 +564,11 @@

Source code for sparseml.keras.optim.mask_pruning

elif isinstance(self._layer, tf.keras.layers.Layer): return self._layer else: - raise RuntimeError("Unrecognized layer")
+ raise RuntimeError("Unrecognized layer") + + @property + def masked_layer(self): + return self._layer
[docs]def remove_pruning_masks(model: tf.keras.Model): diff --git a/sparseml/_modules/sparseml/keras/optim/mask_pruning_creator.html b/sparseml/_modules/sparseml/keras/optim/mask_pruning_creator.html index ba84cc5c6f4..e5305105cac 100644 --- a/sparseml/_modules/sparseml/keras/optim/mask_pruning_creator.html +++ b/sparseml/_modules/sparseml/keras/optim/mask_pruning_creator.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/keras/optim/modifier.html b/sparseml/_modules/sparseml/keras/optim/modifier.html index ec4b8f040d0..cc6c1707433 100644 --- a/sparseml/_modules/sparseml/keras/optim/modifier.html +++ b/sparseml/_modules/sparseml/keras/optim/modifier.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/keras/optim/modifier_epoch.html b/sparseml/_modules/sparseml/keras/optim/modifier_epoch.html index 925a105650a..231786169ee 100644 --- a/sparseml/_modules/sparseml/keras/optim/modifier_epoch.html +++ b/sparseml/_modules/sparseml/keras/optim/modifier_epoch.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/keras/optim/modifier_lr.html b/sparseml/_modules/sparseml/keras/optim/modifier_lr.html index 8af0dc770c5..688997aae82 100644 --- a/sparseml/_modules/sparseml/keras/optim/modifier_lr.html +++ b/sparseml/_modules/sparseml/keras/optim/modifier_lr.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/keras/optim/modifier_params.html b/sparseml/_modules/sparseml/keras/optim/modifier_params.html index 9eb292b30f0..f6817488c8f 100644 --- a/sparseml/_modules/sparseml/keras/optim/modifier_params.html +++ b/sparseml/_modules/sparseml/keras/optim/modifier_params.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/keras/optim/modifier_pruning.html b/sparseml/_modules/sparseml/keras/optim/modifier_pruning.html index 250a2dff294..997014dfedd 100644 --- a/sparseml/_modules/sparseml/keras/optim/modifier_pruning.html +++ b/sparseml/_modules/sparseml/keras/optim/modifier_pruning.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

@@ -202,10 +203,6 @@

Source code for sparseml.keras.optim.modifier_pruning

PruningScheduler, remove_pruning_masks, ) -from sparseml.keras.optim.mask_pruning_creator import ( - PruningMaskCreator, - load_mask_creator, -) from sparseml.keras.optim.modifier import ( KerasModifierYAML, ModifierProp, @@ -249,6 +246,14 @@

Source code for sparseml.keras.optim.modifier_pruning

self._update_frequency_steps = update_frequency_steps self._inter_func = inter_func + @property + def init_sparsity(self): + return self._init_sparsity + + @property + def final_sparsity(self): + return self._final_sparsity + @property def start_step(self): return self._start_step @@ -261,6 +266,10 @@

Source code for sparseml.keras.optim.modifier_pruning

def update_frequency_steps(self): return self._update_frequency_steps + @property + def inter_func(self): + return self._inter_func + @property def exponent(self) -> float: """ @@ -330,6 +339,20 @@

Source code for sparseml.keras.optim.modifier_pruning

sparsity = self._final_sparsity return sparsity + def get_config(self): + config = { + "class_name": self.__class__.__name__, + "config": { + "init_sparsity": self.init_sparsity, + "final_sparsity": self.final_sparsity, + "start_step": self.start_step, + "end_step": self.end_step, + "update_frequency_steps": self.update_frequency_steps, + "inter_func": self.inter_func, + }, + } + return config + class SparsityFreezer(PruningScheduler): """ @@ -348,6 +371,14 @@

Source code for sparseml.keras.optim.modifier_pruning

self._start_step = start_step self._end_step = end_step + @property + def start_step(self): + return self._start_step + + @property + def end_step(self): + return self._end_step + def should_prune(self, step: int) -> bool: """ Check if the given step is the right time for pruning @@ -379,6 +410,14 @@

Source code for sparseml.keras.optim.modifier_pruning

sparsity = None return sparsity + def get_config(self): + config = { + "class_name": self.__class__.__name__, + "start_step": self.start_step, + "end_step": self.end_step, + } + return config + class PruningModifierCallback(tensorflow.keras.callbacks.Callback): """ @@ -521,7 +560,7 @@

Source code for sparseml.keras.optim.modifier_pruning

[docs]@KerasModifierYAML() -class ConstantPruningModifier(ScheduledModifier, PruningScheduler): +class ConstantPruningModifier(ScheduledModifier): """ Holds the sparsity level and shape for a given param constant while training. Useful for transfer learning use cases. @@ -563,7 +602,7 @@

Source code for sparseml.keras.optim.modifier_pruning

self._masked_layers = [] self._sparsity_scheduler = None - self._mask_creator = load_mask_creator("unstructured") + self._mask_type = "unstructured" @ModifierProp() def params(self) -> Union[str, List[str]]: @@ -632,7 +671,7 @@

Source code for sparseml.keras.optim.modifier_pruning

cloned_layer = layer if layer.name in self.layer_names: # TODO: handle regex params cloned_layer = MaskedLayer( - layer, self._sparsity_scheduler, self._mask_creator, name=layer.name + layer, self._sparsity_scheduler, self._mask_type, name=layer.name ) self._masked_layers.append(cloned_layer) return cloned_layer @@ -729,7 +768,7 @@

Source code for sparseml.keras.optim.modifier_pruning

default is __ALL__ :param mask_type: String to define type of sparsity (options: ['unstructured', 'channel', 'filter']), List to define block shape of a parameter's in and out - channels, or a PruningMaskCreator object. default is 'unstructured' + channels. default is 'unstructured' :param leave_enabled: True to continue masking the weights after end_epoch, False to stop masking. Should be set to False if exporting the result immediately after or doing some other prune @@ -745,7 +784,7 @@

Source code for sparseml.keras.optim.modifier_pruning

update_frequency: float, inter_func: str = "cubic", log_types: Union[str, List[str]] = ALL_TOKEN, - mask_type: Union[str, List[int], PruningMaskCreator] = "unstructured", + mask_type: Union[str, List[int]] = "unstructured", leave_enabled: bool = True, ): super(GMPruningModifier, self).__init__( @@ -767,10 +806,7 @@

Source code for sparseml.keras.optim.modifier_pruning

self._leave_enabled = convert_to_bool(leave_enabled) self._inter_func = inter_func self._mask_type = mask_type - self._mask_creator = mask_type self._leave_enabled = convert_to_bool(leave_enabled) - if not isinstance(mask_type, PruningMaskCreator): - self._mask_creator = load_mask_creator(mask_type) self._prune_op_vars = None self._update_ready = None self._sparsity = None @@ -870,21 +906,18 @@

Source code for sparseml.keras.optim.modifier_pruning

self.validate() @ModifierProp() - def mask_type(self) -> Union[str, List[int], PruningMaskCreator]: + def mask_type(self) -> Union[str, List[int]]: """ - :return: the PruningMaskCreator object used + :return: the mask type used """ return self._mask_type @mask_type.setter - def mask_type(self, value: Union[str, List[int], PruningMaskCreator]): + def mask_type(self, value: Union[str, List[int]]): """ - :param value: the PruningMaskCreator object to use + :param value: the mask type to use """ self._mask_type = value - self._mask_creator = value - if not isinstance(value, PruningMaskCreator): - self._mask_creator = load_mask_creator(value) @ModifierProp() def leave_enabled(self) -> bool: @@ -1010,7 +1043,7 @@

Source code for sparseml.keras.optim.modifier_pruning

layer.name in self.layer_names ): # TODO: handle regex params --- see create_ops in TF version cloned_layer = MaskedLayer( - layer, self._sparsity_scheduler, self._mask_creator, name=layer.name + layer, self._sparsity_scheduler, self._mask_type, name=layer.name ) self._masked_layers.append(cloned_layer) return cloned_layer diff --git a/sparseml/_modules/sparseml/keras/optim/utils.html b/sparseml/_modules/sparseml/keras/optim/utils.html index f1b24b7c4c0..b461df22a29 100644 --- a/sparseml/_modules/sparseml/keras/optim/utils.html +++ b/sparseml/_modules/sparseml/keras/optim/utils.html @@ -98,16 +98,17 @@
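For reference, a hedged sketch of constructing the Keras pruning modifier with the string-based mask_type that replaces the old PruningMaskCreator argument; parameter names follow the docstring above, and the export location and values are assumptions:

from sparseml.keras.optim import GMPruningModifier

pruning = GMPruningModifier(
    params=["re:.*kernel"],       # illustrative regex for the kernels to prune
    init_sparsity=0.05,
    final_sparsity=0.85,
    start_epoch=0.0,
    end_epoch=10.0,
    update_frequency=1.0,
    mask_type="unstructured",     # or "channel", "filter", or a block shape such as [1, 4]
)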

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/keras/utils/callbacks.html b/sparseml/_modules/sparseml/keras/utils/callbacks.html index c06db171900..700944f89eb 100644 --- a/sparseml/_modules/sparseml/keras/utils/callbacks.html +++ b/sparseml/_modules/sparseml/keras/utils/callbacks.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/keras/utils/exporter.html b/sparseml/_modules/sparseml/keras/utils/exporter.html index dd4e44409b6..22c38071af1 100644 --- a/sparseml/_modules/sparseml/keras/utils/exporter.html +++ b/sparseml/_modules/sparseml/keras/utils/exporter.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/keras/utils/logger.html b/sparseml/_modules/sparseml/keras/utils/logger.html index bc97f7f462e..5f2e472691f 100644 --- a/sparseml/_modules/sparseml/keras/utils/logger.html +++ b/sparseml/_modules/sparseml/keras/utils/logger.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/keras/utils/model.html b/sparseml/_modules/sparseml/keras/utils/model.html index a1f014dff27..d1039156644 100644 --- a/sparseml/_modules/sparseml/keras/utils/model.html +++ b/sparseml/_modules/sparseml/keras/utils/model.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/log.html b/sparseml/_modules/sparseml/log.html index d5ddd319439..277d3c40ad0 100644 --- a/sparseml/_modules/sparseml/log.html +++ b/sparseml/_modules/sparseml/log.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/onnx/optim/analyzer_model.html b/sparseml/_modules/sparseml/onnx/optim/analyzer_model.html index f7035036c57..6845ab6a638 100644 --- a/sparseml/_modules/sparseml/onnx/optim/analyzer_model.html +++ b/sparseml/_modules/sparseml/onnx/optim/analyzer_model.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/onnx/optim/quantization/calibration.html b/sparseml/_modules/sparseml/onnx/optim/quantization/calibration.html index ce688a96c53..a5befd5d188 100644 --- a/sparseml/_modules/sparseml/onnx/optim/quantization/calibration.html +++ b/sparseml/_modules/sparseml/onnx/optim/quantization/calibration.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/onnx/optim/quantization/quantize.html b/sparseml/_modules/sparseml/onnx/optim/quantization/quantize.html index 6f34a38be56..ecf9425ea3e 100644 --- a/sparseml/_modules/sparseml/onnx/optim/quantization/quantize.html +++ b/sparseml/_modules/sparseml/onnx/optim/quantization/quantize.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/onnx/optim/quantization/quantize_model_post_training.html b/sparseml/_modules/sparseml/onnx/optim/quantization/quantize_model_post_training.html index b4e79bf4dcc..248f3d32ab9 100644 --- a/sparseml/_modules/sparseml/onnx/optim/quantization/quantize_model_post_training.html +++ b/sparseml/_modules/sparseml/onnx/optim/quantization/quantize_model_post_training.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/onnx/optim/sensitivity_pruning.html b/sparseml/_modules/sparseml/onnx/optim/sensitivity_pruning.html index 64eec13f9c5..25b34d88e41 100644 --- a/sparseml/_modules/sparseml/onnx/optim/sensitivity_pruning.html +++ b/sparseml/_modules/sparseml/onnx/optim/sensitivity_pruning.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/onnx/utils/data.html b/sparseml/_modules/sparseml/onnx/utils/data.html index 8468468b41a..63f67979f34 100644 --- a/sparseml/_modules/sparseml/onnx/utils/data.html +++ b/sparseml/_modules/sparseml/onnx/utils/data.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/onnx/utils/graph_editor.html b/sparseml/_modules/sparseml/onnx/utils/graph_editor.html index 694ee85eb2d..f3c9ae4a055 100644 --- a/sparseml/_modules/sparseml/onnx/utils/graph_editor.html +++ b/sparseml/_modules/sparseml/onnx/utils/graph_editor.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/onnx/utils/graph_optimizer.html b/sparseml/_modules/sparseml/onnx/utils/graph_optimizer.html index 5ae53dfece3..a4251a3b28a 100644 --- a/sparseml/_modules/sparseml/onnx/utils/graph_optimizer.html +++ b/sparseml/_modules/sparseml/onnx/utils/graph_optimizer.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/onnx/utils/helpers.html b/sparseml/_modules/sparseml/onnx/utils/helpers.html index 846040390cc..94b9237e393 100644 --- a/sparseml/_modules/sparseml/onnx/utils/helpers.html +++ b/sparseml/_modules/sparseml/onnx/utils/helpers.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/onnx/utils/loss.html b/sparseml/_modules/sparseml/onnx/utils/loss.html index 65aa26bd840..2ab7ca8eb25 100644 --- a/sparseml/_modules/sparseml/onnx/utils/loss.html +++ b/sparseml/_modules/sparseml/onnx/utils/loss.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/onnx/utils/model.html b/sparseml/_modules/sparseml/onnx/utils/model.html index 174a9b8b066..282e20a9603 100644 --- a/sparseml/_modules/sparseml/onnx/utils/model.html +++ b/sparseml/_modules/sparseml/onnx/utils/model.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/onnx/utils/sparse_tensor.html b/sparseml/_modules/sparseml/onnx/utils/sparse_tensor.html index cb117c3e8de..758c4edd994 100644 --- a/sparseml/_modules/sparseml/onnx/utils/sparse_tensor.html +++ b/sparseml/_modules/sparseml/onnx/utils/sparse_tensor.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/optim/analyzer.html b/sparseml/_modules/sparseml/optim/analyzer.html index ec03b855af0..6ad0db49ccb 100644 --- a/sparseml/_modules/sparseml/optim/analyzer.html +++ b/sparseml/_modules/sparseml/optim/analyzer.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/optim/learning_rate.html b/sparseml/_modules/sparseml/optim/learning_rate.html index 35cb8ee6c5a..a87eecff556 100644 --- a/sparseml/_modules/sparseml/optim/learning_rate.html +++ b/sparseml/_modules/sparseml/optim/learning_rate.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/optim/manager.html b/sparseml/_modules/sparseml/optim/manager.html index 857d89003a3..6037c8a4e61 100644 --- a/sparseml/_modules/sparseml/optim/manager.html +++ b/sparseml/_modules/sparseml/optim/manager.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/optim/modifier.html b/sparseml/_modules/sparseml/optim/modifier.html index c02b192ea96..7eb93702d1f 100644 --- a/sparseml/_modules/sparseml/optim/modifier.html +++ b/sparseml/_modules/sparseml/optim/modifier.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/optim/sensitivity.html b/sparseml/_modules/sparseml/optim/sensitivity.html index fa202af4aa2..f039fedad85 100644 --- a/sparseml/_modules/sparseml/optim/sensitivity.html +++ b/sparseml/_modules/sparseml/optim/sensitivity.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/datasets/classification/cifar.html b/sparseml/_modules/sparseml/pytorch/datasets/classification/cifar.html index ea60e60e725..5c529048104 100644 --- a/sparseml/_modules/sparseml/pytorch/datasets/classification/cifar.html +++ b/sparseml/_modules/sparseml/pytorch/datasets/classification/cifar.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/datasets/classification/imagefolder.html b/sparseml/_modules/sparseml/pytorch/datasets/classification/imagefolder.html index 7704190c1cc..fa83de71554 100644 --- a/sparseml/_modules/sparseml/pytorch/datasets/classification/imagefolder.html +++ b/sparseml/_modules/sparseml/pytorch/datasets/classification/imagefolder.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/datasets/classification/imagenet.html b/sparseml/_modules/sparseml/pytorch/datasets/classification/imagenet.html index 852b95fd48e..49f1be61b65 100644 --- a/sparseml/_modules/sparseml/pytorch/datasets/classification/imagenet.html +++ b/sparseml/_modules/sparseml/pytorch/datasets/classification/imagenet.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/datasets/classification/imagenette.html b/sparseml/_modules/sparseml/pytorch/datasets/classification/imagenette.html index 0c1fd7ed666..4deb918ec3b 100644 --- a/sparseml/_modules/sparseml/pytorch/datasets/classification/imagenette.html +++ b/sparseml/_modules/sparseml/pytorch/datasets/classification/imagenette.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/datasets/classification/mnist.html b/sparseml/_modules/sparseml/pytorch/datasets/classification/mnist.html index ee8c32c89ba..dc48d3b372a 100644 --- a/sparseml/_modules/sparseml/pytorch/datasets/classification/mnist.html +++ b/sparseml/_modules/sparseml/pytorch/datasets/classification/mnist.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/datasets/detection/coco.html b/sparseml/_modules/sparseml/pytorch/datasets/detection/coco.html index 5a9bb1efc06..25f201479a4 100644 --- a/sparseml/_modules/sparseml/pytorch/datasets/detection/coco.html +++ b/sparseml/_modules/sparseml/pytorch/datasets/detection/coco.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/datasets/detection/helpers.html b/sparseml/_modules/sparseml/pytorch/datasets/detection/helpers.html index fde48658893..76f826a5a90 100644 --- a/sparseml/_modules/sparseml/pytorch/datasets/detection/helpers.html +++ b/sparseml/_modules/sparseml/pytorch/datasets/detection/helpers.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/datasets/detection/voc.html b/sparseml/_modules/sparseml/pytorch/datasets/detection/voc.html index 498a858510e..2f53836eb67 100644 --- a/sparseml/_modules/sparseml/pytorch/datasets/detection/voc.html +++ b/sparseml/_modules/sparseml/pytorch/datasets/detection/voc.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/datasets/generic.html b/sparseml/_modules/sparseml/pytorch/datasets/generic.html index 22680a27947..5ec36702dc5 100644 --- a/sparseml/_modules/sparseml/pytorch/datasets/generic.html +++ b/sparseml/_modules/sparseml/pytorch/datasets/generic.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/datasets/registry.html b/sparseml/_modules/sparseml/pytorch/datasets/registry.html index 489afb94c4f..ea6d613ee85 100644 --- a/sparseml/_modules/sparseml/pytorch/datasets/registry.html +++ b/sparseml/_modules/sparseml/pytorch/datasets/registry.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/models/classification/darknet.html b/sparseml/_modules/sparseml/pytorch/models/classification/darknet.html index e1ab78a8ae6..76d66affbef 100644 --- a/sparseml/_modules/sparseml/pytorch/models/classification/darknet.html +++ b/sparseml/_modules/sparseml/pytorch/models/classification/darknet.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/models/classification/efficientnet.html b/sparseml/_modules/sparseml/pytorch/models/classification/efficientnet.html index 91572557868..a79f6062a9f 100644 --- a/sparseml/_modules/sparseml/pytorch/models/classification/efficientnet.html +++ b/sparseml/_modules/sparseml/pytorch/models/classification/efficientnet.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/models/classification/inception_v3.html b/sparseml/_modules/sparseml/pytorch/models/classification/inception_v3.html index 20ee94e64ce..d8ed1ea6277 100644 --- a/sparseml/_modules/sparseml/pytorch/models/classification/inception_v3.html +++ b/sparseml/_modules/sparseml/pytorch/models/classification/inception_v3.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/models/classification/mnist.html b/sparseml/_modules/sparseml/pytorch/models/classification/mnist.html index b079dbc2df0..8e6bf7fb71e 100644 --- a/sparseml/_modules/sparseml/pytorch/models/classification/mnist.html +++ b/sparseml/_modules/sparseml/pytorch/models/classification/mnist.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/models/classification/mobilenet.html b/sparseml/_modules/sparseml/pytorch/models/classification/mobilenet.html index 2ac81ae5e29..7e021c4f757 100644 --- a/sparseml/_modules/sparseml/pytorch/models/classification/mobilenet.html +++ b/sparseml/_modules/sparseml/pytorch/models/classification/mobilenet.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/models/classification/mobilenet_v2.html b/sparseml/_modules/sparseml/pytorch/models/classification/mobilenet_v2.html index 84f2a70f89d..46373c71618 100644 --- a/sparseml/_modules/sparseml/pytorch/models/classification/mobilenet_v2.html +++ b/sparseml/_modules/sparseml/pytorch/models/classification/mobilenet_v2.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/models/classification/resnet.html b/sparseml/_modules/sparseml/pytorch/models/classification/resnet.html index ac62b4e6724..4ee6f25a162 100644 --- a/sparseml/_modules/sparseml/pytorch/models/classification/resnet.html +++ b/sparseml/_modules/sparseml/pytorch/models/classification/resnet.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/models/classification/vgg.html b/sparseml/_modules/sparseml/pytorch/models/classification/vgg.html index 48e63945e4a..d4dee421eff 100644 --- a/sparseml/_modules/sparseml/pytorch/models/classification/vgg.html +++ b/sparseml/_modules/sparseml/pytorch/models/classification/vgg.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/models/detection/ssd.html b/sparseml/_modules/sparseml/pytorch/models/detection/ssd.html index 3b5e907dec4..e78f0363373 100644 --- a/sparseml/_modules/sparseml/pytorch/models/detection/ssd.html +++ b/sparseml/_modules/sparseml/pytorch/models/detection/ssd.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/models/detection/ssd_lite.html b/sparseml/_modules/sparseml/pytorch/models/detection/ssd_lite.html index 008f05b7d29..9654b86b7ee 100644 --- a/sparseml/_modules/sparseml/pytorch/models/detection/ssd_lite.html +++ b/sparseml/_modules/sparseml/pytorch/models/detection/ssd_lite.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/models/detection/ssd_mobilenet.html b/sparseml/_modules/sparseml/pytorch/models/detection/ssd_mobilenet.html index 90fa9db8c8b..f6d947277b1 100644 --- a/sparseml/_modules/sparseml/pytorch/models/detection/ssd_mobilenet.html +++ b/sparseml/_modules/sparseml/pytorch/models/detection/ssd_mobilenet.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/models/detection/ssd_resnet.html b/sparseml/_modules/sparseml/pytorch/models/detection/ssd_resnet.html index 0a5dfbbddf9..cf0d7ed883a 100644 --- a/sparseml/_modules/sparseml/pytorch/models/detection/ssd_resnet.html +++ b/sparseml/_modules/sparseml/pytorch/models/detection/ssd_resnet.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/models/detection/yolo_v3.html b/sparseml/_modules/sparseml/pytorch/models/detection/yolo_v3.html index ba94229718e..8d3b4981a5b 100644 --- a/sparseml/_modules/sparseml/pytorch/models/detection/yolo_v3.html +++ b/sparseml/_modules/sparseml/pytorch/models/detection/yolo_v3.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/models/registry.html b/sparseml/_modules/sparseml/pytorch/models/registry.html index 5f4ffb3b5fa..ae7688b86bc 100644 --- a/sparseml/_modules/sparseml/pytorch/models/registry.html +++ b/sparseml/_modules/sparseml/pytorch/models/registry.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

@@ -484,7 +485,10 @@

Source code for sparseml.pytorch.models.registry

): """ :param pretrained_path: A path to the pretrained weights to load, - if provided will override the pretrained param + if provided will override the pretrained param. May also be + a SparseZoo stub path preceded by 'zoo:' with the optional + `?recipe_type=` argument. If given a recipe type, the base + model weights for that recipe will be loaded :param pretrained: True to load the default pretrained weights, a string to load a specific pretrained weight (ex: base, optim, optim-perf), diff --git a/sparseml/_modules/sparseml/pytorch/nn/activations.html b/sparseml/_modules/sparseml/pytorch/nn/activations.html index 80cfbc572d9..b1af7ca3b29 100644 --- a/sparseml/_modules/sparseml/pytorch/nn/activations.html +++ b/sparseml/_modules/sparseml/pytorch/nn/activations.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/nn/fatrelu.html b/sparseml/_modules/sparseml/pytorch/nn/fatrelu.html index f1b41894de3..50ce0eec033 100644 --- a/sparseml/_modules/sparseml/pytorch/nn/fatrelu.html +++ b/sparseml/_modules/sparseml/pytorch/nn/fatrelu.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/nn/se.html b/sparseml/_modules/sparseml/pytorch/nn/se.html index 9890fe027a8..fcc22a83c7d 100644 --- a/sparseml/_modules/sparseml/pytorch/nn/se.html +++ b/sparseml/_modules/sparseml/pytorch/nn/se.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/analyzer_as.html b/sparseml/_modules/sparseml/pytorch/optim/analyzer_as.html index 0ae7677eb16..98f569e354c 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/analyzer_as.html +++ b/sparseml/_modules/sparseml/pytorch/optim/analyzer_as.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/analyzer_module.html b/sparseml/_modules/sparseml/pytorch/optim/analyzer_module.html index 3820366d171..13e93675254 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/analyzer_module.html +++ b/sparseml/_modules/sparseml/pytorch/optim/analyzer_module.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/analyzer_pruning.html b/sparseml/_modules/sparseml/pytorch/optim/analyzer_pruning.html index e996fbe983e..babdfb2be13 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/analyzer_pruning.html +++ b/sparseml/_modules/sparseml/pytorch/optim/analyzer_pruning.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/manager.html b/sparseml/_modules/sparseml/pytorch/optim/manager.html index 3d5241a7c03..26e11f6f8d3 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/manager.html +++ b/sparseml/_modules/sparseml/pytorch/optim/manager.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

@@ -205,6 +206,7 @@

Source code for sparseml.pytorch.optim.manager

from sparseml.pytorch.optim.modifier import Modifier, ScheduledModifier from sparseml.pytorch.utils import PyTorchLogger from sparseml.utils import load_recipe_yaml_str +from sparsezoo.objects import OptimizationRecipe __all__ = ["ScheduledModifierManager", "load_manager"] @@ -232,15 +234,23 @@

Source code for sparseml.pytorch.optim.manager

"""

[docs] @staticmethod - def from_yaml(file_path: str, add_modifiers: List[Modifier] = None): + def from_yaml( + file_path: Union[str, OptimizationRecipe], + add_modifiers: List[Modifier] = None, + ): """ Convenience function used to create the manager of multiple modifiers from a - yaml file. - - :param file_path: the path to the yaml file to load the modifier from + recipe file. + + :param file_path: the path to the recipe file to load the modifier from, or + a SparseZoo model stub to load a recipe for a model stored in SparseZoo. + SparseZoo stubs should be preceded by 'zoo:', and can contain an optional + '?recipe_type=<type>' parameter. Can also be a SparseZoo OptimizationRecipe + object. e.g. '/path/to/local/recipe.yaml', 'zoo:model/stub/path', + 'zoo:model/stub/path?recipe_type=transfer' :param add_modifiers: additional modifiers that should be added to the - returned manager alongside the ones loaded from the yaml file - :return: ScheduledModifierManager() created from the yaml file + returned manager alongside the ones loaded from the recipe file + :return: ScheduledModifierManager() created from the recipe file """ yaml_str = load_recipe_yaml_str(file_path) modifiers = Modifier.load_list(yaml_str) diff --git a/sparseml/_modules/sparseml/pytorch/optim/mask_creator_pruning.html b/sparseml/_modules/sparseml/pytorch/optim/mask_creator_pruning.html index 60d35a9a1d2..0dd39ac1c04 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/mask_creator_pruning.html +++ b/sparseml/_modules/sparseml/pytorch/optim/mask_creator_pruning.html @@ -98,16 +98,17 @@
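A sketch of the same recipe loading on the PyTorch side, wired into training; the stub, the placeholder model, and the ScheduledOptimizer signature are assumptions:

import torch
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer

model = torch.nn.Linear(16, 4)                            # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# the recipe may be a local file or a SparseZoo stub (placeholder below)
manager = ScheduledModifierManager.from_yaml("zoo:model/stub/path?recipe_type=transfer")
optimizer = ScheduledOptimizer(optimizer, model, manager, steps_per_epoch=100)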

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/mask_pruning.html b/sparseml/_modules/sparseml/pytorch/optim/mask_pruning.html index 122dc76cb0e..d8ba570392b 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/mask_pruning.html +++ b/sparseml/_modules/sparseml/pytorch/optim/mask_pruning.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/modifier.html b/sparseml/_modules/sparseml/pytorch/optim/modifier.html index bce42ba1cad..875cd5dc3a0 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/modifier.html +++ b/sparseml/_modules/sparseml/pytorch/optim/modifier.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/modifier_as.html b/sparseml/_modules/sparseml/pytorch/optim/modifier_as.html index 377f2ec25a4..8dea0c1ec10 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/modifier_as.html +++ b/sparseml/_modules/sparseml/pytorch/optim/modifier_as.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/modifier_epoch.html b/sparseml/_modules/sparseml/pytorch/optim/modifier_epoch.html index 6c33d1fcfb6..a8ca4a5a230 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/modifier_epoch.html +++ b/sparseml/_modules/sparseml/pytorch/optim/modifier_epoch.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/modifier_lr.html b/sparseml/_modules/sparseml/pytorch/optim/modifier_lr.html index 8359bb99629..84538abbc4e 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/modifier_lr.html +++ b/sparseml/_modules/sparseml/pytorch/optim/modifier_lr.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/modifier_params.html b/sparseml/_modules/sparseml/pytorch/optim/modifier_params.html index cc054c8f7bd..1636a6bff4d 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/modifier_params.html +++ b/sparseml/_modules/sparseml/pytorch/optim/modifier_params.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/modifier_pruning.html b/sparseml/_modules/sparseml/pytorch/optim/modifier_pruning.html index 2b8e293816d..088721ebefb 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/modifier_pruning.html +++ b/sparseml/_modules/sparseml/pytorch/optim/modifier_pruning.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

@@ -253,7 +254,7 @@

Source code for sparseml.pytorch.optim.modifier_pruning

Useful for transfer learning use cases. | Sample yaml: - | !ConstantKSModifier + | !ConstantPruningModifier | start_epoch: 0.0 | end_epoch: 10.0 | params: ['re:.*weight'] @@ -346,7 +347,7 @@

Source code for sparseml.pytorch.optim.modifier_pruning

if param_name not in module_masks: raise RuntimeError( f"Unexpected parameter name when loading state dict for " - f"ConstantKSModifier Manager has parameters " + f"ConstantPruningModifier Manager has parameters " f"{list(module_masks.keys())}, given {param_name}" ) mask_disabled = False @@ -480,7 +481,7 @@

Source code for sparseml.pytorch.optim.modifier_pruning

Applies based on magnitude pruning unless otherwise specified by mask_type. | Sample yaml: - | !GradualKSModifier + | !GMPruningModifier | init_sparsity: 0.05 | final_sparsity: 0.8 | start_epoch: 0.0 @@ -588,7 +589,7 @@

Source code for sparseml.pytorch.optim.modifier_pruning

if param_name not in module_masks: raise RuntimeError( f"Unexpected parameter name when loading state dict for " - f"GradualKSModifier Manager has parameters " + f"GMPruningModifier Manager has parameters " f"{list(module_masks.keys())}, given {param_name}" ) mask_disabled = False diff --git a/sparseml/_modules/sparseml/pytorch/optim/modifier_quantization.html b/sparseml/_modules/sparseml/pytorch/optim/modifier_quantization.html index 45dd60e300e..66d42ca0642 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/modifier_quantization.html +++ b/sparseml/_modules/sparseml/pytorch/optim/modifier_quantization.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

@@ -195,7 +196,7 @@

Source code for sparseml.pytorch.optim.modifier_quantization

""" -from typing import List, Union +from typing import Any, Dict, List, Union from torch.nn import Module from torch.optim.optimizer import Optimizer @@ -251,6 +252,8 @@

Source code for sparseml.pytorch.optim.modifier_quantization

None to not stop tracking batch norm stats during QAT. Default is None :param end_epoch: Disabled, setting to anything other than -1 will raise an exception. For compatibility with YAML serialization only. + :param model_fuse_fn_kwargs: dictionary of keyword argument values to be passed + to the model fusing function """ def __init__( @@ -261,6 +264,7 @@

Source code for sparseml.pytorch.optim.modifier_quantization

disable_quantization_observer_epoch: Union[float, None] = None, freeze_bn_stats_epoch: Union[float, None] = None, end_epoch: float = -1, + model_fuse_fn_kwargs: Dict[str, Any] = None, ): if torch_quantization is None or torch_intrinsic is None: raise RuntimeError( @@ -279,6 +283,7 @@

Source code for sparseml.pytorch.optim.modifier_quantization

self._start_epoch = start_epoch self._submodules = submodules self._model_fuse_fn_name = model_fuse_fn_name + self._model_fuse_fn_kwargs = model_fuse_fn_kwargs or {} self._disable_quantization_observer_epoch = disable_quantization_observer_epoch self._freeze_bn_stats_epoch = freeze_bn_stats_epoch @@ -430,9 +435,10 @@

Source code for sparseml.pytorch.optim.modifier_quantization

self._model_fuse_fn_name ) ) - module_fuse_fn() + module_fuse_fn(**self._model_fuse_fn_kwargs) elif self._model_fuse_fn_name is None: # default auto fn - fuse_module_conv_bn_relus(module, inplace=True) + self._model_fuse_fn_kwargs["inplace"] = True + fuse_module_conv_bn_relus(module, **self._model_fuse_fn_kwargs) # prepare each module / submodule for quantization qconfig = get_qat_qconfig() for quant_module in self._modules_to_quantize: diff --git a/sparseml/_modules/sparseml/pytorch/optim/modifier_regularizer.html b/sparseml/_modules/sparseml/pytorch/optim/modifier_regularizer.html index 23fb4b3d48f..fc696fb020f 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/modifier_regularizer.html +++ b/sparseml/_modules/sparseml/pytorch/optim/modifier_regularizer.html @@ -98,16 +98,17 @@
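A hedged sketch of forwarding keyword arguments to the fuse function through the new parameter; the export location and values are assumptions:

from sparseml.pytorch.optim import QuantizationModifier

quantization = QuantizationModifier(
    start_epoch=5.0,
    # forwarded to the fuse function (fuse_module_conv_bn_relus by default)
    model_fuse_fn_kwargs={"override_bn_subclasses_forward": "override-only"},
)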

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/optimizer.html b/sparseml/_modules/sparseml/pytorch/optim/optimizer.html index 0967f3b1f16..d71bb5983d1 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/optimizer.html +++ b/sparseml/_modules/sparseml/pytorch/optim/optimizer.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/quantization/helpers.html b/sparseml/_modules/sparseml/pytorch/optim/quantization/helpers.html index 0fe4dc1e6db..9d2f07805b7 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/quantization/helpers.html +++ b/sparseml/_modules/sparseml/pytorch/optim/quantization/helpers.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

@@ -193,14 +194,17 @@

Source code for sparseml.pytorch.optim.quantization.helpers

""" from copy import deepcopy +from typing import Union import torch from torch.nn import BatchNorm2d, Conv2d, Module, ReLU try: + import torch.nn.intrinsic as nni from torch import quantization as torch_quantization except Exception: + nni = None torch_quantization = None from sparseml.pytorch.nn import ReLU as ReLU_nm @@ -213,15 +217,40 @@

Source code for sparseml.pytorch.optim.quantization.helpers

] +_QUANTIZABLE_MODULE_TYPES = ( + { + # Conv based layers + torch.nn.Conv1d, + torch.nn.Conv2d, + torch.nn.Conv3d, + nni.ConvBn1d, + nni.ConvBn2d, + nni.ConvBn3d, + nni.ConvReLU1d, + nni.ConvReLU2d, + nni.ConvReLU3d, + nni.ConvBnReLU1d, + nni.ConvBnReLU2d, + nni.ConvBnReLU3d, + # Linear Layers + torch.nn.Linear, + nni.LinearReLU, + } + if nni # nni will always import if torch.quantization is available + else None +) + +
[docs]def add_quant_dequant(module): """ Wraps all Conv and Linear submodule with a qconfig with a QuantWrapper :param module: the module to modify """ - module_type = str(type(module)).split(".")[-1].lower() - is_quantizable_module = "conv" in module_type or "linear" in module_type - - if is_quantizable_module and hasattr(module, "qconfig") and module.qconfig: + if ( + type(module) in _QUANTIZABLE_MODULE_TYPES + and hasattr(module, "qconfig") + and module.qconfig + ): return torch_quantization.QuantWrapper(module) for name, child in module.named_children(): @@ -252,7 +281,11 @@

Source code for sparseml.pytorch.optim.quantization.helpers

)
-
[docs]def fuse_module_conv_bn_relus(module: Module, inplace: bool = True) -> Module: +
[docs]def fuse_module_conv_bn_relus( + module: Module, + inplace: bool = True, + override_bn_subclasses_forward: Union[bool, str] = True, +) -> Module: """ Performs fusion of Conv2d, BatchNorm2d, and ReLU layers found in the given module. To be fused, these layers must appear sequentially in @@ -264,6 +297,12 @@

Source code for sparseml.pytorch.optim.quantization.helpers

:param module: the module to fuse :param inplace: set True to perform fusions in-place. default is True + :param override_bn_subclasses_forward: if True, modules that are subclasses of + BatchNorm2d will be modified to be BatchNorm2d but with the forward + pass and state variables copied from the subclass. This is so these + BN modules can pass PyTorch type checking when fusing. Can set to + "override-only" and only parameters will be overwritten, not the + forward pass. Default is True :return: the fused module """ if torch_quantization is None: @@ -288,7 +327,22 @@

Source code for sparseml.pytorch.optim.quantization.helpers

and submodule_name == current_block_submodule_name ): if isinstance(layer, ReLU_nm): - _replace_nm_relu(module, name, layer) + _set_submodule(module, name, ReLU(inplace=layer.inplace)) + if isinstance(layer, BatchNorm2d) and type(layer) is not BatchNorm2d: + if not override_bn_subclasses_forward: + raise RuntimeError( + "Detected a Conv-BN block that uses a subclass of BatchNorm2d. " + "This will cause a type error when fusing with PyTorch; " + "set override_bn_subclasses_forward to True or 'override-only' " + "to modify this BN subclass to be a BatchNorm2d object" + ) + # swap BN subclass with overwritten BN class that will pass torch + # type checking + overwritten_bn = _wrap_bn_sub_class( + layer, + override_forward=override_bn_subclasses_forward != "override-only", + ) + _set_submodule(module, name, overwritten_bn) current_block.append(name) else: if current_block: @@ -304,13 +358,21 @@

Source code for sparseml.pytorch.optim.quantization.helpers

return module
-def _replace_nm_relu(root_module, relu_path, nm_relu): +def _set_submodule(root_module, sub_module_path, sub_module): current_module = root_module - relu_path = relu_path.split(".") - for sub_module in relu_path[:-1]: - current_module = getattr(current_module, sub_module) - new_relu = ReLU(inplace=nm_relu.inplace) - setattr(current_module, relu_path[-1], new_relu) + sub_module_path = sub_module_path.split(".") + for child_module in sub_module_path[:-1]: + current_module = getattr(current_module, child_module) + setattr(current_module, sub_module_path[-1], sub_module) + + +def _wrap_bn_sub_class(bn_subclass, override_forward=True): + batch_norm = BatchNorm2d(bn_subclass.num_features) + batch_norm.__dict__ = bn_subclass.__dict__ + if override_forward: + batch_norm.forward = bn_subclass.forward + del bn_subclass + return batch_norm
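A small sketch of fusing a Conv-BN-ReLU stack with the updated helper; the import path is assumed from this module's package:

import torch
from sparseml.pytorch.optim.quantization import fuse_module_conv_bn_relus

model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 8, 3),
    torch.nn.BatchNorm2d(8),
    torch.nn.ReLU(),
)
# fuse in place; BatchNorm2d subclasses would first be rewrapped as plain BatchNorm2d
fuse_module_conv_bn_relus(model, inplace=True, override_bn_subclasses_forward=True)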
diff --git a/sparseml/_modules/sparseml/pytorch/optim/quantization/quantize_qat_export.html b/sparseml/_modules/sparseml/pytorch/optim/quantization/quantize_qat_export.html index d453bfcd42d..e0348a9d1aa 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/quantization/quantize_qat_export.html +++ b/sparseml/_modules/sparseml/pytorch/optim/quantization/quantize_qat_export.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

@@ -745,15 +746,23 @@

Source code for sparseml.pytorch.optim.quantization.quantize_qat_export

remove_node_and_params_from_graph(model, remove_node) -
[docs]def quantize_torch_qat_export(model: ModelProto, inplace: bool = True) -> ModelProto: +
[docs]def quantize_torch_qat_export( + model: Union[ModelProto, str], + output_file_path: Union[str, None] = None, + inplace: bool = True, +) -> ModelProto: """ - :param model: The model to convert + :param model: The model to convert, or a file path to it + :param output_file_path: File path to save the converted model to :param inplace: If true, does conversion of model in place. Default is true :return: Converts a model exported from a torch QAT session from a QAT graph with fake quantize ops surrounding operations to a quantized graph with quantized operations. All quantized Convs and FC inputs and outputs will be surrounded by fake quantize ops """ + if isinstance(model, str): + model = onnx.load(model) + if not inplace: model = deepcopy(model) @@ -765,6 +774,9 @@

Source code for sparseml.pytorch.optim.quantization.quantize_qat_export

quantize_resnet_identity_add_inputs(model) _remove_duplicate_quantize__ops(model) + if output_file_path: + onnx.save(model, output_file_path) + return model
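A usage sketch of the new file-path convenience; the file names are placeholders and the import path is assumed:

from sparseml.pytorch.optim.quantization import quantize_torch_qat_export

# load the QAT ONNX export, convert it, and write the quantized graph back to disk
quantize_torch_qat_export("model_qat.onnx", output_file_path="model_quantized.onnx")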
diff --git a/sparseml/_modules/sparseml/pytorch/optim/sensitivity_as.html b/sparseml/_modules/sparseml/pytorch/optim/sensitivity_as.html index 778115f1370..45032c96317 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/sensitivity_as.html +++ b/sparseml/_modules/sparseml/pytorch/optim/sensitivity_as.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/sensitivity_lr.html b/sparseml/_modules/sparseml/pytorch/optim/sensitivity_lr.html index 0fb132f877e..4e8b5220173 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/sensitivity_lr.html +++ b/sparseml/_modules/sparseml/pytorch/optim/sensitivity_lr.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/optim/sensitivity_pruning.html b/sparseml/_modules/sparseml/pytorch/optim/sensitivity_pruning.html index d53a2208e3f..ef4da2029e7 100644 --- a/sparseml/_modules/sparseml/pytorch/optim/sensitivity_pruning.html +++ b/sparseml/_modules/sparseml/pytorch/optim/sensitivity_pruning.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/utils/benchmarker.html b/sparseml/_modules/sparseml/pytorch/utils/benchmarker.html index 1c7497cb0ae..9a13212ce8b 100644 --- a/sparseml/_modules/sparseml/pytorch/utils/benchmarker.html +++ b/sparseml/_modules/sparseml/pytorch/utils/benchmarker.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/utils/exporter.html b/sparseml/_modules/sparseml/pytorch/utils/exporter.html index bb757ba9fdc..f5421e57774 100644 --- a/sparseml/_modules/sparseml/pytorch/utils/exporter.html +++ b/sparseml/_modules/sparseml/pytorch/utils/exporter.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/utils/helpers.html b/sparseml/_modules/sparseml/pytorch/utils/helpers.html index bb3ea641ba9..576b2686d66 100644 --- a/sparseml/_modules/sparseml/pytorch/utils/helpers.html +++ b/sparseml/_modules/sparseml/pytorch/utils/helpers.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/utils/logger.html b/sparseml/_modules/sparseml/pytorch/utils/logger.html index 1f21b04d1d9..6685af7f4c0 100644 --- a/sparseml/_modules/sparseml/pytorch/utils/logger.html +++ b/sparseml/_modules/sparseml/pytorch/utils/logger.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/utils/loss.html b/sparseml/_modules/sparseml/pytorch/utils/loss.html index 29388f08303..d9289cdee99 100644 --- a/sparseml/_modules/sparseml/pytorch/utils/loss.html +++ b/sparseml/_modules/sparseml/pytorch/utils/loss.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/utils/model.html b/sparseml/_modules/sparseml/pytorch/utils/model.html index 7bddeffd30d..774b0c68747 100644 --- a/sparseml/_modules/sparseml/pytorch/utils/model.html +++ b/sparseml/_modules/sparseml/pytorch/utils/model.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

@@ -200,6 +201,7 @@

Source code for sparseml.pytorch.utils.model

from torch.optim.optimizer import Optimizer
 
 from sparseml.utils.helpers import create_parent_dirs
+from sparsezoo import Zoo
 
 
 try:
@@ -233,7 +235,10 @@ 

Source code for sparseml.pytorch.utils.model

"""
     Load the state dict into a model from a given file.
 
-    :param path: the path to the pth file to load the state dict from
+    :param path: the path to the pth file to load the state dict from.
+        May also be a SparseZoo stub path preceded by 'zoo:' with the optional
+        `?recipe_type=` argument. If given a recipe type, the base model weights
+        for that recipe will be loaded.
     :param model: the model to load the state dict into
     :param strict: True to enforce that all tensors match between the model
         and the file; False otherwise
@@ -243,6 +248,15 @@ 

Source code for sparseml.pytorch.utils.model

        look like they came from DataParallel type setup (start with module.).
         This removes "module." all keys
     """
+    if path.startswith("zoo:"):
+        if "recipe_type=" in path:
+            path = Zoo.download_recipe_base_framework_files(path, extensions=[".pth"])[
+                0
+            ]
+        else:
+            path = Zoo.load_model_from_stub(path).download_framework_files(
+                extensions=[".pth"]
+            )[0]
     model_dict = torch.load(path, map_location="cpu")
     current_dict = model.state_dict()
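A minimal usage sketch of the stub handling shown above, assuming the surrounding function is exported as sparseml.pytorch.utils.load_model (the name and import path are assumptions) and using the placeholder stub from the docstring:

# Minimal usage sketch; assumes the function above is exported as
# sparseml.pytorch.utils.load_model and that torchvision is installed.
# The stub below is the docstring's placeholder, not a real model path.
from torchvision.models import resnet50
from sparseml.pytorch.utils import load_model

model = resnet50()

# A local checkpoint works as before:
# load_model("/path/to/checkpoint.pth", model, strict=True)

# With the change above, a SparseZoo stub may be passed instead; the matching
# .pth framework file (the recipe's base weights when a recipe_type is given)
# is downloaded and its state dict loaded into the model:
load_model("zoo:model/stub/path?recipe_type=transfer", model, strict=True)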
 
diff --git a/sparseml/_modules/sparseml/pytorch/utils/module.html b/sparseml/_modules/sparseml/pytorch/utils/module.html
index 4504a5a58ae..9bf1884a106 100644
--- a/sparseml/_modules/sparseml/pytorch/utils/module.html
+++ b/sparseml/_modules/sparseml/pytorch/utils/module.html
@@ -98,16 +98,17 @@
 
 

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/utils/ssd_helpers.html b/sparseml/_modules/sparseml/pytorch/utils/ssd_helpers.html index a720efcfbae..fa6ad025e3a 100644 --- a/sparseml/_modules/sparseml/pytorch/utils/ssd_helpers.html +++ b/sparseml/_modules/sparseml/pytorch/utils/ssd_helpers.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/pytorch/utils/yolo_helpers.html b/sparseml/_modules/sparseml/pytorch/utils/yolo_helpers.html index 4efa2bc6bd8..3122269f339 100644 --- a/sparseml/_modules/sparseml/pytorch/utils/yolo_helpers.html +++ b/sparseml/_modules/sparseml/pytorch/utils/yolo_helpers.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/cifar.html b/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/cifar.html index 6ff051793fd..eae5cab7ab4 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/cifar.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/cifar.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/imagefolder.html b/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/imagefolder.html index 4eca0d4ce0a..0b3d18546cd 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/imagefolder.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/imagefolder.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/imagenet.html b/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/imagenet.html index bd3e41d04d4..8881a275631 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/imagenet.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/imagenet.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/imagenette.html b/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/imagenette.html index 7d60fdbb8bb..2cbb0abcd9f 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/imagenette.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/datasets/classification/imagenette.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/datasets/dataset.html b/sparseml/_modules/sparseml/tensorflow_v1/datasets/dataset.html index 9fab2875c4c..cf0091312e0 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/datasets/dataset.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/datasets/dataset.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/datasets/helpers.html b/sparseml/_modules/sparseml/tensorflow_v1/datasets/helpers.html index aea83a16b25..469b26e2edf 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/datasets/helpers.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/datasets/helpers.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/datasets/registry.html b/sparseml/_modules/sparseml/tensorflow_v1/datasets/registry.html index c4c029845ca..4ff2bfd6723 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/datasets/registry.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/datasets/registry.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/models/classification/mnist.html b/sparseml/_modules/sparseml/tensorflow_v1/models/classification/mnist.html index 7db5b2936c2..abf37148efe 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/models/classification/mnist.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/models/classification/mnist.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/models/classification/mobilenet.html b/sparseml/_modules/sparseml/tensorflow_v1/models/classification/mobilenet.html index 70965fe44ee..9571d572122 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/models/classification/mobilenet.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/models/classification/mobilenet.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/models/classification/mobilenet_v2.html b/sparseml/_modules/sparseml/tensorflow_v1/models/classification/mobilenet_v2.html index 59671abd317..6c992ec77e3 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/models/classification/mobilenet_v2.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/models/classification/mobilenet_v2.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/models/classification/resnet.html b/sparseml/_modules/sparseml/tensorflow_v1/models/classification/resnet.html index a701161df7d..56d349af9fa 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/models/classification/resnet.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/models/classification/resnet.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/models/classification/vgg.html b/sparseml/_modules/sparseml/tensorflow_v1/models/classification/vgg.html index 425034550cb..295513bbece 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/models/classification/vgg.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/models/classification/vgg.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/models/estimator.html b/sparseml/_modules/sparseml/tensorflow_v1/models/estimator.html index 7305c4554ae..4599ba94416 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/models/estimator.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/models/estimator.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/models/registry.html b/sparseml/_modules/sparseml/tensorflow_v1/models/registry.html index 3e2a27cc877..606d949dd99 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/models/registry.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/models/registry.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/nn/layers.html b/sparseml/_modules/sparseml/tensorflow_v1/nn/layers.html index 4da12e5c26e..2674cb09900 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/nn/layers.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/nn/layers.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/optim/analyzer_module.html b/sparseml/_modules/sparseml/tensorflow_v1/optim/analyzer_module.html index 31582575f01..b109912a8ce 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/optim/analyzer_module.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/optim/analyzer_module.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/optim/manager.html b/sparseml/_modules/sparseml/tensorflow_v1/optim/manager.html index b8879c15e1c..f9900f2ab51 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/optim/manager.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/optim/manager.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

@@ -201,6 +202,7 @@

Source code for sparseml.tensorflow_v1.optim.manager

from sparseml.tensorflow_v1.optim.modifier import NM_RECAL, Modifier, ScheduledModifier from sparseml.tensorflow_v1.utils import tf_compat from sparseml.utils import load_recipe_yaml_str +from sparsezoo.objects import OptimizationRecipe __all__ = ["ScheduledModifierManager"] @@ -250,15 +252,23 @@

Source code for sparseml.tensorflow_v1.optim.manager

"""
[docs] @staticmethod - def from_yaml(file_path: str, add_modifiers: List[Modifier] = None): + def from_yaml( + file_path: Union[str, OptimizationRecipe], + add_modifiers: List[Modifier] = None, + ): """ - Convenience function used to create the manager of multiple modifiers - from a yaml file. - - :param file_path: the path to the yaml file to load the modifier from + Convenience function used to create the manager of multiple modifiers from a + recipe file. + + :param file_path: the path to the recipe file to load the modifier from, or + a SparseZoo model stub to load a recipe for a model stored in SparseZoo. + SparseZoo stubs should be preceded by 'zoo:', and can contain an optional + '?recipe_type=<type>' parameter. Can also be a SparseZoo OptimizationRecipe + object. i.e. '/path/to/local/recipe.yaml', 'zoo:model/stub/path', + 'zoo:model/stub/path?recipe_type=transfer' :param add_modifiers: additional modifiers that should be added to the - returned manager alongside the ones loaded from the yaml file - :return: ScheduledModifierManager() created from the yaml file + returned manager alongside the ones loaded from the recipe file + :return: ScheduledModifierManager() created from the recipe file """ yaml_str = load_recipe_yaml_str(file_path) modifiers = Modifier.load_list(yaml_str) diff --git a/sparseml/_modules/sparseml/tensorflow_v1/optim/mask_creator_pruning.html b/sparseml/_modules/sparseml/tensorflow_v1/optim/mask_creator_pruning.html index e4708162982..824d7753763 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/optim/mask_creator_pruning.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/optim/mask_creator_pruning.html @@ -98,16 +98,17 @@
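A minimal usage sketch of the updated from_yaml, assuming ScheduledModifierManager is re-exported from sparseml.tensorflow_v1.optim; the recipe path and stub are the placeholders from the docstring:

# Minimal usage sketch; the import location is assumed and the recipe paths
# are the docstring's placeholders.
from sparseml.tensorflow_v1.optim import ScheduledModifierManager

# A local recipe file, as before:
manager = ScheduledModifierManager.from_yaml("/path/to/local/recipe.yaml")

# A SparseZoo stub: the recipe is downloaded, then parsed into modifiers:
manager = ScheduledModifierManager.from_yaml(
    "zoo:model/stub/path?recipe_type=transfer"
)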

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/optim/mask_pruning.html b/sparseml/_modules/sparseml/tensorflow_v1/optim/mask_pruning.html index 68632c9bc54..e03b0a825bc 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/optim/mask_pruning.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/optim/mask_pruning.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier.html b/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier.html index d90f4954e3b..90e71a013c4 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_epoch.html b/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_epoch.html index 0aa6afc35f1..65434fa1892 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_epoch.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_epoch.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_lr.html b/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_lr.html index 44ac066483f..b2104396fc6 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_lr.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_lr.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_params.html b/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_params.html index 951067476d5..3cc3d80ac67 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_params.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_params.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_pruning.html b/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_pruning.html index 60df3b6f2ba..bed16ce6517 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_pruning.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/optim/modifier_pruning.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

@@ -232,7 +233,7 @@

Source code for sparseml.tensorflow_v1.optim.modifier_pruning

Useful for transfer learning use cases. | Sample yaml: - | !ConstantKSModifier + | !ConstantPruningModifier | params: __ALL__ | start_epoch: 0.0 | end_epoch: 10.0 @@ -413,7 +414,7 @@

Source code for sparseml.tensorflow_v1.optim.modifier_pruning

Applies based on magnitude pruning without any structure to the pruning. | Sample yaml: - | !GradualKSModifier + | !GMPruningModifier | params: __ALL__ | init_sparsity: 0.05 | final_sparsity: 0.8 diff --git a/sparseml/_modules/sparseml/tensorflow_v1/optim/schedule_lr.html b/sparseml/_modules/sparseml/tensorflow_v1/optim/schedule_lr.html index 8df66f4e752..1c8c3027888 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/optim/schedule_lr.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/optim/schedule_lr.html @@ -98,16 +98,17 @@
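A hedged sketch of a recipe using the renamed modifiers above, parsed the same way the manager does internally via Modifier.load_list; any field beyond the sample yaml values (for example update_frequency) is an assumption about what each modifier requires.

# Sketch only: a minimal recipe string with the renamed modifiers, loaded via
# Modifier.load_list as the manager does internally. The exact required
# fields per modifier are an assumption here.
from sparseml.tensorflow_v1.optim.modifier import Modifier

recipe_yaml = """
- !ConstantPruningModifier
    params: __ALL__
    start_epoch: 0.0
    end_epoch: 10.0

- !GMPruningModifier
    params: __ALL__
    init_sparsity: 0.05
    final_sparsity: 0.8
    start_epoch: 0.0
    end_epoch: 10.0
    update_frequency: 1.0
"""

modifiers = Modifier.load_list(recipe_yaml)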

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/optim/sensitivity_pruning.html b/sparseml/_modules/sparseml/tensorflow_v1/optim/sensitivity_pruning.html index 1fc09fa11cb..c7a37144591 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/optim/sensitivity_pruning.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/optim/sensitivity_pruning.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/utils/exporter.html b/sparseml/_modules/sparseml/tensorflow_v1/utils/exporter.html index db4230fe0bb..63853b5a570 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/utils/exporter.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/utils/exporter.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/utils/loss.html b/sparseml/_modules/sparseml/tensorflow_v1/utils/loss.html index 5359c277c2e..1fbab0c9964 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/utils/loss.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/utils/loss.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/utils/nets_utils.html b/sparseml/_modules/sparseml/tensorflow_v1/utils/nets_utils.html index e21a6b4771b..4ed80c11b7a 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/utils/nets_utils.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/utils/nets_utils.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/utils/summary.html b/sparseml/_modules/sparseml/tensorflow_v1/utils/summary.html index 980d1e9e5a9..4b5258fb5b5 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/utils/summary.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/utils/summary.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/tensorflow_v1/utils/variable.html b/sparseml/_modules/sparseml/tensorflow_v1/utils/variable.html index 17431faeb29..690e17a5c07 100644 --- a/sparseml/_modules/sparseml/tensorflow_v1/utils/variable.html +++ b/sparseml/_modules/sparseml/tensorflow_v1/utils/variable.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/utils/datasets/helpers.html b/sparseml/_modules/sparseml/utils/datasets/helpers.html index 242d08a19b6..1afa76014e9 100644 --- a/sparseml/_modules/sparseml/utils/datasets/helpers.html +++ b/sparseml/_modules/sparseml/utils/datasets/helpers.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/utils/datasets/imagenette.html b/sparseml/_modules/sparseml/utils/datasets/imagenette.html index 9a9b92b8fbc..ac59f0862a5 100644 --- a/sparseml/_modules/sparseml/utils/datasets/imagenette.html +++ b/sparseml/_modules/sparseml/utils/datasets/imagenette.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/utils/helpers.html b/sparseml/_modules/sparseml/utils/helpers.html index ea36ce39182..0f533fcd6b6 100644 --- a/sparseml/_modules/sparseml/utils/helpers.html +++ b/sparseml/_modules/sparseml/utils/helpers.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

@@ -205,6 +206,8 @@

Source code for sparseml.utils.helpers

 
 import numpy
 
+from sparsezoo import Zoo
+from sparsezoo.objects import OptimizationRecipe
 from sparsezoo.utils import load_numpy_list
 
 
@@ -939,16 +942,30 @@ 

Source code for sparseml.utils.helpers

     )
 
 
-
[docs]def load_recipe_yaml_str(file_path: str) -> str: +
[docs]def load_recipe_yaml_str(file_path: Union[str, OptimizationRecipe]) -> str: """ Loads a YAML recipe file to a string or extracts recipe from YAML front matter in a sparsezoo markdown recipe card. + Recipes can also be provided as SparseZoo model stubs or OptimizationRecipe + objects. YAML front matter: https://jekyllrb.com/docs/front-matter/ - :param file_path: file path to recipe YAML file or markdown recipe card + :param file_path: file path to recipe YAML file or markdown recipe card or + stub to a SparseZoo model whose recipe will be downloaded and loaded. + SparseZoo stubs should be preceded by 'zoo:', and can contain an optional + '?recipe_type=<type>' parameter. Can also be a SparseZoo OptimizationRecipe + object. i.e. '/path/to/local/recipe.yaml', 'zoo:model/stub/path', + 'zoo:model/stub/path?recipe_type=transfer' :return: the recipe YAML configuration loaded as a string """ + if isinstance(file_path, OptimizationRecipe): + # download and unwrap OptimizationRecipe object + file_path = file_path.downloaded_path() + elif file_path.startswith("zoo:"): + # download from zoo stub + file_path = Zoo.download_recipe_from_stub(file_path) + extension = file_path.lower().split(".")[-1] if extension not in ["md", "yaml"]: raise ValueError( diff --git a/sparseml/_modules/sparseml/utils/singleton.html b/sparseml/_modules/sparseml/utils/singleton.html index 83658e1bd52..eb6e2837717 100644 --- a/sparseml/_modules/sparseml/utils/singleton.html +++ b/sparseml/_modules/sparseml/utils/singleton.html @@ -98,16 +98,17 @@
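A minimal usage sketch of the updated load_recipe_yaml_str; the recipe path and stub are the docstring's placeholders.

# Minimal usage sketch; the recipe paths are the docstring's placeholders.
from sparseml.utils import load_recipe_yaml_str

# A local recipe file or markdown recipe card, as before:
yaml_str = load_recipe_yaml_str("/path/to/local/recipe.yaml")

# A SparseZoo stub: the recipe is downloaded first, then loaded to a string:
yaml_str = load_recipe_yaml_str("zoo:model/stub/path?recipe_type=transfer")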

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/utils/worker.html b/sparseml/_modules/sparseml/utils/worker.html index ce85cada75c..2ba3a469293 100644 --- a/sparseml/_modules/sparseml/utils/worker.html +++ b/sparseml/_modules/sparseml/utils/worker.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/sparseml/utils/wrapper.html b/sparseml/_modules/sparseml/utils/wrapper.html index da529aadd11..6c623873a0e 100644 --- a/sparseml/_modules/sparseml/utils/wrapper.html +++ b/sparseml/_modules/sparseml/utils/wrapper.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/_modules/tensorflow/python/ops/math_ops.html b/sparseml/_modules/tensorflow/python/ops/math_ops.html index 2574647da6e..119591176b7 100644 --- a/sparseml/_modules/tensorflow/python/ops/math_ops.html +++ b/sparseml/_modules/tensorflow/python/ops/math_ops.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

@@ -258,10 +259,8 @@

Source code for tensorflow.python.ops.math_ops

from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape -from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_array_ops -from tensorflow.python.ops import gen_bitwise_ops from tensorflow.python.ops import gen_data_flow_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import gen_nn_ops @@ -275,131 +274,16 @@

Source code for tensorflow.python.ops.math_ops

from tensorflow.python.util import deprecation from tensorflow.python.util import dispatch from tensorflow.python.util import nest -from tensorflow.python.util.compat import collections_abc from tensorflow.python.util.tf_export import tf_export # Aliases for some automatically-generated names. +linspace = gen_math_ops.lin_space nextafter = gen_math_ops.next_after - -@tf_export("linspace", v1=["lin_space", "linspace"]) -@dispatch.add_dispatch_support -@deprecation.deprecated_endpoints("lin_space") -def linspace_nd(start, stop, num, name=None, axis=0): - r"""Generates evenly-spaced values in an interval along a given axis. - - A sequence of `num` evenly-spaced values are generated beginning at `start` - along a given `axis`. - If `num > 1`, the values in the sequence increase by `stop - start / num - 1`, - so that the last one is exactly `stop`. If `num <= 0`, `ValueError` is raised. - - Matches - [np.linspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)'s - behaviour - except when `num == 0`. - - For example: - - ``` - tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0] - ``` - - `Start` and `stop` can be tensors of arbitrary size: - - >>> tf.linspace([0., 5.], [10., 40.], 5, axis=0) - <tf.Tensor: shape=(5, 2), dtype=float32, numpy= - array([[ 0. , 5. ], - [ 2.5 , 13.75], - [ 5. , 22.5 ], - [ 7.5 , 31.25], - [10. , 40. ]], dtype=float32)> - - `Axis` is where the values will be generated (the dimension in the - returned tensor which corresponds to the axis will be equal to `num`) - - >>> tf.linspace([0., 5.], [10., 40.], 5, axis=-1) - <tf.Tensor: shape=(2, 5), dtype=float32, numpy= - array([[ 0. , 2.5 , 5. , 7.5 , 10. ], - [ 5. , 13.75, 22.5 , 31.25, 40. ]], dtype=float32)> - - - - Args: - start: A `Tensor`. Must be one of the following types: `bfloat16`, - `float32`, `float64`. N-D tensor. First entry in the range. - stop: A `Tensor`. Must have the same type and shape as `start`. N-D tensor. - Last entry in the range. - num: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D - tensor. Number of values to generate. - name: A name for the operation (optional). - axis: Axis along which the operation is performed (used only when N-D - tensors are provided). - - Returns: - A `Tensor`. Has the same type as `start`. - """ - - with ops.name_scope(name, "linspace", [start, stop]): - start = ops.convert_to_tensor(start, name="start") - # stop must be convertible to the same dtype as start - stop = ops.convert_to_tensor(stop, name="stop", dtype=start.dtype) - num_int = array_ops.convert_to_int_tensor(num, name="num") - num = cast(num_int, dtype=start.dtype) - - broadcast_shape = array_ops.broadcast_dynamic_shape( - array_ops.shape(start), array_ops.shape(stop)) - start = array_ops.broadcast_to(start, broadcast_shape) - stop = array_ops.broadcast_to(stop, broadcast_shape) - - expanded_start = array_ops.expand_dims(start, axis=axis) - expanded_stop = array_ops.expand_dims(stop, axis=axis) - - shape = array_ops.shape(expanded_start) - ndims = array_ops.shape(shape)[0] - - axis = array_ops.where_v2(axis >= 0, axis, ndims + axis) - - # The purpose is to avoid having negative values when repeating. - num_fill = gen_math_ops.maximum(num_int - 2, 0) - # To avoid having negative values in the range or zero division - # the result is sliced in the end so a correct result is returned for - # num == 1, and num == 0. 
- n_steps = gen_math_ops.maximum(num_int - 1, 1) - delta = (expanded_stop - expanded_start) / cast(n_steps, - expanded_stop.dtype) - # Re-cast tensors as delta. - expanded_start = cast(expanded_start, delta.dtype) - expanded_stop = cast(expanded_stop, delta.dtype) - # If num < 0, we will throw exception in the range - # otherwise use the same div for delta - range_end = array_ops.where_v2(num_int >= 0, n_steps, -1) - # Even though range supports an output dtype, its limited - # (e.g. doesn't support half at the moment). - desired_range = cast(range(1, range_end, dtype=dtypes.int64), delta.dtype) - mask = gen_math_ops.equal(axis, range(ndims)) - # desired_range_shape is [1. 1. 1. ... 1. num_fill 1. 1. ... 1.], where the - # index of num_fill is equal to axis. - desired_range_shape = array_ops.where_v2(mask, num_fill, 1) - desired_range = array_ops.reshape(desired_range, desired_range_shape) - - res = expanded_start + delta * desired_range - - # Add the start and endpoints to the result, and slice out the desired - # portion. - all_tensors = (expanded_start, res, expanded_stop) - concatenated = array_ops.concat(all_tensors, axis=axis) - begin = array_ops.zeros_like(shape) - size = array_ops.where_v2(mask, num_int, shape) - - return array_ops.slice(concatenated, begin, size) - - -linspace = linspace_nd - arg_max = deprecation.deprecated(None, "Use `tf.math.argmax` instead")(arg_max) # pylint: disable=used-before-assignment arg_min = deprecation.deprecated(None, "Use `tf.math.argmin` instead")(arg_min) # pylint: disable=used-before-assignment -tf_export(v1=["arg_max"])(dispatch.add_dispatch_support(arg_max)) -tf_export(v1=["arg_min"])(dispatch.add_dispatch_support(arg_min)) +tf_export(v1=["arg_max"])(arg_max) +tf_export(v1=["arg_min"])(arg_min) # This is set by resource_variable_ops.py. It is included in this way since @@ -418,7 +302,6 @@

Source code for tensorflow.python.ops.math_ops

# pylint: disable=redefined-builtin @tf_export(v1=["math.argmax", "argmax"]) -@dispatch.add_dispatch_support @deprecation.deprecated_args(None, "Use the `axis` argument instead", "dimension") @_set_doc( @@ -435,11 +318,10 @@

Source code for tensorflow.python.ops.math_ops

@tf_export("math.argmax", "argmax", v1=[]) -@dispatch.add_dispatch_support def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None): """Returns the index with the largest value across axes of a tensor. - In case of identity returns the smallest index. + Note that in case of ties the identity of the return value is not guaranteed. For example: @@ -452,9 +334,6 @@

Source code for tensorflow.python.ops.math_ops

<tf.Tensor: shape=(5,), dtype=int64, numpy=array([2, 2, 0, 2, 2])> >>> tf.math.argmax(B, 1) <tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 2, 1])> - >>> C = tf.constant([0, 0, 0, 0]) - >>> tf.math.argmax(C) # Returns smallest index in case of ties - <tf.Tensor: shape=(), dtype=int64, numpy=0> Args: input: A `Tensor`. @@ -472,7 +351,6 @@

Source code for tensorflow.python.ops.math_ops

@tf_export(v1=["math.argmin", "argmin"]) -@dispatch.add_dispatch_support @deprecation.deprecated_args(None, "Use the `axis` argument instead", "dimension") @_set_doc( @@ -489,11 +367,10 @@

Source code for tensorflow.python.ops.math_ops

@tf_export("math.argmin", "argmin", v1=[]) -@dispatch.add_dispatch_support def argmin_v2(input, axis=None, output_type=dtypes.int64, name=None): """Returns the index with the smallest value across axes of a tensor. - Returns the smallest index in case of ties. + Note that in case of ties the identity of the return value is not guaranteed. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, @@ -542,18 +419,9 @@

Source code for tensorflow.python.ops.math_ops

Given a tensor `x` of complex numbers, this operation returns a tensor of type `float32` or `float64` that is the absolute value of each element in `x`. For - a complex number \\(a + bj\\), its absolute value is computed as - \\(\sqrt{a^2 + b^2}\\). + a complex number \\(a + bj\\), its absolute value is computed as \\(\sqrt{a^2 + + b^2}\\). For example: - For example: - - >>> # real number - >>> x = tf.constant([-2.25, 3.25]) - >>> tf.abs(x) - <tf.Tensor: shape=(2,), dtype=float32, - numpy=array([2.25, 3.25], dtype=float32)> - - >>> # complex number >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]]) >>> tf.abs(x) <tf.Tensor: shape=(2, 1), dtype=float64, numpy= @@ -638,10 +506,6 @@

Source code for tensorflow.python.ops.math_ops

# override names. Use a dummy class to track the runtime division behavior return DivideDelegateWithName(x, name) / y else: - # We do conversion here to make sure at least x is a tensor. - if not tensor_util.is_tensor(x): - dtype = y.dtype.base_dtype if tensor_util.is_tensor(y) else None - x = ops.convert_to_tensor(x, dtype=dtype) return x / y @@ -709,31 +573,6 @@

Source code for tensorflow.python.ops.math_ops

@tf_export("math.subtract", "subtract") @dispatch.add_dispatch_support def subtract(x, y, name=None): - """Returns x - y element-wise. - - *Note*: Subtract supports broadcasting. More about broadcasting - [here](https://numpy.org/doc/stable/user/basics.broadcasting.html) - - Both input and output have a range `(-inf, inf)`. - - For example: - - >>> x = tf.constant([1.0, -1.0, 5.0, -2.0, 0.0]) - >>> y = tf.constant([5.0, 1.0, 3.7, -19.9, float("inf")]) - >>> tf.subtract(x,y) - <tf.Tensor: shape=(5,), dtype=float32, - numpy= array([-4. , -2. , 1.3, 17.9, -inf], dtype=float32)> - - Args: - x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, - `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, - `complex64`, `complex128`, `string`. - y: A `Tensor`. Must have the same type as x. - name: A name for the operation (optional). - - Returns: - A `Tensor`. Has the same type as x. - """ return gen_math_ops.sub(x, y, name) @@ -778,7 +617,6 @@

Source code for tensorflow.python.ops.math_ops

@tf_export(v1=["math.scalar_mul", "scalar_mul"]) -@dispatch.add_dispatch_support def scalar_mul(scalar, x, name=None): """Multiplies a scalar times a `Tensor` or `IndexedSlices` object. @@ -811,7 +649,6 @@

Source code for tensorflow.python.ops.math_ops

@tf_export("math.scalar_mul", "scalar_mul", v1=[]) -@dispatch.add_dispatch_support @_set_doc(scalar_mul.__doc__) def scalar_mul_v2(scalar, x, name=None): with ops.name_scope(name, "scalar_mul", [x]) as name: @@ -895,27 +732,20 @@

Source code for tensorflow.python.ops.math_ops

@tf_export("math.sign", "sign") @dispatch.add_dispatch_support def sign(x, name=None): - r"""Returns an element-wise indication of the sign of a number. + """Returns an element-wise indication of the sign of a number. - `y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0`. + y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0. - For complex numbers, `y = sign(x) = x / |x| if x != 0, otherwise y = 0`. + For complex numbers, y = sign(x) = x / |x| if x != 0, otherwise y = 0. Example usage: - >>> # real number >>> tf.math.sign([0., 2., -3.]) - <tf.Tensor: shape=(3,), dtype=float32, - numpy=array([ 0., 1., -1.], dtype=float32)> - - >>> # complex number - >>> tf.math.sign([1 + 1j, 0 + 0j]) - <tf.Tensor: shape=(2,), dtype=complex128, - numpy=array([0.70710678+0.70710678j, 0. +0.j ])> + <tf.Tensor: ... numpy=array([ 0., 1., -1.], dtype=float32)> Args: x: A Tensor. Must be one of the following types: bfloat16, half, float32, - float64, int32, int64, complex64, complex128. + float64, int32, int64, complex64, complex128. name: A name for the operation (optional). Returns: @@ -925,7 +755,7 @@

Source code for tensorflow.python.ops.math_ops

tf.math.sign(x.values, ...), x.dense_shape). """ x = ops.convert_to_tensor(x) - if x.dtype.is_complex: + if x.dtype in (dtypes.complex64, dtypes.complex128): return gen_math_ops.div_no_nan( x, cast( @@ -939,7 +769,6 @@

Source code for tensorflow.python.ops.math_ops

@tf_export("math.real", v1=["math.real", "real"]) -@dispatch.add_dispatch_support @deprecation.deprecated_endpoints("real") @dispatch.add_dispatch_support def real(input, name=None): @@ -974,7 +803,6 @@

Source code for tensorflow.python.ops.math_ops

@tf_export("math.imag", v1=["math.imag", "imag"]) -@dispatch.add_dispatch_support @deprecation.deprecated_endpoints("imag") @dispatch.add_dispatch_support def imag(input, name=None): @@ -1008,7 +836,6 @@

Source code for tensorflow.python.ops.math_ops

@tf_export("math.angle", v1=["math.angle", "angle"]) -@dispatch.add_dispatch_support @deprecation.deprecated_endpoints("angle") @dispatch.add_dispatch_support def angle(input, name=None): @@ -1045,8 +872,7 @@

Source code for tensorflow.python.ops.math_ops

if input.dtype.is_complex: return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name) else: - return array_ops.where(input < 0, np.pi * array_ops.ones_like(input), - array_ops.zeros_like(input)) + return array_ops.zeros_like(input) # pylint: enable=redefined-outer-name,redefined-builtin @@ -1103,8 +929,6 @@

Source code for tensorflow.python.ops.math_ops

returned value is set to `0`. The handling of complex types here matches the behavior of numpy. - Note casting nan and inf values to integral types has undefined behavior. - Args: x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, @@ -1180,7 +1004,6 @@

Source code for tensorflow.python.ops.math_ops

@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.") @tf_export(v1=["to_float"]) -@dispatch.add_dispatch_support def to_float(x, name="ToFloat"): """Casts a tensor to type `float32`. @@ -1200,7 +1023,6 @@

Source code for tensorflow.python.ops.math_ops

@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.") @tf_export(v1=["to_double"]) -@dispatch.add_dispatch_support def to_double(x, name="ToDouble"): """Casts a tensor to type `float64`. @@ -1220,7 +1042,6 @@

Source code for tensorflow.python.ops.math_ops

@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.") @tf_export(v1=["to_int32"]) -@dispatch.add_dispatch_support def to_int32(x, name="ToInt32"): """Casts a tensor to type `int32`. @@ -1240,7 +1061,6 @@

Source code for tensorflow.python.ops.math_ops

@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.") @tf_export(v1=["to_int64"]) -@dispatch.add_dispatch_support def to_int64(x, name="ToInt64"): """Casts a tensor to type `int64`. @@ -1260,7 +1080,6 @@

Source code for tensorflow.python.ops.math_ops

@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.") @tf_export(v1=["to_bfloat16"]) -@dispatch.add_dispatch_support def to_bfloat16(x, name="ToBFloat16"): """Casts a tensor to type `bfloat16`. @@ -1280,7 +1099,6 @@

Source code for tensorflow.python.ops.math_ops

@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.") @tf_export(v1=["to_complex64"]) -@dispatch.add_dispatch_support def to_complex64(x, name="ToComplex64"): """Casts a tensor to type `complex64`. @@ -1300,7 +1118,6 @@

Source code for tensorflow.python.ops.math_ops

@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.") @tf_export(v1=["to_complex128"]) -@dispatch.add_dispatch_support def to_complex128(x, name="ToComplex128"): """Casts a tensor to type `complex128`. @@ -1320,6 +1137,10 @@

Source code for tensorflow.python.ops.math_ops

ops.Tensor._override_operator("__neg__", gen_math_ops.neg) ops.Tensor._override_operator("__abs__", abs) +# __invert__ corresponds to the ~ operator. Here we follow the numpy convention +# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean +# tensors and will throw a TypeError if used on nonboolean arrays +ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not) def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor): @@ -1336,26 +1157,21 @@

Source code for tensorflow.python.ops.math_ops

def binary_op_wrapper(x, y): with ops.name_scope(None, op_name, [x, y]) as name: - try: + if isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor): return func(x, y, name=name) - except (TypeError, ValueError) as e: - # Even if dispatching the op failed, the RHS may be a tensor aware - # object that can implement the operator with knowledge of itself - # and the tensor. - # If the RHS is not tensor aware we still want to raise the - # original error from the LHS, because it may be more - # informative. - if hasattr(type(y), "__r%s__" % op_name): - try: - r_op = getattr(y, "__r%s__" % op_name) - out = r_op(x) - if out is NotImplemented: - raise - return out - except (TypeError, ValueError): - raise e - else: - raise + elif not isinstance(y, sparse_tensor.SparseTensor): + try: + y = ops.convert_to_tensor_v2( + y, dtype_hint=x.dtype.base_dtype, name="y") + except TypeError: + # If the RHS is not a tensor, it might be a tensor aware object + # that can implement the operator with knowledge of itself + # and the tensor. + if hasattr(type(y), "__r%s__" % op_name): + return NotImplemented + else: + raise + return func(x, y, name=name) def binary_op_wrapper_sparse(sp_x, y): with ops.name_scope(None, op_name, [sp_x, y]) as name: @@ -1435,7 +1251,7 @@

Source code for tensorflow.python.ops.math_ops

def _truediv_python3(x, y, name=None): with ops.name_scope(name, "truediv", [x, y]) as name: x = ops.convert_to_tensor(x, name="x") - y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y") + y = ops.convert_to_tensor(y, name="y") x_dtype = x.dtype.base_dtype y_dtype = y.dtype.base_dtype if x_dtype != y_dtype: @@ -1516,7 +1332,6 @@

Source code for tensorflow.python.ops.math_ops

date=None, instructions="Deprecated in favor of operator or tf.math.divide.") @tf_export(v1=["div"]) -@dispatch.add_dispatch_support def div(x, y, name=None): """Divides x / y elementwise (using Python 2 division operator semantics). @@ -1540,7 +1355,6 @@

Source code for tensorflow.python.ops.math_ops

@tf_export("math.divide_no_nan", v1=["math.divide_no_nan", "div_no_nan"]) -@dispatch.add_dispatch_support @deprecation.deprecated_endpoints("div_no_nan") @dispatch.add_dispatch_support def div_no_nan(x, y, name=None): @@ -1631,31 +1445,8 @@

Source code for tensorflow.python.ops.math_ops

floormod = gen_math_ops.floor_mod -@tf_export("__operators__.add", v1=[]) -@dispatch.add_dispatch_support def _add_dispatch(x, y, name=None): - """The operation invoked by the `Tensor.__add__` operator. - - Purpose in the API: - - This method is exposed in TensorFlow's API so that library developers - can register dispatching for `Tensor.__add__` to allow it to handle - custom composite tensors & other custom objects. - - The API symbol is not intended to be called by users directly and does - appear in TensorFlow's generated documentation. - - Args: - x: The left-hand side of the `+` operator. - y: The right-hand side of the `+` operator. - name: an optional name for the operation. - - Returns: - The result of the elementwise `+` operation. - """ - if not isinstance(y, ops.Tensor) and not isinstance( - y, sparse_tensor.SparseTensor): - y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y") + """Dispatches to add for strings and add_v2 for all other types.""" if x.dtype == dtypes.string: return gen_math_ops.add(x, y, name=name) else: @@ -1664,12 +1455,14 @@

Source code for tensorflow.python.ops.math_ops

def _mul_dispatch(x, y, name=None): """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse".""" - if isinstance(y, sparse_tensor.SparseTensor): # Case: Dense * Sparse. + is_tensor_y = isinstance(y, ops.Tensor) + if is_tensor_y: + return gen_math_ops.mul(x, y, name=name) + else: + assert isinstance(y, sparse_tensor.SparseTensor) # Case: Dense * Sparse. new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values, y.dense_shape, x, name) return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape) - else: - return multiply(x, y, name=name) # NOTE(aselle): When integer division is added for sparse_dense_cwise, @@ -1683,10 +1476,10 @@

Source code for tensorflow.python.ops.math_ops

sparse_tensor.SparseTensor) _OverrideBinaryOperatorHelper(_add_dispatch, "add") -_OverrideBinaryOperatorHelper(subtract, "sub") +_OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub") _OverrideBinaryOperatorHelper(_mul_dispatch, "mul") -_OverrideBinaryOperatorHelper(div, "div") -_OverrideBinaryOperatorHelper(truediv, "truediv") +_OverrideBinaryOperatorHelper(_div_python2, "div") +_OverrideBinaryOperatorHelper(_truediv_python3, "truediv") _OverrideBinaryOperatorHelper(floordiv, "floordiv") _OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod") _OverrideBinaryOperatorHelper(pow, "pow") @@ -1783,35 +1576,9 @@

Source code for tensorflow.python.ops.math_ops

return gen_math_ops.logical_and(x, y, name) -def and_(x, y, name=None): - if x.dtype == dtypes.bool: - return gen_math_ops.logical_and(x, y, name) - return gen_bitwise_ops.bitwise_and(x, y) - - -def or_(x, y, name=None): - if x.dtype == dtypes.bool: - return gen_math_ops.logical_or(x, y, name) - return gen_bitwise_ops.bitwise_or(x, y) - - -def xor_(x, y, name=None): - if x.dtype == dtypes.bool: - return logical_xor(x, y, name) - return gen_bitwise_ops.bitwise_xor(x, y) - - -def invert_(x, name=None): - if x.dtype == dtypes.bool: - return gen_math_ops.logical_not(x, name=name) - return gen_bitwise_ops.invert(x, name=name) - - -_OverrideBinaryOperatorHelper(and_, "and") -_OverrideBinaryOperatorHelper(or_, "or") -_OverrideBinaryOperatorHelper(xor_, "xor") -ops.Tensor._override_operator("__invert__", invert_) - +_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and") +_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or") +_OverrideBinaryOperatorHelper(logical_xor, "xor") ops.Tensor._override_operator("__lt__", gen_math_ops.less) ops.Tensor._override_operator("__le__", gen_math_ops.less_equal) @@ -1842,8 +1609,8 @@

Source code for tensorflow.python.ops.math_ops

<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, True])> Args: - x: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`. - y: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`. + x: A `tf.Tensor` or `tf.SparseTensor` or `tf.IndexedSlices`. + y: A `tf.Tensor` or `tf.SparseTensor` or `tf.IndexedSlices`. name: A name for the operation (optional). Returns: @@ -1878,8 +1645,8 @@

Source code for tensorflow.python.ops.math_ops

<tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])> Args: - x: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`. - y: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`. + x: A `tf.Tensor` or `tf.SparseTensor` or `tf.IndexedSlices`. + y: A `tf.Tensor` or `tf.SparseTensor` or `tf.IndexedSlices`. name: A name for the operation (optional). Returns: @@ -1891,33 +1658,8 @@

Source code for tensorflow.python.ops.math_ops

return gen_math_ops.not_equal(x, y, name=name) -@tf_export("__operators__.eq", v1=[]) -@dispatch.add_dispatch_support def tensor_equals(self, other): - """The operation invoked by the `Tensor.__eq__` operator. - - Compares two tensors element-wise for equality if they are - broadcast-compatible; or returns False if they are not broadcast-compatible. - (Note that this behavior differs from `tf.math.equal`, which raises an - exception if the two tensors are not broadcast-compatible.) - - Purpose in the API: - - This method is exposed in TensorFlow's API so that library developers - can register dispatching for `Tensor.__eq__` to allow it to handle - custom composite tensors & other custom objects. - - The API symbol is not intended to be called by users directly and does - appear in TensorFlow's generated documentation. - - Args: - self: The left-hand side of the `==` operator. - other: The right-hand side of the `==` operator. - - Returns: - The result of the elementwise `==` operation, or `False` if the arguments - are not broadcast-compatible. - """ + """Compares two tensors element-wise for equality.""" if other is None: return False g = getattr(self, "graph", None) @@ -1929,33 +1671,8 @@

Source code for tensorflow.python.ops.math_ops

return self is other -@tf_export("__operators__.ne", v1=[]) -@dispatch.add_dispatch_support def tensor_not_equals(self, other): - """The operation invoked by the `Tensor.__ne__` operator. - - Compares two tensors element-wise for inequality if they are - broadcast-compatible; or returns True if they are not broadcast-compatible. - (Note that this behavior differs from `tf.math.not_equal`, which raises an - exception if the two tensors are not broadcast-compatible.) - - Purpose in the API: - - This method is exposed in TensorFlow's API so that library developers - can register dispatching for `Tensor.__ne__` to allow it to handle - custom composite tensors & other custom objects. - - The API symbol is not intended to be called by users directly and does - appear in TensorFlow's generated documentation. - - Args: - self: The left-hand side of the `!=` operator. - other: The right-hand side of the `!=` operator. - - Returns: - The result of the elementwise `!=` operation, or `True` if the arguments - are not broadcast-compatible. - """ + """Compares two tensors element-wise for equality.""" if other is None: return True if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions(): @@ -1970,7 +1687,6 @@

Source code for tensorflow.python.ops.math_ops

@tf_export("range") -@dispatch.add_dispatch_support def range(start, limit=None, delta=1, dtype=None, name="range"): # pylint: disable=redefined-builtin """Creates a sequence of numbers. @@ -2064,23 +1780,28 @@

Source code for tensorflow.python.ops.math_ops

_range_tensor_conversion_function) # Reduction operations -def _ReductionDims(x, axis): # pylint: disable=invalid-name - """Returns range(0, rank(x)) if axis is None.""" +def _ReductionDims(x, axis, reduction_indices=None): # pylint: disable=invalid-name + """Returns range(0, rank(x)) if reduction_indices is None.""" + # TODO(aselle): Remove this after deprecation + if reduction_indices is not None: + if axis is not None: + raise ValueError("Can't specify both axis' and 'reduction_indices'.") + axis = reduction_indices if axis is not None: return axis else: - x_rank = None + # Fast path: avoid creating Rank and Range ops if ndims is known. if isinstance(x, ops.Tensor): - x_rank = x.shape.rank + rank = x.shape.rank + if rank is not None: + return constant_op.constant(np.arange(rank, dtype=np.int32)) elif (isinstance(x, sparse_tensor.SparseTensor) and x.dense_shape.shape.is_fully_defined()): - x_rank = x.dense_shape.shape.dims[0].value # sparse.dense_shape is 1-D. - # Fast path: avoid creating Rank and Range ops if ndims is known. - if x_rank: - return constant_op.constant(np.arange(x_rank, dtype=np.int32)) - else: - # Otherwise, we rely on Range and Rank to do the right thing at run-time. - return range(0, array_ops.rank(x)) + rank = x.dense_shape.shape.dims[0].value # sparse.dense_shape is 1-D. + return constant_op.constant(np.arange(rank, dtype=np.int32)) + + # Otherwise, we rely on Range and Rank to do the right thing at run-time. + return range(0, array_ops.rank(x)) def _has_fully_defined_shape(tensor): @@ -2097,7 +1818,6 @@

Source code for tensorflow.python.ops.math_ops

@tf_export(v1=["math.reduce_sum", "reduce_sum"]) -@dispatch.add_dispatch_support @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") @@ -2111,8 +1831,8 @@

Source code for tensorflow.python.ops.math_ops

Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. @@ -2161,49 +1881,28 @@

Source code for tensorflow.python.ops.math_ops

Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: - >>> # x has a shape of (2, 3) (two rows and three columns): - >>> x = tf.constant([[1, 1, 1], [1, 1, 1]]) - >>> x.numpy() - array([[1, 1, 1], - [1, 1, 1]], dtype=int32) - >>> # sum all the elements - >>> # 1 + 1 + 1 + 1 + 1+ 1 = 6 - >>> tf.reduce_sum(x).numpy() - 6 - >>> # reduce along the first dimension - >>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2] - >>> tf.reduce_sum(x, 0).numpy() - array([2, 2, 2], dtype=int32) - >>> # reduce along the second dimension - >>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3] - >>> tf.reduce_sum(x, 1).numpy() - array([3, 3], dtype=int32) - >>> # keep the original dimensions - >>> tf.reduce_sum(x, 1, keepdims=True).numpy() - array([[3], - [3]], dtype=int32) - >>> # reduce along both dimensions - >>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6 - >>> # or, equivalently, reduce along rows, then reduce the resultant array - >>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2] - >>> # 2 + 2 + 2 = 6 - >>> tf.reduce_sum(x, [0, 1]).numpy() - 6 - + ```python + x = tf.constant([[1, 1, 1], [1, 1, 1]]) + tf.reduce_sum(x) # 6 + tf.reduce_sum(x, 0) # [2, 2, 2] + tf.reduce_sum(x, 1) # [3, 3] + tf.reduce_sum(x, 1, keepdims=True) # [[3], [3]] + tf.reduce_sum(x, [0, 1]) # 6 + ``` Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), - rank(input_tensor)]`. + rank(input_tensor))`. keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). @@ -2232,14 +1931,13 @@

Source code for tensorflow.python.ops.math_ops

@tf_export("math.reduce_euclidean_norm") -@dispatch.add_dispatch_support def reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None): """Computes the Euclidean norm of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. @@ -2276,7 +1974,6 @@

Source code for tensorflow.python.ops.math_ops

@tf_export(v1=["math.count_nonzero", "count_nonzero"]) -@dispatch.add_dispatch_support @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") @@ -2354,7 +2051,6 @@

Source code for tensorflow.python.ops.math_ops

@tf_export("math.count_nonzero", v1=[]) -@dispatch.add_dispatch_support def count_nonzero_v2( input, # pylint: disable=redefined-builtin axis=None, @@ -2422,7 +2118,6 @@

Source code for tensorflow.python.ops.math_ops

@tf_export(v1=["math.reduce_mean", "reduce_mean"]) -@dispatch.add_dispatch_support def reduce_mean_v1(input_tensor, axis=None, keepdims=None, @@ -2434,8 +2129,8 @@

Source code for tensorflow.python.ops.math_ops

Reduces `input_tensor` along the dimensions given in `axis` by computing the mean of elements across the dimensions in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. @@ -2496,8 +2191,8 @@

Source code for tensorflow.python.ops.math_ops

Reduces `input_tensor` along the dimensions given in `axis` by computing the mean of elements across the dimensions in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions are retained + with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. @@ -2549,30 +2244,28 @@

Source code for tensorflow.python.ops.math_ops

@tf_export("math.reduce_variance") -@dispatch.add_dispatch_support def reduce_variance(input_tensor, axis=None, keepdims=False, name=None): """Computes the variance of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: - >>> x = tf.constant([[1., 2.], [3., 4.]]) - >>> tf.math.reduce_variance(x) - <tf.Tensor: shape=(), dtype=float32, numpy=1.25> - >>> tf.math.reduce_variance(x, 0) - <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], ...)> - >>> tf.math.reduce_variance(x, 1) - <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.25, 0.25], ...)> + ```python + x = tf.constant([[1., 2.], [3., 4.]]) + tf.reduce_variance(x) # 1.25 + tf.reduce_variance(x, 0) # [1., 1.] + tf.reduce_variance(x, 1) # [0.25, 0.25] + ``` Args: - input_tensor: The tensor to reduce. Should have real or complex type. + input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. @@ -2580,60 +2273,47 @@

Source code for tensorflow.python.ops.math_ops

name: A name scope for the associated operations (optional). Returns: - The reduced tensor, of the same dtype as the input_tensor. Note, for - `complex64` or `complex128` input, the returned `Tensor` will be of type - `float32` or `float64`, respectively. + The reduced tensor, of the same dtype as the input_tensor. @compatibility(numpy) Equivalent to np.var - Please note `np.var` has a `dtype` parameter that could be used to specify the - output type. By default this is `dtype=float64`. On the other hand, - `tf.math.reduce_variance` has aggressive type inference from `input_tensor`. + Please note that `np.var` has a `dtype` parameter that could be used to + specify the output type. By default this is `dtype=float64`. On the other + hand, `tf.reduce_variance` has an aggressive type inference from + `input_tensor`, @end_compatibility """ name = name if name else "reduce_variance" with ops.name_scope(name): means = reduce_mean(input_tensor, axis=axis, keepdims=True) - if means.dtype.is_integer: - raise TypeError("Input must be either real or complex") - diff = input_tensor - means - if diff.dtype.is_complex: - # For complex values we need to take the absolute value before squaring. - # This is achieved by multiplying with the conjugate. - real_dtype = diff.dtype.real_dtype - squared_deviations = gen_math_ops.real( - gen_math_ops.mul(gen_math_ops.conj(diff), diff), Tout=real_dtype) - else: - squared_deviations = gen_math_ops.square(diff) + squared_deviations = gen_math_ops.square(input_tensor - means) return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims) @tf_export("math.reduce_std") -@dispatch.add_dispatch_support def reduce_std(input_tensor, axis=None, keepdims=False, name=None): """Computes the standard deviation of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. For example: - >>> x = tf.constant([[1., 2.], [3., 4.]]) - >>> tf.math.reduce_std(x) - <tf.Tensor: shape=(), dtype=float32, numpy=1.118034> - >>> tf.math.reduce_std(x, 0) - <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)> - >>> tf.math.reduce_std(x, 1) - <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.5, 0.5], dtype=float32)> + ```python + x = tf.constant([[1., 2.], [3., 4.]]) + tf.reduce_std(x) # 1.1180339887498949 + tf.reduce_std(x, 0) # [1., 1.] + tf.reduce_std(x, 1) # [0.5, 0.5] + ``` Args: - input_tensor: The tensor to reduce. Should have real or complex type. + input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If `None` (the default), reduces all dimensions. Must be in the range `[-rank(input_tensor), rank(input_tensor))`. @@ -2641,16 +2321,14 @@

Source code for tensorflow.python.ops.math_ops

name: A name scope for the associated operations (optional). Returns: - The reduced tensor, of the same dtype as the input_tensor. Note, for - `complex64` or `complex128` input, the returned `Tensor` will be of type - `float32` or `float64`, respectively. + The reduced tensor, of the same dtype as the input_tensor. @compatibility(numpy) Equivalent to np.std - Please note `np.std` has a `dtype` parameter that could be used to specify the - output type. By default this is `dtype=float64`. On the other hand, - `tf.math.reduce_std` has aggressive type inference from `input_tensor`. + Please note that `np.std` has a `dtype` parameter that could be used to + specify the output type. By default this is `dtype=float64`. On the other + hand, `tf.reduce_std` has an aggressive type inference from `input_tensor`, @end_compatibility """ name = name if name else "reduce_std" @@ -2696,7 +2374,6 @@


@tf_export(v1=["math.reduce_prod", "reduce_prod"]) -@dispatch.add_dispatch_support @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") @@ -2710,8 +2387,8 @@


Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. @@ -2742,7 +2419,6 @@


@tf_export(v1=["math.reduce_min", "reduce_min"]) -@dispatch.add_dispatch_support @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") @@ -2756,8 +2432,8 @@


Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. @@ -2794,8 +2470,8 @@


Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. @@ -2829,7 +2505,6 @@


@tf_export(v1=["math.reduce_max", "reduce_max"]) -@dispatch.add_dispatch_support @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") @@ -2843,8 +2518,8 @@


Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. @@ -2881,8 +2556,8 @@


Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. @@ -2897,10 +2572,10 @@


tf.Tensor(-1, shape=(), dtype=int32) >>> x = tf.constant([4, float('nan')]) >>> print(tf.reduce_max(x)) - tf.Tensor(nan, shape=(), dtype=float32) + tf.Tensor(4.0, shape=(), dtype=float32) >>> x = tf.constant([float('nan'), float('nan')]) >>> print(tf.reduce_max(x)) - tf.Tensor(nan, shape=(), dtype=float32) + tf.Tensor(-inf, shape=(), dtype=float32) >>> x = tf.constant([float('-inf'), float('inf')]) >>> print(tf.reduce_max(x)) tf.Tensor(inf, shape=(), dtype=float32) @@ -2934,7 +2609,6 @@


@tf_export(v1=["math.reduce_all", "reduce_all"]) -@dispatch.add_dispatch_support @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") @@ -2948,8 +2622,8 @@


Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. @@ -2988,15 +2662,15 @@


return reduce_all(input_tensor, axis, keepdims, name) -@tf_export("math.reduce_all", "reduce_all", v1=[]) +@tf_export("reduce_all", "math.reduce_all", v1=[]) @dispatch.add_dispatch_support def reduce_all(input_tensor, axis=None, keepdims=False, name=None): """Computes the "logical and" of elements across dimensions of a tensor. Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. @@ -3034,7 +2708,6 @@


@tf_export(v1=["math.reduce_any", "reduce_any"]) -@dispatch.add_dispatch_support @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") @@ -3048,8 +2721,8 @@


Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. @@ -3095,8 +2768,8 @@


Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` is None, all dimensions are reduced, and a tensor with a single element is returned. @@ -3134,7 +2807,6 @@


@tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"]) -@dispatch.add_dispatch_support @deprecation.deprecated_args(None, "keep_dims is deprecated, use keepdims instead", "keep_dims") @@ -3148,8 +2820,8 @@


Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. @@ -3191,14 +2863,13 @@


@tf_export("math.reduce_logsumexp", "reduce_logsumexp", v1=[]) -@dispatch.add_dispatch_support def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None): """Computes log(sum(exp(elements across dimensions of a tensor))). Reduces `input_tensor` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each - of the entries in `axis`, which must be unique. If `keepdims` is true, the - reduced dimensions are retained with length 1. + entry in `axis`. If `keepdims` is true, the reduced dimensions + are retained with length 1. If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. @@ -3247,12 +2918,11 @@


dims=reduce_dim)) if not keepdims: my_max = array_ops.reshape(my_max, gen_array_ops.shape(result)) - result = _add_dispatch(result, my_max, name=name) + result = gen_math_ops.add(result, my_max) return _may_reduce_to_scalar(keepdims, axis, result) @tf_export("linalg.trace", v1=["linalg.trace", "trace"]) -@dispatch.add_dispatch_support @deprecation.deprecated_endpoints("trace") @dispatch.add_dispatch_support def trace(x, name=None): @@ -3262,7 +2932,7 @@


in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where - `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])` + `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])` For example: @@ -3393,12 +3063,12 @@


**does not support `tf.sparse.SparseTensor`**, it just makes optimizations that assume most values in `a` are zero. See `tf.sparse.sparse_dense_matmul` - for some support for `tf.sparse.SparseTensor` multiplication. + for some support for `tf.SparseTensor` multiplication. b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this **does not support `tf.sparse.SparseTensor`**, it just makes optimizations that assume most values in `a` are zero. See `tf.sparse.sparse_dense_matmul` - for some support for `tf.sparse.SparseTensor` multiplication. + for some support for `tf.SparseTensor` multiplication. name: Name for the operation (optional). Returns: @@ -3426,10 +3096,10 @@


if not isinstance(a, (ops.EagerTensor, _resource_variable_type)): a = ops.convert_to_tensor(a, name="a") if not isinstance(b, (ops.EagerTensor, _resource_variable_type)): - b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b") + b = ops.convert_to_tensor(b, name="b") else: a = ops.convert_to_tensor(a, name="a") - b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b") + b = ops.convert_to_tensor(b, name="b") # TODO(apassos) remove _shape_tuple here when it is not needed. a_shape = a._shape_tuple() # pylint: disable=protected-access @@ -3492,7 +3162,6 @@


@tf_export("linalg.matvec") -@dispatch.add_dispatch_support def matvec(a, b, transpose_a=False, @@ -3596,7 +3265,6 @@


sparse_matmul = deprecation.deprecated(None, "Use `tf.linalg.matmul` instead")( gen_math_ops.sparse_mat_mul) tf_export(v1=["sparse_matmul"])(sparse_matmul) -@dispatch.add_dispatch_support @ops.RegisterStatistics("MatMul", "flops") @@ -3729,12 +3397,12 @@


ValueError: If `inputs` don't all have same shape and dtype or the shape cannot be inferred. """ - if not inputs or not isinstance(inputs, collections_abc.Iterable): - raise ValueError("inputs must be an iterable of at least one " + if not inputs or not isinstance(inputs, (list, tuple)): + raise ValueError("inputs must be a list of at least one " "Tensor/IndexedSlices with the same dtype and shape") inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs) if not all(isinstance(x, (ops.Tensor, ops.IndexedSlices)) for x in inputs): - raise ValueError("inputs must be an iterable of at least one " + raise ValueError("inputs must be a list of at least one " "Tensor/IndexedSlices with the same dtype and shape") if len(inputs) == 1: @@ -3749,7 +3417,6 @@


@tf_export("math.accumulate_n", v1=["math.accumulate_n", "accumulate_n"]) -@dispatch.add_dispatch_support @deprecation.deprecated_endpoints("accumulate_n") def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None): """Returns the element-wise sum of a list of tensors. @@ -3828,13 +3495,12 @@


@tf_export("math.sigmoid", "nn.sigmoid", "sigmoid") -@dispatch.add_dispatch_support def sigmoid(x, name=None): r"""Computes sigmoid of `x` element-wise. - Formula for calculating $\mathrm{sigmoid}(x) = y = 1 / (1 + \exp(-x))$. + Formula for calculating sigmoid(x): `y = 1 / (1 + exp(-x))`. - For $x \in (-\infty, \infty)$, $\mathrm{sigmoid}(x) \in (0, 1)$. + For x \in (-inf, inf) => sigmoid(x) \in (0, 1) Example Usage: @@ -3862,9 +3528,9 @@


Returns: A Tensor with the same type as `x`. - + Usage Example: - + >>> x = tf.constant([-128.0, 0.0, 128.0], dtype=tf.float32) >>> tf.sigmoid(x) <tf.Tensor: shape=(3,), dtype=float32, @@ -3894,37 +3560,121 @@


Returns: A Tensor with the same type as `x`. + """ + with ops.name_scope(name, "LogSigmoid", [x]) as name: + x = ops.convert_to_tensor(x, name="x") + return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name) - Usage Example: - If a positive number is large, then its log_sigmoid will approach to 0 since - the formula will be `y = log( <large_num> / (1 + <large_num>) )` which - approximates to `log (1)` which is 0. +@tf_export("math.bincount", v1=[]) +def bincount(arr, + weights=None, + minlength=None, + maxlength=None, + dtype=dtypes.int32, + name=None): + """Counts the number of occurrences of each value in an integer array. - >>> x = tf.constant([0.0, 1.0, 50.0, 100.0]) - >>> tf.math.log_sigmoid(x) - <tf.Tensor: shape=(4,), dtype=float32, numpy= - array([-6.9314718e-01, -3.1326169e-01, -1.9287499e-22, -0.0000000e+00], - dtype=float32)> + If `minlength` and `maxlength` are not given, returns a vector with length + `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise. + If `weights` are non-None, then index `i` of the output stores the sum of the + value in `weights` at each index where the corresponding value in `arr` is + `i`. - If a negative number is large, its log_sigmoid will approach to the number - itself since the formula will be `y = log( 1 / (1 + <large_num>) )` which is - `log (1) - log ( (1 + <large_num>) )` which approximates to `- <large_num>` - that is the number itself. + ```python + values = tf.constant([1,1,2,3,2,4,4,5]) + tf.math.bincount(values) #[0 2 2 1 2 1] + ``` + Vector length = Maximum element in vector `values` is 5. Adding 1, which is 6 + will be the vector length. + + Each bin value in the output indicates number of occurrences of the particular + index. Here, index 1 in output has a value 2. This indicates value 1 occurs + two times in `values`. + + ```python + values = tf.constant([1,1,2,3,2,4,4,5]) + weights = tf.constant([1,5,0,1,0,5,4,5]) + tf.math.bincount(values, weights=weights) #[0 6 0 1 9 5] + ``` + Bin will be incremented by the corresponding weight instead of 1. + Here, index 1 in output has a value 6. This is the summation of weights + corresponding to the value in `values`. + + Args: + arr: An int32 tensor of non-negative values. + weights: If non-None, must be the same shape as arr. For each value in + `arr`, the bin will be incremented by the corresponding weight instead of + 1. + minlength: If given, ensures the output has length at least `minlength`, + padding with zeros at the end if necessary. + maxlength: If given, skips values in `arr` that are equal or greater than + `maxlength`, ensuring that the output has length at most `maxlength`. + dtype: If `weights` is None, determines the type of the output bins. + name: A name scope for the associated operations (optional). + + Returns: + A vector with the same dtype as `weights` or the given `dtype`. The bin + values. + + Raises: + `InvalidArgumentError` if negative values are provided as an input. - >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0]) - >>> tf.math.log_sigmoid(x) - <tf.Tensor: shape=(4,), dtype=float32, numpy= - array([-100. , -50. 
, -1.3132616, -0.6931472], - dtype=float32)> """ - with ops.name_scope(name, "LogSigmoid", [x]) as name: - x = ops.convert_to_tensor(x, name="x") - return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name) + name = "bincount" if name is None else name + with ops.name_scope(name): + arr = ops.convert_to_tensor(arr, name="arr", dtype=dtypes.int32) + array_is_nonempty = reduce_prod(array_ops.shape(arr)) > 0 + output_size = cast(array_is_nonempty, dtypes.int32) * (reduce_max(arr) + 1) + if minlength is not None: + minlength = ops.convert_to_tensor( + minlength, name="minlength", dtype=dtypes.int32) + output_size = gen_math_ops.maximum(minlength, output_size) + if maxlength is not None: + maxlength = ops.convert_to_tensor( + maxlength, name="maxlength", dtype=dtypes.int32) + output_size = gen_math_ops.minimum(maxlength, output_size) + if weights is not None: + weights = ops.convert_to_tensor(weights, name="weights") + return gen_math_ops.unsorted_segment_sum(weights, arr, output_size) + weights = constant_op.constant([], dtype) + return gen_math_ops.bincount(arr, output_size, weights) + + +@tf_export(v1=["math.bincount", "bincount"]) +@deprecation.deprecated_endpoints("bincount") +def bincount_v1(arr, + weights=None, + minlength=None, + maxlength=None, + dtype=dtypes.int32): + """Counts the number of occurrences of each value in an integer array. + + If `minlength` and `maxlength` are not given, returns a vector with length + `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise. + If `weights` are non-None, then index `i` of the output stores the sum of the + value in `weights` at each index where the corresponding value in `arr` is + `i`. + + Args: + arr: An int32 tensor of non-negative values. + weights: If non-None, must be the same shape as arr. For each value in + `arr`, the bin will be incremented by the corresponding weight instead of + 1. + minlength: If given, ensures the output has length at least `minlength`, + padding with zeros at the end if necessary. + maxlength: If given, skips values in `arr` that are equal or greater than + `maxlength`, ensuring that the output has length at most `maxlength`. + dtype: If `weights` is None, determines the type of the output bins. + + Returns: + A vector with the same dtype as `weights` or the given `dtype`. The bin + values. + """ + return bincount(arr, weights, minlength, maxlength, dtype) @tf_export("math.cumsum", "cumsum") -@dispatch.add_dispatch_support def cumsum(x, axis=0, exclusive=False, reverse=False, name=None): """Compute the cumulative sum of the tensor `x` along `axis`. @@ -3996,7 +3746,6 @@


@tf_export("math.cumprod", v1=["math.cumprod", "cumprod"]) -@dispatch.add_dispatch_support @deprecation.deprecated_endpoints("cumprod") def cumprod(x, axis=0, exclusive=False, reverse=False, name=None): """Compute the cumulative product of the tensor `x` along `axis`. @@ -4050,7 +3799,6 @@


@tf_export("math.cumulative_logsumexp", v1=["math.cumulative_logsumexp"]) -@dispatch.add_dispatch_support def cumulative_logsumexp(x, axis=0, exclusive=False, reverse=False, name=None): """Compute the cumulative log-sum-exp of the tensor `x` along `axis`. @@ -4111,29 +3859,20 @@


def conj(x, name=None): r"""Returns the complex conjugate of a complex number. - Given a tensor `x` of complex numbers, this operation returns a tensor of - complex numbers that are the complex conjugate of each element in `x`. The - complex numbers in `x` must be of the form \\(a + bj\\), where `a` is the - real part and `b` is the imaginary part. + Given a tensor `input` of complex numbers, this operation returns a tensor of + complex numbers that are the complex conjugate of each element in `input`. The + complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the + real part and *b* is the imaginary part. The complex conjugate returned by this operation is of the form \\(a - bj\\). For example: - >>> x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j]) - >>> tf.math.conj(x) - <tf.Tensor: shape=(2,), dtype=complex128, - numpy=array([-2.25-4.75j, 3.25-5.75j])> + # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + tf.math.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] If `x` is real, it is returned unchanged. - For example: - - >>> x = tf.constant([-2.25, 3.25]) - >>> tf.math.conj(x) - <tf.Tensor: shape=(2,), dtype=float32, - numpy=array([-2.25, 3.25], dtype=float32)> - Args: x: `Tensor` to conjugate. Must have numeric or variant type. name: A name for the operation (optional). @@ -4143,10 +3882,6 @@


Raises: TypeError: If `x` is not a numeric tensor. - - @compatibility(numpy) - Equivalent to numpy.conj. - @end_compatibility """ if isinstance(x, ops.Tensor): dt = x.dtype @@ -4173,18 +3908,11 @@


Returns: A 1-D Tensor, the output shape as if keepdims were set to True. """ - # TODO(allenl): Refactor `reduced_shape` to take the tensor corresponding to - # `input_shape` rather than `tf.shape` of it. Then we can check if the shape - # is fully defined here, which may be faster executing eagerly than running - # `tf.shape` and then fetching its constant value. - constant_input_shape = tensor_util.constant_value(input_shape) - if constant_input_shape is not None: - constant_axes = tensor_util.constant_value(axes) - if constant_axes is not None: - constant_axes = np.array(constant_axes, dtype=np.int32) - constant_input_shape = np.array(constant_input_shape, dtype=np.int32) - constant_input_shape[constant_axes] = 1 - return constant_input_shape + if context.executing_eagerly(): + input_shape = input_shape.numpy() + axes = axes.numpy() + input_shape[axes] = 1 + return input_shape # Example: # cast needed for SparseTensor reductions @@ -4230,7 +3958,6 @@


@tf_export( "math.unsorted_segment_mean", v1=["math.unsorted_segment_mean", "unsorted_segment_mean"]) -@dispatch.add_dispatch_support @deprecation.deprecated_endpoints("unsorted_segment_mean") @dispatch.add_dispatch_support def unsorted_segment_mean(data, segment_ids, num_segments, name=None): @@ -4240,7 +3967,8 @@


segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) for an explanation of segments. - This operator is similar to the `tf.math.unsorted_segment_sum` operator. + This operator is similar to the unsorted segment sum operator found + [here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). Instead of computing the sum over segments, it computes the mean of all entries belonging to a segment such that: @@ -4276,7 +4004,6 @@


@tf_export( "math.unsorted_segment_sqrt_n", v1=["math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n"]) -@dispatch.add_dispatch_support @deprecation.deprecated_endpoints("unsorted_segment_sqrt_n") @dispatch.add_dispatch_support def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None): @@ -4286,7 +4013,8 @@


segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation) for an explanation of segments. - This operator is similar to the `tf.math.unsorted_segment_sum` operator. + This operator is similar to the unsorted segment sum operator found + [here](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). Additionally to computing the sum over segments, it divides the results by sqrt(N). @@ -4625,7 +4353,6 @@


@tf_export("tensordot", "linalg.tensordot") -@dispatch.add_dispatch_support def tensordot(a, b, axes, name=None): r"""Tensor contraction of a and b along specified axes and outer product. @@ -4738,7 +4465,7 @@


rank_a = array_ops.rank(a) axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes") axes = array_ops.where(axes >= 0, axes, axes + rank_a) - free, _ = gen_array_ops.list_diff(range(rank_a), axes, dtypes.int32) + free, _ = array_ops.setdiff1d(range(rank_a), axes) free_dims = array_ops.gather(shape_a, free) axes_dims = array_ops.gather(shape_a, axes) prod_free_dims = reduce_prod(free_dims) @@ -4812,22 +4539,21 @@


@tf_export("math.polyval") -@dispatch.add_dispatch_support def polyval(coeffs, x, name=None): r"""Computes the elementwise value of a polynomial. If `x` is a tensor and `coeffs` is a list n + 1 tensors, this function returns the value of the n-th order polynomial - `p(x) = coeffs[n-1] + coeffs[n-2] * x + ... + coeffs[0] * x**(n-1)` + p(x) = coeffs[n-1] + coeffs[n-2] * x + ... + coeffs[0] * x**(n-1) evaluated using Horner's method, i.e. - `p(x) = coeffs[n-1] + x * (coeffs[n-2] + ... + x * (coeffs[1] - + x * coeffs[0]))` - + p(x) = coeffs[n-1] + x * (coeffs[n-2] + ... + x * (coeffs[1] + + x * coeffs[0])) + Usage Example: - + >>> coefficients = [1.0, 2.5, -4.2] >>> x = 5.0 >>> y = tf.math.polyval(coefficients, x) @@ -4883,7 +4609,6 @@


@tf_export("math.reciprocal_no_nan") -@dispatch.add_dispatch_support def reciprocal_no_nan(x, name=None): """Performs a safe reciprocal operation, element wise. @@ -4985,37 +4710,7 @@


return gen_math_ops.ndtri(x) -@tf_export("math.erfcinv") -@dispatch.add_dispatch_support -def erfcinv(x, name=None): - """Computes the inverse of complementary error function. - - Given `x`, compute the inverse complementary error function of `x`. - This function is the inverse of `tf.math.erfc`, and is defined on - `[0, 2]`. - - >>> tf.math.erfcinv([0., 0.2, 1., 1.5, 2.]) - <tf.Tensor: shape=(5,), dtype=float32, numpy= - array([ inf, 0.9061935, -0. , -0.4769363, -inf], - dtype=float32)> - - Args: - x: `Tensor` with type `float` or `double`. - name: A name for the operation (optional). - Returns: - Inverse complementary error function of `x`. - - @compatibility(numpy) - Equivalent to scipy.special.erfcinv - @end_compatibility - """ - with ops.name_scope(name, "erfcinv", [x]): - x = ops.convert_to_tensor(x, name="start") - return -ndtri(0.5 * x) * np.sqrt(0.5) - - @tf_export("math.ceil", v1=["math.ceil", "ceil"]) -@dispatch.add_dispatch_support @deprecation.deprecated_endpoints("ceil") @dispatch.add_dispatch_support def ceil(x, name=None): @@ -5100,14 +4795,10 @@


numpy=array([ 7.389056, 2980.958 ], dtype=float32)> For complex numbers, the exponential value is calculated as - $$ - e^{x+iy} = {e^x} {e^{iy}} = {e^x} ({\cos (y) + i \sin (y)}) - $$ + \\(e^{x+iy}={e^x}{e^{iy}}={e^x}{\\cos(y)+i\\sin(y)}\\) For `1+1j` the value would be computed as: - $$ - e^1 (\cos (1) + i \sin (1)) = 2.7182817 \times (0.5403023+0.84147096j) - $$ + \\(e^1{\\cos(1)+i\\sin(1)} = 2.7182817 \\times (0.5403023+0.84147096j)\\) >>> x = tf.constant(1 + 1j) >>> tf.math.exp(x) @@ -5133,7 +4824,6 @@


@tf_export("math.sobol_sample") -@dispatch.add_dispatch_support def sobol_sample(dim, num_results, skip=0, dtype=dtypes.float32, name=None): """Generates points from the Sobol sequence. @@ -5158,7 +4848,6 @@


@tf_export("math.rsqrt", v1=["math.rsqrt", "rsqrt"]) -@dispatch.add_dispatch_support @deprecation.deprecated_endpoints("rsqrt") @dispatch.add_dispatch_support def rsqrt(x, name=None): @@ -5173,69 +4862,13 @@


Args: x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`, - `float32`, `float64`. + `float32`, `float64`. `int32` name: A name for the operation (optional). Returns: A `tf.Tensor`. Has the same type as `x`. """ return gen_math_ops.rsqrt(x, name) - - -@tf_export("math.acos", "acos") -@dispatch.add_dispatch_support -def acos(x, name=None): - """Computes acos of x element-wise. - - Provided an input tensor, the `tf.math.acos` operation - returns the inverse cosine of each element of the tensor. - If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`. - - Input range is `[-1, 1]` and the output has a range of `[0, pi]`. - - For example: - - >>> x = tf.constant([1.0, -0.5, 3.4, 0.2, 0.0, -2], dtype = tf.float32) - >>> tf.math.acos(x) - <tf.Tensor: shape=(6,), dtype=float32, - numpy= array([0. , 2.0943952, nan, 1.3694383, 1.5707964, nan], - dtype=float32)> - - Args: - x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, - `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, - `complex64`, `complex128`, `string`. - name: A name for the operation (optional). - - Returns: - A `Tensor`. Has the same type as x. - """ - return gen_math_ops.acos(x, name) - - -@tf_export("math.floor", "floor") -@dispatch.add_dispatch_support -def floor(x, name=None): - """Returns element-wise largest integer not greater than x. - - Both input range is `(-inf, inf)` and the - ouput range consists of all integer values. - - For example: - - >>> x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float("inf")]) - >>> tf.floor(x).numpy() - array([ 1., -2., 5., -3., 0., inf], dtype=float32) - - Args: - x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, - `float32`, `float64`. - name: A name for the operation (optional). - - Returns: - A `Tensor`. Has the same type as x. - """ - return gen_math_ops.floor(x, name)

diff --git a/sparseml/_sources/index.rst.txt b/sparseml/_sources/index.rst.txt index 2f48185854f..80e266e9277 100644 --- a/sparseml/_sources/index.rst.txt +++ b/sparseml/_sources/index.rst.txt @@ -17,8 +17,7 @@ SparseML |version| =================== -Libraries for state-of-the-art deep neural network optimization algorithms, -enabling simple pipelines integration with a few lines of code +Libraries for applying sparsification recipes to neural networks with a few lines of code, enabling faster and smaller models .. raw:: html @@ -49,45 +48,50 @@ enabling simple pipelines integration with a few lines of code Overview ======== -SparseML is a toolkit that includes APIs, CLIs, scripts and libraries that apply state-of-the-art optimization -algorithms such as `pruning `_ and -`quantization `_ to any neural network. -General, recipe-driven approaches built around these optimizations enable the simplification of creating faster -and smaller models for the ML performance community at large. +SparseML is a toolkit that includes APIs, CLIs, scripts and libraries that apply state-of-the-art sparsification algorithms such as pruning and quantization to any neural network. +General, recipe-driven approaches built around these algorithms enable the simplification of creating faster and smaller models for the ML performance community at large. -SparseML is integrated for easy model optimizations within the `PyTorch `_, -`Keras `_, and `TensorFlow V1 `_ ecosystems currently. +`This repository `_ contains integrations within the `PyTorch `_, `Keras `_, and `TensorFlow V1 `_, allowing for seamless model sparsification. -Related Products -================ +Sparsification +============== -- `DeepSparse `_: - CPU inference engine that delivers unprecedented performance for sparse models -- `SparseZoo `_: - Neural network model repository for highly sparse models and optimization recipes -- `Sparsify `_: - Easy-to-use autoML interface to optimize deep neural networks for - better inference performance and a smaller footprint +Sparsification is the process of taking a trained deep learning model and removing redundant information from the overprecise and over-parameterized network resulting in a faster and smaller model. +Techniques for sparsification are all encompassing including everything from inducing sparsity using `pruning `_ and `quantization `_ to enabling naturally occurring sparsity using `activation sparsity `_ or `winograd/FFT `_. +When implemented correctly, these techniques result in significantly more performant and smaller models with limited to no effect on the baseline metrics. +For example, pruning plus quantization can give over `7x improvements in performance `_ while recovering to nearly the same baseline accuracy. + +The Deep Sparse product suite builds on top of sparsification enabling you to easily apply the techniques to your datasets and models using recipe-driven approaches. +Recipes encode the directions for how to sparsify a model into a simple, easily editable format. +- Download a sparsification recipe and sparsified model from the `SparseZoo `_. +- Alternatively, create a recipe for your model using `Sparsify `_. +- Apply your recipe with only a few lines of code using `SparseML `_. +- Finally, for GPU-level performance on CPUs, deploy your sparse-quantized model with the `DeepSparse Engine `_. 
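As a quick illustration of the flow described above, the following is a minimal sketch (not an official example) of applying a recipe within an existing PyTorch training setup. The SparseZoo stub is a placeholder in the documented `zoo:` format, and the model, optimizer, and batch count are stand-ins for your own:

```python
import torch
from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer

model = torch.nn.Linear(128, 10)                         # stand-in for your model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)  # your existing optimizer
num_train_batches = 100                                   # batches per epoch in your data loader

# from_yaml accepts a local recipe path or a SparseZoo stub;
# "zoo:model/stub/path?recipe_type=transfer" below is a placeholder stub
manager = ScheduledModifierManager.from_yaml("zoo:model/stub/path?recipe_type=transfer")
optimizer = ScheduledOptimizer(optimizer, model, manager, steps_per_epoch=num_train_batches)

# train as usual; each optimizer.step() lets the recipe's modifiers run on schedule
```

The trained, sparsified model can then be deployed with the DeepSparse Engine as described in the final step above.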
+ + +**Full Deep Sparse product flow:** + + Resources and Learning More =========================== -- `SparseZoo Documentation `_ -- `Sparsify Documentation `_ -- `DeepSparse Documentation `_ -- `Neural Magic Blog `_, - `Resources `_, - `Website `_ +- `SparseZoo Documentation `_ +- `Sparsify Documentation `_ +- `DeepSparse Documentation `_ +- `Neural Magic Blog `_, + `Resources `_, + `Website `_ Release History =============== Official builds are hosted on PyPi -- stable: `sparseml `_ -- nightly (dev): `sparseml-nightly `_ +- stable: `sparseml `_ +- nightly (dev): `sparseml-nightly `_ Additionally, more information can be found via -`GitHub Releases `_. +`GitHub Releases `_. .. toctree:: :maxdepth: 3 @@ -104,8 +108,9 @@ Additionally, more information can be found via api/sparseml .. toctree:: - :maxdepth: 2 - :caption: Help and Support + :maxdepth: 3 + :caption: Help Bugs, Feature Requests - Support, General Q&A \ No newline at end of file + Support, General Q&A + Neural Magic Docs diff --git a/sparseml/_sources/installation.md.txt b/sparseml/_sources/installation.md.txt index 397046cf16e..5016202241b 100644 --- a/sparseml/_sources/installation.md.txt +++ b/sparseml/_sources/installation.md.txt @@ -25,4 +25,4 @@ Install with pip using: pip install sparseml ``` -Then if you would like to explore any of the [scripts](https://github.com/neuralmagic/sparseml/tree/main/scripts), [notebooks](https://github.com/neuralmagic/sparseml/tree/main/notebooks), or [examples](https://github.com/neuralmagic/sparseml/tree/main/examples) clone the repository and install any additional dependencies as required. +Then if you would like to explore any of the [scripts](https://github.com/neuralmagic/sparseml/tree/main/scripts), [notebooks](https://github.com/neuralmagic/sparseml/tree/main/notebooks), or [integrations](https://github.com/neuralmagic/sparseml/tree/main/integrations) clone the repository and install any additional dependencies as required. diff --git a/sparseml/_sources/quicktour.md.txt b/sparseml/_sources/quicktour.md.txt index 380fc0cf803..eaf4bdfc9c0 100644 --- a/sparseml/_sources/quicktour.md.txt +++ b/sparseml/_sources/quicktour.md.txt @@ -16,12 +16,11 @@ limitations under the License. ## Quick Tour -To enable flexibility, ease of use, and repeatability, optimizing a model is generally done using a recipe file. -The files encode the instructions needed for modifying the model and/or training process as a list of modifiers. +To enable flexibility, ease of use, and repeatability, sparsifying a model is generally done using a recipe. +The recipes encode the instructions needed for modifying the model and/or training process as a list of modifiers. Example modifiers can be anything from setting the learning rate for the optimizer to gradual magnitude pruning. The files are written in [YAML](https://yaml.org/) and stored in YAML or [markdown](https://www.markdownguide.org/) files using [YAML front matter](https://assemble.io/docs/YAML-front-matter.html). -The rest of the SparseML system is coded to parse the recipe files into a native format for the desired framework -and apply the modifications to the model and training pipeline. +The rest of the SparseML system is coded to parse the recipes into a native format for the desired framework and apply the modifications to the model and training pipeline. 
A sample recipe for pruning a model generally looks like the following: @@ -56,12 +55,15 @@ Pre-configured recipes and the resulting models can be explored and downloaded f For a more in-depth read, check out [SparseML documentation](https://docs.neuralmagic.com/sparseml/). -### PyTorch Optimization +### PyTorch Sparsification -The PyTorch optimization libraries are located under the `sparseml.pytorch.optim` package. -Inside are APIs designed to make model optimization as easy as possible by integrating seamlessly into PyTorch training pipelines. +The PyTorch sparsification libraries are located under the `sparseml.pytorch.optim` package. +Inside are APIs designed to make model sparsification as easy as possible by integrating seamlessly into PyTorch training pipelines. -The integration is done using the `ScheduledOptimizer` class. It is intended to wrap your current optimizer and its step function. The step function then calls into the `ScheduledModifierManager` class which can be created from a recipe file. With this setup, the training process can then be modified as desired to optimize the model. +The integration is done using the `ScheduledOptimizer` class. +It is intended to wrap your current optimizer and its step function. +The step function then calls into the `ScheduledModifierManager` class which can be created from a recipe file. +With this setup, the training process can then be modified as desired to sparsify the model. To enable all of this, the integration code you'll need to write is only a handful of lines: @@ -80,11 +82,11 @@ optimizer = ScheduledOptimizer(optimizer, model, manager, steps_per_epoch=num_tr ### Keras Optimization -The Keras optimization libraries are located under the `sparseml.keras.optim` package. -Inside are APIs designed to make model optimization as easy as possible by integrating seamlessly into Keras training pipelines. +The Keras sparsification libraries are located under the `sparseml.keras.optim` package. +Inside are APIs designed to make model sparsification as easy as possible by integrating seamlessly into Keras training pipelines. The integration is done using the `ScheduledModifierManager` class which can be created from a recipe file. -This class handles modifying the Keras objects for the desired optimizations using the `modify` method. +This class handles modifying the Keras objects for the desired algorithms using the `modify` method. The edited model, optimizer, and any callbacks necessary to modify the training process are returned. The model and optimizer can be used normally and the callbacks must be passed into the `fit` or `fit_generator` function. If using `train_on_batch`, the callbacks must be invoked after each call. @@ -114,15 +116,16 @@ model.fit(..., callbacks=callbacks) save_model = manager.finalize(model) ``` -### TensorFlow V1 Optimization +### TensorFlow V1 Sparsification -The TensorFlow optimization libraries for TensorFlow version 1.X are located under the `sparseml.tensorflow_v1.optim` package. Inside are APIs designed to make model optimization as easy as possible by integrating seamlessly into TensorFlow V1 training pipelines. +The TensorFlow sparsification libraries for TensorFlow version 1.X are located under the `sparseml.tensorflow_v1.optim` package. +Inside are APIs designed to make model sparsification as easy as possible by integrating seamlessly into TensorFlow V1 training pipelines. The integration is done using the `ScheduledModifierManager` class which can be created from a recipe file. 
-This class handles modifying the TensorFlow graph for the desired optimizations. -With this setup, the training process can then be modified as desired to optimize the model. +This class handles modifying the TensorFlow graph for the desired algorithms. +With this setup, the training process can then be modified as desired to sparsify the model. -#### Estimator-based pipelines +#### Estimator-Based pipelines Estimator-based pipelines are simpler to integrate with as compared to session-based pipelines. The `ScheduledModifierManager` can override the necessary callbacks in the estimator to modify the graph using the `modify_estimator` function. @@ -139,12 +142,12 @@ manager.modify_estimator(estimator, steps_per_epoch=num_train_batches) # Normal estimator training code... ``` -#### Session-based pipelines +#### Session-Based pipelines Session-based pipelines need a little bit more as compared to estimator-based pipelines; however, it is still designed to require only a few lines of code for integration. After graph creation, the manager's `create_ops` method must be called. -This will modify the graph as needed for the optimizations and return modifying ops and extras. +This will modify the graph as needed for the algorithms and return modifying ops and extras. After creating the session and training normally, call into `session.run` with the modifying ops after each step. Modifying extras contain objects such as tensorboard summaries of the modifiers to be used if desired. Finally, once completed, `complete_graph` must be called to remove the modifying ops for saving and export. @@ -235,3 +238,4 @@ with tf_compat.Graph().as_default() as graph: exporter.export_pb(outputs=[logits]) exporter.export_onnx(inputs=input_names, outputs=output_names) +``` diff --git a/sparseml/_sources/recipes.md.txt b/sparseml/_sources/recipes.md.txt index b5120953c00..b47e16fef11 100644 --- a/sparseml/_sources/recipes.md.txt +++ b/sparseml/_sources/recipes.md.txt @@ -14,9 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. --> -# Optimization Recipes +# Sparsification Recipes -All optimization APIs are designed to work with recipe files. +All SparseML Sparsification APIs are designed to work with recipes. The files encode the instructions needed for modifying the model and/or training process as a list of modifiers. Example modifiers can be anything from setting the learning rate for the optimizer to gradual magnitude pruning. The files are written in [YAML](https://yaml.org/) and stored in YAML or @@ -27,12 +27,10 @@ and apply the modifications to the model and training pipeline. In a recipe, modifiers must be written in a list that includes "modifiers" in its name. -The easiest ways to get or create optimization recipes are by either using -the pre-configured recipes in [SparseZoo](https://github.com/neuralmagic/sparsezoo) or -using [Sparsify's](https://github.com/neuralmagic/sparsify) autoML style creation. +The easiest ways to get or create recipes are by either using the pre-configured recipes in [SparseZoo](https://github.com/neuralmagic/sparsezoo) or using [Sparsify's](https://github.com/neuralmagic/sparsify) automatic creation. However, power users may be inclined to create their own recipes by hand to enable more -fine grained control or to add in custom modifiers. +fine-grained control or to add in custom modifiers. 
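Whichever route you take, the resulting recipe is consumed the same way by each framework integration. A minimal sketch, assuming a local file named `recipe.yaml` (placeholder path) and the relevant frameworks installed:

```python
# Each integration exposes a ScheduledModifierManager with the same from_yaml entry point.
from sparseml.pytorch.optim import ScheduledModifierManager as TorchManager
from sparseml.keras.optim import ScheduledModifierManager as KerasManager

# Placeholder path; point this at a downloaded or hand-written recipe.
recipe_path = "recipe.yaml"

torch_manager = TorchManager.from_yaml(recipe_path)  # requires PyTorch
keras_manager = KerasManager.from_yaml(recipe_path)  # requires TensorFlow/Keras
```
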
A sample recipe for pruning a model generally looks like the following: ```yaml @@ -185,7 +183,7 @@ Notes: the script `scripts/pytorch/model_quantize_qat_export.py` or the function `neuralmagicML.pytorch.quantization.quantize_qat_export`. - If performing QAT on a sparse model, you must preserve sparsity during QAT by - applying a `ConstantKSModifier` or have already used a `GradualKSModifier` with + applying a `ConstantPruningModifier` or have already used a `GMPruningModifier` with `leave_enabled` set to True. Required Parameters: diff --git a/sparseml/api/modules.html b/sparseml/api/modules.html index 39abc166b45..924bbc9482f 100644 --- a/sparseml/api/modules.html +++ b/sparseml/api/modules.html @@ -98,16 +98,17 @@

API

-

Help and Support

+

Help

diff --git a/sparseml/api/sparseml.html b/sparseml/api/sparseml.html index f98a9700eb0..097ba0c7771 100644 --- a/sparseml/api/sparseml.html +++ b/sparseml/api/sparseml.html @@ -44,7 +44,7 @@ - + @@ -100,7 +100,7 @@

API

    @@ -120,10 +120,11 @@
-

Help and Support

+

Help

@@ -467,7 +468,7 @@

Submodules - +


diff --git a/sparseml/api/sparseml.keras.html b/sparseml/api/sparseml.keras.html index c6fa41d839c..7cbf4ece718 100644 --- a/sparseml/api/sparseml.keras.html +++ b/sparseml/api/sparseml.keras.html @@ -100,7 +100,7 @@

API

    @@ -124,10 +124,11 @@
-

Help and Support

+

Help

diff --git a/sparseml/api/sparseml.keras.optim.html b/sparseml/api/sparseml.keras.optim.html index e8aeb69ba8f..c454ff1741b 100644 --- a/sparseml/api/sparseml.keras.optim.html +++ b/sparseml/api/sparseml.keras.optim.html @@ -100,7 +100,7 @@

API

    @@ -124,10 +124,11 @@
-

Help and Support

+

Help

@@ -231,19 +232,24 @@

Submodules
-static from_yaml(file_path: str, add_modifiers: Optional[List[sparseml.keras.optim.modifier.Modifier]] = None)[source]
-

Convenience function used to create the manager of multiple modifiers -from a yaml file.

+static from_yaml(file_path: Union[str, sparsezoo.objects.optimization_recipe.OptimizationRecipe], add_modifiers: Optional[List[sparseml.keras.optim.modifier.Modifier]] = None)[source] +

Convenience function used to create the manager of multiple modifiers from a +recipe file.

Parameters
    -
  • file_path – the path to the yaml file to load the modifier from

  • +
  • file_path – the path to the recipe file to load the modifier from, or +a SparseZoo model stub to load a recipe for a model stored in SparseZoo. +SparseZoo stubs should be preceded by ‘zoo:’, and can contain an optional +‘?recipe_type=<type>’ parameter. Can also be a SparseZoo OptimizationRecipe +object. i.e. ‘/path/to/local/recipe.yaml’, ‘zoo:model/stub/path’, +‘zoo:model/stub/path?recipe_type=transfer’

  • add_modifiers – additional modifiers that should be added to the -returned manager alongside the ones loaded from the yaml file

  • +returned manager alongside the ones loaded from the recipe file

Returns
-

ScheduledModifierManager() created from the yaml file

+

ScheduledModifierManager() created from the recipe file

@@ -289,6 +295,23 @@

Submodules +
+build(input_shape)[source]
+

Creates the variables of the layer (optional, for subclass implementers).

+

This is a method that implementers of subclasses of Layer or Model +can override if they need a state-creation step in-between +layer instantiation and layer call.

+

This is typically used to create the weights of Layer subclasses.

+
+
Parameters
+

input_shape – Instance of TensorShape, or list of instances of +TensorShape if the layer expects a list of inputs +(one instance per input).

+
+
+
+
call(inputs: tensorflow.python.framework.ops.Tensor, training=None)[source]
@@ -315,6 +338,35 @@

Submodules +
+classmethod from_config(config)[source]
+

Creates a layer from its config.

+

This method is the reverse of get_config, +capable of instantiating the same layer from the config +dictionary. It does not handle layer connectivity +(handled by Network), nor weights (handled by set_weights).

+
+
Parameters
+

config – A Python dictionary, typically the +output of get_config.

+
+
Returns
+

A layer instance.

+
+
+

+ +
+
+get_config()[source]
+

Get layer config +Serialization and deserialization should be done using +tf.keras.serialize/deserialize, which create and retrieve the “class_name” +field automatically. +The resulting config below therefore does not contain the field.

+
+
property global_step
@@ -325,6 +377,11 @@

Submodulesproperty mask_updater

+
+
+property masked_layer
+
+
property masks
@@ -347,6 +404,26 @@

Submodulesclass sparseml.keras.optim.mask_pruning.PruningScheduler[source]

Bases: abc.ABC

Abstract pruning scheduler

+
+
+classmethod deserialize(config)[source]
+

Deserialize a pruning scheduler from config returned by scheduler’s +get_config method

+
+
Parameters
+

config – a pruning scheduler’s config

+
+
Returns
+

a pruning scheduler instance

+
+
+
+ +
+
+abstract get_config()[source]
+
+
abstract should_prune(step: int) → bool[source]
@@ -1191,7 +1268,7 @@

Submodules
class sparseml.keras.optim.modifier_pruning.ConstantPruningModifier(params: Union[str, List[str]], start_epoch: float = - 1, end_epoch: float = - 1, log_types: Union[str, List[str]] = '__ALL__')[source]
-

Bases: sparseml.keras.optim.modifier.ScheduledModifier, sparseml.keras.optim.mask_pruning.PruningScheduler

+

Bases: sparseml.keras.optim.modifier.ScheduledModifier

Holds the sparsity level and shape for a given param constant while training. Useful for transfer learning use cases.

@@ -1306,7 +1383,7 @@

Submodules
-class sparseml.keras.optim.modifier_pruning.GMPruningModifier(params: Union[str, List[str]], init_sparsity: float, final_sparsity: float, start_epoch: float, end_epoch: float, update_frequency: float, inter_func: str = 'cubic', log_types: Union[str, List[str]] = '__ALL__', mask_type: Union[str, List[int], sparseml.keras.optim.mask_pruning_creator.PruningMaskCreator] = 'unstructured', leave_enabled: bool = True)[source]
+class sparseml.keras.optim.modifier_pruning.GMPruningModifier(params: Union[str, List[str]], init_sparsity: float, final_sparsity: float, start_epoch: float, end_epoch: float, update_frequency: float, inter_func: str = 'cubic', log_types: Union[str, List[str]] = '__ALL__', mask_type: Union[str, List[int]] = 'unstructured', leave_enabled: bool = True)[source]

Bases: sparseml.keras.optim.modifier.ScheduledUpdateModifier

Gradually applies kernel sparsity to a given variable or variables from init_sparsity until final_sparsity is reached over a given amount of time and @@ -1352,7 +1429,7 @@

Submodules
mask_type
-

the PruningMaskCreator object used

+

the mask type used

Type

return

diff --git a/sparseml/api/sparseml.keras.utils.html b/sparseml/api/sparseml.keras.utils.html index 2b73431b1b9..73318a24f58 100644 --- a/sparseml/api/sparseml.keras.utils.html +++ b/sparseml/api/sparseml.keras.utils.html @@ -100,7 +100,7 @@

API

    @@ -124,10 +124,11 @@
-

Help and Support

+

Help

diff --git a/sparseml/api/sparseml.onnx.html b/sparseml/api/sparseml.onnx.html index 9b63151424c..a2f3565338d 100644 --- a/sparseml/api/sparseml.onnx.html +++ b/sparseml/api/sparseml.onnx.html @@ -100,7 +100,7 @@

API

    @@ -124,10 +124,11 @@
-

Help and Support

+

Help

diff --git a/sparseml/api/sparseml.onnx.optim.html b/sparseml/api/sparseml.onnx.optim.html index 1a2bc3e58b2..0475121d042 100644 --- a/sparseml/api/sparseml.onnx.optim.html +++ b/sparseml/api/sparseml.onnx.optim.html @@ -100,7 +100,7 @@

API

    @@ -124,10 +124,11 @@
-

Help and Support

+

Help

diff --git a/sparseml/api/sparseml.onnx.optim.quantization.html b/sparseml/api/sparseml.onnx.optim.quantization.html index c20842e56a7..560f13e9f95 100644 --- a/sparseml/api/sparseml.onnx.optim.quantization.html +++ b/sparseml/api/sparseml.onnx.optim.quantization.html @@ -100,7 +100,7 @@

API

    @@ -124,10 +124,11 @@
-

Help and Support

+

Help

diff --git a/sparseml/api/sparseml.onnx.utils.html b/sparseml/api/sparseml.onnx.utils.html index 49c874f5e12..857f35517e8 100644 --- a/sparseml/api/sparseml.onnx.utils.html +++ b/sparseml/api/sparseml.onnx.utils.html @@ -100,7 +100,7 @@

API

    @@ -124,10 +124,11 @@
-

Help and Support

+

Help

diff --git a/sparseml/api/sparseml.optim.html b/sparseml/api/sparseml.optim.html index d00f5b98b81..e9d3d3e46bf 100644 --- a/sparseml/api/sparseml.optim.html +++ b/sparseml/api/sparseml.optim.html @@ -100,7 +100,7 @@

API

    @@ -129,10 +129,11 @@
-

Help and Support

+

Help

diff --git a/sparseml/api/sparseml.pytorch.datasets.classification.html b/sparseml/api/sparseml.pytorch.datasets.classification.html index 097bf3ed5fd..b89e94aacc7 100644 --- a/sparseml/api/sparseml.pytorch.datasets.classification.html +++ b/sparseml/api/sparseml.pytorch.datasets.classification.html @@ -100,7 +100,7 @@

API

    @@ -124,10 +124,11 @@
-

Help and Support

+

Help

diff --git a/sparseml/api/sparseml.pytorch.datasets.detection.html b/sparseml/api/sparseml.pytorch.datasets.detection.html index 62667e0404c..a2d50fad52f 100644 --- a/sparseml/api/sparseml.pytorch.datasets.detection.html +++ b/sparseml/api/sparseml.pytorch.datasets.detection.html @@ -100,7 +100,7 @@

API

    @@ -124,10 +124,11 @@
-

Help and Support

+

Help

diff --git a/sparseml/api/sparseml.pytorch.datasets.html b/sparseml/api/sparseml.pytorch.datasets.html index eaec8ecabad..4502c44ee55 100644 --- a/sparseml/api/sparseml.pytorch.datasets.html +++ b/sparseml/api/sparseml.pytorch.datasets.html @@ -100,7 +100,7 @@

API

    @@ -124,10 +124,11 @@
-

Help and Support

+

Help

diff --git a/sparseml/api/sparseml.pytorch.datasets.recommendation.html b/sparseml/api/sparseml.pytorch.datasets.recommendation.html index 62ac95ce4d3..a255d86ba09 100644 --- a/sparseml/api/sparseml.pytorch.datasets.recommendation.html +++ b/sparseml/api/sparseml.pytorch.datasets.recommendation.html @@ -100,7 +100,7 @@

API

    @@ -124,10 +124,11 @@
-

Help and Support

+

Help

diff --git a/sparseml/api/sparseml.pytorch.datasets.video.html b/sparseml/api/sparseml.pytorch.datasets.video.html index 57f5f1b2388..720bad9fe2e 100644 --- a/sparseml/api/sparseml.pytorch.datasets.video.html +++ b/sparseml/api/sparseml.pytorch.datasets.video.html @@ -100,7 +100,7 @@

API

    @@ -124,10 +124,11 @@
-

Help and Support

+

Help

diff --git a/sparseml/api/sparseml.pytorch.html b/sparseml/api/sparseml.pytorch.html index 628a98f118d..d168ca9a90c 100644 --- a/sparseml/api/sparseml.pytorch.html +++ b/sparseml/api/sparseml.pytorch.html @@ -100,7 +100,7 @@

API

    @@ -124,10 +124,11 @@
-

Help and Support

+

Help

diff --git a/sparseml/api/sparseml.pytorch.models.classification.html b/sparseml/api/sparseml.pytorch.models.classification.html index f4dd943675e..251fddbdbe2 100644 --- a/sparseml/api/sparseml.pytorch.models.classification.html +++ b/sparseml/api/sparseml.pytorch.models.classification.html @@ -100,7 +100,7 @@

API

    @@ -124,10 +124,11 @@
-

Help and Support

+

Help

@@ -302,7 +303,10 @@

SubmodulesQuick Tour
  • Installation
  • -
  • Optimization Recipes
  • +
  • Sparsification Recipes
  • API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    @@ -383,7 +384,10 @@

    SubmodulesQuick Tour
  • Installation
  • -
  • Optimization Recipes
  • +
  • Sparsification Recipes
  • API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    diff --git a/sparseml/api/sparseml.pytorch.models.html b/sparseml/api/sparseml.pytorch.models.html index a460ae83761..4e8476486c6 100644 --- a/sparseml/api/sparseml.pytorch.models.html +++ b/sparseml/api/sparseml.pytorch.models.html @@ -100,7 +100,7 @@

    API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    diff --git a/sparseml/api/sparseml.pytorch.models.recommendation.html b/sparseml/api/sparseml.pytorch.models.recommendation.html index f3f65c96c43..682dd6de1f3 100644 --- a/sparseml/api/sparseml.pytorch.models.recommendation.html +++ b/sparseml/api/sparseml.pytorch.models.recommendation.html @@ -100,7 +100,7 @@

    API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    diff --git a/sparseml/api/sparseml.pytorch.nn.html b/sparseml/api/sparseml.pytorch.nn.html index b122f1cc66b..0edeeb76eff 100644 --- a/sparseml/api/sparseml.pytorch.nn.html +++ b/sparseml/api/sparseml.pytorch.nn.html @@ -100,7 +100,7 @@

    API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    diff --git a/sparseml/api/sparseml.pytorch.optim.html b/sparseml/api/sparseml.pytorch.optim.html index 4e88b44562e..68c3074f47e 100644 --- a/sparseml/api/sparseml.pytorch.optim.html +++ b/sparseml/api/sparseml.pytorch.optim.html @@ -100,7 +100,7 @@

    API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    @@ -718,19 +719,24 @@

    Submodules
    -static from_yaml(file_path: str, add_modifiers: Optional[List[sparseml.pytorch.optim.modifier.Modifier]] = None)[source]
    +static from_yaml(file_path: Union[str, sparsezoo.objects.optimization_recipe.OptimizationRecipe], add_modifiers: Optional[List[sparseml.pytorch.optim.modifier.Modifier]] = None)[source]

    Convenience function used to create the manager of multiple modifiers from a -yaml file.

    +recipe file.

    Parameters
      -
    • file_path – the path to the yaml file to load the modifier from

    • +
    • file_path – the path to the recipe file to load the modifier from, or +a SparseZoo model stub to load a recipe for a model stored in SparseZoo. +SparseZoo stubs should be preceded by ‘zoo:’, and can contain an optional +‘?recipe_type=<type>’ parameter. Can also be a SparseZoo OptimizationRecipe +object. i.e. ‘/path/to/local/recipe.yaml’, ‘zoo:model/stub/path’, +‘zoo:model/stub/path?recipe_type=transfer’

    • add_modifiers – additional modifiers that should be added to the -returned manager alongside the ones loaded from the yaml file

    • +returned manager alongside the ones loaded from the recipe file

    Returns
    -

    ScheduledModifierManager() created from the yaml file

    +

    ScheduledModifierManager() created from the recipe file
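For example, a minimal usage sketch (the recipe path and SparseZoo stub below are placeholders taken from the docstring's example formats):

from sparseml.pytorch.optim import ScheduledModifierManager

# load modifiers from a local recipe file (hypothetical path)
manager = ScheduledModifierManager.from_yaml("/path/to/local/recipe.yaml")

# or load the recipe attached to a SparseZoo model (hypothetical stub)
manager = ScheduledModifierManager.from_yaml("zoo:model/stub/path")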

    @@ -2767,7 +2773,7 @@

    Submodules
    Sample yaml:
    -
    !ConstantKSModifier
    +
    !ConstantPruningModifier
    start_epoch: 0.0
    end_epoch: 10.0
    @@ -2924,7 +2930,7 @@

    Submodules
    Sample yaml:
    -
    !GradualKSModifier
    +
    !GMPruningModifier
    init_sparsity: 0.05
    final_sparsity: 0.8
    @@ -3151,7 +3157,7 @@

    Submodules
    -class sparseml.pytorch.optim.modifier_quantization.QuantizationModifier(start_epoch: float = - 1.0, submodules: Optional[List[str]] = None, model_fuse_fn_name: Optional[str] = None, disable_quantization_observer_epoch: Union[None, float] = None, freeze_bn_stats_epoch: Union[None, float] = None, end_epoch: float = - 1)[source]
    +class sparseml.pytorch.optim.modifier_quantization.QuantizationModifier(start_epoch: float = - 1.0, submodules: Optional[List[str]] = None, model_fuse_fn_name: Optional[str] = None, disable_quantization_observer_epoch: Union[None, float] = None, freeze_bn_stats_epoch: Union[None, float] = None, end_epoch: float = - 1, model_fuse_fn_kwargs: Optional[Dict[str, Any]] = None)[source]

    Bases: sparseml.pytorch.optim.modifier.ScheduledModifier

Enables quantization aware training (QAT) for a given module or its submodules. After the start epoch, the specified module(s)’ forward pass will emulate @@ -3186,6 +3192,8 @@

    SubmodulesQuick Tour
  • Installation
  • -
  • Optimization Recipes
  • +
  • Sparsification Recipes
  • API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    @@ -218,7 +219,7 @@

    Submodules
    -sparseml.pytorch.optim.quantization.helpers.fuse_module_conv_bn_relus(module: torch.nn.modules.module.Module, inplace: bool = True) → torch.nn.modules.module.Module[source]
    +sparseml.pytorch.optim.quantization.helpers.fuse_module_conv_bn_relus(module: torch.nn.modules.module.Module, inplace: bool = True, override_bn_subclasses_forward: Union[bool, str] = True) → torch.nn.modules.module.Module[source]

    Performs fusion of Conv2d, BatchNorm2d, and ReLU layers found in the given module. To be fused, these layers must appear sequentially in module.named_modules() and be in the same submodule. @@ -230,6 +231,12 @@

    Submodules
    • module – the module to fuse

    • inplace – set True to perform fusions in-place. default is True

    • +
    • override_bn_subclasses_forward – if True, modules that are subclasses of +BatchNorm2d will be modified to be BatchNorm2d but with the forward +pass and state variables copied from the subclass. This is so these +BN modules can pass PyTorch type checking when fusing. Can set to +“override-only” and only parameters will be overwritten, not the +forward pass. Default is True

    Returns
    @@ -301,11 +308,12 @@

    Submodules
    -sparseml.pytorch.optim.quantization.quantize_qat_export.quantize_torch_qat_export(model: onnx.onnx_ONNX_REL_1_6_ml_pb2.ModelProto, inplace: bool = True) → onnx.onnx_ONNX_REL_1_6_ml_pb2.ModelProto[source]
    +sparseml.pytorch.optim.quantization.quantize_qat_export.quantize_torch_qat_export(model: Union[onnx.onnx_ONNX_REL_1_6_ml_pb2.ModelProto, str], output_file_path: Optional[str] = None, inplace: bool = True) → onnx.onnx_ONNX_REL_1_6_ml_pb2.ModelProto[source]
    Parameters
      -
    • model – The model to convert

    • +
    • model – The model to convert, or a file path to it

    • +
    • output_file_path – File path to save the converted model to

    • inplace – If true, does conversion of model in place. Default is true
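As a rough usage sketch under assumed file names, the updated signature lets the conversion read a QAT ONNX export from disk and write the converted model to a new file:

from sparseml.pytorch.optim.quantization.quantize_qat_export import quantize_torch_qat_export

# hypothetical file names; reads the QAT export and saves the quantized model
quantize_torch_qat_export(
    model="model_qat.onnx",
    output_file_path="model_quantized.onnx",
)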

    diff --git a/sparseml/api/sparseml.pytorch.utils.html b/sparseml/api/sparseml.pytorch.utils.html index 937ddec83ad..e61d748bf92 100644 --- a/sparseml/api/sparseml.pytorch.utils.html +++ b/sparseml/api/sparseml.pytorch.utils.html @@ -100,7 +100,7 @@

    API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    @@ -1879,7 +1880,10 @@

    Submodules
    Parameters
    -

    Help and Support

    +

    Help

    diff --git a/sparseml/api/sparseml.tensorflow_v1.datasets.html b/sparseml/api/sparseml.tensorflow_v1.datasets.html index 497135cbe03..ee8a561e676 100644 --- a/sparseml/api/sparseml.tensorflow_v1.datasets.html +++ b/sparseml/api/sparseml.tensorflow_v1.datasets.html @@ -100,7 +100,7 @@

    API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    diff --git a/sparseml/api/sparseml.tensorflow_v1.html b/sparseml/api/sparseml.tensorflow_v1.html index f9b9a2e6291..aa5e9633ac0 100644 --- a/sparseml/api/sparseml.tensorflow_v1.html +++ b/sparseml/api/sparseml.tensorflow_v1.html @@ -100,7 +100,7 @@

    API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    diff --git a/sparseml/api/sparseml.tensorflow_v1.models.classification.html b/sparseml/api/sparseml.tensorflow_v1.models.classification.html index 1ad92199514..5b74b673ff9 100644 --- a/sparseml/api/sparseml.tensorflow_v1.models.classification.html +++ b/sparseml/api/sparseml.tensorflow_v1.models.classification.html @@ -100,7 +100,7 @@

    API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    diff --git a/sparseml/api/sparseml.tensorflow_v1.models.html b/sparseml/api/sparseml.tensorflow_v1.models.html index 34c9b85ce89..b90a20397ca 100644 --- a/sparseml/api/sparseml.tensorflow_v1.models.html +++ b/sparseml/api/sparseml.tensorflow_v1.models.html @@ -100,7 +100,7 @@

    API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    diff --git a/sparseml/api/sparseml.tensorflow_v1.nn.html b/sparseml/api/sparseml.tensorflow_v1.nn.html index 0344a8025fa..e8636a9c71e 100644 --- a/sparseml/api/sparseml.tensorflow_v1.nn.html +++ b/sparseml/api/sparseml.tensorflow_v1.nn.html @@ -100,7 +100,7 @@

    API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    diff --git a/sparseml/api/sparseml.tensorflow_v1.optim.html b/sparseml/api/sparseml.tensorflow_v1.optim.html index eeeebed09dd..ada6b6ef53e 100644 --- a/sparseml/api/sparseml.tensorflow_v1.optim.html +++ b/sparseml/api/sparseml.tensorflow_v1.optim.html @@ -100,7 +100,7 @@

    API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    @@ -319,19 +320,24 @@

    Submodules
    -static from_yaml(file_path: str, add_modifiers: Optional[List[sparseml.tensorflow_v1.optim.modifier.Modifier]] = None)[source]
    -

    Convenience function used to create the manager of multiple modifiers -from a yaml file.

    +static from_yaml(file_path: Union[str, sparsezoo.objects.optimization_recipe.OptimizationRecipe], add_modifiers: Optional[List[sparseml.tensorflow_v1.optim.modifier.Modifier]] = None)[source] +

    Convenience function used to create the manager of multiple modifiers from a +recipe file.

    Parameters
      -
    • file_path – the path to the yaml file to load the modifier from

    • +
    • file_path – the path to the recipe file to load the modifier from, or +a SparseZoo model stub to load a recipe for a model stored in SparseZoo. +SparseZoo stubs should be preceded by ‘zoo:’, and can contain an optional +‘?recipe_type=<type>’ parameter. Can also be a SparseZoo OptimizationRecipe +object. i.e. ‘/path/to/local/recipe.yaml’, ‘zoo:model/stub/path’, +‘zoo:model/stub/path?recipe_type=transfer’

    • add_modifiers – additional modifiers that should be added to the -returned manager alongside the ones loaded from the yaml file

    • +returned manager alongside the ones loaded from the recipe file

    Returns
    -

    ScheduledModifierManager() created from the yaml file

    +

    ScheduledModifierManager() created from the recipe file
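For example, a minimal sketch of pulling a transfer-learning recipe for a SparseZoo model (the stub below is a placeholder following the docstring's format):

from sparseml.tensorflow_v1.optim import ScheduledModifierManager

# hypothetical SparseZoo stub; '?recipe_type=transfer' selects the transfer recipe
manager = ScheduledModifierManager.from_yaml("zoo:model/stub/path?recipe_type=transfer")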

    @@ -1792,7 +1798,7 @@

    Submodules
    Sample yaml:
    -
    !ConstantKSModifier
    +
    !ConstantPruningModifier
    params: __ALL__
    start_epoch: 0.0
    @@ -1934,7 +1940,7 @@

    Submodules
    Sample yaml:
    -
    !GradualKSModifier
    +
    !GMPruningModifier
    params: __ALL__
    init_sparsity: 0.05
    diff --git a/sparseml/api/sparseml.tensorflow_v1.utils.html b/sparseml/api/sparseml.tensorflow_v1.utils.html index d1d8c1fdb8a..004591f83ab 100644 --- a/sparseml/api/sparseml.tensorflow_v1.utils.html +++ b/sparseml/api/sparseml.tensorflow_v1.utils.html @@ -100,7 +100,7 @@

    API

      @@ -124,10 +124,11 @@
    -

    Help and Support

    +

    Help

    diff --git a/sparseml/api/sparseml.utils.datasets.html b/sparseml/api/sparseml.utils.datasets.html index ed6cb8b417b..7fd2a97ae68 100644 --- a/sparseml/api/sparseml.utils.datasets.html +++ b/sparseml/api/sparseml.utils.datasets.html @@ -99,7 +99,7 @@

    API

      @@ -129,10 +129,11 @@
    -

    Help and Support

    +

    Help

    diff --git a/sparseml/api/sparseml.utils.html b/sparseml/api/sparseml.utils.html index 8fd18393221..4f4267e2a2e 100644 --- a/sparseml/api/sparseml.utils.html +++ b/sparseml/api/sparseml.utils.html @@ -100,7 +100,7 @@

    API

      @@ -130,10 +130,11 @@
    -

    Help and Support

    +

    Help

    @@ -479,13 +480,20 @@

    Submodules
    -sparseml.utils.helpers.load_recipe_yaml_str(file_path: str) → str[source]
    +sparseml.utils.helpers.load_recipe_yaml_str(file_path: Union[str, sparsezoo.objects.optimization_recipe.OptimizationRecipe]) → str[source]

    Loads a YAML recipe file to a string or -extracts recipe from YAML front matter in a sparsezoo markdown recipe card.

    +extracts recipe from YAML front matter in a sparsezoo markdown recipe card. +Recipes can also be provided as SparseZoo model stubs or OptimizationRecipe +objects.

    YAML front matter: https://jekyllrb.com/docs/front-matter/

    Parameters
    -

    file_path – file path to recipe YAML file or markdown recipe card

    +

    file_path – file path to recipe YAML file or markdown recipe card or +stub to a SparseZoo model whose recipe will be downloaded and loaded. +SparseZoo stubs should be preceded by ‘zoo:’, and can contain an optional +‘?recipe_type=<type>’ parameter. Can also be a SparseZoo OptimizationRecipe +object. i.e. ‘/path/to/local/recipe.yaml’, ‘zoo:model/stub/path’, +‘zoo:model/stub/path?recipe_type=transfer’

    Returns

    the recipe YAML configuration loaded as a string
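A brief sketch of the accepted inputs (the path and stub below are placeholders):

from sparseml.utils import load_recipe_yaml_str

yaml_str = load_recipe_yaml_str("/path/to/local/recipe.yaml")  # local recipe file or markdown recipe card
yaml_str = load_recipe_yaml_str("zoo:model/stub/path?recipe_type=transfer")  # SparseZoo stub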

    diff --git a/sparseml/genindex.html b/sparseml/genindex.html index 47b2bd138b0..31f2d8b459e 100644 --- a/sparseml/genindex.html +++ b/sparseml/genindex.html @@ -98,16 +98,17 @@

    API

    -

    Help and Support

    +

    Help

    @@ -386,8 +387,6 @@

    B

  • (sparseml.pytorch.utils.ssd_helpers.MeanAveragePrecision method)
  • - - + - +

    diff --git a/sparseml/objects.inv b/sparseml/objects.inv index ba5d4232af0..1a540491b5e 100644 Binary files a/sparseml/objects.inv and b/sparseml/objects.inv differ diff --git a/sparseml/py-modindex.html b/sparseml/py-modindex.html index ba1978defd7..bfdda9791ec 100644 --- a/sparseml/py-modindex.html +++ b/sparseml/py-modindex.html @@ -101,16 +101,17 @@

    API

    -

    Help and Support

    +

    Help

    diff --git a/sparseml/quicktour.html b/sparseml/quicktour.html index 44323239b59..67ed1a99141 100644 --- a/sparseml/quicktour.html +++ b/sparseml/quicktour.html @@ -99,11 +99,11 @@

    General

    • Quick Tour
        -
      • PyTorch Optimization
      • +
      • PyTorch Sparsification
      • Keras Optimization
      • -
      • TensorFlow V1 Optimization

        API

        -

        Help and Support

        +

        Help

        @@ -208,12 +209,11 @@ limitations under the License. -->

        Quick Tour

        -

        To enable flexibility, ease of use, and repeatability, optimizing a model is generally done using a recipe file. -The files encode the instructions needed for modifying the model and/or training process as a list of modifiers. +

        To enable flexibility, ease of use, and repeatability, sparsifying a model is generally done using a recipe. +The recipes encode the instructions needed for modifying the model and/or training process as a list of modifiers. Example modifiers can be anything from setting the learning rate for the optimizer to gradual magnitude pruning. The files are written in YAML and stored in YAML or markdown files using YAML front matter. -The rest of the SparseML system is coded to parse the recipe files into a native format for the desired framework -and apply the modifications to the model and training pipeline.

        +The rest of the SparseML system is coded to parse the recipes into a native format for the desired framework and apply the modifications to the model and training pipeline.

        A sample recipe for pruning a model generally looks like the following:

        version: 0.1.0
         modifiers:
        @@ -242,11 +242,14 @@ 

        Quick Tourhere. Additionally, all code implementations of the modifiers under the optim packages for the frameworks are documented with example YAML formats.

        Pre-configured recipes and the resulting models can be explored and downloaded from the SparseZoo. Also, Sparsify enables autoML style creation of optimization recipes for use with SparseML.

        For a more in-depth read, check out SparseML documentation.

        -
        -

        PyTorch Optimization

        -

        The PyTorch optimization libraries are located under the sparseml.pytorch.optim package. -Inside are APIs designed to make model optimization as easy as possible by integrating seamlessly into PyTorch training pipelines.

        -

        The integration is done using the ScheduledOptimizer class. It is intended to wrap your current optimizer and its step function. The step function then calls into the ScheduledModifierManager class which can be created from a recipe file. With this setup, the training process can then be modified as desired to optimize the model.

        +
        +

        PyTorch Sparsification

        +

        The PyTorch sparsification libraries are located under the sparseml.pytorch.optim package. +Inside are APIs designed to make model sparsification as easy as possible by integrating seamlessly into PyTorch training pipelines.

        +

        The integration is done using the ScheduledOptimizer class. +It is intended to wrap your current optimizer and its step function. +The step function then calls into the ScheduledModifierManager class which can be created from a recipe file. +With this setup, the training process can then be modified as desired to sparsify the model.

        To enable all of this, the integration code you’ll need to write is only a handful of lines:

        from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer
         
        @@ -263,10 +266,10 @@ 

        PyTorch Optimization

        Keras Optimization

        -

        The Keras optimization libraries are located under the sparseml.keras.optim package. -Inside are APIs designed to make model optimization as easy as possible by integrating seamlessly into Keras training pipelines.

        +

        The Keras sparsification libraries are located under the sparseml.keras.optim package. +Inside are APIs designed to make model sparsification as easy as possible by integrating seamlessly into Keras training pipelines.

        The integration is done using the ScheduledModifierManager class which can be created from a recipe file. -This class handles modifying the Keras objects for the desired optimizations using the modify method. +This class handles modifying the Keras objects for the desired algorithms using the modify method. The edited model, optimizer, and any callbacks necessary to modify the training process are returned. The model and optimizer can be used normally and the callbacks must be passed into the fit or fit_generator function. If using train_on_batch, the callbacks must be invoked after each call. @@ -294,14 +297,15 @@
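A minimal sketch of that flow, assuming a Keras model, optimizer, training data, and steps_per_epoch are already defined, with a placeholder recipe path (check the API reference for the exact modify signature):

from sparseml.keras.optim import ScheduledModifierManager

manager = ScheduledModifierManager.from_yaml("/path/to/recipe.yaml")
# modify returns the edited model, optimizer, and the callbacks to pass into fit
model, optimizer, callbacks = manager.modify(model, optimizer, steps_per_epoch)

model.compile(optimizer=optimizer, loss="categorical_crossentropy")
model.fit(x_train, y_train, epochs=epochs, callbacks=callbacks)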

        Keras Optimization -

        TensorFlow V1 Optimization

        -

        The TensorFlow optimization libraries for TensorFlow version 1.X are located under the sparseml.tensorflow_v1.optim package. Inside are APIs designed to make model optimization as easy as possible by integrating seamlessly into TensorFlow V1 training pipelines.

        +
        +

        TensorFlow V1 Sparsification

        +

        The TensorFlow sparsification libraries for TensorFlow version 1.X are located under the sparseml.tensorflow_v1.optim package. +Inside are APIs designed to make model sparsification as easy as possible by integrating seamlessly into TensorFlow V1 training pipelines.

        The integration is done using the ScheduledModifierManager class which can be created from a recipe file. -This class handles modifying the TensorFlow graph for the desired optimizations. -With this setup, the training process can then be modified as desired to optimize the model.

        +This class handles modifying the TensorFlow graph for the desired algorithms. +With this setup, the training process can then be modified as desired to sparsify the model.

        -

        Estimator-based pipelines

        +

        Estimator-Based pipelines

Estimator-based pipelines are simpler to integrate with than session-based pipelines. The ScheduledModifierManager can override the necessary callbacks in the estimator to modify the graph using the modify_estimator function.

        from sparseml.tensorflow_v1.optim import ScheduledModifierManager
        @@ -317,11 +321,11 @@ 

        Estimator-based pipelines -

        Session-based pipelines

        +

        Session-Based pipelines

        Session-based pipelines need a little bit more as compared to estimator-based pipelines; however, it is still designed to require only a few lines of code for integration. After graph creation, the manager’s create_ops method must be called. -This will modify the graph as needed for the optimizations and return modifying ops and extras. +This will modify the graph as needed for the algorithms and return modifying ops and extras. After creating the session and training normally, call into session.run with the modifying ops after each step. Modifying extras contain objects such as tensorboard summaries of the modifiers to be used if desired. Finally, once completed, complete_graph must be called to remove the modifying ops for saving and export.
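A minimal sketch of that sequence, assuming the TensorFlow V1 graph, train_op, and step counts are already defined, with a placeholder recipe path (the exact create_ops arguments may differ; see the API reference):

import tensorflow as tf

from sparseml.tensorflow_v1.optim import ScheduledModifierManager

manager = ScheduledModifierManager.from_yaml("/path/to/recipe.yaml")
mod_ops, mod_extras = manager.create_ops(steps_per_epoch)  # call after the graph is built

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(num_train_steps):
        sess.run(train_op)   # normal training step (placeholder)
        sess.run(mod_ops)    # run the modifying ops after each step
    manager.complete_graph()  # remove the modifying ops before saving/export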

        diff --git a/sparseml/recipes.html b/sparseml/recipes.html index 1a7855ca178..55475bdbd91 100644 --- a/sparseml/recipes.html +++ b/sparseml/recipes.html @@ -7,7 +7,7 @@ - Optimization Recipes — SparseML 0.1.0 documentation + Sparsification Recipes — SparseML 0.1.0 documentation @@ -100,7 +100,7 @@
        • Quick Tour
        • Installation
        • -
        • Optimization Recipes
            +
          • Sparsification Recipes
            • Modifiers Intro
            • Training Epoch Modifiers
            • Pruning Modifiers

              Required Parameters:

              diff --git a/sparseml/search.html b/sparseml/search.html index f6fd892df52..b0380131cfb 100644 --- a/sparseml/search.html +++ b/sparseml/search.html @@ -101,16 +101,17 @@

              API

              -

              Help and Support

              +

              Help

              diff --git a/sparseml/searchindex.js b/sparseml/searchindex.js index 86995fa33b2..b9fe31ac24d 100644 --- a/sparseml/searchindex.js +++ b/sparseml/searchindex.js @@ -1 +1 @@ -Search.setIndex({docnames:["api/modules","api/sparseml","api/sparseml.keras","api/sparseml.keras.optim","api/sparseml.keras.utils","api/sparseml.onnx","api/sparseml.onnx.optim","api/sparseml.onnx.optim.quantization","api/sparseml.onnx.utils","api/sparseml.optim","api/sparseml.pytorch","api/sparseml.pytorch.datasets","api/sparseml.pytorch.datasets.classification","api/sparseml.pytorch.datasets.detection","api/sparseml.pytorch.datasets.recommendation","api/sparseml.pytorch.datasets.video","api/sparseml.pytorch.models","api/sparseml.pytorch.models.classification","api/sparseml.pytorch.models.detection","api/sparseml.pytorch.models.external","api/sparseml.pytorch.models.recommendation","api/sparseml.pytorch.nn","api/sparseml.pytorch.optim","api/sparseml.pytorch.optim.quantization","api/sparseml.pytorch.utils","api/sparseml.tensorflow_v1","api/sparseml.tensorflow_v1.datasets","api/sparseml.tensorflow_v1.datasets.classification","api/sparseml.tensorflow_v1.models","api/sparseml.tensorflow_v1.models.classification","api/sparseml.tensorflow_v1.nn","api/sparseml.tensorflow_v1.optim","api/sparseml.tensorflow_v1.utils","api/sparseml.utils","api/sparseml.utils.datasets","index","installation","quicktour","recipes"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":3,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["api/modules.rst","api/sparseml.rst","api/sparseml.keras.rst","api/sparseml.keras.optim.rst","api/sparseml.keras.utils.rst","api/sparseml.onnx.rst","api/sparseml.onnx.optim.rst","api/sparseml.onnx.optim.quantization.rst","api/sparseml.onnx.utils.rst","api/sparseml.optim.rst","api/sparseml.pytorch.rst","api/sparseml.pytorch.datasets.rst","api/sparseml.pytorch.datasets.classification.rst","api/sparseml.pytorch.datasets.detection.rst","api/sparseml.pytorch.datasets.recommendation.rst","api/sparseml.pytorch.datasets.video.rst","api/sparseml.pytorch.models.rst","api/sparseml.pytorch.models.classification.rst","api/sparseml.pytorch.models.detection.rst","api/sparseml.pytorch.models.external.rst","api/sparseml.pytorch.models.recommendation.rst","api/sparseml.pytorch.nn.rst","api/sparseml.pytorch.optim.rst","api/sparseml.pytorch.optim.quantization.rst","api/sparseml.pytorch.utils.rst","api/sparseml.tensorflow_v1.rst","api/sparseml.tensorflow_v1.datasets.rst","api/sparseml.tensorflow_v1.datasets.classification.rst","api/sparseml.tensorflow_v1.models.rst","api/sparseml.tensorflow_v1.models.classification.rst","api/sparseml.tensorflow_v1.nn.rst","api/sparseml.tensorflow_v1.optim.rst","api/sparseml.tensorflow_v1.utils.rst","api/sparseml.utils.rst","api/sparseml.utils.datasets.rst","index.rst","installation.md","quicktour.md","recipes.md"],objects:{"":{sparseml:[1,0,0,"-"]},"sparseml.keras":{optim:[3,0,0,"-"],utils:[4,0,0,"-"]},"sparseml.keras.optim":{manager:[3,0,0,"-"],mask_pruning:[3,0,0,"-"],mask_pruning_creator:[3,0,0,"-"],modifier:[3,0,0,"-"],modifier_epoch:[3,0,0,"-"],modifier_lr:[3,0,0,"-"],modifier_params:[3,0,0,"-"],modifier_pruning:[3,0,0,"-"],utils:[3,0,0,"-"]},"sparseml.keras.optim.manager":{ScheduledModifierManager:[3,1,1,""]},"sparseml.keras.optim.manager.ScheduledModifierMa
nager":{finalize:[3,2,1,""],from_yaml:[3,2,1,""],modify:[3,2,1,""]},"sparseml.keras.optim.mask_pruning":{MaskedLayer:[3,1,1,""],PruningScheduler:[3,1,1,""],remove_pruning_masks:[3,3,1,""]},"sparseml.keras.optim.mask_pruning.MaskedLayer":{call:[3,2,1,""],compute_output_shape:[3,2,1,""],global_step:[3,2,1,""],mask_updater:[3,2,1,""],masks:[3,2,1,""],pruned_layer:[3,2,1,""],pruning_vars:[3,2,1,""]},"sparseml.keras.optim.mask_pruning.PruningScheduler":{should_prune:[3,2,1,""],target_sparsity:[3,2,1,""]},"sparseml.keras.optim.mask_pruning_creator":{BlockPruningMaskCreator:[3,1,1,""],DimensionPruningMaskCreator:[3,1,1,""],GroupedPruningMaskCreator:[3,1,1,""],PruningMaskCreator:[3,1,1,""],UnstructuredPruningMaskCreator:[3,1,1,""],load_mask_creator:[3,3,1,""]},"sparseml.keras.optim.mask_pruning_creator.BlockPruningMaskCreator":{group_tensor:[3,2,1,""]},"sparseml.keras.optim.mask_pruning_creator.DimensionPruningMaskCreator":{group_tensor:[3,2,1,""]},"sparseml.keras.optim.mask_pruning_creator.GroupedPruningMaskCreator":{create_sparsity_mask:[3,2,1,""],get_grouping_op:[3,2,1,""],get_mask_initializer:[3,2,1,""],group_tensor:[3,2,1,""]},"sparseml.keras.optim.mask_pruning_creator.PruningMaskCreator":{create_sparsity_mask:[3,2,1,""],get_mask_initializer:[3,2,1,""]},"sparseml.keras.optim.mask_pruning_creator.UnstructuredPruningMaskCreator":{create_sparsity_mask:[3,2,1,""],get_mask_initializer:[3,2,1,""]},"sparseml.keras.optim.modifier":{KerasModifierYAML:[3,1,1,""],Modifier:[3,1,1,""],ModifierProp:[3,1,1,""],ScheduledModifier:[3,1,1,""],ScheduledUpdateModifier:[3,1,1,""]},"sparseml.keras.optim.modifier.Modifier":{finalize:[3,2,1,""],load_list:[3,2,1,""],load_obj:[3,2,1,""],modify:[3,2,1,""]},"sparseml.keras.optim.modifier.ModifierProp":{getter:[3,2,1,""],no_serialize_val:[3,2,1,""],restrictions:[3,2,1,""],serializable:[3,2,1,""],setter:[3,2,1,""]},"sparseml.keras.optim.modifier.ScheduledModifier":{end_epoch:[3,2,1,""],start_end_steps:[3,2,1,""],start_epoch:[3,2,1,""]},"sparseml.keras.optim.modifier.ScheduledUpdateModifier":{update_frequency_steps:[3,2,1,""]},"sparseml.keras.optim.modifier_epoch":{EpochRangeModifier:[3,1,1,""]},"sparseml.keras.optim.modifier_lr":{LearningRateModifier:[3,1,1,""],SetLearningRateModifier:[3,1,1,""]},"sparseml.keras.optim.modifier_lr.LearningRateModifier":{modify:[3,2,1,""]},"sparseml.keras.optim.modifier_lr.SetLearningRateModifier":{modify:[3,2,1,""]},"sparseml.keras.optim.modifier_params":{TrainableParamsModifier:[3,1,1,""]},"sparseml.keras.optim.modifier_params.TrainableParamsModifier":{layer_names:[3,2,1,""],modify:[3,2,1,""],params:[3,4,1,""],params_strict:[3,4,1,""],trainable:[3,4,1,""],validate:[3,2,1,""]},"sparseml.keras.optim.modifier_pruning":{ConstantPruningModifier:[3,1,1,""],GMPruningModifier:[3,1,1,""]},"sparseml.keras.optim.modifier_pruning.ConstantPruningModifier":{finalize:[3,2,1,""],is_pruning_step:[3,2,1,""],layer_names:[3,2,1,""],modify:[3,2,1,""],params:[3,4,1,""],sparsity:[3,2,1,""],update_ready:[3,2,1,""]},"sparseml.keras.optim.modifier_pruning.GMPruningModifier":{exponent:[3,4,1,""],final_sparsity:[3,4,1,""],finalize:[3,2,1,""],init_sparsity:[3,4,1,""],inter_func:[3,4,1,""],layer_names:[3,2,1,""],leave_enabled:[3,4,1,""],mask_type:[3,4,1,""],modify:[3,2,1,""],params:[3,4,1,""],prunable_layers:[3,2,1,""],sparsity:[3,2,1,""],update_ready:[3,2,1,""],validate:[3,2,1,""]},"sparseml.keras.optim.utils":{get_layer_name_from_param:[3,3,1,""]},"sparseml.keras.utils":{callbacks:[4,0,0,"-"],exporter:[4,0,0,"-"],logger:[4,0,0,"-"],model:[4,0,0,"-"]},"sparseml.keras.u
tils.callbacks":{LoggerSettingCallback:[4,1,1,""],LossesAndMetricsLoggingCallback:[4,1,1,""]},"sparseml.keras.utils.callbacks.LoggerSettingCallback":{on_epoch_begin:[4,2,1,""],on_epoch_end:[4,2,1,""],on_predict_batch_begin:[4,2,1,""],on_predict_batch_end:[4,2,1,""],on_predict_begin:[4,2,1,""],on_predict_end:[4,2,1,""],on_test_batch_begin:[4,2,1,""],on_test_batch_end:[4,2,1,""],on_test_begin:[4,2,1,""],on_test_end:[4,2,1,""],on_train_batch_begin:[4,2,1,""],on_train_batch_end:[4,2,1,""],on_train_begin:[4,2,1,""],on_train_end:[4,2,1,""]},"sparseml.keras.utils.callbacks.LossesAndMetricsLoggingCallback":{on_epoch_end:[4,2,1,""],on_test_end:[4,2,1,""],on_train_batch_end:[4,2,1,""],on_train_begin:[4,2,1,""]},"sparseml.keras.utils.exporter":{ModelExporter:[4,1,1,""]},"sparseml.keras.utils.exporter.ModelExporter":{export_h5:[4,2,1,""],export_keras:[4,2,1,""],export_onnx:[4,2,1,""],export_samples:[4,2,1,""]},"sparseml.keras.utils.logger":{KerasLogger:[4,1,1,""],LoggingMode:[4,1,1,""],PythonLogger:[4,1,1,""],TensorBoardLogger:[4,1,1,""]},"sparseml.keras.utils.logger.KerasLogger":{log_scalar:[4,2,1,""],mode:[4,2,1,""],name:[4,2,1,""],update_freq:[4,2,1,""]},"sparseml.keras.utils.logger.LoggingMode":{PREDICT:[4,4,1,""],TEST:[4,4,1,""],TRAIN:[4,4,1,""]},"sparseml.keras.utils.logger.PythonLogger":{log_scalar:[4,2,1,""]},"sparseml.keras.utils.logger.TensorBoardLogger":{log_scalar:[4,2,1,""]},"sparseml.keras.utils.model":{sparsity:[4,3,1,""]},"sparseml.log":{get_main_logger:[1,3,1,""],get_nm_root_logger:[1,3,1,""],set_logging_level:[1,3,1,""]},"sparseml.onnx":{optim:[6,0,0,"-"],utils:[8,0,0,"-"]},"sparseml.onnx.optim":{analyzer_model:[6,0,0,"-"],quantization:[7,0,0,"-"],sensitivity_pruning:[6,0,0,"-"]},"sparseml.onnx.optim.analyzer_model":{ModelAnalyzer:[6,1,1,""],NodeAnalyzer:[6,1,1,""]},"sparseml.onnx.optim.analyzer_model.ModelAnalyzer":{dict:[6,2,1,""],from_dict:[6,2,1,""],get_node:[6,2,1,""],load_json:[6,2,1,""],nodes:[6,2,1,""],save_json:[6,2,1,""]},"sparseml.onnx.optim.analyzer_model.NodeAnalyzer":{attributes:[6,2,1,""],bias_name:[6,2,1,""],bias_shape:[6,2,1,""],dict:[6,2,1,""],flops:[6,2,1,""],id_:[6,2,1,""],input_names:[6,2,1,""],input_shapes:[6,2,1,""],op_type:[6,2,1,""],output_names:[6,2,1,""],output_shapes:[6,2,1,""],params:[6,2,1,""],prunable:[6,2,1,""],prunable_equation_sensitivity:[6,2,1,""],prunable_params:[6,2,1,""],prunable_params_zeroed:[6,2,1,""],weight_name:[6,2,1,""],weight_shape:[6,2,1,""]},"sparseml.onnx.optim.quantization":{calibration:[7,0,0,"-"],quantize:[7,0,0,"-"],quantize_model_post_training:[7,0,0,"-"]},"sparseml.onnx.optim.quantization.calibration":{CalibrationSession:[7,1,1,""]},"sparseml.onnx.optim.quantization.calibration.CalibrationSession":{add_reduce_to_node_output:[7,2,1,""],generate_augmented_model:[7,2,1,""],get_model_input_names:[7,2,1,""],get_quantization_params_dict:[7,2,1,""],model:[7,2,1,""],model_augmented:[7,2,1,""],process_batch:[7,2,1,""]},"sparseml.onnx.optim.quantization.quantize":{ONNXQuantizer:[7,1,1,""],QuantizationMode:[7,1,1,""],QuantizedInitializer:[7,1,1,""],QuantizedValue:[7,1,1,""],QuantizedValueType:[7,1,1,""],check_opset_version:[7,3,1,""],quantize:[7,3,1,""],quantize_data:[7,3,1,""]},"sparseml.onnx.optim.quantization.quantize.ONNXQuantizer":{find_weight_data:[7,2,1,""],quantize_model:[7,2,1,""]},"sparseml.onnx.optim.quantization.quantize.QuantizationMode":{IntegerOps:[7,4,1,""],QLinearOps:[7,4,1,""]},"sparseml.onnx.optim.quantization.quantize.QuantizedValueType":{Initializer:[7,4,1,""],Input:[7,4,1,""]},"sparseml.onnx.optim.quantization.quantize
_model_post_training":{quantize_model_post_training:[7,3,1,""]},"sparseml.onnx.optim.sensitivity_pruning":{PruningLossSensitivityAnalysis:[6,1,1,""],PruningPerfSensitivityAnalysis:[6,1,1,""],PruningSensitivityResult:[6,1,1,""],pruning_loss_sens_approx:[6,3,1,""],pruning_loss_sens_magnitude:[6,3,1,""],pruning_loss_sens_magnitude_iter:[6,3,1,""],pruning_loss_sens_one_shot:[6,3,1,""],pruning_loss_sens_one_shot_iter:[6,3,1,""],pruning_perf_sens_one_shot:[6,3,1,""],pruning_perf_sens_one_shot_iter:[6,3,1,""]},"sparseml.onnx.optim.sensitivity_pruning.PruningLossSensitivityAnalysis":{add_result:[6,2,1,""],dict:[6,2,1,""],from_dict:[6,2,1,""],get_result:[6,2,1,""],load_json:[6,2,1,""],plot:[6,2,1,""],print_res:[6,2,1,""],results:[6,2,1,""],results_model:[6,2,1,""],save_json:[6,2,1,""]},"sparseml.onnx.optim.sensitivity_pruning.PruningPerfSensitivityAnalysis":{add_model_result:[6,2,1,""],add_result:[6,2,1,""],batch_size:[6,2,1,""],dict:[6,2,1,""],from_dict:[6,2,1,""],get_result:[6,2,1,""],load_json:[6,2,1,""],num_cores:[6,2,1,""],plot:[6,2,1,""],print_res:[6,2,1,""],results:[6,2,1,""],results_model:[6,2,1,""],save_json:[6,2,1,""]},"sparseml.onnx.optim.sensitivity_pruning.PruningSensitivityResult":{add_measurement:[6,2,1,""],averages:[6,2,1,""],baseline_average:[6,2,1,""],baseline_measurement_index:[6,2,1,""],baseline_measurement_key:[6,2,1,""],dict:[6,2,1,""],from_dict:[6,2,1,""],has_baseline:[6,2,1,""],id_:[6,2,1,""],index:[6,2,1,""],name:[6,2,1,""],sparse_average:[6,2,1,""],sparse_comparison:[6,2,1,""],sparse_integral:[6,2,1,""],sparse_measurements:[6,2,1,""]},"sparseml.onnx.utils":{data:[8,0,0,"-"],graph_editor:[8,0,0,"-"],graph_optimizer:[8,0,0,"-"],helpers:[8,0,0,"-"],loss:[8,0,0,"-"],model:[8,0,0,"-"],sparse_tensor:[8,0,0,"-"]},"sparseml.onnx.utils.data":{DataLoader:[8,1,1,""]},"sparseml.onnx.utils.data.DataLoader":{batch_size:[8,2,1,""],from_model_random:[8,2,1,""],from_random:[8,2,1,""],infinite:[8,2,1,""],iter_steps:[8,2,1,""],labeled_data:[8,2,1,""]},"sparseml.onnx.utils.graph_editor":{override_model_batch_size:[8,3,1,""],prune_model_one_shot:[8,3,1,""],prune_model_one_shot_iter:[8,3,1,""],prune_unstructured:[8,3,1,""],remove_node_and_params_from_graph:[8,3,1,""],swap_node_output:[8,3,1,""],update_model_param:[8,3,1,""]},"sparseml.onnx.utils.graph_optimizer":{fold_conv_bns:[8,3,1,""],quantize_resnet_identity_add_inputs:[8,3,1,""]},"sparseml.onnx.utils.helpers":{BatchNormParams:[8,1,1,""],NodeParam:[8,1,1,""],NodeShape:[8,1,1,""],SparsityMeasurement:[8,1,1,""],calculate_flops:[8,3,1,""],check_load_model:[8,3,1,""],conv_node_params:[8,3,1,""],extract_node_id:[8,3,1,""],extract_node_shapes:[8,3,1,""],extract_nodes_shapes_ort:[8,3,1,""],extract_nodes_shapes_shape_inference:[8,3,1,""],extract_shape:[8,3,1,""],gemm_node_params:[8,3,1,""],get_attr_float_val_for_node:[8,3,1,""],get_batch_norm_params:[8,3,1,""],get_init_by_name:[8,3,1,""],get_kernel_shape:[8,3,1,""],get_node_attributes:[8,3,1,""],get_node_by_id:[8,3,1,""],get_node_input_nodes:[8,3,1,""],get_node_inputs:[8,3,1,""],get_node_output_nodes:[8,3,1,""],get_node_outputs:[8,3,1,""],get_node_params:[8,3,1,""],get_nodes_by_input_id:[8,3,1,""],get_nodes_by_output_id:[8,3,1,""],get_numpy_dtype:[8,3,1,""],get_prunable_node_from_foldable:[8,3,1,""],get_prunable_nodes:[8,3,1,""],get_quantize_parent_for_dequantize_node:[8,3,1,""],is_foldable_node:[8,3,1,""],is_prunable_node:[8,3,1,""],matmul_node_params:[8,3,1,""],model_inputs:[8,3,1,""],model_outputs:[8,3,1,""],onnx_nodes_sparsities:[8,3,1,""],validate_onnx_file:[8,3,1,""]},"sparseml.onnx.utils.help
ers.BatchNormParams":{"var":[8,2,1,""],bias:[8,2,1,""],epsilon:[8,2,1,""],mean:[8,2,1,""],momentum:[8,2,1,""],scale:[8,2,1,""]},"sparseml.onnx.utils.helpers.NodeParam":{name:[8,2,1,""],val:[8,2,1,""]},"sparseml.onnx.utils.helpers.NodeShape":{id:[8,2,1,""],input_shapes:[8,2,1,""],output_shapes:[8,2,1,""]},"sparseml.onnx.utils.helpers.SparsityMeasurement":{density:[8,2,1,""],node_id:[8,2,1,""],params_count:[8,2,1,""],params_zero_count:[8,2,1,""],sparsity:[8,2,1,""]},"sparseml.onnx.utils.loss":{kl_divergence:[8,3,1,""]},"sparseml.onnx.utils.model":{DeepSparseAnalyzeModelRunner:[8,1,1,""],DeepSparseModelRunner:[8,1,1,""],ModelRunner:[8,1,1,""],ORTModelRunner:[8,1,1,""],OpenVINOModelRunner:[8,1,1,""],correct_nm_analyze_model_node_ids:[8,3,1,""],max_available_cores:[8,3,1,""],split_canonical_names:[8,3,1,""]},"sparseml.onnx.utils.model.DeepSparseAnalyzeModelRunner":{batch_forward:[8,2,1,""],run:[8,2,1,""]},"sparseml.onnx.utils.model.DeepSparseModelRunner":{batch_forward:[8,2,1,""],run:[8,2,1,""]},"sparseml.onnx.utils.model.ModelRunner":{batch_forward:[8,2,1,""],run:[8,2,1,""],run_iter:[8,2,1,""]},"sparseml.onnx.utils.model.ORTModelRunner":{batch_forward:[8,2,1,""],run:[8,2,1,""]},"sparseml.onnx.utils.model.OpenVINOModelRunner":{available:[8,2,1,""],batch_forward:[8,2,1,""],network_input_shapes:[8,2,1,""]},"sparseml.onnx.utils.sparse_tensor":{convert_model_initializers_to_sparse:[8,3,1,""],convert_sparse_initializers_to_dense:[8,3,1,""],create_sparse_tensor:[8,3,1,""],sparse_tensor_to_dense:[8,3,1,""]},"sparseml.optim":{analyzer:[9,0,0,"-"],learning_rate:[9,0,0,"-"],manager:[9,0,0,"-"],modifier:[9,0,0,"-"],sensitivity:[9,0,0,"-"]},"sparseml.optim.analyzer":{AnalyzedLayerDesc:[9,1,1,""]},"sparseml.optim.analyzer.AnalyzedLayerDesc":{dict:[9,2,1,""],load_descs:[9,2,1,""],merge_descs:[9,2,1,""],prunable:[9,2,1,""],save_descs:[9,2,1,""],terminal:[9,2,1,""]},"sparseml.optim.learning_rate":{LearningRate:[9,1,1,""],SetLearningRate:[9,1,1,""]},"sparseml.optim.learning_rate.LearningRate":{corrected_lr_info:[9,2,1,""],init_lr:[9,4,1,""],lr_class:[9,4,1,""],lr_kwargs:[9,4,1,""],validate_lr_info:[9,2,1,""]},"sparseml.optim.learning_rate.SetLearningRate":{learning_rate:[9,4,1,""],validate_learning_rate:[9,2,1,""]},"sparseml.optim.manager":{BaseManager:[9,1,1,""]},"sparseml.optim.manager.BaseManager":{max_epochs:[9,4,1,""],min_epochs:[9,4,1,""],modifiers:[9,4,1,""],modifiers_to_string_lines:[9,2,1,""],save:[9,2,1,""],to_string_lines:[9,2,1,""]},"sparseml.optim.modifier":{BaseModifier:[9,1,1,""],BaseObject:[9,1,1,""],BaseProp:[9,1,1,""],BaseScheduled:[9,1,1,""],BaseUpdate:[9,1,1,""],ModifierProp:[9,1,1,""],ModifierYAML:[9,1,1,""]},"sparseml.optim.modifier.BaseModifier":{enabled:[9,4,1,""],initialized:[9,4,1,""],load_framework_list:[9,2,1,""],load_framework_obj:[9,2,1,""],log_types:[9,4,1,""],props:[9,2,1,""],yaml_key:[9,2,1,""]},"sparseml.optim.modifier.BaseProp":{getter:[9,2,1,""],setter:[9,2,1,""]},"sparseml.optim.modifier.BaseScheduled":{end_epoch:[9,4,1,""],start_epoch:[9,4,1,""],validate_schedule:[9,2,1,""]},"sparseml.optim.modifier.BaseUpdate":{update_frequency:[9,4,1,""],validate_update:[9,2,1,""]},"sparseml.optim.modifier.ModifierProp":{getter:[9,2,1,""],no_serialize_val:[9,2,1,""],restrictions:[9,2,1,""],serializable:[9,2,1,""],setter:[9,2,1,""]},"sparseml.optim.sensitivity":{LRLossSensitivityAnalysis:[9,1,1,""],PruningLossSensitivityAnalysis:[9,1,1,""],PruningPerfSensitivityAnalysis:[9,1,1,""],PruningSensitivityResult:[9,1,1,""],default_pruning_sparsities_loss:[9,3,1,""],default_pruning_sparsities_perf:
[9,3,1,""]},"sparseml.optim.sensitivity.LRLossSensitivityAnalysis":{add_result:[9,2,1,""],dict:[9,2,1,""],load_json:[9,2,1,""],plot:[9,2,1,""],print_res:[9,2,1,""],results:[9,2,1,""],save_json:[9,2,1,""]},"sparseml.optim.sensitivity.PruningLossSensitivityAnalysis":{add_result:[9,2,1,""],dict:[9,2,1,""],from_dict:[9,2,1,""],get_result:[9,2,1,""],load_json:[9,2,1,""],plot:[9,2,1,""],print_res:[9,2,1,""],results:[9,2,1,""],results_model:[9,2,1,""],save_json:[9,2,1,""]},"sparseml.optim.sensitivity.PruningPerfSensitivityAnalysis":{add_model_result:[9,2,1,""],add_result:[9,2,1,""],batch_size:[9,2,1,""],dict:[9,2,1,""],from_dict:[9,2,1,""],get_result:[9,2,1,""],load_json:[9,2,1,""],num_cores:[9,2,1,""],plot:[9,2,1,""],print_res:[9,2,1,""],results:[9,2,1,""],results_model:[9,2,1,""],save_json:[9,2,1,""]},"sparseml.optim.sensitivity.PruningSensitivityResult":{add_measurement:[9,2,1,""],averages:[9,2,1,""],baseline_average:[9,2,1,""],baseline_measurement_index:[9,2,1,""],baseline_measurement_key:[9,2,1,""],dict:[9,2,1,""],from_dict:[9,2,1,""],has_baseline:[9,2,1,""],id_:[9,2,1,""],index:[9,2,1,""],name:[9,2,1,""],sparse_average:[9,2,1,""],sparse_comparison:[9,2,1,""],sparse_integral:[9,2,1,""],sparse_measurements:[9,2,1,""]},"sparseml.pytorch":{datasets:[11,0,0,"-"],models:[16,0,0,"-"],nn:[21,0,0,"-"],optim:[22,0,0,"-"],utils:[24,0,0,"-"]},"sparseml.pytorch.datasets":{classification:[12,0,0,"-"],detection:[13,0,0,"-"],generic:[11,0,0,"-"],recommendation:[14,0,0,"-"],registry:[11,0,0,"-"],video:[15,0,0,"-"]},"sparseml.pytorch.datasets.classification":{cifar:[12,0,0,"-"],imagefolder:[12,0,0,"-"],imagenet:[12,0,0,"-"],imagenette:[12,0,0,"-"],mnist:[12,0,0,"-"]},"sparseml.pytorch.datasets.classification.cifar":{CIFAR100Dataset:[12,1,1,""],CIFAR10Dataset:[12,1,1,""]},"sparseml.pytorch.datasets.classification.imagefolder":{ImageFolderDataset:[12,1,1,""]},"sparseml.pytorch.datasets.classification.imagefolder.ImageFolderDataset":{num_classes:[12,2,1,""]},"sparseml.pytorch.datasets.classification.imagenet":{ImageNetDataset:[12,1,1,""]},"sparseml.pytorch.datasets.classification.imagenette":{ImagenetteDataset:[12,1,1,""],ImagenetteSize:[12,1,1,""],ImagewoofDataset:[12,1,1,""]},"sparseml.pytorch.datasets.classification.imagenette.ImagenetteSize":{full:[12,4,1,""],s160:[12,4,1,""],s320:[12,4,1,""]},"sparseml.pytorch.datasets.classification.mnist":{MNISTDataset:[12,1,1,""]},"sparseml.pytorch.datasets.detection":{coco:[13,0,0,"-"],helpers:[13,0,0,"-"],voc:[13,0,0,"-"]},"sparseml.pytorch.datasets.detection.coco":{CocoDetectionDataset:[13,1,1,""],coco_2017_yolo:[13,3,1,""]},"sparseml.pytorch.datasets.detection.coco.CocoDetectionDataset":{default_boxes:[13,2,1,""]},"sparseml.pytorch.datasets.detection.helpers":{AnnotatedImageTransforms:[13,1,1,""],bounding_box_and_labels_to_yolo_fmt:[13,3,1,""],random_horizontal_flip_image_and_annotations:[13,3,1,""],ssd_collate_fn:[13,3,1,""],ssd_random_crop_image_and_annotations:[13,3,1,""],yolo_collate_fn:[13,3,1,""]},"sparseml.pytorch.datasets.detection.helpers.AnnotatedImageTransforms":{transforms:[13,2,1,""]},"sparseml.pytorch.datasets.detection.voc":{VOCDetectionDataset:[13,1,1,""],VOCSegmentationDataset:[13,1,1,""]},"sparseml.pytorch.datasets.detection.voc.VOCDetectionDataset":{default_boxes:[13,2,1,""]},"sparseml.pytorch.datasets.generic":{CacheableDataset:[11,1,1,""],EarlyStopDataset:[11,1,1,""],NoisyDataset:[11,1,1,""],RandNDataset:[11,1,1,""]},"sparseml.pytorch.datasets.registry":{DatasetRegistry:[11,1,1,""]},"sparseml.pytorch.datasets.registry.DatasetRegistry":{attributes
:[11,2,1,""],create:[11,2,1,""],register:[11,2,1,""]},"sparseml.pytorch.models":{classification:[17,0,0,"-"],detection:[18,0,0,"-"],external:[19,0,0,"-"],recommendation:[20,0,0,"-"],registry:[16,0,0,"-"]},"sparseml.pytorch.models.classification":{darknet:[17,0,0,"-"],efficientnet:[17,0,0,"-"],inception_v3:[17,0,0,"-"],mnist:[17,0,0,"-"],mobilenet:[17,0,0,"-"],mobilenet_v2:[17,0,0,"-"],resnet:[17,0,0,"-"],vgg:[17,0,0,"-"]},"sparseml.pytorch.models.classification.darknet":{DarkNet:[17,1,1,""],DarkNetSectionSettings:[17,1,1,""],darknet53:[17,3,1,""]},"sparseml.pytorch.models.classification.darknet.DarkNet":{as_classifier:[17,2,1,""],as_yolo_backbone:[17,2,1,""],create_section:[17,2,1,""],forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.classification.efficientnet":{EfficientNet:[17,1,1,""],EfficientNetSectionSettings:[17,1,1,""],efficientnet_b0:[17,3,1,""],efficientnet_b1:[17,3,1,""],efficientnet_b2:[17,3,1,""],efficientnet_b3:[17,3,1,""],efficientnet_b4:[17,3,1,""],efficientnet_b5:[17,3,1,""],efficientnet_b6:[17,3,1,""],efficientnet_b7:[17,3,1,""]},"sparseml.pytorch.models.classification.efficientnet.EfficientNet":{create_section:[17,2,1,""],forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.classification.inception_v3":{InceptionV3:[17,1,1,""],inception_v3:[17,3,1,""]},"sparseml.pytorch.models.classification.inception_v3.InceptionV3":{forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.classification.mnist":{MnistNet:[17,1,1,""],mnist_net:[17,3,1,""]},"sparseml.pytorch.models.classification.mnist.MnistNet":{forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.classification.mobilenet":{MobileNet:[17,1,1,""],MobileNetSectionSettings:[17,1,1,""],han_mobilenet:[17,3,1,""],mobilenet:[17,3,1,""]},"sparseml.pytorch.models.classification.mobilenet.MobileNet":{create_section:[17,2,1,""],forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.classification.mobilenet_v2":{MobilenetV2:[17,1,1,""],MobilenetV2SectionSettings:[17,1,1,""],mobilenet_v2:[17,3,1,""],mobilenet_v2_width:[17,3,1,""]},"sparseml.pytorch.models.classification.mobilenet_v2.MobilenetV2":{create_section:[17,2,1,""],forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.classification.resnet":{ResNet:[17,1,1,""],ResNetSectionSettings:[17,1,1,""],resnet101:[17,3,1,""],resnet101_2xwidth:[17,3,1,""],resnet152:[17,3,1,""],resnet18:[17,3,1,""],resnet34:[17,3,1,""],resnet50:[17,3,1,""],resnet50_2xwidth:[17,3,1,""],resnetv2_101:[17,3,1,""],resnetv2_152:[17,3,1,""],resnetv2_18:[17,3,1,""],resnetv2_34:[17,3,1,""],resnetv2_50:[17,3,1,""],resnext101:[17,3,1,""],resnext152:[17,3,1,""],resnext50:[17,3,1,""]},"sparseml.pytorch.models.classification.resnet.ResNet":{create_section:[17,2,1,""],forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.classification.vgg":{VGG:[17,1,1,""],vgg11:[17,3,1,""],vgg11bn:[17,3,1,""],vgg13:[17,3,1,""],vgg13bn:[17,3,1,""],vgg16:[17,3,1,""],vgg16bn:[17,3,1,""],vgg19:[17,3,1,""],vgg19bn:[17,3,1,""]},"sparseml.pytorch.models.classification.vgg.VGG":{create_section:[17,2,1,""],forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.detection":{ssd:[18,0,0,"-"],ssd_lite:[18,0,0,"-"],ssd_mobilenet:[18,0,0,"-"],ssd_resnet:[18,0,0,"-"],yolo_v3:[18,0,0,"-"]},"sparseml.pytorch.models.detection.ssd":{SSD300:[18,1,1,""],SSDBackbone:[18,1,1,""]},"sparseml.pytorch.models.detection.ssd.SSD300":{forward:[18,2,1,""],training:[18,4,1,""]},"sparseml.pytorch.models.detection.ssd.SSDBackbone":{get_feature_extractor:[18,2,1,""],o
ut_channels:[18,2,1,""]},"sparseml.pytorch.models.detection.ssd_lite":{SSD300Lite:[18,1,1,""]},"sparseml.pytorch.models.detection.ssd_lite.SSD300Lite":{forward:[18,2,1,""],training:[18,4,1,""]},"sparseml.pytorch.models.detection.ssd_mobilenet":{SSD300MobileNetBackbone:[18,1,1,""],ssd300lite_mobilenetv2:[18,3,1,""]},"sparseml.pytorch.models.detection.ssd_mobilenet.SSD300MobileNetBackbone":{get_feature_extractor:[18,2,1,""],out_channels:[18,2,1,""]},"sparseml.pytorch.models.detection.ssd_resnet":{SSD300ResNetBackbone:[18,1,1,""],ssd300_resnet101:[18,3,1,""],ssd300_resnet152:[18,3,1,""],ssd300_resnet18:[18,3,1,""],ssd300_resnet34:[18,3,1,""],ssd300_resnet50:[18,3,1,""]},"sparseml.pytorch.models.detection.ssd_resnet.SSD300ResNetBackbone":{get_feature_extractor:[18,2,1,""],out_channels:[18,2,1,""]},"sparseml.pytorch.models.detection.yolo_v3":{YoloV3:[18,1,1,""],yolo_v3:[18,3,1,""]},"sparseml.pytorch.models.detection.yolo_v3.YoloV3":{forward:[18,2,1,""],training:[18,4,1,""]},"sparseml.pytorch.models.external":{torchvision:[19,0,0,"-"]},"sparseml.pytorch.models.registry":{ModelRegistry:[16,1,1,""]},"sparseml.pytorch.models.registry.ModelRegistry":{available_keys:[16,2,1,""],create:[16,2,1,""],create_zoo_model:[16,2,1,""],input_shape:[16,2,1,""],register:[16,2,1,""],register_wrapped_model_constructor:[16,2,1,""]},"sparseml.pytorch.nn":{activations:[21,0,0,"-"],fatrelu:[21,0,0,"-"],se:[21,0,0,"-"]},"sparseml.pytorch.nn.activations":{Hardswish:[21,1,1,""],ReLU6:[21,1,1,""],ReLU:[21,1,1,""],Swish:[21,1,1,""],create_activation:[21,3,1,""],hard_swish:[21,3,1,""],is_activation:[21,3,1,""],replace_activation:[21,3,1,""],swish:[21,3,1,""]},"sparseml.pytorch.nn.activations.Hardswish":{forward:[21,2,1,""],training:[21,4,1,""]},"sparseml.pytorch.nn.activations.ReLU":{inplace:[21,4,1,""]},"sparseml.pytorch.nn.activations.ReLU6":{inplace:[21,4,1,""],max_val:[21,4,1,""],min_val:[21,4,1,""]},"sparseml.pytorch.nn.activations.Swish":{forward:[21,2,1,""],training:[21,4,1,""]},"sparseml.pytorch.nn.fatrelu":{FATReLU:[21,1,1,""],convert_relus_to_fat:[21,3,1,""],fat_exp_relu:[21,3,1,""],fat_pw_relu:[21,3,1,""],fat_relu:[21,3,1,""],fat_sig_relu:[21,3,1,""],set_relu_to_fat:[21,3,1,""]},"sparseml.pytorch.nn.fatrelu.FATReLU":{channel_wise:[21,2,1,""],dynamic:[21,2,1,""],extra_repr:[21,2,1,""],forward:[21,2,1,""],get_threshold:[21,2,1,""],load_state_dict:[21,2,1,""],num_channels:[21,2,1,""],set_threshold:[21,2,1,""],training:[21,4,1,""]},"sparseml.pytorch.nn.se":{SqueezeExcite:[21,1,1,""]},"sparseml.pytorch.nn.se.SqueezeExcite":{forward:[21,2,1,""],training:[21,4,1,""]},"sparseml.pytorch.optim":{analyzer_as:[22,0,0,"-"],analyzer_module:[22,0,0,"-"],analyzer_pruning:[22,0,0,"-"],manager:[22,0,0,"-"],mask_creator_pruning:[22,0,0,"-"],mask_pruning:[22,0,0,"-"],modifier:[22,0,0,"-"],modifier_as:[22,0,0,"-"],modifier_epoch:[22,0,0,"-"],modifier_lr:[22,0,0,"-"],modifier_params:[22,0,0,"-"],modifier_pruning:[22,0,0,"-"],modifier_quantization:[22,0,0,"-"],modifier_regularizer:[22,0,0,"-"],optimizer:[22,0,0,"-"],quantization:[23,0,0,"-"],sensitivity_as:[22,0,0,"-"],sensitivity_lr:[22,0,0,"-"],sensitivity_pruning:[22,0,0,"-"]},"sparseml.pytorch.optim.analyzer_as":{ASResultType:[22,1,1,""],ModuleASAnalyzer:[22,1,1,""]},"sparseml.pytorch.optim.analyzer_as.ASResultType":{inputs_sample:[22,4,1,""],inputs_sparsity:[22,4,1,""],outputs_sample:[22,4,1,""],outputs_sparsity:[22,4,1,""]},"sparseml.pytorch.optim.analyzer_as.ModuleASAnalyzer":{analyze_layers:[22,2,1,""],clear:[22,2,1,""],dim:[22,2,1,""],disable:[22,2,1,""],enable:[22,2,1,""],enabl
ed:[22,2,1,""],inputs_sample:[22,2,1,""],inputs_sample_max:[22,2,1,""],inputs_sample_mean:[22,2,1,""],inputs_sample_min:[22,2,1,""],inputs_sample_size:[22,2,1,""],inputs_sample_std:[22,2,1,""],inputs_sparsity:[22,2,1,""],inputs_sparsity_max:[22,2,1,""],inputs_sparsity_mean:[22,2,1,""],inputs_sparsity_min:[22,2,1,""],inputs_sparsity_std:[22,2,1,""],module:[22,2,1,""],outputs_sample:[22,2,1,""],outputs_sample_max:[22,2,1,""],outputs_sample_mean:[22,2,1,""],outputs_sample_min:[22,2,1,""],outputs_sample_size:[22,2,1,""],outputs_sample_std:[22,2,1,""],outputs_sparsity:[22,2,1,""],outputs_sparsity_max:[22,2,1,""],outputs_sparsity_mean:[22,2,1,""],outputs_sparsity_min:[22,2,1,""],outputs_sparsity_std:[22,2,1,""],results:[22,2,1,""],results_max:[22,2,1,""],results_mean:[22,2,1,""],results_min:[22,2,1,""],results_std:[22,2,1,""],track_inputs_sparsity:[22,2,1,""],track_outputs_sparsity:[22,2,1,""]},"sparseml.pytorch.optim.analyzer_module":{ModuleAnalyzer:[22,1,1,""]},"sparseml.pytorch.optim.analyzer_module.ModuleAnalyzer":{enabled:[22,2,1,""],ks_layer_descs:[22,2,1,""],layer_desc:[22,2,1,""],module:[22,2,1,""]},"sparseml.pytorch.optim.analyzer_pruning":{ModulePruningAnalyzer:[22,1,1,""]},"sparseml.pytorch.optim.analyzer_pruning.ModulePruningAnalyzer":{analyze_layers:[22,2,1,""],module:[22,2,1,""],name:[22,2,1,""],param:[22,2,1,""],param_name:[22,2,1,""],param_sparsity:[22,2,1,""],param_sparsity_dim:[22,2,1,""],tag:[22,2,1,""]},"sparseml.pytorch.optim.manager":{ScheduledModifierManager:[22,1,1,""],load_manager:[22,3,1,""]},"sparseml.pytorch.optim.manager.ScheduledModifierManager":{from_yaml:[22,2,1,""],initialize:[22,2,1,""],initialize_loggers:[22,2,1,""],load_state_dict:[22,2,1,""],loss_update:[22,2,1,""],optimizer_post_step:[22,2,1,""],optimizer_pre_step:[22,2,1,""],state_dict:[22,2,1,""],update:[22,2,1,""]},"sparseml.pytorch.optim.mask_creator_pruning":{BlockPruningMaskCreator:[22,1,1,""],DimensionSparsityMaskCreator:[22,1,1,""],GroupedPruningMaskCreator:[22,1,1,""],PruningMaskCreator:[22,1,1,""],UnstructuredPruningMaskCreator:[22,1,1,""],load_mask_creator:[22,3,1,""]},"sparseml.pytorch.optim.mask_creator_pruning.BlockPruningMaskCreator":{group_tensor:[22,2,1,""]},"sparseml.pytorch.optim.mask_creator_pruning.DimensionSparsityMaskCreator":{group_tensor:[22,2,1,""]},"sparseml.pytorch.optim.mask_creator_pruning.GroupedPruningMaskCreator":{create_sparsity_mask:[22,2,1,""],create_sparsity_mask_from_abs_threshold:[22,2,1,""],create_sparsity_mask_from_tensor:[22,2,1,""],get_grouping_fn:[22,2,1,""],group_tensor:[22,2,1,""]},"sparseml.pytorch.optim.mask_creator_pruning.PruningMaskCreator":{create_sparsity_mask:[22,2,1,""],create_sparsity_mask_from_abs_threshold:[22,2,1,""],create_sparsity_mask_from_tensor:[22,2,1,""]},"sparseml.pytorch.optim.mask_creator_pruning.UnstructuredPruningMaskCreator":{create_sparsity_mask:[22,2,1,""],create_sparsity_mask_from_abs_threshold:[22,2,1,""]},"sparseml.pytorch.optim.mask_pruning":{ModuleParamPruningMask:[22,1,1,""]},"sparseml.pytorch.optim.mask_pruning.ModuleParamPruningMask":{apply:[22,2,1,""],enabled:[22,2,1,""],layer:[22,2,1,""],layer_name:[22,2,1,""],mask_creator:[22,2,1,""],name:[22,2,1,""],param_data:[22,2,1,""],param_grad:[22,2,1,""],param_init:[22,2,1,""],param_mask:[22,2,1,""],param_name:[22,2,1,""],param_unmasked:[22,2,1,""],reset:[22,2,1,""],set_param_data:[22,2,1,""],set_param_mask:[22,2,1,""],set_param_mask_from_abs_threshold:[22,2,1,""],set_param_mask_from_sparsity:[22,2,1,""],set_param_mask_from_weights:[22,2,1,""],store_init:[22,2,1,""],store_unmasked:[22
,2,1,""],track_grad_mom:[22,2,1,""]},"sparseml.pytorch.optim.modifier":{Modifier:[22,1,1,""],ModifierProp:[22,1,1,""],PyTorchModifierYAML:[22,1,1,""],ScheduledModifier:[22,1,1,""],ScheduledUpdateModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier.Modifier":{initialize:[22,2,1,""],initialize_loggers:[22,2,1,""],load_list:[22,2,1,""],load_obj:[22,2,1,""],log_update:[22,2,1,""],loggers:[22,4,1,""],loggers_initialized:[22,4,1,""],loss_update:[22,2,1,""],optimizer_post_step:[22,2,1,""],optimizer_pre_step:[22,2,1,""],update:[22,2,1,""]},"sparseml.pytorch.optim.modifier.ModifierProp":{getter:[22,2,1,""],no_serialize_val:[22,2,1,""],restrictions:[22,2,1,""],serializable:[22,2,1,""],setter:[22,2,1,""]},"sparseml.pytorch.optim.modifier.ScheduledModifier":{end_pending:[22,2,1,""],ended:[22,4,1,""],log_update:[22,2,1,""],scheduled_log_update:[22,2,1,""],scheduled_update:[22,2,1,""],start_pending:[22,2,1,""],started:[22,4,1,""],update:[22,2,1,""],update_ready:[22,2,1,""]},"sparseml.pytorch.optim.modifier.ScheduledUpdateModifier":{update:[22,2,1,""],update_ready:[22,2,1,""]},"sparseml.pytorch.optim.modifier_as":{ASRegModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier_as.ASRegModifier":{alpha:[22,4,1,""],initialize:[22,2,1,""],layer_normalized:[22,4,1,""],layers:[22,4,1,""],loss_update:[22,2,1,""],optimizer_post_step:[22,2,1,""],reg_func:[22,4,1,""],reg_tens:[22,4,1,""],update:[22,2,1,""],validate:[22,2,1,""]},"sparseml.pytorch.optim.modifier_epoch":{EpochRangeModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier_lr":{LearningRateModifier:[22,1,1,""],SetLearningRateModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier_lr.LearningRateModifier":{constant_logging:[22,4,1,""],log_update:[22,2,1,""],update:[22,2,1,""],validate:[22,2,1,""]},"sparseml.pytorch.optim.modifier_lr.SetLearningRateModifier":{applied_learning_rate:[22,4,1,""],constant_logging:[22,4,1,""],log_update:[22,2,1,""],update:[22,2,1,""]},"sparseml.pytorch.optim.modifier_params":{GradualParamModifier:[22,1,1,""],SetParamModifier:[22,1,1,""],TrainableParamsModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier_params.GradualParamModifier":{final_val:[22,4,1,""],init_val:[22,4,1,""],initialize:[22,2,1,""],inter_func:[22,4,1,""],params:[22,4,1,""],params_strict:[22,4,1,""],update:[22,2,1,""],validate:[22,2,1,""]},"sparseml.pytorch.optim.modifier_params.SetParamModifier":{initialize:[22,2,1,""],params:[22,4,1,""],params_strict:[22,4,1,""],update:[22,2,1,""],val:[22,4,1,""]},"sparseml.pytorch.optim.modifier_params.TrainableParamsModifier":{initialize:[22,2,1,""],params:[22,4,1,""],params_strict:[22,4,1,""],trainable:[22,4,1,""],update:[22,2,1,""]},"sparseml.pytorch.optim.modifier_pruning":{ConstantPruningModifier:[22,1,1,""],GMPruningModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier_pruning.ConstantPruningModifier":{from_sparse_model:[22,2,1,""],initialize:[22,2,1,""],load_state_dict:[22,2,1,""],log_update:[22,2,1,""],optimizer_post_step:[22,2,1,""],params:[22,4,1,""],state_dict:[22,2,1,""],update:[22,2,1,""]},"sparseml.pytorch.optim.modifier_pruning.GMPruningModifier":{applied_sparsity:[22,4,1,""],final_sparsity:[22,4,1,""],init_sparsity:[22,4,1,""],initialize:[22,2,1,""],inter_func:[22,4,1,""],leave_enabled:[22,4,1,""],load_state_dict:[22,2,1,""],log_update:[22,2,1,""],mask_type:[22,4,1,""],optimizer_post_step:[22,2,1,""],params:[22,4,1,""],state_dict:[22,2,1,""],update:[22,2,1,""],validate:[22,2,1,""]},"sparseml.pytorch.optim.modifier_quantization":{QuantizationModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier_quantization.Quan
tizationModifier":{disable_quantization_observer_epoch:[22,4,1,""],freeze_bn_stats_epoch:[22,4,1,""],initialize:[22,2,1,""],model_fuse_fn_name:[22,4,1,""],submodules:[22,4,1,""],update:[22,2,1,""],update_ready:[22,2,1,""]},"sparseml.pytorch.optim.modifier_regularizer":{SetWeightDecayModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier_regularizer.SetWeightDecayModifier":{constant_logging:[22,4,1,""],log_update:[22,2,1,""],param_groups:[22,4,1,""],update:[22,2,1,""],weight_decay:[22,4,1,""]},"sparseml.pytorch.optim.optimizer":{ScheduledOptimizer:[22,1,1,""]},"sparseml.pytorch.optim.optimizer.ScheduledOptimizer":{add_param_group:[22,2,1,""],adjust_current_step:[22,2,1,""],learning_rate:[22,2,1,""],load_manager_state_dict:[22,2,1,""],load_state_dict:[22,2,1,""],loss_update:[22,2,1,""],manager:[22,2,1,""],manager_state_dict:[22,2,1,""],param_groups:[22,2,1,""],state_dict:[22,2,1,""],step:[22,2,1,""],zero_grad:[22,2,1,""]},"sparseml.pytorch.optim.quantization":{helpers:[23,0,0,"-"],quantize_qat_export:[23,0,0,"-"]},"sparseml.pytorch.optim.quantization.helpers":{add_quant_dequant:[23,3,1,""],fuse_module_conv_bn_relus:[23,3,1,""],get_qat_qconfig:[23,3,1,""]},"sparseml.pytorch.optim.quantization.quantize_qat_export":{QuantizationParams:[23,1,1,""],get_quantization_params:[23,3,1,""],quantize_torch_qat_export:[23,3,1,""]},"sparseml.pytorch.optim.quantization.quantize_qat_export.QuantizationParams":{scale:[23,2,1,""],target:[23,2,1,""],zero_point:[23,2,1,""]},"sparseml.pytorch.optim.sensitivity_as":{ASLayerTracker:[22,1,1,""],LayerBoostResults:[22,1,1,""],ModuleASOneShootBooster:[22,1,1,""]},"sparseml.pytorch.optim.sensitivity_as.ASLayerTracker":{clear:[22,2,1,""],disable:[22,2,1,""],enable:[22,2,1,""],tracked_input:[22,2,1,""],tracked_output:[22,2,1,""]},"sparseml.pytorch.optim.sensitivity_as.LayerBoostResults":{baseline_as:[22,2,1,""],baseline_loss:[22,2,1,""],boosted_as:[22,2,1,""],boosted_loss:[22,2,1,""],name:[22,2,1,""],threshold:[22,2,1,""]},"sparseml.pytorch.optim.sensitivity_as.ModuleASOneShootBooster":{run_layers:[22,2,1,""]},"sparseml.pytorch.optim.sensitivity_lr":{default_exponential_check_lrs:[22,3,1,""],lr_loss_sensitivity:[22,3,1,""]},"sparseml.pytorch.optim.sensitivity_pruning":{model_prunability_magnitude:[22,3,1,""],pruning_loss_sens_magnitude:[22,3,1,""],pruning_loss_sens_one_shot:[22,3,1,""]},"sparseml.pytorch.utils":{benchmarker:[24,0,0,"-"],exporter:[24,0,0,"-"],helpers:[24,0,0,"-"],logger:[24,0,0,"-"],loss:[24,0,0,"-"],model:[24,0,0,"-"],module:[24,0,0,"-"],ssd_helpers:[24,0,0,"-"],yolo_helpers:[24,0,0,"-"]},"sparseml.pytorch.utils.benchmarker":{BatchBenchmarkResults:[24,1,1,""],ModuleBenchmarker:[24,1,1,""]},"sparseml.pytorch.utils.benchmarker.BatchBenchmarkResults":{add:[24,2,1,""],batch_size:[24,2,1,""],e2e_batch_seconds:[24,2,1,""],e2e_batch_timings:[24,2,1,""],e2e_batches_per_second:[24,2,1,""],e2e_item_seconds:[24,2,1,""],e2e_items_per_second:[24,2,1,""],model_batch_seconds:[24,2,1,""],model_batch_timings:[24,2,1,""],model_batches_per_second:[24,2,1,""],model_item_seconds:[24,2,1,""],model_items_per_second:[24,2,1,""]},"sparseml.pytorch.utils.benchmarker.ModuleBenchmarker":{run_batches_on_device:[24,2,1,""]},"sparseml.pytorch.utils.exporter":{ModuleExporter:[24,1,1,""]},"sparseml.pytorch.utils.exporter.ModuleExporter":{export_onnx:[24,2,1,""],export_pytorch:[24,2,1,""],export_samples:[24,2,1,""]},"sparseml.pytorch.utils.helpers":{NamedLayerParam:[24,1,1,""],any_str_or_regex_matches_param_name:[24,3,1,""],default_device:[24,3,1,""],early_stop_data_loader:[24,3,1,""],get_
conv_layers:[24,3,1,""],get_layer:[24,3,1,""],get_layer_param:[24,3,1,""],get_linear_layers:[24,3,1,""],get_named_layers_and_params_by_regex:[24,3,1,""],get_optim_learning_rate:[24,3,1,""],get_prunable_layers:[24,3,1,""],get_terminal_layers:[24,3,1,""],infinite_data_loader:[24,3,1,""],mask_difference:[24,3,1,""],set_deterministic_seeds:[24,3,1,""],set_optim_learning_rate:[24,3,1,""],tensor_density:[24,3,1,""],tensor_export:[24,3,1,""],tensor_sample:[24,3,1,""],tensor_sparsity:[24,3,1,""],tensors_batch_size:[24,3,1,""],tensors_export:[24,3,1,""],tensors_module_forward:[24,3,1,""],tensors_to_device:[24,3,1,""],tensors_to_precision:[24,3,1,""],torch_distributed_zero_first:[24,3,1,""]},"sparseml.pytorch.utils.helpers.NamedLayerParam":{layer:[24,2,1,""],layer_name:[24,2,1,""],param:[24,2,1,""],param_name:[24,2,1,""]},"sparseml.pytorch.utils.logger":{PyTorchLogger:[24,1,1,""],PythonLogger:[24,1,1,""],TensorBoardLogger:[24,1,1,""]},"sparseml.pytorch.utils.logger.PyTorchLogger":{log_histogram:[24,2,1,""],log_histogram_raw:[24,2,1,""],log_hyperparams:[24,2,1,""],log_scalar:[24,2,1,""],log_scalars:[24,2,1,""],name:[24,2,1,""]},"sparseml.pytorch.utils.logger.PythonLogger":{log_histogram:[24,2,1,""],log_histogram_raw:[24,2,1,""],log_hyperparams:[24,2,1,""],log_scalar:[24,2,1,""],log_scalars:[24,2,1,""]},"sparseml.pytorch.utils.logger.TensorBoardLogger":{log_histogram:[24,2,1,""],log_histogram_raw:[24,2,1,""],log_hyperparams:[24,2,1,""],log_scalar:[24,2,1,""],log_scalars:[24,2,1,""]},"sparseml.pytorch.utils.loss":{Accuracy:[24,1,1,""],BinaryCrossEntropyLossWrapper:[24,1,1,""],CrossEntropyLossWrapper:[24,1,1,""],InceptionCrossEntropyLossWrapper:[24,1,1,""],KDLossWrapper:[24,1,1,""],KDSettings:[24,1,1,""],LossWrapper:[24,1,1,""],SSDLossWrapper:[24,1,1,""],TopKAccuracy:[24,1,1,""],YoloLossWrapper:[24,1,1,""]},"sparseml.pytorch.utils.loss.Accuracy":{calculate:[24,2,1,""],forward:[24,2,1,""],training:[24,4,1,""]},"sparseml.pytorch.utils.loss.InceptionCrossEntropyLossWrapper":{get_preds:[24,2,1,""],loss:[24,2,1,""]},"sparseml.pytorch.utils.loss.KDLossWrapper":{forward:[24,2,1,""],get_inputs:[24,2,1,""]},"sparseml.pytorch.utils.loss.KDSettings":{contradict_hinton:[24,2,1,""],teacher:[24,2,1,""],temp_student:[24,2,1,""],temp_teacher:[24,2,1,""],weight:[24,2,1,""]},"sparseml.pytorch.utils.loss.LossWrapper":{available_losses:[24,2,1,""],forward:[24,2,1,""],get_labels:[24,2,1,""],get_preds:[24,2,1,""]},"sparseml.pytorch.utils.loss.SSDLossWrapper":{get_preds:[24,2,1,""],loss:[24,2,1,""]},"sparseml.pytorch.utils.loss.TopKAccuracy":{calculate:[24,2,1,""],forward:[24,2,1,""],training:[24,4,1,""]},"sparseml.pytorch.utils.loss.YoloLossWrapper":{forward:[24,2,1,""],get_preds:[24,2,1,""],loss:[24,2,1,""]},"sparseml.pytorch.utils.model":{device_to_name_ids:[24,3,1,""],is_parallel_model:[24,3,1,""],load_epoch:[24,3,1,""],load_model:[24,3,1,""],load_optimizer:[24,3,1,""],model_to_device:[24,3,1,""],parallelize_model:[24,3,1,""],save_model:[24,3,1,""]},"sparseml.pytorch.utils.module":{ModuleDeviceContext:[24,1,1,""],ModuleRunFuncs:[24,1,1,""],ModuleRunHooks:[24,1,1,""],ModuleRunResults:[24,1,1,""],ModuleTester:[24,1,1,""],ModuleTrainer:[24,1,1,""],def_model_backward:[24,3,1,""]},"sparseml.pytorch.utils.module.ModuleDeviceContext":{default_context:[24,2,1,""],use_mixed_precision:[24,2,1,""],world_size:[24,2,1,""]},"sparseml.pytorch.utils.module.ModuleRunFuncs":{batch_size:[24,2,1,""],copy:[24,2,1,""],model_backward:[24,2,1,""],model_forward:[24,2,1,""],to_device:[24,2,1,""]},"sparseml.pytorch.utils.module.ModuleRunHooks":{invo
ke_batch_backward:[24,2,1,""],invoke_batch_end:[24,2,1,""],invoke_batch_forward:[24,2,1,""],invoke_batch_loss:[24,2,1,""],invoke_batch_start:[24,2,1,""],register_batch_backward_hook:[24,2,1,""],register_batch_end_hook:[24,2,1,""],register_batch_forward_hook:[24,2,1,""],register_batch_loss_hook:[24,2,1,""],register_batch_start_hook:[24,2,1,""]},"sparseml.pytorch.utils.module.ModuleRunResults":{append:[24,2,1,""],result:[24,2,1,""],result_list_tensor:[24,2,1,""],result_mean:[24,2,1,""],result_std:[24,2,1,""],results:[24,2,1,""]},"sparseml.pytorch.utils.module.ModuleTrainer":{num_accumulated_batches:[24,2,1,""],optim_closure:[24,2,1,""],optimizer:[24,2,1,""]},"sparseml.pytorch.utils.ssd_helpers":{DefaultBoxes:[24,1,1,""],MeanAveragePrecision:[24,1,1,""],get_default_boxes_300:[24,3,1,""],ssd_random_crop:[24,3,1,""]},"sparseml.pytorch.utils.ssd_helpers.DefaultBoxes":{as_ltrb:[24,2,1,""],as_xywh:[24,2,1,""],decode_output_batch:[24,2,1,""],encode_image_box_labels:[24,2,1,""],num_default_boxes:[24,2,1,""],scale_wh:[24,2,1,""],scale_xy:[24,2,1,""]},"sparseml.pytorch.utils.ssd_helpers.MeanAveragePrecision":{batch_forward:[24,2,1,""],calculate_map:[24,2,1,""],clear:[24,2,1,""],get_recall_levels:[24,2,1,""]},"sparseml.pytorch.utils.yolo_helpers":{YoloGrids:[24,1,1,""],box_giou:[24,3,1,""],build_targets:[24,3,1,""],get_output_grid_shapes:[24,3,1,""],postprocess_yolo:[24,3,1,""],yolo_v3_anchor_groups:[24,3,1,""]},"sparseml.pytorch.utils.yolo_helpers.YoloGrids":{get_anchor_grid:[24,2,1,""],get_grid:[24,2,1,""],num_anchor_grids:[24,2,1,""]},"sparseml.tensorflow_v1":{datasets:[26,0,0,"-"],models:[28,0,0,"-"],nn:[30,0,0,"-"],optim:[31,0,0,"-"],utils:[32,0,0,"-"]},"sparseml.tensorflow_v1.datasets":{classification:[27,0,0,"-"],dataset:[26,0,0,"-"],helpers:[26,0,0,"-"],registry:[26,0,0,"-"]},"sparseml.tensorflow_v1.datasets.classification":{cifar:[27,0,0,"-"],imagefolder:[27,0,0,"-"],imagenet:[27,0,0,"-"],imagenette:[27,0,0,"-"]},"sparseml.tensorflow_v1.datasets.classification.cifar":{Cifar100DataSet:[27,1,1,""],Cifar10DataSet:[27,1,1,""]},"sparseml.tensorflow_v1.datasets.classification.cifar.Cifar100DataSet":{name_scope:[27,2,1,""]},"sparseml.tensorflow_v1.datasets.classification.cifar.Cifar10DataSet":{name_scope:[27,2,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagefolder":{ImageFolderDataset:[27,1,1,""],SplitsTransforms:[27,1,1,""],imagenet_normalizer:[27,3,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagefolder.ImageFolderDataset":{creator:[27,2,1,""],format_iterator_batch:[27,2,1,""],image_size:[27,2,1,""],name_scope:[27,2,1,""],num_classes:[27,2,1,""],num_images:[27,2,1,""],post_resize_transforms:[27,2,1,""],pre_resize_transforms:[27,2,1,""],processor:[27,2,1,""],root:[27,2,1,""],train:[27,2,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagefolder.SplitsTransforms":{train:[27,2,1,""],val:[27,2,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagenet":{ImageNetDataset:[27,1,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagenet.ImageNetDataset":{name_scope:[27,2,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagenette":{ImagenetteDataset:[27,1,1,""],ImagenetteSize:[27,1,1,""],ImagewoofDataset:[27,1,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagenette.ImagenetteDataset":{name_scope:[27,2,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagenette.ImagenetteSize":{full:[27,4,1,""],s160:[27,4,1,""],s320:[27,4,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagenette.ImagewoofDataset":{name_scope:[27,2,1,""]},"sparseml.tensorf
low_v1.datasets.dataset":{Dataset:[26,1,1,""],create_split_iterators_handle:[26,3,1,""]},"sparseml.tensorflow_v1.datasets.dataset.Dataset":{build:[26,2,1,""],build_input_fn:[26,2,1,""],creator:[26,2,1,""],format_iterator_batch:[26,2,1,""],name_scope:[26,2,1,""],processor:[26,2,1,""]},"sparseml.tensorflow_v1.datasets.helpers":{center_square_crop:[26,3,1,""],random_scaling_crop:[26,3,1,""],resize:[26,3,1,""]},"sparseml.tensorflow_v1.datasets.registry":{DatasetRegistry:[26,1,1,""]},"sparseml.tensorflow_v1.datasets.registry.DatasetRegistry":{attributes:[26,2,1,""],create:[26,2,1,""],register:[26,2,1,""]},"sparseml.tensorflow_v1.models":{classification:[29,0,0,"-"],estimator:[28,0,0,"-"],registry:[28,0,0,"-"]},"sparseml.tensorflow_v1.models.classification":{mnist:[29,0,0,"-"],mobilenet:[29,0,0,"-"],mobilenet_v2:[29,0,0,"-"],resnet:[29,0,0,"-"],vgg:[29,0,0,"-"]},"sparseml.tensorflow_v1.models.classification.mnist":{mnist_net:[29,3,1,""]},"sparseml.tensorflow_v1.models.classification.mobilenet":{MobileNetSection:[29,1,1,""],mobilenet:[29,3,1,""],mobilenet_const:[29,3,1,""]},"sparseml.tensorflow_v1.models.classification.mobilenet.MobileNetSection":{create:[29,2,1,""]},"sparseml.tensorflow_v1.models.classification.mobilenet_v2":{MobileNetV2Section:[29,1,1,""],mobilenet_v2:[29,3,1,""],mobilenet_v2_const:[29,3,1,""],mobilenet_v2_width:[29,3,1,""]},"sparseml.tensorflow_v1.models.classification.mobilenet_v2.MobileNetV2Section":{create:[29,2,1,""]},"sparseml.tensorflow_v1.models.classification.resnet":{ResNetSection:[29,1,1,""],resnet101:[29,3,1,""],resnet152:[29,3,1,""],resnet18:[29,3,1,""],resnet20:[29,3,1,""],resnet34:[29,3,1,""],resnet50:[29,3,1,""],resnet_const:[29,3,1,""]},"sparseml.tensorflow_v1.models.classification.resnet.ResNetSection":{create:[29,2,1,""]},"sparseml.tensorflow_v1.models.classification.vgg":{VGGSection:[29,1,1,""],vgg11:[29,3,1,""],vgg11bn:[29,3,1,""],vgg13:[29,3,1,""],vgg13bn:[29,3,1,""],vgg16:[29,3,1,""],vgg16bn:[29,3,1,""],vgg19:[29,3,1,""],vgg19bn:[29,3,1,""],vgg_const:[29,3,1,""]},"sparseml.tensorflow_v1.models.classification.vgg.VGGSection":{create:[29,2,1,""]},"sparseml.tensorflow_v1.models.estimator":{ClassificationEstimatorModelFn:[28,1,1,""],EstimatorModelFn:[28,1,1,""]},"sparseml.tensorflow_v1.models.estimator.ClassificationEstimatorModelFn":{create_loss:[28,2,1,""],create_metric_update_ops_hook:[28,2,1,""],create_metrics:[28,2,1,""],create_modifier_ops_and_update_hook:[28,2,1,""],create_predictions:[28,2,1,""],create_scaffold:[28,2,1,""],create_summary_op:[28,2,1,""],create_train_summary_hook:[28,2,1,""],create_training_op:[28,2,1,""]},"sparseml.tensorflow_v1.models.estimator.EstimatorModelFn":{create:[28,2,1,""],create_loss:[28,2,1,""],create_metric_update_ops_hook:[28,2,1,""],create_metrics:[28,2,1,""],create_modifier_ops_and_update_hook:[28,2,1,""],create_predictions:[28,2,1,""],create_scaffold:[28,2,1,""],create_train_summary_hook:[28,2,1,""],create_training_op:[28,2,1,""]},"sparseml.tensorflow_v1.models.registry":{ModelRegistry:[28,1,1,""]},"sparseml.tensorflow_v1.models.registry.ModelRegistry":{available_keys:[28,2,1,""],create:[28,2,1,""],create_estimator:[28,2,1,""],create_zoo_model:[28,2,1,""],input_shape:[28,2,1,""],load_pretrained:[28,2,1,""],register:[28,2,1,""],saver:[28,2,1,""]},"sparseml.tensorflow_v1.nn":{layers:[30,0,0,"-"]},"sparseml.tensorflow_v1.nn.layers":{activation:[30,3,1,""],conv2d:[30,3,1,""],conv2d_block:[30,3,1,""],dense_block:[30,3,1,""],depthwise_conv2d_block:[30,3,1,""],fc:[30,3,1,""],pool2d:[30,3,1,""]},"sparseml.tensorflow_v1.optim":{
analyzer_module:[31,0,0,"-"],manager:[31,0,0,"-"],mask_creator_pruning:[31,0,0,"-"],mask_pruning:[31,0,0,"-"],modifier:[31,0,0,"-"],modifier_epoch:[31,0,0,"-"],modifier_lr:[31,0,0,"-"],modifier_params:[31,0,0,"-"],modifier_pruning:[31,0,0,"-"],schedule_lr:[31,0,0,"-"],sensitivity_pruning:[31,0,0,"-"]},"sparseml.tensorflow_v1.optim.analyzer_module":{analyze_module:[31,3,1,""]},"sparseml.tensorflow_v1.optim.manager":{ScheduledModifierManager:[31,1,1,""]},"sparseml.tensorflow_v1.optim.manager.ScheduledModifierManager":{RECAL_UPDATE:[31,4,1,""],complete_graph:[31,2,1,""],create_ops:[31,2,1,""],from_yaml:[31,2,1,""],initialize_session:[31,2,1,""],modifiers_to_string_lines:[31,2,1,""]},"sparseml.tensorflow_v1.optim.mask_creator_pruning":{BlockPruningMaskCreator:[31,1,1,""],DimensionPruningMaskCreator:[31,1,1,""],GroupedPruningMaskCreator:[31,1,1,""],PruningMaskCreator:[31,1,1,""],UnstructuredPruningMaskCreator:[31,1,1,""],load_mask_creator:[31,3,1,""]},"sparseml.tensorflow_v1.optim.mask_creator_pruning.BlockPruningMaskCreator":{group_tensor:[31,2,1,""]},"sparseml.tensorflow_v1.optim.mask_creator_pruning.DimensionPruningMaskCreator":{group_tensor:[31,2,1,""]},"sparseml.tensorflow_v1.optim.mask_creator_pruning.GroupedPruningMaskCreator":{create_sparsity_mask:[31,2,1,""],get_grouping_op:[31,2,1,""],get_mask_initializer:[31,2,1,""],group_tensor:[31,2,1,""]},"sparseml.tensorflow_v1.optim.mask_creator_pruning.PruningMaskCreator":{create_sparsity_mask:[31,2,1,""],get_mask_initializer:[31,2,1,""]},"sparseml.tensorflow_v1.optim.mask_creator_pruning.UnstructuredPruningMaskCreator":{create_sparsity_mask:[31,2,1,""],get_mask_initializer:[31,2,1,""]},"sparseml.tensorflow_v1.optim.mask_pruning":{PruningOpVars:[31,1,1,""],PruningScope:[31,1,1,""],apply_op_vars_masks:[31,3,1,""],create_graph_ops_pruning:[31,3,1,""],create_ks_schedule_ops:[31,3,1,""],create_ks_scheduled_constant_graph_ops:[31,3,1,""],create_op_pruning:[31,3,1,""],create_summaries_pruning:[31,3,1,""],get_or_create_graph_ops_pruning:[31,3,1,""],get_or_create_ks_schedule_ops:[31,3,1,""],get_or_create_ks_scheduled_graph_ops:[31,3,1,""]},"sparseml.tensorflow_v1.optim.mask_pruning.PruningOpVars":{mask:[31,2,1,""],masked:[31,2,1,""],op:[31,2,1,""],op_input:[31,2,1,""],update:[31,2,1,""]},"sparseml.tensorflow_v1.optim.mask_pruning.PruningScope":{NM_KS:[31,4,1,""],NM_KS_OPS:[31,4,1,""],OPS:[31,4,1,""],OPS_INPUT:[31,4,1,""],OPS_SCHEDULE:[31,4,1,""],OPS_SPARSITY:[31,4,1,""],OPS_SUMMARY:[31,4,1,""],OPS_UPDATE:[31,4,1,""],OP_COND_UPDATE:[31,4,1,""],OP_MASKED_VAR:[31,4,1,""],OP_MASK_ASSIGN:[31,4,1,""],OP_MASK_UPDATE:[31,4,1,""],OP_MASK_UPDATE_NO_OP:[31,4,1,""],OP_PRUNE_VARS_ASSIGN:[31,4,1,""],OP_SAVE:[31,4,1,""],OP_SPARSITY:[31,4,1,""],OP_UPDATE_READY:[31,4,1,""],OP_WEIGHT_UPDATE:[31,4,1,""],VAR_MASK:[31,4,1,""],VAR_THRESHOLD:[31,4,1,""],collection_name:[31,2,1,""],general:[31,2,1,""],model:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier":{Modifier:[31,1,1,""],ModifierProp:[31,1,1,""],ModifierSessionRunHook:[31,1,1,""],ScheduledModifier:[31,1,1,""],ScheduledUpdateModifier:[31,1,1,""],TensorFlowModifierYAML:[31,1,1,""]},"sparseml.tensorflow_v1.optim.modifier.Modifier":{complete_graph:[31,2,1,""],create_ops:[31,2,1,""],get_group:[31,2,1,""],initialize_session:[31,2,1,""],load_list:[31,2,1,""],load_obj:[31,2,1,""],modify_estimator:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier.ModifierProp":{getter:[31,2,1,""],no_serialize_val:[31,2,1,""],restrictions:[31,2,1,""],serializable:[31,2,1,""],setter:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier.Modifier
SessionRunHook":{after_run:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier.ScheduledModifier":{start_end_steps:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier.ScheduledUpdateModifier":{update_frequency_steps:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier_epoch":{EpochRangeModifier:[31,1,1,""]},"sparseml.tensorflow_v1.optim.modifier_lr":{GroupLearningRateModifier:[31,1,1,""],LearningRateModifier:[31,1,1,""],SetLearningRateModifier:[31,1,1,""]},"sparseml.tensorflow_v1.optim.modifier_lr.GroupLearningRateModifier":{create_ops:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier_lr.LearningRateModifier":{create_ops:[31,2,1,""],get_group:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier_lr.SetLearningRateModifier":{create_ops:[31,2,1,""],get_group:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier_params":{TrainableParamsModifier:[31,1,1,""]},"sparseml.tensorflow_v1.optim.modifier_params.TrainableParamsModifier":{complete_graph:[31,2,1,""],create_ops:[31,2,1,""],params:[31,4,1,""],params_strict:[31,4,1,""],trainable:[31,4,1,""],validate:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier_pruning":{ConstantPruningModifier:[31,1,1,""],GMPruningModifier:[31,1,1,""]},"sparseml.tensorflow_v1.optim.modifier_pruning.ConstantPruningModifier":{complete_graph:[31,2,1,""],create_ops:[31,2,1,""],initialize_session:[31,2,1,""],ks_group:[31,4,1,""],params:[31,4,1,""],prune_op_vars:[31,2,1,""],sparsity:[31,2,1,""],update_ready:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier_pruning.GMPruningModifier":{complete_graph:[31,2,1,""],create_ops:[31,2,1,""],exponent:[31,4,1,""],final_sparsity:[31,4,1,""],init_sparsity:[31,4,1,""],initialize_session:[31,2,1,""],inter_func:[31,4,1,""],ks_group:[31,4,1,""],leave_enabled:[31,4,1,""],mask_type:[31,4,1,""],params:[31,4,1,""],prune_op_vars:[31,2,1,""],sparsity:[31,2,1,""],update_ready:[31,2,1,""],validate:[31,2,1,""]},"sparseml.tensorflow_v1.optim.schedule_lr":{multi_step_lr_schedule:[31,3,1,""],step_lr_schedule:[31,3,1,""]},"sparseml.tensorflow_v1.optim.sensitivity_pruning":{SparsePruningOpVars:[31,1,1,""],pruning_loss_sens_magnitude:[31,3,1,""],pruning_loss_sens_one_shot:[31,3,1,""],pruning_loss_sens_op_vars:[31,3,1,""]},"sparseml.tensorflow_v1.optim.sensitivity_pruning.SparsePruningOpVars":{op_vars:[31,2,1,""],sparsity:[31,2,1,""]},"sparseml.tensorflow_v1.utils":{exporter:[32,0,0,"-"],helpers:[32,0,0,"-"],loss:[32,0,0,"-"],nets_utils:[32,0,0,"-"],summary:[32,0,0,"-"],variable:[32,0,0,"-"]},"sparseml.tensorflow_v1.utils.exporter":{GraphExporter:[32,1,1,""],default_onnx_opset:[32,3,1,""]},"sparseml.tensorflow_v1.utils.exporter.GraphExporter":{checkpoint_path:[32,2,1,""],export_checkpoint:[32,2,1,""],export_named_samples:[32,2,1,""],export_onnx:[32,2,1,""],export_pb:[32,2,1,""],export_samples:[32,2,1,""],onnx_path:[32,2,1,""],pb_path:[32,2,1,""],pb_to_onnx:[32,2,1,""],sample_inputs_path:[32,2,1,""],sample_outputs_path:[32,2,1,""],tensorflow_path:[32,2,1,""]},"sparseml.tensorflow_v1.utils.helpers":{tf_compat_div:[32,3,1,""]},"sparseml.tensorflow_v1.utils.loss":{accuracy:[32,3,1,""],batch_cross_entropy_loss:[32,3,1,""]},"sparseml.tensorflow_v1.utils.nets_utils":{get_gan_network_fn:[32,3,1,""],get_model_scope:[32,3,1,""],get_network_fn:[32,3,1,""],mobilenet_v1_arg_scope:[32,3,1,""]},"sparseml.tensorflow_v1.utils.summary":{write_simple_summary:[32,3,1,""]},"sparseml.tensorflow_v1.utils.variable":{any_str_or_regex_matches_tensor_name:[32,3,1,""],clean_tensor_name:[32,3,1,""],eval_tensor_density:[32,3,1,""],eval_tensor_sparsity:[32,3,1,""],get_op_input_var:[32,3
,1,""],get_op_var_index:[32,3,1,""],get_ops_and_inputs_by_name_or_regex:[32,3,1,""],get_prunable_ops:[32,3,1,""],get_tensor_var:[32,3,1,""],is_prunable_op:[32,3,1,""]},"sparseml.utils":{datasets:[34,0,0,"-"],frameworks:[33,0,0,"-"],helpers:[33,0,0,"-"],singleton:[33,0,0,"-"],worker:[33,0,0,"-"],wrapper:[33,0,0,"-"]},"sparseml.utils.datasets":{helpers:[34,0,0,"-"],imagenet:[34,0,0,"-"],imagenette:[34,0,0,"-"]},"sparseml.utils.datasets.helpers":{default_dataset_path:[34,3,1,""]},"sparseml.utils.datasets.imagenette":{ImagenetteDownloader:[34,1,1,""],ImagenetteSize:[34,1,1,""],ImagewoofDownloader:[34,1,1,""]},"sparseml.utils.datasets.imagenette.ImagenetteDownloader":{dataset_size:[34,2,1,""],download:[34,2,1,""],download_root:[34,2,1,""],extracted_root:[34,2,1,""],split_root:[34,2,1,""]},"sparseml.utils.datasets.imagenette.ImagenetteSize":{full:[34,4,1,""],s160:[34,4,1,""],s320:[34,4,1,""]},"sparseml.utils.datasets.imagenette.ImagewoofDownloader":{dataset_size:[34,2,1,""],download:[34,2,1,""],download_root:[34,2,1,""],extracted_root:[34,2,1,""],split_root:[34,2,1,""]},"sparseml.utils.helpers":{NumpyArrayBatcher:[33,1,1,""],bucket_iterable:[33,3,1,""],clean_path:[33,3,1,""],convert_to_bool:[33,3,1,""],create_dirs:[33,3,1,""],create_parent_dirs:[33,3,1,""],create_unique_dir:[33,3,1,""],flatten_iterable:[33,3,1,""],interpolate:[33,3,1,""],interpolate_list_linear:[33,3,1,""],interpolated_integral:[33,3,1,""],is_url:[33,3,1,""],load_labeled_data:[33,3,1,""],load_numpy:[33,3,1,""],load_recipe_yaml_str:[33,3,1,""],parse_optimization_str:[33,3,1,""],path_file_count:[33,3,1,""],path_file_size:[33,3,1,""],save_numpy:[33,3,1,""],tensor_export:[33,3,1,""],tensors_export:[33,3,1,""],validate_str_iterable:[33,3,1,""]},"sparseml.utils.helpers.NumpyArrayBatcher":{append:[33,2,1,""],stack:[33,2,1,""]},"sparseml.utils.singleton":{Singleton:[33,1,1,""]},"sparseml.utils.worker":{ParallelWorker:[33,1,1,""]},"sparseml.utils.worker.ParallelWorker":{add:[33,2,1,""],add_async:[33,2,1,""],add_async_generator:[33,2,1,""],add_item:[33,2,1,""],indefinite:[33,2,1,""],shutdown:[33,2,1,""],start:[33,2,1,""]},"sparseml.utils.wrapper":{wrapper_decorator:[33,3,1,""]},sparseml:{keras:[2,0,0,"-"],log:[1,0,0,"-"],onnx:[5,0,0,"-"],optim:[9,0,0,"-"],pytorch:[10,0,0,"-"],tensorflow_v1:[25,0,0,"-"],utils:[33,0,0,"-"]}},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","function","Python function"],"4":["py","attribute","Python 
attribute"]},objtypes:{"0":"py:module","1":"py:class","2":"py:method","3":"py:function","4":"py:attribute"},terms:{"00001":22,"00010671895716335979":22,"00011739085287969578":22,"00012912993816766537":22,"00014204293198443192":22,"00015624722518287512":22,"00017187194770116264":22,"00018905914247127894":22,"00020796505671840686":22,"00022876156239024756":22,"00025163771862927233":22,"0002768014904921996":22,"0003044816395414196":22,"00033492980349556157":22,"00036842278384511775":22,"0004052650622296296":22,"0004457915684525926":22,"0004903707252978519":22,"0005394077978276372":22,"000593348577610401":22,"0006526834353714411":22,"0007179517789085853":22,"0007897469567994438":22,"0008687216524793883":22,"0009555938177273272":22,"001":[3,22,31,32],"00105115319950006":22,"001156268519450066":22,"0012718953713950728":22,"0013990849085345801":22,"0015389933993880383":22,"0016928927393268422":22,"0018621820132595267":22,"0020484002145854797":22,"0022532402360440277":22,"0024785642596484307":22,"002726420685613274":22,"0029990627541746015":22,"003298969029592062":22,"0036288659325512686":22,"003991752525806396":22,"0043909277783870364":22,"004830020556225741":22,"005":[37,38],"005313022611848316":22,"005844324873033148":22,"006428757360336463":22,"00707163309637011":22,"007778796406007121":22,"008556676046607835":22,"009412343651268619":22,"010353578016395481":22,"011359662748873234":7,"01138893581803503":22,"012527829399838533":22,"013780612339822387":22,"015158673573804626":22,"01667454093118509":22,"017953205361364e":22,"0183419950243036":22,"019539741799235344":7,"020176194526733963":22,"02219381397940736":22,"02400691612424e":22,"0244131953773481":22,"02685451491508291":22,"029539966406591206":22,"03249396304725033":22,"03574335935197537":22,"03931769528717291":22,"043249464815890204":22,"04381":17,"047574411297479226":22,"052331852427227155":22,"0544702849929435e":22,"05756503766994987":22,"06332154143694486":22,"06965369558063936":22,"0766190651387033":22,"0834705943388392e":22,"08428097165257363":22,"091268053287076e":22,"092709068817831":22,"09574":22,"0th":24,"100":[8,24,29],"1000":[17,29],"10000":[3,31],"101":[16,17,18,28],"10197997569961412":22,"1113776745352607e":22,"11217797326957554":22,"1144777789251e":22,"115909044841462e":22,"123":[12,27],"1233957705965331":22,"13573534765618642":22,"1384283767210024e":22,"140274938683989e":22,"1435888100000012e":22,"14930888242180507":22,"152":[17,18],"160px":[12,27,34],"1642397706639856":22,"177248169415655e":22,"1801":17,"18066374773038418":22,"1902":22,"1918176537727232e":22,"19873012250342262":22,"1x1":17,"200":24,"2007":13,"2012":13,"2014":13,"2015":13,"2017":13,"2186031347537649":22,"21e":22,"224":[8,12,17,27,29],"240":17,"2404634482291414":22,"256":17,"25s":6,"260":17,"2645097930520556":22,"289048368510331e":22,"29096077235726114":22,"299":17,"300":[13,17,18,24],"3109994191499957e":22,"3200568495929873":22,"320px":[12,27,34],"322515441988787e":22,"3310000000000003e":22,"3333333333333333":26,"3520625345522861":22,"3579476910000015e":22,"380":17,"38726878800751474":22,"3x2":18,"3x3":17,"40024994425817e":22,"4003948586157844e":22,"4259956668082662":22,"4420993610649954e":22,"452271214393103e":22,"456":17,"4641000000000003e":22,"4685952334890929":22,"4763699237493086e":22,"5154547568380022":22,"52592555681761e":22,"528":17,"554766986187666e":22,"559917313492238e":22,"586309297171495e":22,"5937424601000017e":22,"594972986357221e":22,"600":17,"6105100000000006e":22,"626407607736664e":22,"640":[13,24],"701723378487253e":22,"727499949325609e":22,
"7404343444773634e":22,"7449402268886447e":22,"7715610000000007e":22,"784":12,"7974983358324136e":22,"8102436848064327e":22,"819748525897502e":22,"849732675807628e":22,"853116706110002e":22,"9194342495775094e":22,"948717100000001e":22,"954302432552388e":22,"975":6,"978518112499371e":22,"9997":32,"abstract":[3,4,8,9,18,22,24,26,28,31],"boolean":[3,7,31,33],"break":[24,33],"byte":33,"case":[3,7,8,22,24,31],"class":[3,4,6,7,8,9,11,12,13,16,17,18,21,22,23,24,26,27,28,29,31,32,33,34,37,38],"default":[3,4,6,7,8,9,13,16,17,18,21,22,23,24,28,29,30,31,32,33,34],"enum":[4,12,22,27,34],"export":[1,2,3,10,22,23,25,31,33,35,38],"final":[3,6,17,22,24,29,31,37,38],"float":[3,4,6,7,8,9,11,13,17,21,22,24,29,30,31,32,33,38],"function":[3,7,8,9,13,16,17,18,21,22,23,24,26,27,28,31,32,33,34,37,38],"import":[22,37],"int":[1,3,4,6,7,8,9,11,12,13,17,18,21,22,24,26,27,29,30,31,32,33],"long":22,"new":[3,6,7,8,9,11,16,21,22,24,26,28,31,32,33],"null":9,"return":[1,3,4,6,7,8,9,11,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,34,37],"static":[3,6,7,8,9,11,16,17,22,24,26,28,31,32],"switch":[26,31],"true":[3,4,6,7,8,9,12,13,16,17,18,21,22,23,24,27,28,29,30,31,32,33,34,38],"try":[7,33],"var":[8,28,31],"while":[3,7,17,18,21,22,26,31,38],Axes:[6,9],For:[3,8,22,24,31,32,37,38],Its:22,Not:22,OPS:31,One:29,Ones:[29,30],The:[3,6,7,8,9,12,13,16,17,18,21,22,23,24,27,28,29,30,31,32,33,34,37,38],Then:36,There:32,Use:[3,9,22,31],Used:[21,30,31],Useful:[3,21,22,31],Uses:[28,31,32],Will:[7,8,13,18,24,33],With:37,__all__:[3,22,31,38],__loss__:22,__name__:9,_ax:[6,9],_block_shap:[3,22,31],_deepsparsebasemodelrunn:8,_dim:[3,22,31],_map_mask_to_tensor:[3,22,31],abc:[3,4,8,9,18,22,24,28,31],about:[9,18,24,33],abov:8,abs:[7,17,22],absolut:[3,8,22,31,33],accept:[3,9,21,22,31],access:[22,24],accord:[3,8,11,22,24,31],accordingli:7,account:24,accumul:24,accuraci:[22,24,32,38],achiev:[6,9],across:[6,9,21,22,24,32,33],act:[21,29,30],act_typ:21,activ:[1,3,7,9,10,22,23,24,29,30,31,32,38],adam:38,adapt:[24,32],add:[6,7,8,9,11,22,24,29,30,33,38],add_async:33,add_async_gener:33,add_item:33,add_measur:[6,9],add_model_result:[6,9],add_modifi:[3,22,31],add_ops_cr:31,add_param_group:22,add_quant_dequ:23,add_reduce_to_node_output:7,add_result:[6,9],added:[3,7,17,22,24,31,32,33],addit:[3,4,6,7,8,17,18,21,22,24,26,28,31,33,36],addition:[8,12,22,24,31,35,37,38],addtion:18,adjust:[6,22,24],adjust_current_step:22,affect:[6,9,24],after:[3,6,8,9,17,22,24,27,29,30,31,32,33,37,38],after_optim:[3,31],after_run:31,afterward:[17,18,21],again:3,against:[6,9,22,24,31,32],aggreg:[24,31],aggress:33,aka:[22,31],algorithm:35,alia:[8,23,24,27,31],all:[1,3,4,6,7,8,9,12,13,16,17,18,21,22,23,24,26,27,28,30,31,32,33,34,37,38],all_token:[3,22,31],allow:[3,6,8,9,11,22,24,31,32],along:[1,3,6,13,22,24,31,33],alongsid:[3,9,22,31],alpha:22,alreadi:[8,13,22,28,34,38],also:[3,6,8,9,22,24,26,31,37,38],although:[17,18,21],altogeth:22,alwai:22,among:24,amount:[3,8,17,22,24,31],amp:24,analys:24,analysi:[6,8,9,22,31],analyz:[0,1,6,22,31],analyze_lay:22,analyze_model:8,analyze_modul:31,analyzedlayerdesc:[9,22],analyzer_a:[1,10],analyzer_model:[1,5],analyzer_modul:[1,10,25],analyzer_prun:[1,10],ancestor:8,anchor:[18,24],anchor_group:[18,24],anchors_group:24,ani:[3,4,6,8,9,11,13,16,17,18,22,24,26,28,30,31,32,33,35,36,37,38],annot:[13,24,33],annotatedimagetransform:13,anoth:[3,31],any_str_or_regex_matches_param_nam:24,any_str_or_regex_matches_tensor_nam:32,anyth:[22,37,38],apart:[24,33],api:[8,26,35,37,38],appear:23,append:[24,33],appli:[3,6,7,8,9,11,12,13,17,21,22,24,26,27,28,29,30,31,32,33,35
,37,38],applic:6,applied_learning_r:22,applied_spars:22,apply_op_vars_mask:31,apply_shape_change_mult:6,apply_softmax:28,approach:35,appropri:[22,28,33],approx_ks_loss_sensit:31,approxim:[6,21,22,31],architectur:[16,17,18,28,29],area:33,arg:[3,8,9,11,16,22,26,28,31,32],arg_scop:32,arg_scope_var:32,argument:[3,9,16,22,28,31,37,38],around:[3,24,35],arrai:[4,7,8,24,32,33],art:35,artifici:22,arxiv:[17,22],as_classifi:17,as_default:37,as_ltrb:24,as_xywh:24,as_yolo_backbon:17,ascend:33,asd932:12,asd932_:27,ask:24,aslayertrack:22,aspect:[3,22,24,26,31],aspect_ratio:24,asregmodifi:22,asresulttyp:22,assign:[3,31],associ:[8,24,32],assum:[3,8,24,33],assumpt:24,asymmetr:[23,38],async:33,attach:[8,28],attempt:32,attibut:8,attr:8,attribut:[3,4,6,8,9,11,22,26,31,33],augment:7,augmented_model_path:7,automl:[35,37,38],aux:[17,24],aux_pr:24,aux_weight:24,auxiliari:24,avail:[3,6,8,16,22,24,28,31,37,38],available_kei:[16,28],available_loss:24,averag:[6,9,24,32],avg:30,avoid:[8,32],awai:[26,31],awar:[22,23,38],axes:[6,9],axi:7,back:[8,33],backbon:[17,18],backbone_early_output_idx:18,backbone_out_channel:18,backend:[7,24],backward:[22,24],ball:[8,33],bar:[6,7,8],base:[3,4,6,7,8,9,11,12,13,16,17,18,21,22,23,24,26,27,28,29,31,32,33,34,35],base_name_scop:28,baselin:[6,9],baseline_a:22,baseline_averag:[6,9],baseline_loss:22,baseline_measurement_index:[6,9],baseline_measurement_kei:[6,9],basemanag:[3,9,22,31],basemodifi:[3,9,22,31],baseobject:9,baseprop:[3,9,22,31],baseschedul:[3,9,22,31],baseupd:[3,9,22,31],basic:[9,17,29],basic_session_run_hook:28,batch:[3,4,6,7,8,9,13,17,22,24,26,27,29,30,31,32,33,37],batch_cross_entropy_loss:32,batch_forward:[8,24],batch_norm:32,batch_norm_decai:32,batch_norm_epsilon:32,batch_norm_updates_collect:32,batch_siz:[6,8,9,22,24,26,32,37],batchbenchmarkresult:24,batcher:33,batchnorm2d:23,batchnorm:[8,29],batchnormparam:8,becaus:8,been:[3,8,22,24,28,30,31,32],befor:[3,4,6,8,17,22,24,27,30,31,37,38],begin:[4,22,31,33],begin_step:31,behav:22,behavior:[4,8,22],being:[3,9,21,22,24,30,31,32],belong:[16,28,31],below:[8,24,38],benchmark:[1,6,8,10],best:22,beta:[29,30],beta_initi:[29,30],better:[1,22,35],between:[3,6,8,9,11,21,22,23,24,26,31,33,38],bia:[6,8,22,29,30],bias_initi:[29,30],bias_nam:6,bias_shap:[6,8],bin:24,binari:24,binary_cross_entropy_with_logit:24,binarycrossentropylosswrapp:24,bit:[7,37],blob:8,block:[3,7,8,17,22,23,29,31,38],block_shap:[3,22,31],blockpruningmaskcr:[3,22,31],blog:[35,38],bn_node:8,bool:[3,4,6,7,8,9,11,12,13,16,17,18,21,22,23,24,27,28,29,30,31,32,33,34],boost:22,boosted_a:22,boosted_loss:22,booster:22,both:[21,22,38],bottleneck:[17,29],bottom:33,boudn:24,bound:[13,24],bounding_box_and_labels_to_yolo_fmt:13,box:[13,24],box_giou:24,boxes_a:24,boxes_b:24,break_batch:[24,33],broadcast:21,bucket:[24,33],bucket_count:24,bucket_iter:33,bucket_limit:24,buffer:[21,26],bug:35,build:[3,6,22,26,35],build_input_fn:26,build_target:24,built:[3,4,8,26,27,30,35,37],builtin:22,cach:[11,12,13,22,24,27],cacheabl:11,cacheabledataset:11,calcul:[3,6,8,9,17,22,24,31,33],calculate_flop:8,calculate_map:24,calibr:[5,6],calibrate_op_typ:7,calibrationsess:7,call:[3,4,6,9,16,17,18,21,22,24,26,31,32,37],callabl:[3,8,9,16,22,24,26,28,31,32,33],callback:[1,2,3,24,31,37],caller:32,came:24,can:[1,3,6,7,8,9,11,12,13,17,18,21,22,24,27,29,30,31,32,33,34,35,37,38],cannot:[3,9,22,31,38],canon:8,canonical_nam:8,cap:33,card:33,care:[17,18,21],cat:12,cent_crop:27,center:[24,26],center_i:24,center_square_crop:[26,27],center_x:24,certain:[3,8,9,22,31,38],chain:13,chan:21,chang:[3,4,6,8,9,16,22,24,31],channe
l:[3,7,17,18,21,22,24,29,30,31],channel_wis:21,channels_first:30,channels_last:30,chart:[6,9],chauhan:24,check:[3,7,8,9,17,18,21,22,24,31,32,33,37],check_feat_lab_inp:24,check_load_model:8,check_lr:22,check_numb:33,check_opset_vers:7,checkpoint:32,checkpoint_path:32,child:8,choos:[8,22,24,38],chosen:22,cifar100:12,cifar100dataset:[12,27],cifar10:[12,29],cifar10dataset:[12,27],cifar:[10,11,25,26],cifardataset:27,class_i:27,class_typ:[17,29],class_x:27,classif:[10,11,16,19,24,25,26,28,32,34],classifi:[17,18,29],classificationestimatormodelfn:28,clazz:9,clean:[31,32,33,37],clean_path:33,clean_tensor_nam:32,clear:[22,24],cli:35,client:[28,31,32],clone:36,close:[3,31],closest:[6,9],closur:[22,24],cnn:18,coco:[10,11,18,24],coco_2017_yolo:13,cocodetectiondataset:13,code:[2,3,4,5,6,8,9,10,11,16,22,24,25,26,28,31,32,33,35,37,38],coeffici:[24,32],collat:13,collect:[3,9,22,24,28,31,32,33],collection_nam:31,column:24,com:[8,12,24,27,33,38],combin:[8,9,22,24,31],combo:24,common:[21,22,33],commonli:38,commun:35,compar:[3,6,8,9,22,24,31,32,37],compare_index:[6,9],comparison:[6,9,24],compat:[16,22,28],compil:[24,37],complet:[6,8,22,24,31,37],complete_graph:[31,37],compress:[21,33],comput:[3,7,9,12,13,15,17,18,21,24,27,29,31,32,34],compute_output_shap:3,condit:[24,31],confid:24,confidence_threshold:24,config:[9,22,23,37],configur:[17,18,24,29,33,37,38],connect:[29,30],consid:[6,24],consist:[1,33],consol:22,constant:[3,22,31,32],constant_log:22,constantksmodifi:[22,31,38],constantli:22,constantpruningmodifi:[3,22,31,35],construct:[8,22,24],constructor:[3,9,16,21,22,28,29,31],contain:[3,4,6,8,9,17,21,22,24,26,27,28,31,32,33,37,38],content:[0,35],context:[22,24],continu:[3,8,22,24,31,33],contract:[3,31],contradict:24,contradict_hinton:24,control:[3,8,9,22,24,31,38],conv0:38,conv1:[22,31,37,38],conv1d:32,conv2:[37,38],conv2d:[23,30,31,32],conv2d_1:3,conv2d_5:3,conv2d_block:30,conv3:[37,38],conv3d:32,conv:[6,7,8,17,18,22,23,24,29,30,31,32,38],conv__224:7,conv__252:7,conv_net:[22,31],conv_node_param:8,conveni:[3,8,22,24,31,32,37,38],convers:[8,23,37],convert:[3,8,9,23,24,31,33,37,38],convert_kera:4,convert_model_initializers_to_spars:8,convert_relus_to_fat:21,convert_sparse_initializers_to_dens:8,convert_to_bool:33,convinteg:7,convnd:24,convolut:[8,17,18,22,29,30,32],coordin:[18,24],copi:[21,24],core:[6,8,9],correct:[8,9,22,24],correct_nm_analyze_model_node_id:8,corrected_lr_info:9,correctli:[4,31],correspond:[4,22,24,33],cosineannealingwarmrestart:[9,22],cost:22,could:[4,8,9],couldn:32,count:[22,24,33],counter:[4,24,33],cpu:[6,9,11,22,24,35],creat:[1,2,3,4,5,6,7,8,9,10,11,12,13,16,17,18,21,22,24,25,26,27,28,29,30,31,32,33,35,37,38],create_activ:21,create_dir:33,create_estim:28,create_extra:31,create_graph_ops_prun:31,create_ks_schedule_op:31,create_ks_scheduled_constant_graph_op:31,create_label:8,create_loss:28,create_metr:28,create_metric_update_ops_hook:28,create_modifier_ops_and_update_hook:28,create_op:[3,31,37],create_op_prun:31,create_parent_dir:33,create_predict:28,create_scaffold:28,create_sect:17,create_sparse_tensor:8,create_sparsity_mask:[3,22,31],create_sparsity_mask_from_abs_threshold:22,create_sparsity_mask_from_tensor:22,create_split_iterators_handl:26,create_summaries_prun:31,create_summary_op:28,create_train_summary_hook:28,create_training_op:28,create_unique_dir:33,create_zoo_model:[16,28],creation:[31,37,38],creator:[3,22,26,27,28,31],crop:[13,24,26],cross:[24,32],cross_entropi:24,crossentropyloss:22,crossentropylosswrapp:24,csv:9,cubic:[3,22,31,33],cuda:[22,24],cudnn:24,cumul:24,current:[3,4,
6,7,8,9,16,21,22,24,26,28,29,30,31,32,33,35,37,38],curv:33,custom:[21,32,38],custom_op_handl:32,cutoff:22,cwd:[4,24],cycl:[22,31],darknet53:17,darknet:[10,16,18],darknetsectionset:17,data:[1,5,6,7,11,12,13,22,24,26,27,28,33],data_format:30,data_load:[7,8,24],data_loader_kwarg:22,data_shap:8,data_typ:8,dataload:[6,7,8,13,22,24],dataparallel:24,datapararallel:24,dataset:[1,10,16,17,18,22,24,25,28,29,33],dataset_op:26,dataset_s:[12,27,34],datasetregistri:[11,26],datasetv1:26,ddp:24,deal:31,debian:36,debug:4,debug_mod:4,decai:[22,31,32,38],decay_r:[3,31],decay_step:[3,31],decim:[8,22,38],decod:24,decode_output_batch:24,deconstruct_tensor:24,decor:[3,9,11,16,22,24,26,28,31,33],decreas:[22,31],deep:35,deepspars:[6,8,24,33,35,37],deepsparseanalyzemodelrunn:8,deepsparsemodelrunn:8,def_ignore_error_tensor:16,def_model_backward:24,default_box:13,default_context:24,default_dataset:[16,28],default_dataset_path:34,default_desc:[16,28],default_devic:24,default_exponential_check_lr:22,default_image_s:32,default_loss_kei:24,default_model_fn_cr:28,default_onnx_opset:32,default_pruning_sparsities_loss:9,default_pruning_sparsities_perf:9,default_qat_qconfig:23,defaultbox:[13,24],defin:[3,4,6,8,17,18,21,22,24,28,31,32,38],definit:37,delet:8,deliv:35,dens:[8,30],dense_block:30,densiti:[8,24,32],depend:[22,32,36,38],deploy:37,depth:[17,33,37],depthwis:[17,18,29,30,32],depthwise_conv2d_block:30,dequantize_nod:8,dequantizelinear:23,deriv:[3,4,6,22,31],desc:[8,9],desc_arg:16,descend:[21,33],descent:38,describ:[9,17,29],descript:[9,16,22,28,31,33],design:[33,37,38],desir:[8,16,22,23,24,26,28,30,31,32,34,37,38],destin:4,detail:6,detect:[8,10,11,16,17,24,28],detector:[18,24],determin:[8,22,32,33],determinist:24,dev:35,deviat:[3,11,22,24,31,32],devic:[4,22,24,32],device_context:24,device_to_name_id:24,dict:[3,6,7,8,9,11,16,17,18,21,22,24,26,27,28,31,32,33],dictionari:[3,4,6,7,8,9,21,22,24,26,28,31,32,33,38],did:[8,22],differ:[6,9,22,23,24,28,31,32,38],dim:[3,22,24,31],dimens:[3,8,9,21,22,24,31,32,33],dimensionpruningmaskcr:[3,31],dimensionsparsitymaskcr:22,dir:[4,24],direct:8,directli:22,directori:[4,7,24,28,32,33],disabl:[22,24,38],disable_bn_fus:24,disable_quantization_observer_epoch:22,disclaim:8,disk:[11,12,33],displai:[6,7,8,9],distanc:8,distil:24,distribut:[6,9,11,12,13,24,27],distributeddataparallel:24,diverg:8,divid:[3,22,31,32],divis:32,doc:[3,4,8,9,22,31,33],doc_str:4,document:[35,37],doe:[6,7,8,12,13,22,23,24,27,31,32,33,34,38],doesn:[3,11,22,31,33],dog:12,doing:[3,9,22,24,31],domain:[16,28],domainadapt:24,done:[24,37,38],doubl:17,down:[17,21,29],download:[12,13,27,34,37],download_root:34,downsampl:[17,29],downsample_out_channel:17,driven:35,drop:24,dropout:[17,30,32],dropout_r:30,dtype:[3,8,31,32],due:8,dure:[4,7,22,24,28,31,38],dynam:[7,8,21],dynamicquantizelinear:7,e2e_batch_second:24,e2e_batch_tim:24,e2e_batches_per_second:24,e2e_item_second:24,e2e_items_per_second:24,e2e_sec:24,each:[3,4,6,7,8,9,13,17,18,22,24,27,29,31,33,37,38],earli:[11,24],earlier:[17,24],early_stop:11,early_stop_data_load:24,early_stop_step:24,earlystopdataset:11,eas:37,easi:[35,37],easiest:38,easili:[3,9,11,16,22,26,28,31,38],ecosystem:35,edg:[8,33],edge_perc:33,edit:[2,5,8,10,25,31,37],editor:32,effect:[3,4,9,22,31],efficientnet:[10,16],efficientnet_b0:17,efficientnet_b1:17,efficientnet_b2:17,efficientnet_b3:17,efficientnet_b4:17,efficientnet_b5:17,efficientnet_b6:17,efficientnet_b7:17,efficientnetsectionset:17,either:[3,6,8,23,24,30,32,33,38],element:[3,24,31,33],els:[3,8,9,21,24,30,31,33],empti:[9,21,22,31],emul:[22,38],enabl:[3
,9,17,22,23,24,31,35,37,38],enable_aux:17,encapsul:31,enclos:3,encod:[13,24,37,38],encode_annotation_bounding_box:13,encode_image_box_label:24,end:[3,4,9,17,22,24,29,31,32,38],end_compar:[3,9,22,31],end_epoch:[3,9,22,31,37,38],end_pend:22,end_point:32,end_step:31,enforc:[3,8,9,21,22,24,31,38],engin:[3,4,6,8,24,35,37],enhanc:3,ensur:7,entir:[3,18,22,31],entri:22,entropi:[8,24,32],enumer:38,environ:36,epoch:[3,4,9,22,24,31,35,37],epoch_end:22,epoch_start:22,epochrangemodifi:[3,22,31,37,38],epsilon:8,equal:[3,8,9,21,22,31,32,33],equat:7,equival:24,err:[3,22,31],error:[3,16,17,18,22,31,33],error_desc:33,estim:[1,25,26,31,35],estimatormodelfn:28,etc:[6,9,16,17,18,22,24,28,30,31,33],eval:[22,31],eval_tensor_dens:32,eval_tensor_spars:32,evalu:[4,8,22,24,28,32],even:32,evenli:[3,22,24,31],event:[8,22,31,33],everi:[6,11,17,18,21,22,24,31,37,38],everyth:24,exactli:[3,21,22,31],exampl:[3,7,8,22,31,32,36,37,38],exce:[3,22,31],except:[7,13,22,24,32],excit:[17,21],exclud:7,exclude_nod:7,execut:[8,9,22,24,31,32],execution_ord:9,exist:[22,24,32,33,34],exp:21,exp_channel:[17,29],exp_count:[4,24],exp_ratio:[17,29],expand:[17,21,29,33],expanded_channel:21,expans:[17,29],expansion_ratio:17,expect:[3,6,8,9,17,18,22,24,26,29,31],explor:[36,37],expon:[3,31],exponenti:[21,22,31],exponential_lr_schedul:31,exponentialdecai:[3,31],exponentiallr:[3,9,22,31,38],export_checkpoint:32,export_dir:[24,33],export_h5:4,export_kera:4,export_named_sampl:32,export_onnx:[4,24,32,37],export_pb:[32,37],export_pytorch:24,export_sampl:[4,24,32],expos:8,ext:27,extend:9,extens:33,extern:[1,10,16],extra:[3,4,6,9,13,21,22,24,28,31,32,37],extra_opset:32,extra_repr:21,extract:[7,8,18,22,24,33,34],extract_node_id:8,extract_node_shap:8,extract_nodes_shapes_ort:8,extract_nodes_shapes_shape_infer:8,extract_shap:8,extracted_root:34,extractor:[17,18],extrat:18,extrem:[6,9],factor:24,fake:23,fall:33,fals:[3,6,7,8,9,12,13,16,17,18,21,22,23,24,27,28,29,30,31,32,33,34],far:24,fast:18,fastai:[12,27],faster:[22,31,35],fat:21,fat_exp_relu:21,fat_pw_relu:21,fat_relu:21,fat_sig_relu:21,fatrelu:[1,10,22],featur:[17,18,24,26,32,35,37],feature_map:24,fed:24,feed:[4,24,26,31,32],feed_dict_cr:31,few:[32,35,37],field:[8,12,13,14,15,17,18,21,23,24,27,29,31,34],figur:[6,9,22,32],file:[1,3,4,6,7,8,9,16,18,22,24,27,28,31,32,33,34,37,38],file_path:[3,9,22,27,31,33],filepath:7,filewrit:32,fill:37,filter:[3,22,31],final_lr:22,final_spars:[3,22,31,37,38],final_v:22,find:[8,12,13,24,27,32],find_weight_data:7,fine:[8,22,38],first:[6,8,9,18,22,24,32,33,37],fit:37,fit_gener:37,fix:24,fix_data_parallel:24,flatten:[12,33],flatten_iter:33,flexibl:37,flip:13,float16:24,float32:[7,24,37],float64:32,flop:[6,8,9,22],flow:[8,31],fold:8,fold_conv_bn:8,foldabl:8,foldable_nod:8,folder:[12,13,27],follow:[3,7,8,12,22,24,31,32,33,37,38],footprint:[22,35],forc:[7,21],force_fus:7,form:[12,27,33],format:[1,3,4,6,7,9,13,22,24,31,32,33,37,38],format_iterator_batch:[26,27],format_repr:9,format_str:9,former:[17,18,21],formula:[3,22,31],forward:[3,17,18,21,22,24,31],found:[3,6,8,9,12,13,16,17,18,21,22,23,24,27,29,31,32,34,35,37],fp32:8,fraction:[3,9,22,24,31,32,38],framework:[0,1,2,3,4,5,6,9,10,18,22,24,25,26,27,28,29,30,31,32,37,38],free:3,freez:24,freeze_bn_stats_epoch:22,frequenc:[3,4,22,31],from:[1,3,4,6,7,8,9,11,12,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,37,38],from_dict:[6,9],from_model_random:8,from_random:8,from_sparse_model:22,from_train:32,from_yaml:[3,22,31,37],front:[33,37,38],frozen:[22,38],full:[3,7,9,12,22,24,27,31,32,34],full_precis:24,fulli:[29,30,38],func:[6,9,12,22,2
7,31],func_get:[3,9,22,31],func_set:[3,9,22,31],further:[17,29],fuse:[7,22,23,24],fuse_dynamic_qu:7,fuse_modul:22,fuse_module_conv_bn_relu:[22,23],fusion:[7,23],futur:8,gama:29,gamma:[22,30,31,37,38],gamma_initi:[29,30],gan:32,gather:[9,21],gemm:[6,7,8],gemm_node_param:8,gen:33,gener:[1,4,6,7,8,9,10,18,21,22,24,26,31,32,33,34,35,37,38],generate_augmented_model:7,get:[3,6,8,9,22,24,26,28,31,32,33,34,38],get_anchor_grid:24,get_attr_float_val_for_nod:8,get_available_provid:8,get_batch_norm_param:8,get_conv_lay:24,get_default_boxes_300:24,get_default_graph:[31,32],get_default_sess:32,get_feature_extractor:18,get_gan_network_fn:32,get_grid:24,get_group:31,get_grouping_fn:22,get_grouping_op:[3,31],get_init_by_nam:8,get_input:24,get_kernel_shap:8,get_label:24,get_lay:24,get_layer_name_from_param:3,get_layer_param:24,get_linear_lay:24,get_main_logg:1,get_mask_initi:[3,31],get_model_input_nam:7,get_model_scop:32,get_named_layers_and_params_by_regex:24,get_network_fn:32,get_nm_root_logg:1,get_nod:6,get_node_attribut:8,get_node_by_id:8,get_node_input:8,get_node_input_nod:8,get_node_output:8,get_node_output_nod:8,get_node_param:8,get_nodes_by_input_id:8,get_nodes_by_output_id:8,get_numpy_dtyp:8,get_op_input_var:32,get_op_var_index:32,get_ops_and_inputs_by_name_or_regex:32,get_optim_learning_r:24,get_or_create_global_step:31,get_or_create_graph_ops_prun:31,get_or_create_ks_schedule_op:31,get_or_create_ks_scheduled_graph_op:31,get_output_grid_shap:24,get_pr:24,get_prunable_lay:24,get_prunable_nod:8,get_prunable_node_from_fold:8,get_prunable_op:32,get_qat_qconfig:23,get_quantization_param:23,get_quantization_params_dict:7,get_quantize_parent_for_dequantize_nod:8,get_recall_level:24,get_result:[6,9],get_tensor_var:32,get_terminal_lay:24,get_threshold:21,getter:[3,9,22,31],giou:24,github:[8,12,24,27,35],give:[8,16,18,22,24,27,28,30,38],given:[3,4,6,7,8,9,11,13,16,17,21,22,23,24,26,27,28,29,30,31,32,33,38],glob:[8,33],global:[4,22,24,31,32],global_avg:30,global_step:[3,31],global_variables_initi:[31,37],glorotuniform:[29,30],gmp:38,gmpruningmodifi:[3,22,31,37],goe:[22,24,31],gpu:24,grab:[8,22,24,32],grad:22,grad_scal:24,gradient:[22,24,38],gradscal:24,gradual:[3,22,31,37,38],gradualksmodifi:[22,31,38],gradualparammodifi:22,grain:38,granular:8,graph:[3,6,7,8,22,23,24,26,27,28,29,30,31,32,37],graph_editor:[1,5],graph_optim:[1,5],graphexport:[32,37],graphkei:32,greater:[3,9,22,24,31],grid:24,grid_shap:24,ground:[24,28],ground_truth_annot:24,group:[3,8,9,17,22,24,30,31,33],group_idx:24,group_tensor:[3,22,31],groupedpruningmaskcr:[3,22,31],grouping_fn_nam:22,grouping_op_nam:[3,31],grouplearningratemodifi:31,guarante:[8,22],guid:[24,32],hack:22,had:24,half:[24,29,38],han_mobilenet:17,hand:[37,38],handl:[1,3,4,6,8,9,11,12,22,24,26,27,31,33,37,38],handler:32,happen:[4,22],hard:[24,38],hard_swish:21,hardcod:8,hardswish:21,has:[3,6,7,8,9,11,22,31,32,38],has_baselin:[6,9],have:[3,8,11,16,19,22,23,24,28,30,31,32,38],hdf5:4,head:18,height:[24,26,32],help:[1,4,24,35],helper:[0,1,4,5,6,10,11,22,25],here:[3,12,13,17,18,21,22,27,29,34,37],hidden:17,hidden_channel:17,higher:31,highest:33,highli:35,hinton:24,his:37,histogram:24,hold:[3,22,31],hook:[17,18,21,22,24,28,31],horizont:13,host:35,how:[3,6,8,9,17,22,24,29,31],howev:[3,22,37,38],http:[8,12,17,22,24,27,33,38],human:[6,9],hyper:24,id_:[6,9],id_or_nam:[6,9],ident:[7,8],identif:[4,24],identifi:[4,6,9,24,31],ides:24,ids:[8,24],ignor:[16,17,18,21,24,28,31,33],ignore_error_tensor:[16,17,18,24],iin:8,imag:[12,13,17,24,26,27,29,32,34],image_s:[12,13,24,26,27],imagefold:[10,11,2
5,26],imagefolderdataset:[12,27],imagenet:[1,10,11,17,18,19,25,26,28,33],imagenet_norm:27,imagenetdataset:[12,27],imagenett:[1,10,11,25,26,33],imagenettedataset:[12,27],imagenettedownload:[12,27,34],imagenettes:[12,27,34],imagewoof:[12,27,34],imagewoofdataset:[12,27],imagewoofdownload:[12,27,34],imagewoofs:[12,27],img:[6,9,27],immedi:[3,9,22,31],impl:26,implement:[3,4,6,8,9,12,13,17,18,21,22,23,24,26,27,29,31,33,34,37,38],impos:[8,22,31],imposed_k:8,improv:22,in_chan:30,in_channel:17,incept:[17,24],inception_v3:[10,16],inceptioncrossentropylosswrapp:24,inceptionv3:17,inclin:38,includ:[3,6,8,22,23,24,30,31,33,35,38],include_bia:30,include_bn:30,include_modifi:24,include_nod:7,include_target:23,include_valu:8,inclus:24,incom:24,increas:[3,9,21,22,31,33,38],indefinit:[26,33],independ:22,index:[3,4,6,9,11,18,22,24,28,31,32,33,38],indic:[3,17,22,24,30,31,32],individu:[6,8,9,22,31],induc:[3,22,31],infer:[6,8,24,29,35,37],inferencesess:8,infinit:8,infinite_data_load:24,info:[1,4,6,8,9,12,13,17,22,24,27,29,33,34],inform:[3,4,6,8,9,18,21,22,24,35,37],inherit:[3,9,22,31],init:22,init_lr:[3,9,22,31,37,38],init_nam:8,init_op:[29,30],init_sect:[17,29],init_spars:[3,22,31,37,38],init_v:22,initi:[3,7,8,9,17,21,22,23,29,30,31,32,38],initial_learning_r:[3,31],initialize_logg:22,initialize_sess:31,inject:31,inp:[17,18,21,22],inp_dict:32,inp_tensor:32,inp_val:32,inplac:[8,21,23],input1:8,input2:8,input:[3,4,6,7,8,9,12,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,37],input_batch:7,input_fn:26,input_func:22,input_id:8,input_nam:[6,7,37],input_op:31,input_qtyp:7,input_shap:[3,6,8,9,16,24,28],input_tensor:3,inputs_sampl:22,inputs_sample_max:22,inputs_sample_mean:22,inputs_sample_min:22,inputs_sample_s:22,inputs_sample_std:22,inputs_spars:22,inputs_sparsity_max:22,inputs_sparsity_mean:22,inputs_sparsity_min:22,inputs_sparsity_std:22,insid:[37,38],instal:[19,35],instanc:[3,4,6,8,9,17,18,21,22,24,26,28,31,32,33],instanti:[11,16,26],instead:[3,7,8,9,17,18,21,22,24,31,32,38],instruct:[37,38],int8:[7,38],integ:[3,4,7,8,9,22,31,32,33],integerop:7,integr:[6,9,19,31,33,35,37],intend:37,intens:11,inter_func:[3,22,31,33],interact:24,interfac:[33,35],intermedi:[7,8,32],intern:28,interpol:[3,21,22,31,33],interpolate_list_linear:33,interpolated_integr:33,intersect:24,interv:[22,31,38],intial:7,intput:32,intro:35,introduc:[6,8,9,22],invers:24,inverse_cub:[3,22,31,33],invert:[17,29],invoc:[26,31],invok:37,invoke_batch_backward:24,invoke_batch_end:24,invoke_batch_forward:24,invoke_batch_loss:24,invoke_batch_start:24,iou:24,iou_step:24,iou_threshold:24,irregular:22,is_activ:21,is_after_end_step:31,is_foldable_nod:8,is_parallel_model:24,is_prunable_nod:8,is_prunable_op:32,is_pruning_step:3,is_train:32,is_url:33,issu:[17,18],item:[8,11,13,22,24,33],iter:[3,6,7,8,11,22,24,26,27,31,33],iter_batch:[26,27],iter_step:8,iterations_per_check:6,iters_sleep_tim:6,its:[3,4,7,8,9,13,21,22,24,31,32,33,37,38],itself:22,jekyllrb:33,join:37,json:[6,9],just:[13,24],kd_set:24,kdlosswrapp:24,kdset:24,keep:[3,6,8,9,22,24,28,31,33,36],keep_param:8,kei:[3,6,8,9,11,16,19,21,22,24,26,28,31,33],kept:38,kera:[0,1,35],keras2onnx:37,keraslogg:[3,4],kerasmodifieryaml:3,kernel:[3,6,8,9,17,22,29,30,31],kernel_initi:[29,30],kernel_s:[17,30],kernel_shap:8,keyword:[3,9,16,22,28,31],kl_diverg:[6,8],knowledg:24,known:22,ks_group:31,ks_layer_desc:22,ks_loss_sensitivity_op_var:31,kslosssensitivityanalysi:[6,9],kslosssensitivityresult:[6,9],ksperfsensitivityanalysi:[6,9],kssensitivityprogress:6,kwarg:[3,4,6,8,9,11,16,21,22,26,28,31],lab:24,label:[4,8,13,24,26,27,28,3
2,33],label_shap:8,labeled_data:8,larg:[22,35],larger:[22,24,31,38],last:[17,21,22,32],later:[3,9,22,31],latter:[17,18,21],layer1:22,layer2:22,layer:[1,3,4,6,8,9,17,18,21,22,23,24,25,29,31,32,38],layer_desc:22,layer_nam:[3,21,22,24],layer_norm:22,layerboostresult:22,layerwis:8,lead:31,learn:[3,9,22,24,28,31,37],learning_r:[0,1,3,22,31,38],learningr:[3,9,22,31],learningratemodifi:[3,22,31,37],least:[22,28],leav:[13,22],leave_en:[3,22,31,38],left:[24,30],len:37,length:[11,24],less:[3,9,22,31,38],lesser:38,lev:22,level:[1,3,6,8,9,11,17,22,24,31,38],librari:[35,37],life:31,lifecycl:[22,24],lifetim:4,like:[3,6,9,21,22,24,31,32,36,37,38],limit:[8,22,38],line:[9,21,31,35,37],linear:[3,6,7,22,23,24,31,33],linearli:[7,33],linux:36,list:[3,4,6,7,8,9,11,13,16,17,18,21,22,24,26,28,29,31,32,33,37,38],lite:18,littl:37,load:[3,6,7,8,9,11,16,17,18,22,24,26,27,28,31,32,33],load_desc:9,load_epoch:24,load_framework_list:9,load_framework_obj:9,load_json:[6,9],load_labeled_data:33,load_list:[3,22,31],load_manag:22,load_manager_state_dict:22,load_mask_cr:[3,22,31],load_model:24,load_numpi:33,load_obj:[3,22,31],load_optim:24,load_pretrain:28,load_recipe_yaml_str:33,load_state_dict:[21,22],load_strict:[16,17,18],loader:[8,11,13,24],local:[4,12,24,27,32,34],local_rank:24,locat:[22,24,27,37],log:[0,3,4,6,22,24,31,33,35],log_dir:4,log_histogram:24,log_histogram_raw:24,log_hyperparam:24,log_nam:24,log_path:24,log_scalar:[4,24],log_step:24,log_summari:24,log_typ:[3,9,22,31],log_upd:22,logger:[1,2,3,9,10,22,31,33],loggers_initi:22,loggersettingcallback:4,loggingmod:4,logic:[3,33],logit:[17,24,29,32,37],longer:22,look:[24,33,37,38],lookup:31,loop:22,loss:[1,4,5,6,9,10,13,22,25,28,31,38],loss_fn:[22,24],loss_kei:22,loss_measur:9,loss_tensor:31,loss_upd:22,lossesandmetricsloggingcallback:4,losswrapp:[22,24],lower:[8,22],lowest:[8,22,31,33],lr_class:[3,9,22,31,37,38],lr_kwarg:[3,9,22,31,37,38],lr_loss_sensit:22,lr_modifi:31,lr_mult:22,lrelu:21,lrlosssensitivityanalysi:[9,22],lrs:22,ltrb:[13,24],made:[3,8,22,24,31],magic:[2,5,6,8,10,25,35],magnitud:[3,8,22,31,37,38],mai:[22,24,38],main:1,make:[3,9,16,22,24,31,37],make_one_shot_iter:26,manag:[0,1,2,10,24,25,28,37],manager_state_dict:22,mani:22,manual:[22,24],map:[7,8,9,21,22,24,26,31,33],map_loc:[22,24],mark:[3,9,22,31,38],markdown:[33,37,38],mask:[3,22,24,31],mask_creat:[3,22,31],mask_creator_prun:[1,10,25],mask_differ:24,mask_prun:[1,2,10,25],mask_pruning_cr:[1,2],mask_typ:[3,22,31,37,38],mask_updat:3,maskedlay:3,master:8,match:[3,6,7,8,9,11,18,21,22,24,31,32,33,38],matmul:[7,8,31,32],matmul_node_param:8,matmulinteg:7,matplotlib:[6,9],matter:[33,37,38],max:[3,7,22,24,26,30,31,33],max_available_cor:8,max_bin:24,max_detect:24,max_epoch:9,max_node_dist:8,max_source_s:33,max_step:8,max_target_metric_loss:22,max_val:[21,24],maxim:6,maximum:[6,7,8,24,33],mdoel:8,mean:[3,6,8,9,11,12,22,24,27,31],meanaverageprecis:24,meant:[9,33],measur:[6,8,9,22,24,31,33],memori:[8,11,21,22,24,26,33],merg:[9,33],merge_desc:9,meta_canonical_nam:8,metaclass:33,metadata:6,method:[3,8,9,21,22,24,31,37],metric:[4,22,24,28],metric_increas:22,metric_kei:22,metrics_dict:28,metrics_initializers_dict:28,metricupdateopshook:28,microsoft:8,middl:38,might:3,mileston:[22,31,37,38],milestone_step:31,min:[3,22,24,26,31,33],min_end:[3,9,22,31],min_epoch:9,min_frequ:[3,9,22,31],min_start:[3,9,22,31],min_val:[21,24],min_valu:8,mine:24,minim:[6,28],minimum:[3,7,8,9,22,24,31,33],miss:[3,21,22,31],missing_kei:21,mix:24,mnist:[10,11,16,18,25,28,37],mnist_net:[17,29,37,38],mnistdataset:12,mnistnet:17,mobilenet:[10,16,18,
25,28,32],mobilenet_const:29,mobilenet_v1_arg_scop:32,mobilenet_v2:[10,16,19,25,28],mobilenet_v2_const:29,mobilenet_v2_width:[17,29],mobilenetsect:29,mobilenetsectionset:17,mobilenetv1:32,mobilenetv2:[17,29],mobilenetv2sect:29,mobilenetv2sectionset:17,mod_extra:[31,37],mod_op:[31,37],mode:[4,7,21,22,28,29,30,31,32],model:[1,2,3,5,6,7,9,10,11,13,21,22,23,25,26,31,32,33,35,37,38],model_aug:7,model_backward:24,model_batch_second:24,model_batch_tim:24,model_batches_per_second:24,model_const:28,model_dir:28,model_fn:31,model_fn_nam:19,model_fn_param:28,model_forward:24,model_fuse_fn_nam:22,model_input:8,model_item_second:24,model_items_per_second:24,model_nam:32,model_output:[8,24],model_prunability_magnitud:22,model_quantize_qat_export:38,model_sec:24,model_to_devic:24,modelanalyz:6,modelexport:[4,37],modelproto:[6,7,8,23],modelregistri:[16,28],modelrunn:8,moder:[16,28,33],modestli:22,modif:[22,37,38],modifi:[0,1,2,4,8,10,23,24,25,28,32,35,37],modifier_a:[1,10],modifier_epoch:[1,2,10,25],modifier_idx:22,modifier_lr:[1,2,10,25],modifier_manag:28,modifier_param:[1,2,10,25],modifier_prun:[1,2,10,25],modifier_quant:[1,10],modifier_regular:[1,10],modifierprop:[3,9,22,31],modifiers_to_string_lin:[9,31],modifiersessionrunhook:[28,31],modifieryaml:[3,9,22,31],modify_estim:[31,37],modoel:29,modul:[0,35],moduleanalyz:22,moduleasanalyz:22,moduleasoneshootboost:22,modulebenchmark:24,moduledevicecontext:24,moduleexport:[24,37],moduleparampruningmask:22,modulepruninganalyz:22,modulerunfunc:[22,24],modulerunhook:24,modulerunn:24,modulerunresult:[22,24],moduletest:[22,24],moduletrain:[22,24],momentum:[8,22],monitor:[6,22],monitored_sess:28,more:[6,8,12,13,21,22,27,33,34,37,38],most:[22,32,37,38],move:[6,17,22,31,32],much:[6,9,22,24,31],multi:[3,9,17,21,22,29,31,33],multi_step_lr_schedul:31,multibox:24,multipl:[3,7,9,22,24,31,33,38],multipli:[17,22,29,31,38],multisteplr:[3,9,22,31,37,38],must:[3,4,8,9,11,19,21,22,23,24,28,31,33,34,37,38],n_box:24,name:[3,4,6,7,8,9,11,16,18,21,22,24,26,27,28,29,30,31,32,33,34,37,38],name_or_regex_pattern:[24,32],name_prefix:[24,33],name_scop:[26,27],named_modul:[22,23],namedlayerparam:24,namedtupl:21,namespac:1,nativ:[37,38],nbit:7,ndarrai:[7,8,24,32,33],necessari:[3,7,8,24,31,37],need:[3,17,18,21,22,31,37,38],neg:[21,24],nest:33,net:[28,30,32],net_output:28,nets_factori:32,nets_util:[1,25],network:[6,8,9,17,18,21,22,29,30,32,35,37],network_fn:32,network_input_shap:8,neural:[2,5,6,8,9,10,21,22,25,30,35,37],neuralmag:38,neuralmagicml:38,never:[3,9,22,24],new_mask:24,new_quantized_nam:7,newli:24,next:[4,26],nightli:35,nlp:[16,28],nm_conditional_upd:31,nm_dataset:[12,13,27],nm_k:31,nm_ks_op:31,nm_mask:31,nm_mask_assign:31,nm_mask_upd:31,nm_mask_update_no_op:31,nm_masked_var:31,nm_prune_vars_assign:31,nm_result:8,nm_root:1,nm_save:31,nm_sparsiti:31,nm_threshold:31,nm_update_readi:31,nm_weight_upd:31,nms:24,no_fus:22,no_serialize_v:[3,9,22,31],node:[6,7,8,9,23],node_id:8,node_shap:6,nodeanalyz:6,nodearg:8,nodeparam:8,nodeproto:[7,8,23],nodes_to_exclud:7,nodes_to_quant:7,nodeshap:[6,8],nois:[6,9,11],noisydataset:11,non:[3,8,22,24,31,32],none:[3,4,6,7,8,9,12,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,37],nonzero:[3,31],norm:[8,17,22,24,29,30,32],normal:[6,8,9,11,13,22,27,29,32,37],normalizer_fn:32,note:[3,8,11,21,22,24,28,31,33,37,38],notebook:36,noth:[8,24],now:[7,33],npy:[24,33],npz:[7,24,33],nsdf3:[12,27],nthread:8,num:21,num_accumulated_batch:24,num_anchor:24,num_anchor_grid:24,num_block:[17,29],num_bucket:33,num_channel:21,num_class:[12,17,18,24,27,29,32],num_cor:[6,8,9],num_
default_box:24,num_featur:32,num_imag:27,num_iter:8,num_parallel_cal:26,num_recall_level:24,num_sampl:8,num_train_batch:37,num_upd:31,num_val:24,num_warmup_iter:8,num_work:[11,33],number:[3,4,6,7,8,9,11,17,18,21,22,23,24,26,27,29,30,31,32,33,37,38],numer:[3,31],numpi:[4,7,8,24,32,33],numpyarraybatch:33,obj:[3,22,31],object:[3,4,6,7,8,9,11,13,16,17,18,22,23,24,26,28,29,30,31,32,33,34,37],observ:[22,23],obtain:8,occur:33,off:[3,9,12,22,31],offer:[3,31],offici:[35,37],offset:[13,24],old:24,old_mask:24,omit:[17,32],on_epoch_begin:4,on_epoch_end:4,on_predict_batch_begin:4,on_predict_batch_end:4,on_predict_begin:4,on_predict_end:4,on_test_batch_begin:4,on_test_batch_end:4,on_test_begin:4,on_test_end:4,on_train_batch_begin:4,on_train_batch_end:4,on_train_begin:4,on_train_end:4,onc:[3,8,9,22,31,37,38],one:[3,6,7,8,17,18,21,22,24,28,30,31,32,33,38],one_shot_ks_loss_sensit:31,ones:[3,21,22,31],onli:[3,7,8,9,11,13,21,22,24,28,31,33,37,38],only_serializ:9,onnx:[0,1,4,22,23,24,32,35,38],onnx_fil:[7,8],onnx_nodes_spars:8,onnx_onnx_rel_1_6_ml_pb2:[6,7,8,23],onnx_path:32,onnx_runtime_graph_optim:8,onnxquant:7,onnxruntim:[6,8],onto:[22,24,31],oop:21,op_cond_upd:31,op_input:[31,32],op_mask_assign:31,op_mask_upd:31,op_mask_update_no_op:31,op_masked_var:31,op_nam:31,op_prune_vars_assign:31,op_sav:31,op_spars:31,op_ten:31,op_typ:[6,7,8,31],op_update_readi:31,op_var:31,op_weight_upd:31,openvino:8,openvinomodelrunn:8,oper:[3,6,7,8,9,21,23,24,28,30,31,32],ops:[3,4,7,8,22,23,26,27,28,29,30,31,32,37],ops_input:31,ops_schedul:31,ops_spars:31,ops_summari:31,ops_upd:31,opset:[4,7,24,32],optim:[0,1,2,5,8,10,16,17,18,24,25,28,33,35],optim_categori:33,optim_closur:24,optim_full_nam:33,optim_nam:33,optim_target:33,optimization_level:[6,8],optimizer_post_step:22,optimizer_pre_step:22,optimizer_v2:3,optimizers_post_step:22,optimizerv2:3,option:[3,4,6,7,8,9,12,13,16,17,18,21,22,24,26,27,28,29,30,31,32,33,38],order:[6,7,9,22,33,36],ordereddict:33,org:[17,22],org_model:7,orig:[9,26],origin:[3,7,8,9,11,12,13,17,22,24,27,31,32,34],ort:8,ortmodelrunn:8,other:[1,3,6,8,9,18,22,24,31,32,33,38],otherwis:[3,6,8,9,12,13,16,17,21,22,23,24,26,27,29,30,31,32,33,34],ouput:8,out:[3,6,9,17,18,22,24,29,31,32,37],out_chan:30,out_channel:[17,18,29],out_dict:32,out_tensor:32,output:[3,4,6,7,8,9,12,13,17,18,21,22,23,24,26,27,28,29,30,32,33,37],output_block:17,output_dir:[4,24,32,37],output_edg:7,output_func:22,output_id:8,output_model_path:7,output_nam:[6,37],output_shap:[6,8,9],outputs_sampl:22,outputs_sample_max:22,outputs_sample_mean:22,outputs_sample_min:22,outputs_sample_s:22,outputs_sample_std:22,outputs_spars:22,outputs_sparsity_max:22,outputs_sparsity_mean:22,outputs_sparsity_min:22,outputs_sparsity_std:22,outsid:[22,31,33],over:[3,8,21,22,24,31,38],overal:[6,8,9,22,24],overrid:[8,17,18,22,24,28,31,32,37],overridden:[17,18,21,22],override_model_batch_s:8,overwrit:[8,21],overwrite_input_nam:8,overwritten:[22,31],own:[4,21,24,33,38],pack:7,packag:[0,35,37],pad:[6,26,30],pair:[24,32],paper:[17,18,21,22,24,29],parallel:[22,24,26,33],parallelize_model:24,parallelwork:33,param:[3,6,8,9,17,18,22,23,24,28,31,32,33,35,37],param_data:22,param_grad:22,param_group:22,param_init:22,param_mask:22,param_nam:[7,8,22,24],param_spars:22,param_sparsity_dim:22,param_unmask:22,paramet:[1,3,4,6,7,8,9,11,12,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,34,38],params_count:8,params_dim:9,params_strict:[3,22,24,31],params_zero_count:8,parent:[9,33],pars:[23,26,37,38],parse_optimization_str:33,part:[7,24],particular:[8,24],pass:[3,4,6,8,9,17,18,21,22,24,28,31,32,
33,37],path:[3,4,6,7,8,9,16,17,18,22,24,27,28,31,32,33,34,37],path_file_count:33,path_file_s:33,pattern:[3,8,22,24,31,32,33,38],pb_path:32,pb_to_onnx:32,penalti:[22,38],pend:22,per:[3,7,17,18,21,22,24,31,37,38],per_channel:7,percent:33,percentag:[22,33,38],perf:[6,9,16,17,18,28],perform:[1,2,5,6,7,8,9,10,13,17,18,21,22,23,24,25,35,38],period:[3,22,31,38],permiss:32,persist:21,physic:[6,8,9],pick:32,piecewis:21,pil:[13,24],pip:36,pipelin:[13,35,38],pixel:24,place:[3,8,21,22,23,24],placehold:37,plot:[6,9],plot_integr:[6,9],plot_loss_kei:9,plugin:[3,9,22,31],png:12,point:[3,7,8,9,13,17,18,22,23,24,31,38],pool2d:30,pool:[30,33],pool_siz:30,portion:38,posit:[8,24,28,33],possibl:[8,33,37],post:[7,24],post_resize_transform:27,postprocess_yolo:24,postprocessing_fn:24,potenti:24,power:38,pre:[7,13,22,28,37,38],pre_resize_transform:27,precis:[22,24,38],preconfigur:[17,18,28],pred:24,predict:[4,8,24,28,31],predicted_box:24,predicted_l:24,predicted_label:24,predictor:24,prefetch:26,prefetch_buffer_s:26,prefix:[3,22,24,31,32,33,38],prelu:21,prepare_qat:22,prepopul:[6,9],preprocess_for_ev:27,preprocess_for_train:27,preprocessing_typ:13,present:[8,33],preserv:[24,38],pretrain:[16,17,18,28,33],pretrained_backbon:18,pretrained_dataset:[16,17,18,28],pretrained_path:[16,17,18,28],pretrained_path_backbon:18,previou:[6,8,9],previous:[6,22,24,31],primit:33,print:[4,6,9,21,24],print_r:[6,9],prior:22,probabl:13,process:[3,6,7,8,9,13,22,24,26,27,31,33,37,38],process_batch:7,processor:[26,27],profil:22,programmat:22,progress:[6,7,22,24,31],proj_channel:[17,29],project:[17,29,33],promot:22,prop:[3,9,22,31],propag:24,proper:[8,22,24,30,31,32],properli:[9,13,33],properti:[3,4,6,7,8,9,12,13,18,21,22,23,24,27,31,32,33,34],proport:24,proto:8,protobuf:37,provid:[3,7,8,13,16,17,18,22,24,28,32,37,38],prunabl:[3,6,8,9,22,24,31,32],prunable_equation_sensit:6,prunable_lay:3,prunable_param:[6,9],prunable_params_dim:9,prunable_params_zero:6,prune:[3,6,8,9,16,22,28,31,33,35,37],prune_model_one_shot:8,prune_model_one_shot_it:8,prune_op_var:31,prune_unstructur:8,pruned_lay:3,pruning_loss_sens_approx:6,pruning_loss_sens_magnitud:[6,22,31],pruning_loss_sens_magnitude_it:6,pruning_loss_sens_one_shot:[6,22,31],pruning_loss_sens_one_shot_it:6,pruning_loss_sens_op_var:31,pruning_op_var:31,pruning_perf_sens_one_shot:6,pruning_perf_sens_one_shot_it:6,pruning_schedul:3,pruning_var:3,pruninglosssensitivityanalysi:[6,9,22,31],pruningmaskcr:[3,22,31],pruningopvar:31,pruningperfsensitivityanalysi:[6,9],pruningschedul:3,pruningscop:31,pruningsensitivityresult:[6,9],pth:[22,24],pull:[31,33],push:24,put:[6,9,17,22,24,26,29,31],pypi:35,python:[3,4,8,24,26,27,28,29,30,31,32,33,36],pythonlogg:[4,24],pytorch:[0,1,28,29,35,38],pytorchlogg:[22,24],pytorchmodifieryaml:22,qat:[22,23,38],qconfig:23,qlinear:7,qlinearconv:7,qlinearmatmul:7,qlinearop:7,qtype:7,quantiz:[5,6,8,10,22,35],quantization_mod:7,quantization_param:7,quantizationmod:7,quantizationmodifi:[22,38],quantizationparam:23,quantize_data:7,quantize_model:7,quantize_model_post_train:[5,6],quantize_qat_export:[10,22,38],quantize_rang:7,quantize_resnet_identity_add_input:8,quantize_torch_qat_export:23,quantized_data:7,quantized_model:8,quantized_value_typ:7,quantizediniti:7,quantizedvalu:7,quantizedvaluetyp:7,quantizelinear:23,quantizerd:38,quantwrapp:23,queue:33,quick:35,quickli:38,rais:[3,7,8,9,17,18,22,24,31,32,33],raise_on_error:33,rand_crop:27,rand_tran:[12,13,27],randn:37,randndataset:11,random:[8,11,24,26],random_flip_left_right:27,random_flip_up_down:27,random_horizontal_flip_image_and_anno
t:13,random_scaling_crop:[26,27],randomcrop:[12,13,27],randomhorizontalflip:[12,13,27],randomli:[13,22,26],rang:[3,6,9,22,24,31,33,38],rank:[3,22,24,31],rate:[3,9,22,24,30,31,35,37],ratio:[17,24,26,29],ratio_rang:26,reach:[3,22,24,31],read:[23,32,37],readabl:[6,9],readi:[3,9,22,31],real:7,reappli:22,reason:[6,9,33],recal:24,recal_upd:31,recalibr:[3,6,9,22,31],receiv:22,recent:22,recip:[17,18,21,33,35,37],recogn:32,recommend:[10,11,16,36],record:[6,9,22,24,31],recov:38,recreat:[3,9,22,31],reduc:[3,7,22,31,32],reduce_rang:23,reducemax:7,reducemin:7,ref:[26,27],refer:[16,24,28],referenc:22,reg:22,reg_func:22,reg_ten:22,regex:[3,22,24,31,32,38],region:21,regist:[11,16,17,18,19,21,26,28],register_batch_backward_hook:24,register_batch_end_hook:24,register_batch_forward_hook:24,register_batch_loss_hook:24,register_batch_start_hook:24,register_wrapped_model_constructor:16,registri:[1,10,13,19,25],regular:[22,32],regularize_depthwis:32,relat:[3,6,9,11,12,13,14,15,16,17,18,20,21,22,24,26,27,28,29,31,33],relev:8,reli:4,relu6:[21,30],relu:[7,8,21,22,23,29,30],relu_1:7,relu_2:7,remain:[32,38],remov:[3,8,22,24,28,31,32,37],removablehandl:24,remove_dynamic_tl_var:28,remove_node_and_params_from_graph:8,remove_pruning_mask:3,reorder:31,repeat:[24,26,37],repeat_count:26,replac:[8,21],replace_activ:21,repo:[16,19,28],repo_sourc:[16,28],report:[6,22,24],repositori:[35,36],repr:9,repres:[3,6,7,9,13,18,22,24,26,31,32,33],represent:[3,6,8,9,21,22,24,31,33,37],request:[22,24,35],requir:[3,8,22,28,31,36,37,38],reset:[6,22,24,28,31],reshap:[8,27],residu:17,resiz:[12,26,27,34],resnet101:[17,29],resnet101_2xwidth:17,resnet152:[17,29],resnet18:[17,29],resnet20:29,resnet34:[17,29],resnet50:[17,29],resnet50_2xwidth:17,resnet:[7,8,10,16,18,25,28],resnet_const:29,resnet_model:7,resnetsect:29,resnetsectionset:17,resnetv2_101:17,resnetv2_152:17,resnetv2_18:17,resnetv2_34:17,resnetv2_50:17,resnext101:17,resnext152:17,resnext50:17,resnext:17,respect:[8,24],respons:24,rest:[33,37,38],restor:28,restrict:[3,9,22,31],restrict_en:[3,9,22,31],restrict_extra:[3,9,22,31],restrict_initi:[3,9,22,31],result:[3,6,8,9,22,24,28,31,37],result_list_tensor:24,result_mean:24,result_std:24,result_typ:22,results_max:22,results_mean:22,results_min:22,results_model:[6,9],results_std:22,retrain:[6,8,22,31],retriev:[4,8,16,28,31,38],reus:31,revert:22,rewrit:8,right:[3,24],rmax:7,rmin:7,root:[1,12,13,27,34],round:24,routin:7,rule:38,run:[3,4,6,7,8,9,11,17,18,21,22,24,26,28,29,30,31,32,33,34,37,38],run_batches_on_devic:24,run_config:28,run_context:31,run_extra_opt:7,run_func:24,run_it:8,run_lay:22,run_valu:31,runconfig:28,runner:8,runtim:8,runtimeerror:22,s160:[12,27,34],s320:[12,27,34],same:[8,22,23,24,30,32],sampl:[3,4,8,17,22,24,29,31,32,37,38],sample_batch:[4,24,37],sample_inputs_path:32,sample_label:[4,24],sample_outputs_path:32,sample_s:24,save:[4,6,7,9,24,28,31,32,33,34,37],save_desc:9,save_json:[6,9],save_model:[24,37],save_numpi:33,saver:[28,32],scaffold:[28,31],scale:[7,8,12,22,23,24,26],scale_nam:7,scale_rang:26,scale_wh:24,scale_xi:24,scaler:24,schedul:[3,9,22,31,38],schedule_lr:[1,25],schedule_op:31,scheduled_log_upd:22,scheduled_upd:22,scheduledmodif:22,scheduledmodifi:[3,9,22,31],scheduledmodifiermanag:[3,22,24,28,31,37],scheduledoptim:[22,24,37],scheduledupdatemodifi:[3,22,31],scope:[26,27,29,30,31,32],score:24,score_threhsold:24,script:[1,35,36,38],se_mod:17,se_ratio:17,seamlessli:37,search:8,sec_set:[17,29],second:[6,8,9,24,33,38],section:[17,29,37,38],see:[4,12,27,32],seed:24,segment:13,select:[3,22,31],self:[3,22,27,31],sensit:[0,1,
6,22,24,31],sensitivity_a:[1,10],sensitivity_lr:[1,10],sensitivity_prun:[1,5,10,25],separ:[17,21,22,29],sequenc:31,sequenti:[17,18,23],serial:[3,9,22,24,31,32],serializ:[3,9,22,31],sess:[28,31,32,37],session:[23,28,31,32,35],session_run_hook:31,sessionrunhook:[28,31],sessionrunvalu:31,set:[1,3,4,6,7,8,9,17,21,22,23,24,26,29,30,31,32,37,38],set_deterministic_se:24,set_logging_level:1,set_optim_learning_r:24,set_param_data:22,set_param_mask:22,set_param_mask_from_abs_threshold:22,set_param_mask_from_spars:22,set_param_mask_from_weight:22,set_relu_to_fat:21,set_threshold:21,set_to_non:22,setlearningr:[3,9,22,31],setlearningratemodifi:[3,22,31],setparammodifi:22,setter:[3,9,22,31],setup:[1,8,22,24,37,38],setweightdecaymodifi:22,shall:4,shape:[3,6,8,9,11,16,17,18,22,24,28,29,31,32,33],shape_overrid:32,share:[3,8,9,24],shift:[8,24],shot:[6,8,18,22,31],should:[3,4,6,7,8,9,11,12,16,17,18,21,22,24,26,27,28,29,31,32,33,38],should_prun:3,show:8,show_progress:[6,7,8,22,31],shuffl:26,shuffle_buffer_s:26,shutdown:33,side:26,sigmoid:[21,29,30],sign:7,signal:31,signatur:32,silent:[17,18,21],similarli:24,simpl:[3,17,22,24,29,31,32,35],simpler:37,simplif:35,simplifi:29,simplified_arch:29,sinc:[17,18,21],singl:[4,8,17,18,21,22,24,29,33],singleton:[0,1],size:[6,8,9,12,13,17,18,22,24,26,27,29,30,32,33,34],size_i:24,size_x:24,skip:22,slash:31,sleep:6,slice:33,slightli:24,slim:32,slope:21,small:[22,32],smaller:[35,38],smallest:22,smoother:17,softmax:[24,28,29,30],solut:[3,22,31],some:[3,4,8,22,24,31,37],someth:24,somewher:38,sort:[22,33],sort_highest:33,sort_kei:33,sourc:[1,3,4,6,7,8,9,11,12,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,34],space:24,sparisti:31,spars:[3,6,8,9,22,31,35,38],sparse_averag:[6,9],sparse_comparison:[6,9],sparse_integr:[6,9],sparse_measur:[6,9],sparse_tensor:[1,5],sparse_tensor_to_dens:8,sparseml:[36,37,38],sparsepruningopvar:31,sparsetensorproto:8,sparsezoo:[16,28,33,35,37,38],sparsif:22,sparsifi:[22,35,37,38],sparsiti:[3,4,6,8,9,21,22,24,31,32,38],sparsity_level:[6,22,31],sparsity_mask:22,sparsity_op:31,sparsity_threshold:8,sparsitymaskcr:[3,22,31],sparsitymeasur:8,sparsti:9,sparstii:9,spec:[8,28],special:[7,24],specif:[3,6,9,16,17,18,21,22,24,28,31,34,38],specifi:[3,7,8,11,16,22,24,26,28,29,31,38],specific_result_typ:22,split:[8,24,26,34],split_canonical_nam:8,split_dataset:26,split_root:34,splitstransform:27,spp:18,squar:[24,26],squeez:[17,21],squeezed_channel:21,squeezeexcit:21,src:24,ssd300:[18,24],ssd300_resnet101:18,ssd300_resnet152:18,ssd300_resnet18:18,ssd300_resnet34:18,ssd300_resnet50:18,ssd300lite:18,ssd300lite_mobilenetv2:18,ssd300mobilenetbackbon:18,ssd300resnetbackbon:18,ssd:[10,13,16,24],ssd_collate_fn:13,ssd_helper:[1,10,13],ssd_lite:[10,16],ssd_mobilenet:[10,16],ssd_random_crop:[13,24],ssd_random_crop_image_and_annot:13,ssd_resnet:[10,16],ssdbackbon:18,ssdlite:18,ssdlosswrapp:24,ssummarysaverhook:28,stabl:35,stack:[13,24,33],stage:24,standard:[1,3,9,11,12,13,17,18,21,22,24,27,29,31,32,33,38],start:[3,4,9,22,24,31,33,38],start_end_step:[3,31],start_epoch:[3,9,22,31,37,38],start_pend:22,start_step:[4,31],startup:38,stat:22,state:[3,16,17,18,22,24,31,33,35],state_dict:[21,22],std:[6,9,12,27],stddev:32,stdev:11,step:[3,4,6,8,9,22,24,31,32,37,38],step_count:24,step_lr_schedul:31,step_siz:31,steplr:[3,9,22,31,38],steps_per_epoch:[3,9,22,31,37],steps_per_measur:[6,22,31],still:37,stochast:38,stop:[3,9,11,22,24,31,33,38],storag:31,store:[6,7,8,9,11,22,24,31,33,37,38],store_init:22,store_unmask:22,str:[3,4,6,7,8,9,11,12,13,16,17,18,21,22,24,26,27,28,29,30,31,32,33,34],strict
:[21,24],strictli:[21,22],stride:[6,9,17,29,30],string:[3,4,8,9,16,17,18,21,22,24,28,30,31,32,33,38],strip:8,strip_first_dim:8,structur:[3,6,22,27,31,38],strucur:[3,31],student:24,style:[32,37,38],sub:[16,18,26,28,32],sub_arch:18,sub_architectur:[16,28],sub_domain:[16,28],subarrai:33,subclass:[3,8,17,18,21,22,31],submodul:[0,2,5,10,25,35],subpackag:[0,35],subsect:38,subsequ:[22,24,31],subset:9,suggest:24,sum:24,sum_squar:24,sum_val:24,summari:[1,24,25,28,31,37],summary_op:31,summarysaverhook:28,summarywrit:[4,24],suppli:[4,6,8,9,16,17,18,22,24,28,29,31,32],support:[3,7,8,9,13,17,22,24,29,30,31,33,35,37,38],suppress:24,sure:[3,9,16,22,31,37],surround:23,swap_node_output:8,swish:21,symmetr:[7,23,38],symmetric_activ:7,symmetric_pad2d:30,symmetric_weight:7,syntax:[3,9,22,31],system:[2,5,8,10,22,24,25,31,32,33,36,37,38],tag:[4,22,24,32],take:[3,4,6,8,13,17,18,21,22,24,26,31,33,37],taken:[3,4,9,22,24,26,31],tar:[8,33],target:[3,9,18,22,23,24,31,33,38],target_spars:3,task:[24,33],teacher:24,temp_stud:24,temp_teach:24,temperatur:24,ten:[18,21,22,24,31,32],tensor:[3,4,8,9,13,16,17,18,21,22,24,26,27,28,29,30,31,32,33],tensor_dens:24,tensor_export:[24,33],tensor_nam:32,tensor_sampl:24,tensor_spars:24,tensorboard:[4,22,24,31,32,37],tensorboardlogg:[4,24],tensorflow:[3,4,24,26,27,28,29,30,31,32,35,38],tensorflow_estim:[28,31],tensorflow_path:32,tensorflow_v1:[0,1,37],tensorflowmodifieryaml:31,tensorproto:[7,8],tensors_batch_s:24,tensors_export:[24,33],tensors_module_forward:24,tensors_to_devic:24,tensors_to_precis:24,termin:[9,24],terminolog:24,test:[1,4,6,9,22,24,36],test_siz:24,tester_logg:22,tester_run_func:22,tf2onnx:37,tf_compat:37,tf_compat_div:32,than:[3,9,22,24,31,38],thei:[3,8,9,22,24,31,38],them:[3,8,17,18,21,22,24,31,33],themselv:[3,31,38],therefor:8,thi:[3,4,6,7,8,9,11,12,13,17,18,21,22,23,24,27,31,32,33,36,37,38],thing:[3,6,9,22,31],those:[8,13,24,31,38],thread:[6,8,33],three:[13,24],threshold:[7,8,21,22,24,31],through:[3,4,6,7,8,9,11,17,22,24,31,32,37,38],throughout:33,til:31,time:[3,4,6,8,9,11,22,24,26,31],titl:[6,9],tl_ignore_ten:28,to_devic:24,to_string_lin:9,togeth:[3,17,22,29,31,33],token:[3,22,31,33],too:[6,9],took:24,tool:[1,7,23,37],toolkit:35,top1:24,top1acc:22,top5:24,top5acc:22,top:[11,22,24,33,37],topk:24,topkaccuraci:24,topmost:32,torch:[11,13,16,17,18,21,22,23,24,37],torch_distributed_zero_first:24,torchvis:[10,12,13,16],total:[8,9,11,24,33],total_flop:9,tour:35,toward:[6,38],tqdm:[6,7,8],track:[9,22,24,31],track_grad_mom:22,track_input:22,track_inputs_spars:22,track_output:22,track_outputs_spars:22,tracked_input:22,tracked_output:22,trail:31,trailing_slash:31,train:[1,3,4,7,9,12,13,17,18,21,22,23,24,26,27,28,29,30,31,32,34,35,37],train_data:37,train_on_batch:37,trainabl:[3,22,31,32,38],trainable_vari:32,trainableparamsmodifi:[3,22,31],trainer_logg:22,trainer_run_func:22,transfer:[3,22,24,28,31,38],transform:[3,7,12,13,22,27,31],trasnform:7,travers:8,traverse_previ:8,treat:[24,32,33],treatment:32,tri:22,truncat:8,trunctat:32,truth:[24,28],truthi:[3,9,22,31],tune:22,tupl:[3,6,7,8,9,11,13,16,17,18,22,23,24,26,27,28,30,31,32,33],twice:[29,38],two:[8,13,22,24,31],type:[3,4,6,7,8,9,13,18,21,22,24,27,30,31,33,34],type_:[9,30],typic:[8,24],uint8:7,unchang:38,under:[9,24,26,27,28,29,30,31,32,33,37],unexpect:21,unexpected_kei:21,union:[3,4,6,7,8,9,11,12,16,17,18,21,22,24,26,27,28,29,30,31,32,33],uniqu:[8,33],unit:[6,9],unless:22,unmask:[22,24],unpreced:35,unset:[26,30],unsign:7,unstructur:[3,8,22,31,37,38],unstructuredpruningmaskcr:[3,22,31],until:[3,22,31,33,38],unus:[3,9,22,30,31],u
pdat:[3,4,6,7,8,9,22,24,28,31,32,37,38],update_freq:4,update_frequ:[3,9,22,31,37,38],update_frequency_step:[3,31],update_model_param:8,update_op:[31,32],update_readi:[3,22,31],update_step_freq:31,upper:24,url:33,use:[3,4,6,7,8,9,12,16,17,18,21,22,24,26,27,28,29,30,31,32,33,35,37,38],use_batchnorm:29,use_deepsparse_infer:6,use_mixed_precis:24,use_s:17,use_zipfile_serialization_if_avail:24,used:[1,3,4,6,7,8,9,11,13,16,18,22,24,26,28,31,32,33,37,38],useful:[22,38],user:[17,22,29,33,38],uses:[17,18,22,24,29,30,31],using:[3,4,7,8,13,17,21,22,24,26,27,28,29,31,32,36,37,38],util:[0,1,2,5,6,7,10,11,12,13,22,25,26,27,37],utk:24,val:[8,22,27,32,33],valid:[3,4,6,8,9,12,13,22,27,31,33,34],validate_learning_r:9,validate_lr_info:9,validate_onnx_fil:8,validate_schedul:9,validate_str_iter:33,validate_upd:9,valu:[3,4,6,7,8,9,12,13,21,22,23,24,27,31,32,33,34,38],valueerror:[3,8,9,31,32,33],valueinfoproto:7,var_index:32,var_index_from_train:32,var_mask:31,var_nam:[31,32],var_ten:32,var_threshold:31,variabl:[1,3,22,25,28,29,30,31,35],variablev1:[31,32],varianc:32,variou:18,verif:7,version:[6,7,9,16,17,18,22,24,28,29,32,33,37,38],vgg11:[17,29],vgg11bn:[17,29],vgg13:[17,29],vgg13bn:[17,29],vgg16:[17,29],vgg16bn:[17,29],vgg19:[17,29],vgg19bn:[17,29],vgg:[10,16,25,28],vgg_const:29,vggsection:29,vggsectionset:17,via:35,video:[10,11],view:[4,24],virtual:36,vision:[12,13,15,17,18,27,29,34],visual:[4,22,24],voc:[10,11,24],vocdetect:13,vocdetectiondataset:13,vocsegment:13,vocsegmentationdataset:13,wai:[8,23,24,38],wait:24,wait_between_it:6,wall:[4,24],wall_tim:[4,24],want:7,warmup:8,warmup_iterations_per_check:6,warmup_s:24,warn:33,wasn:33,websit:35,weight:[3,6,7,8,16,17,18,22,23,24,28,31,32,37,38],weight_decai:[22,32,38],weight_nam:6,weight_qtyp:7,weight_shap:[6,8],well:[4,8,24,26,32],were:[8,24],what:[3,8,9,22,31,33],when:[3,4,6,7,8,9,11,13,22,24,28,31,33,38],where:[3,6,7,8,9,17,22,24,31,33,34],whether:[4,21,22,29,32],which:[7,21,22,26,31,32,34,37,38],whole:11,whose:[8,18,24],width:[17,24,26,29,32],width_mult:[17,29],wildcard:32,window:30,wise:17,within:[3,7,8,9,17,18,21,22,24,31,32,33,35],without:[3,7,22,24,31,33],won:22,word:[3,9,22,31],work:[2,5,9,10,13,21,24,25,26,28,31,32,33,38],worker:[0,1],worker_func:33,world:24,world_siz:24,wors:22,would:[8,36],wrap:[3,9,13,16,22,23,24,31,33,37],wrapped_constructor:16,wrapper:[0,1,3,7,12,13,16,21,22,24,27],wrapper_decor:33,write:[32,37],write_simple_summari:32,writer:[4,24,32],written:[37,38],x_cur:33,x_ten:[17,21,29,30],x_val:33,xavier:32,xml:8,xxx:[12,27],xxy:[12,27],xxz:[12,27],xywh:24,yaml:[3,9,22,31,33,37,38],yaml_kei:9,yaml_str:[3,9,22,31],year:13,yeild:6,yet:22,yield:[6,24],yolo:[13,17,18,24],yolo_collate_fn:13,yolo_grid:24,yolo_help:[1,10],yolo_v3:[10,16],yolo_v3_anchor_group:24,yologrid:24,yololosswrapp:24,yolov3:18,you:[21,36,37,38],your:[21,36,37,38],zero:[3,6,7,8,9,21,22,23,24,29,30,31,32,33,38],zero_grad:22,zero_point:[7,8,23],zero_point_nam:7,zeroed_param:9,zeroth:24,zipfil:24,zoo:[16,28]},titles:["sparseml","sparseml package","sparseml.keras package","sparseml.keras.optim package","sparseml.keras.utils package","sparseml.onnx package","sparseml.onnx.optim package","sparseml.onnx.optim.quantization package","sparseml.onnx.utils package","sparseml.optim package","sparseml.pytorch package","sparseml.pytorch.datasets package","sparseml.pytorch.datasets.classification package","sparseml.pytorch.datasets.detection package","sparseml.pytorch.datasets.recommendation package","sparseml.pytorch.datasets.video package","sparseml.pytorch.models 
package","sparseml.pytorch.models.classification package","sparseml.pytorch.models.detection package","sparseml.pytorch.models.external package","sparseml.pytorch.models.recommendation package","sparseml.pytorch.nn package","sparseml.pytorch.optim package","sparseml.pytorch.optim.quantization package","sparseml.pytorch.utils package","sparseml.tensorflow_v1 package","sparseml.tensorflow_v1.datasets package","sparseml.tensorflow_v1.datasets.classification package","sparseml.tensorflow_v1.models package","sparseml.tensorflow_v1.models.classification package","sparseml.tensorflow_v1.nn package","sparseml.tensorflow_v1.optim package","sparseml.tensorflow_v1.utils package","sparseml.utils package","sparseml.utils.datasets package","SparseML 0.1","Installation","Quick Tour","Optimization Recipes"],titleterms:{"export":[4,24,32,37],activ:21,analyz:9,analyzer_a:22,analyzer_model:6,analyzer_modul:[22,31],analyzer_prun:22,base:37,benchmark:24,calibr:7,callback:4,cifar:[12,27],classif:[12,17,27,29],coco:13,constantpruningmodifi:38,content:[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34],darknet:17,data:8,dataset:[11,12,13,14,15,26,27,34],detect:[13,18],efficientnet:17,epoch:38,estim:[28,37],extern:19,fatrelu:21,framework:33,gener:11,gmpruningmodifi:38,graph_editor:8,graph_optim:8,helper:[8,13,23,24,26,32,33,34],histori:35,imagefold:[12,27],imagenet:[12,27,34],imagenett:[12,27,34],inception_v3:17,instal:36,intro:38,kera:[2,3,4,37],layer:30,learn:[35,38],learning_r:9,learningratemodifi:38,log:1,logger:[4,24],loss:[8,24,32],manag:[3,9,22,31],mask_creator_prun:[22,31],mask_prun:[3,22,31],mask_pruning_cr:3,mnist:[12,17,29],mobilenet:[17,29],mobilenet_v2:[17,29],model:[4,8,16,17,18,19,20,24,28,29],modifi:[3,9,22,31,38],modifier_a:22,modifier_epoch:[3,22,31],modifier_lr:[3,22,31],modifier_param:[3,22,31],modifier_prun:[3,22,31],modifier_quant:22,modifier_regular:22,modul:[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34],more:35,nets_util:32,onnx:[5,6,7,8,37],optim:[3,6,7,9,22,23,31,37,38],overview:35,packag:[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34],param:38,pipelin:37,product:35,prune:38,pytorch:[10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,37],quantiz:[7,23,38],quantize_model_post_train:7,quantize_qat_export:23,quick:37,rate:38,recip:38,recommend:[14,20],registri:[11,16,26,28],relat:35,releas:35,resnet:[17,29],resourc:35,schedule_lr:31,sensit:9,sensitivity_a:22,sensitivity_lr:22,sensitivity_prun:[6,22,31],session:37,setlearningratemodifi:38,setweightdecaymodifi:38,singleton:33,sparse_tensor:8,sparseml:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35],ssd:18,ssd_helper:24,ssd_lite:18,ssd_mobilenet:18,ssd_resnet:18,submodul:[1,3,4,6,7,8,9,11,12,13,16,17,18,19,21,22,23,24,26,27,28,29,30,31,32,33,34],subpackag:[1,2,5,6,10,11,16,22,25,26,28,33],summari:32,tensorflow:37,tensorflow_v1:[25,26,27,28,29,30,31,32],torchvis:19,tour:37,train:38,trainableparamsmodifi:38,util:[3,4,8,24,32,33,34],variabl:[32,38],vgg:[17,29],video:15,voc:13,worker:33,wrapper:33,yolo_help:24,yolo_v3:18}}) \ No newline at end of file 
+Search.setIndex({docnames:["api/modules","api/sparseml","api/sparseml.keras","api/sparseml.keras.optim","api/sparseml.keras.utils","api/sparseml.onnx","api/sparseml.onnx.optim","api/sparseml.onnx.optim.quantization","api/sparseml.onnx.utils","api/sparseml.optim","api/sparseml.pytorch","api/sparseml.pytorch.datasets","api/sparseml.pytorch.datasets.classification","api/sparseml.pytorch.datasets.detection","api/sparseml.pytorch.datasets.recommendation","api/sparseml.pytorch.datasets.video","api/sparseml.pytorch.models","api/sparseml.pytorch.models.classification","api/sparseml.pytorch.models.detection","api/sparseml.pytorch.models.external","api/sparseml.pytorch.models.recommendation","api/sparseml.pytorch.nn","api/sparseml.pytorch.optim","api/sparseml.pytorch.optim.quantization","api/sparseml.pytorch.utils","api/sparseml.tensorflow_v1","api/sparseml.tensorflow_v1.datasets","api/sparseml.tensorflow_v1.datasets.classification","api/sparseml.tensorflow_v1.models","api/sparseml.tensorflow_v1.models.classification","api/sparseml.tensorflow_v1.nn","api/sparseml.tensorflow_v1.optim","api/sparseml.tensorflow_v1.utils","api/sparseml.utils","api/sparseml.utils.datasets","index","installation","quicktour","recipes"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":3,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["api/modules.rst","api/sparseml.rst","api/sparseml.keras.rst","api/sparseml.keras.optim.rst","api/sparseml.keras.utils.rst","api/sparseml.onnx.rst","api/sparseml.onnx.optim.rst","api/sparseml.onnx.optim.quantization.rst","api/sparseml.onnx.utils.rst","api/sparseml.optim.rst","api/sparseml.pytorch.rst","api/sparseml.pytorch.datasets.rst","api/sparseml.pytorch.datasets.classification.rst","api/sparseml.pytorch.datasets.detection.rst","api/sparseml.pytorch.datasets.recommendation.rst","api/sparseml.pytorch.datasets.video.rst","api/sparseml.pytorch.models.rst","api/sparseml.pytorch.models.classification.rst","api/sparseml.pytorch.models.detection.rst","api/sparseml.pytorch.models.external.rst","api/sparseml.pytorch.models.recommendation.rst","api/sparseml.pytorch.nn.rst","api/sparseml.pytorch.optim.rst","api/sparseml.pytorch.optim.quantization.rst","api/sparseml.pytorch.utils.rst","api/sparseml.tensorflow_v1.rst","api/sparseml.tensorflow_v1.datasets.rst","api/sparseml.tensorflow_v1.datasets.classification.rst","api/sparseml.tensorflow_v1.models.rst","api/sparseml.tensorflow_v1.models.classification.rst","api/sparseml.tensorflow_v1.nn.rst","api/sparseml.tensorflow_v1.optim.rst","api/sparseml.tensorflow_v1.utils.rst","api/sparseml.utils.rst","api/sparseml.utils.datasets.rst","index.rst","installation.md","quicktour.md","recipes.md"],objects:{"":{sparseml:[1,0,0,"-"]},"sparseml.keras":{optim:[3,0,0,"-"],utils:[4,0,0,"-"]},"sparseml.keras.optim":{manager:[3,0,0,"-"],mask_pruning:[3,0,0,"-"],mask_pruning_creator:[3,0,0,"-"],modifier:[3,0,0,"-"],modifier_epoch:[3,0,0,"-"],modifier_lr:[3,0,0,"-"],modifier_params:[3,0,0,"-"],modifier_pruning:[3,0,0,"-"],utils:[3,0,0,"-"]},"sparseml.keras.optim.manager":{ScheduledModifierManager:[3,1,1,""]},"sparseml.keras.optim.manager.ScheduledModifierManager":{finalize:[3,2,1,""],from_yaml:[3,2,1,""],modify:[3,2,1,""]},"sparseml.keras.optim.mask_pruning":{MaskedLayer:[3,1,1,""],PruningScheduler:[3,1,1,""],remove_pruning_masks:[3,3,1,""]
},"sparseml.keras.optim.mask_pruning.MaskedLayer":{build:[3,2,1,""],call:[3,2,1,""],compute_output_shape:[3,2,1,""],from_config:[3,2,1,""],get_config:[3,2,1,""],global_step:[3,2,1,""],mask_updater:[3,2,1,""],masked_layer:[3,2,1,""],masks:[3,2,1,""],pruned_layer:[3,2,1,""],pruning_vars:[3,2,1,""]},"sparseml.keras.optim.mask_pruning.PruningScheduler":{deserialize:[3,2,1,""],get_config:[3,2,1,""],should_prune:[3,2,1,""],target_sparsity:[3,2,1,""]},"sparseml.keras.optim.mask_pruning_creator":{BlockPruningMaskCreator:[3,1,1,""],DimensionPruningMaskCreator:[3,1,1,""],GroupedPruningMaskCreator:[3,1,1,""],PruningMaskCreator:[3,1,1,""],UnstructuredPruningMaskCreator:[3,1,1,""],load_mask_creator:[3,3,1,""]},"sparseml.keras.optim.mask_pruning_creator.BlockPruningMaskCreator":{group_tensor:[3,2,1,""]},"sparseml.keras.optim.mask_pruning_creator.DimensionPruningMaskCreator":{group_tensor:[3,2,1,""]},"sparseml.keras.optim.mask_pruning_creator.GroupedPruningMaskCreator":{create_sparsity_mask:[3,2,1,""],get_grouping_op:[3,2,1,""],get_mask_initializer:[3,2,1,""],group_tensor:[3,2,1,""]},"sparseml.keras.optim.mask_pruning_creator.PruningMaskCreator":{create_sparsity_mask:[3,2,1,""],get_mask_initializer:[3,2,1,""]},"sparseml.keras.optim.mask_pruning_creator.UnstructuredPruningMaskCreator":{create_sparsity_mask:[3,2,1,""],get_mask_initializer:[3,2,1,""]},"sparseml.keras.optim.modifier":{KerasModifierYAML:[3,1,1,""],Modifier:[3,1,1,""],ModifierProp:[3,1,1,""],ScheduledModifier:[3,1,1,""],ScheduledUpdateModifier:[3,1,1,""]},"sparseml.keras.optim.modifier.Modifier":{finalize:[3,2,1,""],load_list:[3,2,1,""],load_obj:[3,2,1,""],modify:[3,2,1,""]},"sparseml.keras.optim.modifier.ModifierProp":{getter:[3,2,1,""],no_serialize_val:[3,2,1,""],restrictions:[3,2,1,""],serializable:[3,2,1,""],setter:[3,2,1,""]},"sparseml.keras.optim.modifier.ScheduledModifier":{end_epoch:[3,2,1,""],start_end_steps:[3,2,1,""],start_epoch:[3,2,1,""]},"sparseml.keras.optim.modifier.ScheduledUpdateModifier":{update_frequency_steps:[3,2,1,""]},"sparseml.keras.optim.modifier_epoch":{EpochRangeModifier:[3,1,1,""]},"sparseml.keras.optim.modifier_lr":{LearningRateModifier:[3,1,1,""],SetLearningRateModifier:[3,1,1,""]},"sparseml.keras.optim.modifier_lr.LearningRateModifier":{modify:[3,2,1,""]},"sparseml.keras.optim.modifier_lr.SetLearningRateModifier":{modify:[3,2,1,""]},"sparseml.keras.optim.modifier_params":{TrainableParamsModifier:[3,1,1,""]},"sparseml.keras.optim.modifier_params.TrainableParamsModifier":{layer_names:[3,2,1,""],modify:[3,2,1,""],params:[3,4,1,""],params_strict:[3,4,1,""],trainable:[3,4,1,""],validate:[3,2,1,""]},"sparseml.keras.optim.modifier_pruning":{ConstantPruningModifier:[3,1,1,""],GMPruningModifier:[3,1,1,""]},"sparseml.keras.optim.modifier_pruning.ConstantPruningModifier":{finalize:[3,2,1,""],is_pruning_step:[3,2,1,""],layer_names:[3,2,1,""],modify:[3,2,1,""],params:[3,4,1,""],sparsity:[3,2,1,""],update_ready:[3,2,1,""]},"sparseml.keras.optim.modifier_pruning.GMPruningModifier":{exponent:[3,4,1,""],final_sparsity:[3,4,1,""],finalize:[3,2,1,""],init_sparsity:[3,4,1,""],inter_func:[3,4,1,""],layer_names:[3,2,1,""],leave_enabled:[3,4,1,""],mask_type:[3,4,1,""],modify:[3,2,1,""],params:[3,4,1,""],prunable_layers:[3,2,1,""],sparsity:[3,2,1,""],update_ready:[3,2,1,""],validate:[3,2,1,""]},"sparseml.keras.optim.utils":{get_layer_name_from_param:[3,3,1,""]},"sparseml.keras.utils":{callbacks:[4,0,0,"-"],exporter:[4,0,0,"-"],logger:[4,0,0,"-"],model:[4,0,0,"-"]},"sparseml.keras.utils.callbacks":{LoggerSettingCallback:[4,1,1,""],Losses
AndMetricsLoggingCallback:[4,1,1,""]},"sparseml.keras.utils.callbacks.LoggerSettingCallback":{on_epoch_begin:[4,2,1,""],on_epoch_end:[4,2,1,""],on_predict_batch_begin:[4,2,1,""],on_predict_batch_end:[4,2,1,""],on_predict_begin:[4,2,1,""],on_predict_end:[4,2,1,""],on_test_batch_begin:[4,2,1,""],on_test_batch_end:[4,2,1,""],on_test_begin:[4,2,1,""],on_test_end:[4,2,1,""],on_train_batch_begin:[4,2,1,""],on_train_batch_end:[4,2,1,""],on_train_begin:[4,2,1,""],on_train_end:[4,2,1,""]},"sparseml.keras.utils.callbacks.LossesAndMetricsLoggingCallback":{on_epoch_end:[4,2,1,""],on_test_end:[4,2,1,""],on_train_batch_end:[4,2,1,""],on_train_begin:[4,2,1,""]},"sparseml.keras.utils.exporter":{ModelExporter:[4,1,1,""]},"sparseml.keras.utils.exporter.ModelExporter":{export_h5:[4,2,1,""],export_keras:[4,2,1,""],export_onnx:[4,2,1,""],export_samples:[4,2,1,""]},"sparseml.keras.utils.logger":{KerasLogger:[4,1,1,""],LoggingMode:[4,1,1,""],PythonLogger:[4,1,1,""],TensorBoardLogger:[4,1,1,""]},"sparseml.keras.utils.logger.KerasLogger":{log_scalar:[4,2,1,""],mode:[4,2,1,""],name:[4,2,1,""],update_freq:[4,2,1,""]},"sparseml.keras.utils.logger.LoggingMode":{PREDICT:[4,4,1,""],TEST:[4,4,1,""],TRAIN:[4,4,1,""]},"sparseml.keras.utils.logger.PythonLogger":{log_scalar:[4,2,1,""]},"sparseml.keras.utils.logger.TensorBoardLogger":{log_scalar:[4,2,1,""]},"sparseml.keras.utils.model":{sparsity:[4,3,1,""]},"sparseml.log":{get_main_logger:[1,3,1,""],get_nm_root_logger:[1,3,1,""],set_logging_level:[1,3,1,""]},"sparseml.onnx":{optim:[6,0,0,"-"],utils:[8,0,0,"-"]},"sparseml.onnx.optim":{analyzer_model:[6,0,0,"-"],quantization:[7,0,0,"-"],sensitivity_pruning:[6,0,0,"-"]},"sparseml.onnx.optim.analyzer_model":{ModelAnalyzer:[6,1,1,""],NodeAnalyzer:[6,1,1,""]},"sparseml.onnx.optim.analyzer_model.ModelAnalyzer":{dict:[6,2,1,""],from_dict:[6,2,1,""],get_node:[6,2,1,""],load_json:[6,2,1,""],nodes:[6,2,1,""],save_json:[6,2,1,""]},"sparseml.onnx.optim.analyzer_model.NodeAnalyzer":{attributes:[6,2,1,""],bias_name:[6,2,1,""],bias_shape:[6,2,1,""],dict:[6,2,1,""],flops:[6,2,1,""],id_:[6,2,1,""],input_names:[6,2,1,""],input_shapes:[6,2,1,""],op_type:[6,2,1,""],output_names:[6,2,1,""],output_shapes:[6,2,1,""],params:[6,2,1,""],prunable:[6,2,1,""],prunable_equation_sensitivity:[6,2,1,""],prunable_params:[6,2,1,""],prunable_params_zeroed:[6,2,1,""],weight_name:[6,2,1,""],weight_shape:[6,2,1,""]},"sparseml.onnx.optim.quantization":{calibration:[7,0,0,"-"],quantize:[7,0,0,"-"],quantize_model_post_training:[7,0,0,"-"]},"sparseml.onnx.optim.quantization.calibration":{CalibrationSession:[7,1,1,""]},"sparseml.onnx.optim.quantization.calibration.CalibrationSession":{add_reduce_to_node_output:[7,2,1,""],generate_augmented_model:[7,2,1,""],get_model_input_names:[7,2,1,""],get_quantization_params_dict:[7,2,1,""],model:[7,2,1,""],model_augmented:[7,2,1,""],process_batch:[7,2,1,""]},"sparseml.onnx.optim.quantization.quantize":{ONNXQuantizer:[7,1,1,""],QuantizationMode:[7,1,1,""],QuantizedInitializer:[7,1,1,""],QuantizedValue:[7,1,1,""],QuantizedValueType:[7,1,1,""],check_opset_version:[7,3,1,""],quantize:[7,3,1,""],quantize_data:[7,3,1,""]},"sparseml.onnx.optim.quantization.quantize.ONNXQuantizer":{find_weight_data:[7,2,1,""],quantize_model:[7,2,1,""]},"sparseml.onnx.optim.quantization.quantize.QuantizationMode":{IntegerOps:[7,4,1,""],QLinearOps:[7,4,1,""]},"sparseml.onnx.optim.quantization.quantize.QuantizedValueType":{Initializer:[7,4,1,""],Input:[7,4,1,""]},"sparseml.onnx.optim.quantization.quantize_model_post_training":{quantize_model_post_training:[7,3
,1,""]},"sparseml.onnx.optim.sensitivity_pruning":{PruningLossSensitivityAnalysis:[6,1,1,""],PruningPerfSensitivityAnalysis:[6,1,1,""],PruningSensitivityResult:[6,1,1,""],pruning_loss_sens_approx:[6,3,1,""],pruning_loss_sens_magnitude:[6,3,1,""],pruning_loss_sens_magnitude_iter:[6,3,1,""],pruning_loss_sens_one_shot:[6,3,1,""],pruning_loss_sens_one_shot_iter:[6,3,1,""],pruning_perf_sens_one_shot:[6,3,1,""],pruning_perf_sens_one_shot_iter:[6,3,1,""]},"sparseml.onnx.optim.sensitivity_pruning.PruningLossSensitivityAnalysis":{add_result:[6,2,1,""],dict:[6,2,1,""],from_dict:[6,2,1,""],get_result:[6,2,1,""],load_json:[6,2,1,""],plot:[6,2,1,""],print_res:[6,2,1,""],results:[6,2,1,""],results_model:[6,2,1,""],save_json:[6,2,1,""]},"sparseml.onnx.optim.sensitivity_pruning.PruningPerfSensitivityAnalysis":{add_model_result:[6,2,1,""],add_result:[6,2,1,""],batch_size:[6,2,1,""],dict:[6,2,1,""],from_dict:[6,2,1,""],get_result:[6,2,1,""],load_json:[6,2,1,""],num_cores:[6,2,1,""],plot:[6,2,1,""],print_res:[6,2,1,""],results:[6,2,1,""],results_model:[6,2,1,""],save_json:[6,2,1,""]},"sparseml.onnx.optim.sensitivity_pruning.PruningSensitivityResult":{add_measurement:[6,2,1,""],averages:[6,2,1,""],baseline_average:[6,2,1,""],baseline_measurement_index:[6,2,1,""],baseline_measurement_key:[6,2,1,""],dict:[6,2,1,""],from_dict:[6,2,1,""],has_baseline:[6,2,1,""],id_:[6,2,1,""],index:[6,2,1,""],name:[6,2,1,""],sparse_average:[6,2,1,""],sparse_comparison:[6,2,1,""],sparse_integral:[6,2,1,""],sparse_measurements:[6,2,1,""]},"sparseml.onnx.utils":{data:[8,0,0,"-"],graph_editor:[8,0,0,"-"],graph_optimizer:[8,0,0,"-"],helpers:[8,0,0,"-"],loss:[8,0,0,"-"],model:[8,0,0,"-"],sparse_tensor:[8,0,0,"-"]},"sparseml.onnx.utils.data":{DataLoader:[8,1,1,""]},"sparseml.onnx.utils.data.DataLoader":{batch_size:[8,2,1,""],from_model_random:[8,2,1,""],from_random:[8,2,1,""],infinite:[8,2,1,""],iter_steps:[8,2,1,""],labeled_data:[8,2,1,""]},"sparseml.onnx.utils.graph_editor":{override_model_batch_size:[8,3,1,""],prune_model_one_shot:[8,3,1,""],prune_model_one_shot_iter:[8,3,1,""],prune_unstructured:[8,3,1,""],remove_node_and_params_from_graph:[8,3,1,""],swap_node_output:[8,3,1,""],update_model_param:[8,3,1,""]},"sparseml.onnx.utils.graph_optimizer":{fold_conv_bns:[8,3,1,""],quantize_resnet_identity_add_inputs:[8,3,1,""]},"sparseml.onnx.utils.helpers":{BatchNormParams:[8,1,1,""],NodeParam:[8,1,1,""],NodeShape:[8,1,1,""],SparsityMeasurement:[8,1,1,""],calculate_flops:[8,3,1,""],check_load_model:[8,3,1,""],conv_node_params:[8,3,1,""],extract_node_id:[8,3,1,""],extract_node_shapes:[8,3,1,""],extract_nodes_shapes_ort:[8,3,1,""],extract_nodes_shapes_shape_inference:[8,3,1,""],extract_shape:[8,3,1,""],gemm_node_params:[8,3,1,""],get_attr_float_val_for_node:[8,3,1,""],get_batch_norm_params:[8,3,1,""],get_init_by_name:[8,3,1,""],get_kernel_shape:[8,3,1,""],get_node_attributes:[8,3,1,""],get_node_by_id:[8,3,1,""],get_node_input_nodes:[8,3,1,""],get_node_inputs:[8,3,1,""],get_node_output_nodes:[8,3,1,""],get_node_outputs:[8,3,1,""],get_node_params:[8,3,1,""],get_nodes_by_input_id:[8,3,1,""],get_nodes_by_output_id:[8,3,1,""],get_numpy_dtype:[8,3,1,""],get_prunable_node_from_foldable:[8,3,1,""],get_prunable_nodes:[8,3,1,""],get_quantize_parent_for_dequantize_node:[8,3,1,""],is_foldable_node:[8,3,1,""],is_prunable_node:[8,3,1,""],matmul_node_params:[8,3,1,""],model_inputs:[8,3,1,""],model_outputs:[8,3,1,""],onnx_nodes_sparsities:[8,3,1,""],validate_onnx_file:[8,3,1,""]},"sparseml.onnx.utils.helpers.BatchNormParams":{"var":[8,2,1,""],bias:[8,2,1,""],e
psilon:[8,2,1,""],mean:[8,2,1,""],momentum:[8,2,1,""],scale:[8,2,1,""]},"sparseml.onnx.utils.helpers.NodeParam":{name:[8,2,1,""],val:[8,2,1,""]},"sparseml.onnx.utils.helpers.NodeShape":{id:[8,2,1,""],input_shapes:[8,2,1,""],output_shapes:[8,2,1,""]},"sparseml.onnx.utils.helpers.SparsityMeasurement":{density:[8,2,1,""],node_id:[8,2,1,""],params_count:[8,2,1,""],params_zero_count:[8,2,1,""],sparsity:[8,2,1,""]},"sparseml.onnx.utils.loss":{kl_divergence:[8,3,1,""]},"sparseml.onnx.utils.model":{DeepSparseAnalyzeModelRunner:[8,1,1,""],DeepSparseModelRunner:[8,1,1,""],ModelRunner:[8,1,1,""],ORTModelRunner:[8,1,1,""],OpenVINOModelRunner:[8,1,1,""],correct_nm_analyze_model_node_ids:[8,3,1,""],max_available_cores:[8,3,1,""],split_canonical_names:[8,3,1,""]},"sparseml.onnx.utils.model.DeepSparseAnalyzeModelRunner":{batch_forward:[8,2,1,""],run:[8,2,1,""]},"sparseml.onnx.utils.model.DeepSparseModelRunner":{batch_forward:[8,2,1,""],run:[8,2,1,""]},"sparseml.onnx.utils.model.ModelRunner":{batch_forward:[8,2,1,""],run:[8,2,1,""],run_iter:[8,2,1,""]},"sparseml.onnx.utils.model.ORTModelRunner":{batch_forward:[8,2,1,""],run:[8,2,1,""]},"sparseml.onnx.utils.model.OpenVINOModelRunner":{available:[8,2,1,""],batch_forward:[8,2,1,""],network_input_shapes:[8,2,1,""]},"sparseml.onnx.utils.sparse_tensor":{convert_model_initializers_to_sparse:[8,3,1,""],convert_sparse_initializers_to_dense:[8,3,1,""],create_sparse_tensor:[8,3,1,""],sparse_tensor_to_dense:[8,3,1,""]},"sparseml.optim":{analyzer:[9,0,0,"-"],learning_rate:[9,0,0,"-"],manager:[9,0,0,"-"],modifier:[9,0,0,"-"],sensitivity:[9,0,0,"-"]},"sparseml.optim.analyzer":{AnalyzedLayerDesc:[9,1,1,""]},"sparseml.optim.analyzer.AnalyzedLayerDesc":{dict:[9,2,1,""],load_descs:[9,2,1,""],merge_descs:[9,2,1,""],prunable:[9,2,1,""],save_descs:[9,2,1,""],terminal:[9,2,1,""]},"sparseml.optim.learning_rate":{LearningRate:[9,1,1,""],SetLearningRate:[9,1,1,""]},"sparseml.optim.learning_rate.LearningRate":{corrected_lr_info:[9,2,1,""],init_lr:[9,4,1,""],lr_class:[9,4,1,""],lr_kwargs:[9,4,1,""],validate_lr_info:[9,2,1,""]},"sparseml.optim.learning_rate.SetLearningRate":{learning_rate:[9,4,1,""],validate_learning_rate:[9,2,1,""]},"sparseml.optim.manager":{BaseManager:[9,1,1,""]},"sparseml.optim.manager.BaseManager":{max_epochs:[9,4,1,""],min_epochs:[9,4,1,""],modifiers:[9,4,1,""],modifiers_to_string_lines:[9,2,1,""],save:[9,2,1,""],to_string_lines:[9,2,1,""]},"sparseml.optim.modifier":{BaseModifier:[9,1,1,""],BaseObject:[9,1,1,""],BaseProp:[9,1,1,""],BaseScheduled:[9,1,1,""],BaseUpdate:[9,1,1,""],ModifierProp:[9,1,1,""],ModifierYAML:[9,1,1,""]},"sparseml.optim.modifier.BaseModifier":{enabled:[9,4,1,""],initialized:[9,4,1,""],load_framework_list:[9,2,1,""],load_framework_obj:[9,2,1,""],log_types:[9,4,1,""],props:[9,2,1,""],yaml_key:[9,2,1,""]},"sparseml.optim.modifier.BaseProp":{getter:[9,2,1,""],setter:[9,2,1,""]},"sparseml.optim.modifier.BaseScheduled":{end_epoch:[9,4,1,""],start_epoch:[9,4,1,""],validate_schedule:[9,2,1,""]},"sparseml.optim.modifier.BaseUpdate":{update_frequency:[9,4,1,""],validate_update:[9,2,1,""]},"sparseml.optim.modifier.ModifierProp":{getter:[9,2,1,""],no_serialize_val:[9,2,1,""],restrictions:[9,2,1,""],serializable:[9,2,1,""],setter:[9,2,1,""]},"sparseml.optim.sensitivity":{LRLossSensitivityAnalysis:[9,1,1,""],PruningLossSensitivityAnalysis:[9,1,1,""],PruningPerfSensitivityAnalysis:[9,1,1,""],PruningSensitivityResult:[9,1,1,""],default_pruning_sparsities_loss:[9,3,1,""],default_pruning_sparsities_perf:[9,3,1,""]},"sparseml.optim.sensitivity.LRLossSensitivit
yAnalysis":{add_result:[9,2,1,""],dict:[9,2,1,""],load_json:[9,2,1,""],plot:[9,2,1,""],print_res:[9,2,1,""],results:[9,2,1,""],save_json:[9,2,1,""]},"sparseml.optim.sensitivity.PruningLossSensitivityAnalysis":{add_result:[9,2,1,""],dict:[9,2,1,""],from_dict:[9,2,1,""],get_result:[9,2,1,""],load_json:[9,2,1,""],plot:[9,2,1,""],print_res:[9,2,1,""],results:[9,2,1,""],results_model:[9,2,1,""],save_json:[9,2,1,""]},"sparseml.optim.sensitivity.PruningPerfSensitivityAnalysis":{add_model_result:[9,2,1,""],add_result:[9,2,1,""],batch_size:[9,2,1,""],dict:[9,2,1,""],from_dict:[9,2,1,""],get_result:[9,2,1,""],load_json:[9,2,1,""],num_cores:[9,2,1,""],plot:[9,2,1,""],print_res:[9,2,1,""],results:[9,2,1,""],results_model:[9,2,1,""],save_json:[9,2,1,""]},"sparseml.optim.sensitivity.PruningSensitivityResult":{add_measurement:[9,2,1,""],averages:[9,2,1,""],baseline_average:[9,2,1,""],baseline_measurement_index:[9,2,1,""],baseline_measurement_key:[9,2,1,""],dict:[9,2,1,""],from_dict:[9,2,1,""],has_baseline:[9,2,1,""],id_:[9,2,1,""],index:[9,2,1,""],name:[9,2,1,""],sparse_average:[9,2,1,""],sparse_comparison:[9,2,1,""],sparse_integral:[9,2,1,""],sparse_measurements:[9,2,1,""]},"sparseml.pytorch":{datasets:[11,0,0,"-"],models:[16,0,0,"-"],nn:[21,0,0,"-"],optim:[22,0,0,"-"],utils:[24,0,0,"-"]},"sparseml.pytorch.datasets":{classification:[12,0,0,"-"],detection:[13,0,0,"-"],generic:[11,0,0,"-"],recommendation:[14,0,0,"-"],registry:[11,0,0,"-"],video:[15,0,0,"-"]},"sparseml.pytorch.datasets.classification":{cifar:[12,0,0,"-"],imagefolder:[12,0,0,"-"],imagenet:[12,0,0,"-"],imagenette:[12,0,0,"-"],mnist:[12,0,0,"-"]},"sparseml.pytorch.datasets.classification.cifar":{CIFAR100Dataset:[12,1,1,""],CIFAR10Dataset:[12,1,1,""]},"sparseml.pytorch.datasets.classification.imagefolder":{ImageFolderDataset:[12,1,1,""]},"sparseml.pytorch.datasets.classification.imagefolder.ImageFolderDataset":{num_classes:[12,2,1,""]},"sparseml.pytorch.datasets.classification.imagenet":{ImageNetDataset:[12,1,1,""]},"sparseml.pytorch.datasets.classification.imagenette":{ImagenetteDataset:[12,1,1,""],ImagenetteSize:[12,1,1,""],ImagewoofDataset:[12,1,1,""]},"sparseml.pytorch.datasets.classification.imagenette.ImagenetteSize":{full:[12,4,1,""],s160:[12,4,1,""],s320:[12,4,1,""]},"sparseml.pytorch.datasets.classification.mnist":{MNISTDataset:[12,1,1,""]},"sparseml.pytorch.datasets.detection":{coco:[13,0,0,"-"],helpers:[13,0,0,"-"],voc:[13,0,0,"-"]},"sparseml.pytorch.datasets.detection.coco":{CocoDetectionDataset:[13,1,1,""],coco_2017_yolo:[13,3,1,""]},"sparseml.pytorch.datasets.detection.coco.CocoDetectionDataset":{default_boxes:[13,2,1,""]},"sparseml.pytorch.datasets.detection.helpers":{AnnotatedImageTransforms:[13,1,1,""],bounding_box_and_labels_to_yolo_fmt:[13,3,1,""],random_horizontal_flip_image_and_annotations:[13,3,1,""],ssd_collate_fn:[13,3,1,""],ssd_random_crop_image_and_annotations:[13,3,1,""],yolo_collate_fn:[13,3,1,""]},"sparseml.pytorch.datasets.detection.helpers.AnnotatedImageTransforms":{transforms:[13,2,1,""]},"sparseml.pytorch.datasets.detection.voc":{VOCDetectionDataset:[13,1,1,""],VOCSegmentationDataset:[13,1,1,""]},"sparseml.pytorch.datasets.detection.voc.VOCDetectionDataset":{default_boxes:[13,2,1,""]},"sparseml.pytorch.datasets.generic":{CacheableDataset:[11,1,1,""],EarlyStopDataset:[11,1,1,""],NoisyDataset:[11,1,1,""],RandNDataset:[11,1,1,""]},"sparseml.pytorch.datasets.registry":{DatasetRegistry:[11,1,1,""]},"sparseml.pytorch.datasets.registry.DatasetRegistry":{attributes:[11,2,1,""],create:[11,2,1,""],register:[11,2,1,""]},"s
parseml.pytorch.models":{classification:[17,0,0,"-"],detection:[18,0,0,"-"],external:[19,0,0,"-"],recommendation:[20,0,0,"-"],registry:[16,0,0,"-"]},"sparseml.pytorch.models.classification":{darknet:[17,0,0,"-"],efficientnet:[17,0,0,"-"],inception_v3:[17,0,0,"-"],mnist:[17,0,0,"-"],mobilenet:[17,0,0,"-"],mobilenet_v2:[17,0,0,"-"],resnet:[17,0,0,"-"],vgg:[17,0,0,"-"]},"sparseml.pytorch.models.classification.darknet":{DarkNet:[17,1,1,""],DarkNetSectionSettings:[17,1,1,""],darknet53:[17,3,1,""]},"sparseml.pytorch.models.classification.darknet.DarkNet":{as_classifier:[17,2,1,""],as_yolo_backbone:[17,2,1,""],create_section:[17,2,1,""],forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.classification.efficientnet":{EfficientNet:[17,1,1,""],EfficientNetSectionSettings:[17,1,1,""],efficientnet_b0:[17,3,1,""],efficientnet_b1:[17,3,1,""],efficientnet_b2:[17,3,1,""],efficientnet_b3:[17,3,1,""],efficientnet_b4:[17,3,1,""],efficientnet_b5:[17,3,1,""],efficientnet_b6:[17,3,1,""],efficientnet_b7:[17,3,1,""]},"sparseml.pytorch.models.classification.efficientnet.EfficientNet":{create_section:[17,2,1,""],forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.classification.inception_v3":{InceptionV3:[17,1,1,""],inception_v3:[17,3,1,""]},"sparseml.pytorch.models.classification.inception_v3.InceptionV3":{forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.classification.mnist":{MnistNet:[17,1,1,""],mnist_net:[17,3,1,""]},"sparseml.pytorch.models.classification.mnist.MnistNet":{forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.classification.mobilenet":{MobileNet:[17,1,1,""],MobileNetSectionSettings:[17,1,1,""],han_mobilenet:[17,3,1,""],mobilenet:[17,3,1,""]},"sparseml.pytorch.models.classification.mobilenet.MobileNet":{create_section:[17,2,1,""],forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.classification.mobilenet_v2":{MobilenetV2:[17,1,1,""],MobilenetV2SectionSettings:[17,1,1,""],mobilenet_v2:[17,3,1,""],mobilenet_v2_width:[17,3,1,""]},"sparseml.pytorch.models.classification.mobilenet_v2.MobilenetV2":{create_section:[17,2,1,""],forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.classification.resnet":{ResNet:[17,1,1,""],ResNetSectionSettings:[17,1,1,""],resnet101:[17,3,1,""],resnet101_2xwidth:[17,3,1,""],resnet152:[17,3,1,""],resnet18:[17,3,1,""],resnet34:[17,3,1,""],resnet50:[17,3,1,""],resnet50_2xwidth:[17,3,1,""],resnetv2_101:[17,3,1,""],resnetv2_152:[17,3,1,""],resnetv2_18:[17,3,1,""],resnetv2_34:[17,3,1,""],resnetv2_50:[17,3,1,""],resnext101:[17,3,1,""],resnext152:[17,3,1,""],resnext50:[17,3,1,""]},"sparseml.pytorch.models.classification.resnet.ResNet":{create_section:[17,2,1,""],forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.classification.vgg":{VGG:[17,1,1,""],vgg11:[17,3,1,""],vgg11bn:[17,3,1,""],vgg13:[17,3,1,""],vgg13bn:[17,3,1,""],vgg16:[17,3,1,""],vgg16bn:[17,3,1,""],vgg19:[17,3,1,""],vgg19bn:[17,3,1,""]},"sparseml.pytorch.models.classification.vgg.VGG":{create_section:[17,2,1,""],forward:[17,2,1,""],training:[17,4,1,""]},"sparseml.pytorch.models.detection":{ssd:[18,0,0,"-"],ssd_lite:[18,0,0,"-"],ssd_mobilenet:[18,0,0,"-"],ssd_resnet:[18,0,0,"-"],yolo_v3:[18,0,0,"-"]},"sparseml.pytorch.models.detection.ssd":{SSD300:[18,1,1,""],SSDBackbone:[18,1,1,""]},"sparseml.pytorch.models.detection.ssd.SSD300":{forward:[18,2,1,""],training:[18,4,1,""]},"sparseml.pytorch.models.detection.ssd.SSDBackbone":{get_feature_extractor:[18,2,1,""],out_channels:[18,2,1,""]},"sparseml.pytorch.models.detect
ion.ssd_lite":{SSD300Lite:[18,1,1,""]},"sparseml.pytorch.models.detection.ssd_lite.SSD300Lite":{forward:[18,2,1,""],training:[18,4,1,""]},"sparseml.pytorch.models.detection.ssd_mobilenet":{SSD300MobileNetBackbone:[18,1,1,""],ssd300lite_mobilenetv2:[18,3,1,""]},"sparseml.pytorch.models.detection.ssd_mobilenet.SSD300MobileNetBackbone":{get_feature_extractor:[18,2,1,""],out_channels:[18,2,1,""]},"sparseml.pytorch.models.detection.ssd_resnet":{SSD300ResNetBackbone:[18,1,1,""],ssd300_resnet101:[18,3,1,""],ssd300_resnet152:[18,3,1,""],ssd300_resnet18:[18,3,1,""],ssd300_resnet34:[18,3,1,""],ssd300_resnet50:[18,3,1,""]},"sparseml.pytorch.models.detection.ssd_resnet.SSD300ResNetBackbone":{get_feature_extractor:[18,2,1,""],out_channels:[18,2,1,""]},"sparseml.pytorch.models.detection.yolo_v3":{YoloV3:[18,1,1,""],yolo_v3:[18,3,1,""]},"sparseml.pytorch.models.detection.yolo_v3.YoloV3":{forward:[18,2,1,""],training:[18,4,1,""]},"sparseml.pytorch.models.external":{torchvision:[19,0,0,"-"]},"sparseml.pytorch.models.registry":{ModelRegistry:[16,1,1,""]},"sparseml.pytorch.models.registry.ModelRegistry":{available_keys:[16,2,1,""],create:[16,2,1,""],create_zoo_model:[16,2,1,""],input_shape:[16,2,1,""],register:[16,2,1,""],register_wrapped_model_constructor:[16,2,1,""]},"sparseml.pytorch.nn":{activations:[21,0,0,"-"],fatrelu:[21,0,0,"-"],se:[21,0,0,"-"]},"sparseml.pytorch.nn.activations":{Hardswish:[21,1,1,""],ReLU6:[21,1,1,""],ReLU:[21,1,1,""],Swish:[21,1,1,""],create_activation:[21,3,1,""],hard_swish:[21,3,1,""],is_activation:[21,3,1,""],replace_activation:[21,3,1,""],swish:[21,3,1,""]},"sparseml.pytorch.nn.activations.Hardswish":{forward:[21,2,1,""],training:[21,4,1,""]},"sparseml.pytorch.nn.activations.ReLU":{inplace:[21,4,1,""]},"sparseml.pytorch.nn.activations.ReLU6":{inplace:[21,4,1,""],max_val:[21,4,1,""],min_val:[21,4,1,""]},"sparseml.pytorch.nn.activations.Swish":{forward:[21,2,1,""],training:[21,4,1,""]},"sparseml.pytorch.nn.fatrelu":{FATReLU:[21,1,1,""],convert_relus_to_fat:[21,3,1,""],fat_exp_relu:[21,3,1,""],fat_pw_relu:[21,3,1,""],fat_relu:[21,3,1,""],fat_sig_relu:[21,3,1,""],set_relu_to_fat:[21,3,1,""]},"sparseml.pytorch.nn.fatrelu.FATReLU":{channel_wise:[21,2,1,""],dynamic:[21,2,1,""],extra_repr:[21,2,1,""],forward:[21,2,1,""],get_threshold:[21,2,1,""],load_state_dict:[21,2,1,""],num_channels:[21,2,1,""],set_threshold:[21,2,1,""],training:[21,4,1,""]},"sparseml.pytorch.nn.se":{SqueezeExcite:[21,1,1,""]},"sparseml.pytorch.nn.se.SqueezeExcite":{forward:[21,2,1,""],training:[21,4,1,""]},"sparseml.pytorch.optim":{analyzer_as:[22,0,0,"-"],analyzer_module:[22,0,0,"-"],analyzer_pruning:[22,0,0,"-"],manager:[22,0,0,"-"],mask_creator_pruning:[22,0,0,"-"],mask_pruning:[22,0,0,"-"],modifier:[22,0,0,"-"],modifier_as:[22,0,0,"-"],modifier_epoch:[22,0,0,"-"],modifier_lr:[22,0,0,"-"],modifier_params:[22,0,0,"-"],modifier_pruning:[22,0,0,"-"],modifier_quantization:[22,0,0,"-"],modifier_regularizer:[22,0,0,"-"],optimizer:[22,0,0,"-"],quantization:[23,0,0,"-"],sensitivity_as:[22,0,0,"-"],sensitivity_lr:[22,0,0,"-"],sensitivity_pruning:[22,0,0,"-"]},"sparseml.pytorch.optim.analyzer_as":{ASResultType:[22,1,1,""],ModuleASAnalyzer:[22,1,1,""]},"sparseml.pytorch.optim.analyzer_as.ASResultType":{inputs_sample:[22,4,1,""],inputs_sparsity:[22,4,1,""],outputs_sample:[22,4,1,""],outputs_sparsity:[22,4,1,""]},"sparseml.pytorch.optim.analyzer_as.ModuleASAnalyzer":{analyze_layers:[22,2,1,""],clear:[22,2,1,""],dim:[22,2,1,""],disable:[22,2,1,""],enable:[22,2,1,""],enabled:[22,2,1,""],inputs_sample:[22,2,1,""],inputs_sample_m
ax:[22,2,1,""],inputs_sample_mean:[22,2,1,""],inputs_sample_min:[22,2,1,""],inputs_sample_size:[22,2,1,""],inputs_sample_std:[22,2,1,""],inputs_sparsity:[22,2,1,""],inputs_sparsity_max:[22,2,1,""],inputs_sparsity_mean:[22,2,1,""],inputs_sparsity_min:[22,2,1,""],inputs_sparsity_std:[22,2,1,""],module:[22,2,1,""],outputs_sample:[22,2,1,""],outputs_sample_max:[22,2,1,""],outputs_sample_mean:[22,2,1,""],outputs_sample_min:[22,2,1,""],outputs_sample_size:[22,2,1,""],outputs_sample_std:[22,2,1,""],outputs_sparsity:[22,2,1,""],outputs_sparsity_max:[22,2,1,""],outputs_sparsity_mean:[22,2,1,""],outputs_sparsity_min:[22,2,1,""],outputs_sparsity_std:[22,2,1,""],results:[22,2,1,""],results_max:[22,2,1,""],results_mean:[22,2,1,""],results_min:[22,2,1,""],results_std:[22,2,1,""],track_inputs_sparsity:[22,2,1,""],track_outputs_sparsity:[22,2,1,""]},"sparseml.pytorch.optim.analyzer_module":{ModuleAnalyzer:[22,1,1,""]},"sparseml.pytorch.optim.analyzer_module.ModuleAnalyzer":{enabled:[22,2,1,""],ks_layer_descs:[22,2,1,""],layer_desc:[22,2,1,""],module:[22,2,1,""]},"sparseml.pytorch.optim.analyzer_pruning":{ModulePruningAnalyzer:[22,1,1,""]},"sparseml.pytorch.optim.analyzer_pruning.ModulePruningAnalyzer":{analyze_layers:[22,2,1,""],module:[22,2,1,""],name:[22,2,1,""],param:[22,2,1,""],param_name:[22,2,1,""],param_sparsity:[22,2,1,""],param_sparsity_dim:[22,2,1,""],tag:[22,2,1,""]},"sparseml.pytorch.optim.manager":{ScheduledModifierManager:[22,1,1,""],load_manager:[22,3,1,""]},"sparseml.pytorch.optim.manager.ScheduledModifierManager":{from_yaml:[22,2,1,""],initialize:[22,2,1,""],initialize_loggers:[22,2,1,""],load_state_dict:[22,2,1,""],loss_update:[22,2,1,""],optimizer_post_step:[22,2,1,""],optimizer_pre_step:[22,2,1,""],state_dict:[22,2,1,""],update:[22,2,1,""]},"sparseml.pytorch.optim.mask_creator_pruning":{BlockPruningMaskCreator:[22,1,1,""],DimensionSparsityMaskCreator:[22,1,1,""],GroupedPruningMaskCreator:[22,1,1,""],PruningMaskCreator:[22,1,1,""],UnstructuredPruningMaskCreator:[22,1,1,""],load_mask_creator:[22,3,1,""]},"sparseml.pytorch.optim.mask_creator_pruning.BlockPruningMaskCreator":{group_tensor:[22,2,1,""]},"sparseml.pytorch.optim.mask_creator_pruning.DimensionSparsityMaskCreator":{group_tensor:[22,2,1,""]},"sparseml.pytorch.optim.mask_creator_pruning.GroupedPruningMaskCreator":{create_sparsity_mask:[22,2,1,""],create_sparsity_mask_from_abs_threshold:[22,2,1,""],create_sparsity_mask_from_tensor:[22,2,1,""],get_grouping_fn:[22,2,1,""],group_tensor:[22,2,1,""]},"sparseml.pytorch.optim.mask_creator_pruning.PruningMaskCreator":{create_sparsity_mask:[22,2,1,""],create_sparsity_mask_from_abs_threshold:[22,2,1,""],create_sparsity_mask_from_tensor:[22,2,1,""]},"sparseml.pytorch.optim.mask_creator_pruning.UnstructuredPruningMaskCreator":{create_sparsity_mask:[22,2,1,""],create_sparsity_mask_from_abs_threshold:[22,2,1,""]},"sparseml.pytorch.optim.mask_pruning":{ModuleParamPruningMask:[22,1,1,""]},"sparseml.pytorch.optim.mask_pruning.ModuleParamPruningMask":{apply:[22,2,1,""],enabled:[22,2,1,""],layer:[22,2,1,""],layer_name:[22,2,1,""],mask_creator:[22,2,1,""],name:[22,2,1,""],param_data:[22,2,1,""],param_grad:[22,2,1,""],param_init:[22,2,1,""],param_mask:[22,2,1,""],param_name:[22,2,1,""],param_unmasked:[22,2,1,""],reset:[22,2,1,""],set_param_data:[22,2,1,""],set_param_mask:[22,2,1,""],set_param_mask_from_abs_threshold:[22,2,1,""],set_param_mask_from_sparsity:[22,2,1,""],set_param_mask_from_weights:[22,2,1,""],store_init:[22,2,1,""],store_unmasked:[22,2,1,""],track_grad_mom:[22,2,1,""]},"sparseml.pytorch.o
ptim.modifier":{Modifier:[22,1,1,""],ModifierProp:[22,1,1,""],PyTorchModifierYAML:[22,1,1,""],ScheduledModifier:[22,1,1,""],ScheduledUpdateModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier.Modifier":{initialize:[22,2,1,""],initialize_loggers:[22,2,1,""],load_list:[22,2,1,""],load_obj:[22,2,1,""],log_update:[22,2,1,""],loggers:[22,4,1,""],loggers_initialized:[22,4,1,""],loss_update:[22,2,1,""],optimizer_post_step:[22,2,1,""],optimizer_pre_step:[22,2,1,""],update:[22,2,1,""]},"sparseml.pytorch.optim.modifier.ModifierProp":{getter:[22,2,1,""],no_serialize_val:[22,2,1,""],restrictions:[22,2,1,""],serializable:[22,2,1,""],setter:[22,2,1,""]},"sparseml.pytorch.optim.modifier.ScheduledModifier":{end_pending:[22,2,1,""],ended:[22,4,1,""],log_update:[22,2,1,""],scheduled_log_update:[22,2,1,""],scheduled_update:[22,2,1,""],start_pending:[22,2,1,""],started:[22,4,1,""],update:[22,2,1,""],update_ready:[22,2,1,""]},"sparseml.pytorch.optim.modifier.ScheduledUpdateModifier":{update:[22,2,1,""],update_ready:[22,2,1,""]},"sparseml.pytorch.optim.modifier_as":{ASRegModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier_as.ASRegModifier":{alpha:[22,4,1,""],initialize:[22,2,1,""],layer_normalized:[22,4,1,""],layers:[22,4,1,""],loss_update:[22,2,1,""],optimizer_post_step:[22,2,1,""],reg_func:[22,4,1,""],reg_tens:[22,4,1,""],update:[22,2,1,""],validate:[22,2,1,""]},"sparseml.pytorch.optim.modifier_epoch":{EpochRangeModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier_lr":{LearningRateModifier:[22,1,1,""],SetLearningRateModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier_lr.LearningRateModifier":{constant_logging:[22,4,1,""],log_update:[22,2,1,""],update:[22,2,1,""],validate:[22,2,1,""]},"sparseml.pytorch.optim.modifier_lr.SetLearningRateModifier":{applied_learning_rate:[22,4,1,""],constant_logging:[22,4,1,""],log_update:[22,2,1,""],update:[22,2,1,""]},"sparseml.pytorch.optim.modifier_params":{GradualParamModifier:[22,1,1,""],SetParamModifier:[22,1,1,""],TrainableParamsModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier_params.GradualParamModifier":{final_val:[22,4,1,""],init_val:[22,4,1,""],initialize:[22,2,1,""],inter_func:[22,4,1,""],params:[22,4,1,""],params_strict:[22,4,1,""],update:[22,2,1,""],validate:[22,2,1,""]},"sparseml.pytorch.optim.modifier_params.SetParamModifier":{initialize:[22,2,1,""],params:[22,4,1,""],params_strict:[22,4,1,""],update:[22,2,1,""],val:[22,4,1,""]},"sparseml.pytorch.optim.modifier_params.TrainableParamsModifier":{initialize:[22,2,1,""],params:[22,4,1,""],params_strict:[22,4,1,""],trainable:[22,4,1,""],update:[22,2,1,""]},"sparseml.pytorch.optim.modifier_pruning":{ConstantPruningModifier:[22,1,1,""],GMPruningModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier_pruning.ConstantPruningModifier":{from_sparse_model:[22,2,1,""],initialize:[22,2,1,""],load_state_dict:[22,2,1,""],log_update:[22,2,1,""],optimizer_post_step:[22,2,1,""],params:[22,4,1,""],state_dict:[22,2,1,""],update:[22,2,1,""]},"sparseml.pytorch.optim.modifier_pruning.GMPruningModifier":{applied_sparsity:[22,4,1,""],final_sparsity:[22,4,1,""],init_sparsity:[22,4,1,""],initialize:[22,2,1,""],inter_func:[22,4,1,""],leave_enabled:[22,4,1,""],load_state_dict:[22,2,1,""],log_update:[22,2,1,""],mask_type:[22,4,1,""],optimizer_post_step:[22,2,1,""],params:[22,4,1,""],state_dict:[22,2,1,""],update:[22,2,1,""],validate:[22,2,1,""]},"sparseml.pytorch.optim.modifier_quantization":{QuantizationModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier_quantization.QuantizationModifier":{disable_quantization_observer_epoch:[
22,4,1,""],freeze_bn_stats_epoch:[22,4,1,""],initialize:[22,2,1,""],model_fuse_fn_name:[22,4,1,""],submodules:[22,4,1,""],update:[22,2,1,""],update_ready:[22,2,1,""]},"sparseml.pytorch.optim.modifier_regularizer":{SetWeightDecayModifier:[22,1,1,""]},"sparseml.pytorch.optim.modifier_regularizer.SetWeightDecayModifier":{constant_logging:[22,4,1,""],log_update:[22,2,1,""],param_groups:[22,4,1,""],update:[22,2,1,""],weight_decay:[22,4,1,""]},"sparseml.pytorch.optim.optimizer":{ScheduledOptimizer:[22,1,1,""]},"sparseml.pytorch.optim.optimizer.ScheduledOptimizer":{add_param_group:[22,2,1,""],adjust_current_step:[22,2,1,""],learning_rate:[22,2,1,""],load_manager_state_dict:[22,2,1,""],load_state_dict:[22,2,1,""],loss_update:[22,2,1,""],manager:[22,2,1,""],manager_state_dict:[22,2,1,""],param_groups:[22,2,1,""],state_dict:[22,2,1,""],step:[22,2,1,""],zero_grad:[22,2,1,""]},"sparseml.pytorch.optim.quantization":{helpers:[23,0,0,"-"],quantize_qat_export:[23,0,0,"-"]},"sparseml.pytorch.optim.quantization.helpers":{add_quant_dequant:[23,3,1,""],fuse_module_conv_bn_relus:[23,3,1,""],get_qat_qconfig:[23,3,1,""]},"sparseml.pytorch.optim.quantization.quantize_qat_export":{QuantizationParams:[23,1,1,""],get_quantization_params:[23,3,1,""],quantize_torch_qat_export:[23,3,1,""]},"sparseml.pytorch.optim.quantization.quantize_qat_export.QuantizationParams":{scale:[23,2,1,""],target:[23,2,1,""],zero_point:[23,2,1,""]},"sparseml.pytorch.optim.sensitivity_as":{ASLayerTracker:[22,1,1,""],LayerBoostResults:[22,1,1,""],ModuleASOneShootBooster:[22,1,1,""]},"sparseml.pytorch.optim.sensitivity_as.ASLayerTracker":{clear:[22,2,1,""],disable:[22,2,1,""],enable:[22,2,1,""],tracked_input:[22,2,1,""],tracked_output:[22,2,1,""]},"sparseml.pytorch.optim.sensitivity_as.LayerBoostResults":{baseline_as:[22,2,1,""],baseline_loss:[22,2,1,""],boosted_as:[22,2,1,""],boosted_loss:[22,2,1,""],name:[22,2,1,""],threshold:[22,2,1,""]},"sparseml.pytorch.optim.sensitivity_as.ModuleASOneShootBooster":{run_layers:[22,2,1,""]},"sparseml.pytorch.optim.sensitivity_lr":{default_exponential_check_lrs:[22,3,1,""],lr_loss_sensitivity:[22,3,1,""]},"sparseml.pytorch.optim.sensitivity_pruning":{model_prunability_magnitude:[22,3,1,""],pruning_loss_sens_magnitude:[22,3,1,""],pruning_loss_sens_one_shot:[22,3,1,""]},"sparseml.pytorch.utils":{benchmarker:[24,0,0,"-"],exporter:[24,0,0,"-"],helpers:[24,0,0,"-"],logger:[24,0,0,"-"],loss:[24,0,0,"-"],model:[24,0,0,"-"],module:[24,0,0,"-"],ssd_helpers:[24,0,0,"-"],yolo_helpers:[24,0,0,"-"]},"sparseml.pytorch.utils.benchmarker":{BatchBenchmarkResults:[24,1,1,""],ModuleBenchmarker:[24,1,1,""]},"sparseml.pytorch.utils.benchmarker.BatchBenchmarkResults":{add:[24,2,1,""],batch_size:[24,2,1,""],e2e_batch_seconds:[24,2,1,""],e2e_batch_timings:[24,2,1,""],e2e_batches_per_second:[24,2,1,""],e2e_item_seconds:[24,2,1,""],e2e_items_per_second:[24,2,1,""],model_batch_seconds:[24,2,1,""],model_batch_timings:[24,2,1,""],model_batches_per_second:[24,2,1,""],model_item_seconds:[24,2,1,""],model_items_per_second:[24,2,1,""]},"sparseml.pytorch.utils.benchmarker.ModuleBenchmarker":{run_batches_on_device:[24,2,1,""]},"sparseml.pytorch.utils.exporter":{ModuleExporter:[24,1,1,""]},"sparseml.pytorch.utils.exporter.ModuleExporter":{export_onnx:[24,2,1,""],export_pytorch:[24,2,1,""],export_samples:[24,2,1,""]},"sparseml.pytorch.utils.helpers":{NamedLayerParam:[24,1,1,""],any_str_or_regex_matches_param_name:[24,3,1,""],default_device:[24,3,1,""],early_stop_data_loader:[24,3,1,""],get_conv_layers:[24,3,1,""],get_layer:[24,3,1,""],get_layer_
param:[24,3,1,""],get_linear_layers:[24,3,1,""],get_named_layers_and_params_by_regex:[24,3,1,""],get_optim_learning_rate:[24,3,1,""],get_prunable_layers:[24,3,1,""],get_terminal_layers:[24,3,1,""],infinite_data_loader:[24,3,1,""],mask_difference:[24,3,1,""],set_deterministic_seeds:[24,3,1,""],set_optim_learning_rate:[24,3,1,""],tensor_density:[24,3,1,""],tensor_export:[24,3,1,""],tensor_sample:[24,3,1,""],tensor_sparsity:[24,3,1,""],tensors_batch_size:[24,3,1,""],tensors_export:[24,3,1,""],tensors_module_forward:[24,3,1,""],tensors_to_device:[24,3,1,""],tensors_to_precision:[24,3,1,""],torch_distributed_zero_first:[24,3,1,""]},"sparseml.pytorch.utils.helpers.NamedLayerParam":{layer:[24,2,1,""],layer_name:[24,2,1,""],param:[24,2,1,""],param_name:[24,2,1,""]},"sparseml.pytorch.utils.logger":{PyTorchLogger:[24,1,1,""],PythonLogger:[24,1,1,""],TensorBoardLogger:[24,1,1,""]},"sparseml.pytorch.utils.logger.PyTorchLogger":{log_histogram:[24,2,1,""],log_histogram_raw:[24,2,1,""],log_hyperparams:[24,2,1,""],log_scalar:[24,2,1,""],log_scalars:[24,2,1,""],name:[24,2,1,""]},"sparseml.pytorch.utils.logger.PythonLogger":{log_histogram:[24,2,1,""],log_histogram_raw:[24,2,1,""],log_hyperparams:[24,2,1,""],log_scalar:[24,2,1,""],log_scalars:[24,2,1,""]},"sparseml.pytorch.utils.logger.TensorBoardLogger":{log_histogram:[24,2,1,""],log_histogram_raw:[24,2,1,""],log_hyperparams:[24,2,1,""],log_scalar:[24,2,1,""],log_scalars:[24,2,1,""]},"sparseml.pytorch.utils.loss":{Accuracy:[24,1,1,""],BinaryCrossEntropyLossWrapper:[24,1,1,""],CrossEntropyLossWrapper:[24,1,1,""],InceptionCrossEntropyLossWrapper:[24,1,1,""],KDLossWrapper:[24,1,1,""],KDSettings:[24,1,1,""],LossWrapper:[24,1,1,""],SSDLossWrapper:[24,1,1,""],TopKAccuracy:[24,1,1,""],YoloLossWrapper:[24,1,1,""]},"sparseml.pytorch.utils.loss.Accuracy":{calculate:[24,2,1,""],forward:[24,2,1,""],training:[24,4,1,""]},"sparseml.pytorch.utils.loss.InceptionCrossEntropyLossWrapper":{get_preds:[24,2,1,""],loss:[24,2,1,""]},"sparseml.pytorch.utils.loss.KDLossWrapper":{forward:[24,2,1,""],get_inputs:[24,2,1,""]},"sparseml.pytorch.utils.loss.KDSettings":{contradict_hinton:[24,2,1,""],teacher:[24,2,1,""],temp_student:[24,2,1,""],temp_teacher:[24,2,1,""],weight:[24,2,1,""]},"sparseml.pytorch.utils.loss.LossWrapper":{available_losses:[24,2,1,""],forward:[24,2,1,""],get_labels:[24,2,1,""],get_preds:[24,2,1,""]},"sparseml.pytorch.utils.loss.SSDLossWrapper":{get_preds:[24,2,1,""],loss:[24,2,1,""]},"sparseml.pytorch.utils.loss.TopKAccuracy":{calculate:[24,2,1,""],forward:[24,2,1,""],training:[24,4,1,""]},"sparseml.pytorch.utils.loss.YoloLossWrapper":{forward:[24,2,1,""],get_preds:[24,2,1,""],loss:[24,2,1,""]},"sparseml.pytorch.utils.model":{device_to_name_ids:[24,3,1,""],is_parallel_model:[24,3,1,""],load_epoch:[24,3,1,""],load_model:[24,3,1,""],load_optimizer:[24,3,1,""],model_to_device:[24,3,1,""],parallelize_model:[24,3,1,""],save_model:[24,3,1,""]},"sparseml.pytorch.utils.module":{ModuleDeviceContext:[24,1,1,""],ModuleRunFuncs:[24,1,1,""],ModuleRunHooks:[24,1,1,""],ModuleRunResults:[24,1,1,""],ModuleTester:[24,1,1,""],ModuleTrainer:[24,1,1,""],def_model_backward:[24,3,1,""]},"sparseml.pytorch.utils.module.ModuleDeviceContext":{default_context:[24,2,1,""],use_mixed_precision:[24,2,1,""],world_size:[24,2,1,""]},"sparseml.pytorch.utils.module.ModuleRunFuncs":{batch_size:[24,2,1,""],copy:[24,2,1,""],model_backward:[24,2,1,""],model_forward:[24,2,1,""],to_device:[24,2,1,""]},"sparseml.pytorch.utils.module.ModuleRunHooks":{invoke_batch_backward:[24,2,1,""],invoke_batch_end:[24,2,1,"
"],invoke_batch_forward:[24,2,1,""],invoke_batch_loss:[24,2,1,""],invoke_batch_start:[24,2,1,""],register_batch_backward_hook:[24,2,1,""],register_batch_end_hook:[24,2,1,""],register_batch_forward_hook:[24,2,1,""],register_batch_loss_hook:[24,2,1,""],register_batch_start_hook:[24,2,1,""]},"sparseml.pytorch.utils.module.ModuleRunResults":{append:[24,2,1,""],result:[24,2,1,""],result_list_tensor:[24,2,1,""],result_mean:[24,2,1,""],result_std:[24,2,1,""],results:[24,2,1,""]},"sparseml.pytorch.utils.module.ModuleTrainer":{num_accumulated_batches:[24,2,1,""],optim_closure:[24,2,1,""],optimizer:[24,2,1,""]},"sparseml.pytorch.utils.ssd_helpers":{DefaultBoxes:[24,1,1,""],MeanAveragePrecision:[24,1,1,""],get_default_boxes_300:[24,3,1,""],ssd_random_crop:[24,3,1,""]},"sparseml.pytorch.utils.ssd_helpers.DefaultBoxes":{as_ltrb:[24,2,1,""],as_xywh:[24,2,1,""],decode_output_batch:[24,2,1,""],encode_image_box_labels:[24,2,1,""],num_default_boxes:[24,2,1,""],scale_wh:[24,2,1,""],scale_xy:[24,2,1,""]},"sparseml.pytorch.utils.ssd_helpers.MeanAveragePrecision":{batch_forward:[24,2,1,""],calculate_map:[24,2,1,""],clear:[24,2,1,""],get_recall_levels:[24,2,1,""]},"sparseml.pytorch.utils.yolo_helpers":{YoloGrids:[24,1,1,""],box_giou:[24,3,1,""],build_targets:[24,3,1,""],get_output_grid_shapes:[24,3,1,""],postprocess_yolo:[24,3,1,""],yolo_v3_anchor_groups:[24,3,1,""]},"sparseml.pytorch.utils.yolo_helpers.YoloGrids":{get_anchor_grid:[24,2,1,""],get_grid:[24,2,1,""],num_anchor_grids:[24,2,1,""]},"sparseml.tensorflow_v1":{datasets:[26,0,0,"-"],models:[28,0,0,"-"],nn:[30,0,0,"-"],optim:[31,0,0,"-"],utils:[32,0,0,"-"]},"sparseml.tensorflow_v1.datasets":{classification:[27,0,0,"-"],dataset:[26,0,0,"-"],helpers:[26,0,0,"-"],registry:[26,0,0,"-"]},"sparseml.tensorflow_v1.datasets.classification":{cifar:[27,0,0,"-"],imagefolder:[27,0,0,"-"],imagenet:[27,0,0,"-"],imagenette:[27,0,0,"-"]},"sparseml.tensorflow_v1.datasets.classification.cifar":{Cifar100DataSet:[27,1,1,""],Cifar10DataSet:[27,1,1,""]},"sparseml.tensorflow_v1.datasets.classification.cifar.Cifar100DataSet":{name_scope:[27,2,1,""]},"sparseml.tensorflow_v1.datasets.classification.cifar.Cifar10DataSet":{name_scope:[27,2,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagefolder":{ImageFolderDataset:[27,1,1,""],SplitsTransforms:[27,1,1,""],imagenet_normalizer:[27,3,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagefolder.ImageFolderDataset":{creator:[27,2,1,""],format_iterator_batch:[27,2,1,""],image_size:[27,2,1,""],name_scope:[27,2,1,""],num_classes:[27,2,1,""],num_images:[27,2,1,""],post_resize_transforms:[27,2,1,""],pre_resize_transforms:[27,2,1,""],processor:[27,2,1,""],root:[27,2,1,""],train:[27,2,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagefolder.SplitsTransforms":{train:[27,2,1,""],val:[27,2,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagenet":{ImageNetDataset:[27,1,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagenet.ImageNetDataset":{name_scope:[27,2,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagenette":{ImagenetteDataset:[27,1,1,""],ImagenetteSize:[27,1,1,""],ImagewoofDataset:[27,1,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagenette.ImagenetteDataset":{name_scope:[27,2,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagenette.ImagenetteSize":{full:[27,4,1,""],s160:[27,4,1,""],s320:[27,4,1,""]},"sparseml.tensorflow_v1.datasets.classification.imagenette.ImagewoofDataset":{name_scope:[27,2,1,""]},"sparseml.tensorflow_v1.datasets.dataset":{Dataset:[26,1,1,""],create_spl
it_iterators_handle:[26,3,1,""]},"sparseml.tensorflow_v1.datasets.dataset.Dataset":{build:[26,2,1,""],build_input_fn:[26,2,1,""],creator:[26,2,1,""],format_iterator_batch:[26,2,1,""],name_scope:[26,2,1,""],processor:[26,2,1,""]},"sparseml.tensorflow_v1.datasets.helpers":{center_square_crop:[26,3,1,""],random_scaling_crop:[26,3,1,""],resize:[26,3,1,""]},"sparseml.tensorflow_v1.datasets.registry":{DatasetRegistry:[26,1,1,""]},"sparseml.tensorflow_v1.datasets.registry.DatasetRegistry":{attributes:[26,2,1,""],create:[26,2,1,""],register:[26,2,1,""]},"sparseml.tensorflow_v1.models":{classification:[29,0,0,"-"],estimator:[28,0,0,"-"],registry:[28,0,0,"-"]},"sparseml.tensorflow_v1.models.classification":{mnist:[29,0,0,"-"],mobilenet:[29,0,0,"-"],mobilenet_v2:[29,0,0,"-"],resnet:[29,0,0,"-"],vgg:[29,0,0,"-"]},"sparseml.tensorflow_v1.models.classification.mnist":{mnist_net:[29,3,1,""]},"sparseml.tensorflow_v1.models.classification.mobilenet":{MobileNetSection:[29,1,1,""],mobilenet:[29,3,1,""],mobilenet_const:[29,3,1,""]},"sparseml.tensorflow_v1.models.classification.mobilenet.MobileNetSection":{create:[29,2,1,""]},"sparseml.tensorflow_v1.models.classification.mobilenet_v2":{MobileNetV2Section:[29,1,1,""],mobilenet_v2:[29,3,1,""],mobilenet_v2_const:[29,3,1,""],mobilenet_v2_width:[29,3,1,""]},"sparseml.tensorflow_v1.models.classification.mobilenet_v2.MobileNetV2Section":{create:[29,2,1,""]},"sparseml.tensorflow_v1.models.classification.resnet":{ResNetSection:[29,1,1,""],resnet101:[29,3,1,""],resnet152:[29,3,1,""],resnet18:[29,3,1,""],resnet20:[29,3,1,""],resnet34:[29,3,1,""],resnet50:[29,3,1,""],resnet_const:[29,3,1,""]},"sparseml.tensorflow_v1.models.classification.resnet.ResNetSection":{create:[29,2,1,""]},"sparseml.tensorflow_v1.models.classification.vgg":{VGGSection:[29,1,1,""],vgg11:[29,3,1,""],vgg11bn:[29,3,1,""],vgg13:[29,3,1,""],vgg13bn:[29,3,1,""],vgg16:[29,3,1,""],vgg16bn:[29,3,1,""],vgg19:[29,3,1,""],vgg19bn:[29,3,1,""],vgg_const:[29,3,1,""]},"sparseml.tensorflow_v1.models.classification.vgg.VGGSection":{create:[29,2,1,""]},"sparseml.tensorflow_v1.models.estimator":{ClassificationEstimatorModelFn:[28,1,1,""],EstimatorModelFn:[28,1,1,""]},"sparseml.tensorflow_v1.models.estimator.ClassificationEstimatorModelFn":{create_loss:[28,2,1,""],create_metric_update_ops_hook:[28,2,1,""],create_metrics:[28,2,1,""],create_modifier_ops_and_update_hook:[28,2,1,""],create_predictions:[28,2,1,""],create_scaffold:[28,2,1,""],create_summary_op:[28,2,1,""],create_train_summary_hook:[28,2,1,""],create_training_op:[28,2,1,""]},"sparseml.tensorflow_v1.models.estimator.EstimatorModelFn":{create:[28,2,1,""],create_loss:[28,2,1,""],create_metric_update_ops_hook:[28,2,1,""],create_metrics:[28,2,1,""],create_modifier_ops_and_update_hook:[28,2,1,""],create_predictions:[28,2,1,""],create_scaffold:[28,2,1,""],create_train_summary_hook:[28,2,1,""],create_training_op:[28,2,1,""]},"sparseml.tensorflow_v1.models.registry":{ModelRegistry:[28,1,1,""]},"sparseml.tensorflow_v1.models.registry.ModelRegistry":{available_keys:[28,2,1,""],create:[28,2,1,""],create_estimator:[28,2,1,""],create_zoo_model:[28,2,1,""],input_shape:[28,2,1,""],load_pretrained:[28,2,1,""],register:[28,2,1,""],saver:[28,2,1,""]},"sparseml.tensorflow_v1.nn":{layers:[30,0,0,"-"]},"sparseml.tensorflow_v1.nn.layers":{activation:[30,3,1,""],conv2d:[30,3,1,""],conv2d_block:[30,3,1,""],dense_block:[30,3,1,""],depthwise_conv2d_block:[30,3,1,""],fc:[30,3,1,""],pool2d:[30,3,1,""]},"sparseml.tensorflow_v1.optim":{analyzer_module:[31,0,0,"-"],manager:[31,0,0,"-"],mask_c
reator_pruning:[31,0,0,"-"],mask_pruning:[31,0,0,"-"],modifier:[31,0,0,"-"],modifier_epoch:[31,0,0,"-"],modifier_lr:[31,0,0,"-"],modifier_params:[31,0,0,"-"],modifier_pruning:[31,0,0,"-"],schedule_lr:[31,0,0,"-"],sensitivity_pruning:[31,0,0,"-"]},"sparseml.tensorflow_v1.optim.analyzer_module":{analyze_module:[31,3,1,""]},"sparseml.tensorflow_v1.optim.manager":{ScheduledModifierManager:[31,1,1,""]},"sparseml.tensorflow_v1.optim.manager.ScheduledModifierManager":{RECAL_UPDATE:[31,4,1,""],complete_graph:[31,2,1,""],create_ops:[31,2,1,""],from_yaml:[31,2,1,""],initialize_session:[31,2,1,""],modifiers_to_string_lines:[31,2,1,""]},"sparseml.tensorflow_v1.optim.mask_creator_pruning":{BlockPruningMaskCreator:[31,1,1,""],DimensionPruningMaskCreator:[31,1,1,""],GroupedPruningMaskCreator:[31,1,1,""],PruningMaskCreator:[31,1,1,""],UnstructuredPruningMaskCreator:[31,1,1,""],load_mask_creator:[31,3,1,""]},"sparseml.tensorflow_v1.optim.mask_creator_pruning.BlockPruningMaskCreator":{group_tensor:[31,2,1,""]},"sparseml.tensorflow_v1.optim.mask_creator_pruning.DimensionPruningMaskCreator":{group_tensor:[31,2,1,""]},"sparseml.tensorflow_v1.optim.mask_creator_pruning.GroupedPruningMaskCreator":{create_sparsity_mask:[31,2,1,""],get_grouping_op:[31,2,1,""],get_mask_initializer:[31,2,1,""],group_tensor:[31,2,1,""]},"sparseml.tensorflow_v1.optim.mask_creator_pruning.PruningMaskCreator":{create_sparsity_mask:[31,2,1,""],get_mask_initializer:[31,2,1,""]},"sparseml.tensorflow_v1.optim.mask_creator_pruning.UnstructuredPruningMaskCreator":{create_sparsity_mask:[31,2,1,""],get_mask_initializer:[31,2,1,""]},"sparseml.tensorflow_v1.optim.mask_pruning":{PruningOpVars:[31,1,1,""],PruningScope:[31,1,1,""],apply_op_vars_masks:[31,3,1,""],create_graph_ops_pruning:[31,3,1,""],create_ks_schedule_ops:[31,3,1,""],create_ks_scheduled_constant_graph_ops:[31,3,1,""],create_op_pruning:[31,3,1,""],create_summaries_pruning:[31,3,1,""],get_or_create_graph_ops_pruning:[31,3,1,""],get_or_create_ks_schedule_ops:[31,3,1,""],get_or_create_ks_scheduled_graph_ops:[31,3,1,""]},"sparseml.tensorflow_v1.optim.mask_pruning.PruningOpVars":{mask:[31,2,1,""],masked:[31,2,1,""],op:[31,2,1,""],op_input:[31,2,1,""],update:[31,2,1,""]},"sparseml.tensorflow_v1.optim.mask_pruning.PruningScope":{NM_KS:[31,4,1,""],NM_KS_OPS:[31,4,1,""],OPS:[31,4,1,""],OPS_INPUT:[31,4,1,""],OPS_SCHEDULE:[31,4,1,""],OPS_SPARSITY:[31,4,1,""],OPS_SUMMARY:[31,4,1,""],OPS_UPDATE:[31,4,1,""],OP_COND_UPDATE:[31,4,1,""],OP_MASKED_VAR:[31,4,1,""],OP_MASK_ASSIGN:[31,4,1,""],OP_MASK_UPDATE:[31,4,1,""],OP_MASK_UPDATE_NO_OP:[31,4,1,""],OP_PRUNE_VARS_ASSIGN:[31,4,1,""],OP_SAVE:[31,4,1,""],OP_SPARSITY:[31,4,1,""],OP_UPDATE_READY:[31,4,1,""],OP_WEIGHT_UPDATE:[31,4,1,""],VAR_MASK:[31,4,1,""],VAR_THRESHOLD:[31,4,1,""],collection_name:[31,2,1,""],general:[31,2,1,""],model:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier":{Modifier:[31,1,1,""],ModifierProp:[31,1,1,""],ModifierSessionRunHook:[31,1,1,""],ScheduledModifier:[31,1,1,""],ScheduledUpdateModifier:[31,1,1,""],TensorFlowModifierYAML:[31,1,1,""]},"sparseml.tensorflow_v1.optim.modifier.Modifier":{complete_graph:[31,2,1,""],create_ops:[31,2,1,""],get_group:[31,2,1,""],initialize_session:[31,2,1,""],load_list:[31,2,1,""],load_obj:[31,2,1,""],modify_estimator:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier.ModifierProp":{getter:[31,2,1,""],no_serialize_val:[31,2,1,""],restrictions:[31,2,1,""],serializable:[31,2,1,""],setter:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier.ModifierSessionRunHook":{after_run:[31,2,1,""]},"sparseml.tensor
flow_v1.optim.modifier.ScheduledModifier":{start_end_steps:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier.ScheduledUpdateModifier":{update_frequency_steps:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier_epoch":{EpochRangeModifier:[31,1,1,""]},"sparseml.tensorflow_v1.optim.modifier_lr":{GroupLearningRateModifier:[31,1,1,""],LearningRateModifier:[31,1,1,""],SetLearningRateModifier:[31,1,1,""]},"sparseml.tensorflow_v1.optim.modifier_lr.GroupLearningRateModifier":{create_ops:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier_lr.LearningRateModifier":{create_ops:[31,2,1,""],get_group:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier_lr.SetLearningRateModifier":{create_ops:[31,2,1,""],get_group:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier_params":{TrainableParamsModifier:[31,1,1,""]},"sparseml.tensorflow_v1.optim.modifier_params.TrainableParamsModifier":{complete_graph:[31,2,1,""],create_ops:[31,2,1,""],params:[31,4,1,""],params_strict:[31,4,1,""],trainable:[31,4,1,""],validate:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier_pruning":{ConstantPruningModifier:[31,1,1,""],GMPruningModifier:[31,1,1,""]},"sparseml.tensorflow_v1.optim.modifier_pruning.ConstantPruningModifier":{complete_graph:[31,2,1,""],create_ops:[31,2,1,""],initialize_session:[31,2,1,""],ks_group:[31,4,1,""],params:[31,4,1,""],prune_op_vars:[31,2,1,""],sparsity:[31,2,1,""],update_ready:[31,2,1,""]},"sparseml.tensorflow_v1.optim.modifier_pruning.GMPruningModifier":{complete_graph:[31,2,1,""],create_ops:[31,2,1,""],exponent:[31,4,1,""],final_sparsity:[31,4,1,""],init_sparsity:[31,4,1,""],initialize_session:[31,2,1,""],inter_func:[31,4,1,""],ks_group:[31,4,1,""],leave_enabled:[31,4,1,""],mask_type:[31,4,1,""],params:[31,4,1,""],prune_op_vars:[31,2,1,""],sparsity:[31,2,1,""],update_ready:[31,2,1,""],validate:[31,2,1,""]},"sparseml.tensorflow_v1.optim.schedule_lr":{multi_step_lr_schedule:[31,3,1,""],step_lr_schedule:[31,3,1,""]},"sparseml.tensorflow_v1.optim.sensitivity_pruning":{SparsePruningOpVars:[31,1,1,""],pruning_loss_sens_magnitude:[31,3,1,""],pruning_loss_sens_one_shot:[31,3,1,""],pruning_loss_sens_op_vars:[31,3,1,""]},"sparseml.tensorflow_v1.optim.sensitivity_pruning.SparsePruningOpVars":{op_vars:[31,2,1,""],sparsity:[31,2,1,""]},"sparseml.tensorflow_v1.utils":{exporter:[32,0,0,"-"],helpers:[32,0,0,"-"],loss:[32,0,0,"-"],nets_utils:[32,0,0,"-"],summary:[32,0,0,"-"],variable:[32,0,0,"-"]},"sparseml.tensorflow_v1.utils.exporter":{GraphExporter:[32,1,1,""],default_onnx_opset:[32,3,1,""]},"sparseml.tensorflow_v1.utils.exporter.GraphExporter":{checkpoint_path:[32,2,1,""],export_checkpoint:[32,2,1,""],export_named_samples:[32,2,1,""],export_onnx:[32,2,1,""],export_pb:[32,2,1,""],export_samples:[32,2,1,""],onnx_path:[32,2,1,""],pb_path:[32,2,1,""],pb_to_onnx:[32,2,1,""],sample_inputs_path:[32,2,1,""],sample_outputs_path:[32,2,1,""],tensorflow_path:[32,2,1,""]},"sparseml.tensorflow_v1.utils.helpers":{tf_compat_div:[32,3,1,""]},"sparseml.tensorflow_v1.utils.loss":{accuracy:[32,3,1,""],batch_cross_entropy_loss:[32,3,1,""]},"sparseml.tensorflow_v1.utils.nets_utils":{get_gan_network_fn:[32,3,1,""],get_model_scope:[32,3,1,""],get_network_fn:[32,3,1,""],mobilenet_v1_arg_scope:[32,3,1,""]},"sparseml.tensorflow_v1.utils.summary":{write_simple_summary:[32,3,1,""]},"sparseml.tensorflow_v1.utils.variable":{any_str_or_regex_matches_tensor_name:[32,3,1,""],clean_tensor_name:[32,3,1,""],eval_tensor_density:[32,3,1,""],eval_tensor_sparsity:[32,3,1,""],get_op_input_var:[32,3,1,""],get_op_var_index:[32,3,1,""],get_ops_and_inputs_b
y_name_or_regex:[32,3,1,""],get_prunable_ops:[32,3,1,""],get_tensor_var:[32,3,1,""],is_prunable_op:[32,3,1,""]},"sparseml.utils":{datasets:[34,0,0,"-"],frameworks:[33,0,0,"-"],helpers:[33,0,0,"-"],singleton:[33,0,0,"-"],worker:[33,0,0,"-"],wrapper:[33,0,0,"-"]},"sparseml.utils.datasets":{helpers:[34,0,0,"-"],imagenet:[34,0,0,"-"],imagenette:[34,0,0,"-"]},"sparseml.utils.datasets.helpers":{default_dataset_path:[34,3,1,""]},"sparseml.utils.datasets.imagenette":{ImagenetteDownloader:[34,1,1,""],ImagenetteSize:[34,1,1,""],ImagewoofDownloader:[34,1,1,""]},"sparseml.utils.datasets.imagenette.ImagenetteDownloader":{dataset_size:[34,2,1,""],download:[34,2,1,""],download_root:[34,2,1,""],extracted_root:[34,2,1,""],split_root:[34,2,1,""]},"sparseml.utils.datasets.imagenette.ImagenetteSize":{full:[34,4,1,""],s160:[34,4,1,""],s320:[34,4,1,""]},"sparseml.utils.datasets.imagenette.ImagewoofDownloader":{dataset_size:[34,2,1,""],download:[34,2,1,""],download_root:[34,2,1,""],extracted_root:[34,2,1,""],split_root:[34,2,1,""]},"sparseml.utils.helpers":{NumpyArrayBatcher:[33,1,1,""],bucket_iterable:[33,3,1,""],clean_path:[33,3,1,""],convert_to_bool:[33,3,1,""],create_dirs:[33,3,1,""],create_parent_dirs:[33,3,1,""],create_unique_dir:[33,3,1,""],flatten_iterable:[33,3,1,""],interpolate:[33,3,1,""],interpolate_list_linear:[33,3,1,""],interpolated_integral:[33,3,1,""],is_url:[33,3,1,""],load_labeled_data:[33,3,1,""],load_numpy:[33,3,1,""],load_recipe_yaml_str:[33,3,1,""],parse_optimization_str:[33,3,1,""],path_file_count:[33,3,1,""],path_file_size:[33,3,1,""],save_numpy:[33,3,1,""],tensor_export:[33,3,1,""],tensors_export:[33,3,1,""],validate_str_iterable:[33,3,1,""]},"sparseml.utils.helpers.NumpyArrayBatcher":{append:[33,2,1,""],stack:[33,2,1,""]},"sparseml.utils.singleton":{Singleton:[33,1,1,""]},"sparseml.utils.worker":{ParallelWorker:[33,1,1,""]},"sparseml.utils.worker.ParallelWorker":{add:[33,2,1,""],add_async:[33,2,1,""],add_async_generator:[33,2,1,""],add_item:[33,2,1,""],indefinite:[33,2,1,""],shutdown:[33,2,1,""],start:[33,2,1,""]},"sparseml.utils.wrapper":{wrapper_decorator:[33,3,1,""]},sparseml:{keras:[2,0,0,"-"],log:[1,0,0,"-"],onnx:[5,0,0,"-"],optim:[9,0,0,"-"],pytorch:[10,0,0,"-"],tensorflow_v1:[25,0,0,"-"],utils:[33,0,0,"-"]}},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","function","Python function"],"4":["py","attribute","Python 
attribute"]},objtypes:{"0":"py:module","1":"py:class","2":"py:method","3":"py:function","4":"py:attribute"},terms:{"00001":22,"00010671895716335979":22,"00011739085287969578":22,"00012912993816766537":22,"00014204293198443192":22,"00015624722518287512":22,"00017187194770116264":22,"00018905914247127894":22,"00020796505671840686":22,"00022876156239024756":22,"00025163771862927233":22,"0002768014904921996":22,"0003044816395414196":22,"00033492980349556157":22,"00036842278384511775":22,"0004052650622296296":22,"0004457915684525926":22,"0004903707252978519":22,"0005394077978276372":22,"000593348577610401":22,"0006526834353714411":22,"0007179517789085853":22,"0007897469567994438":22,"0008687216524793883":22,"0009555938177273272":22,"001":[3,22,31,32],"00105115319950006":22,"001156268519450066":22,"0012718953713950728":22,"0013990849085345801":22,"0015389933993880383":22,"0016928927393268422":22,"0018621820132595267":22,"0020484002145854797":22,"0022532402360440277":22,"0024785642596484307":22,"002726420685613274":22,"0029990627541746015":22,"003298969029592062":22,"0036288659325512686":22,"003991752525806396":22,"0043909277783870364":22,"004830020556225741":22,"005":[37,38],"005313022611848316":22,"005844324873033148":22,"006428757360336463":22,"00707163309637011":22,"007778796406007121":22,"008556676046607835":22,"009412343651268619":22,"010353578016395481":22,"011359662748873234":7,"01138893581803503":22,"012527829399838533":22,"013780612339822387":22,"015158673573804626":22,"01667454093118509":22,"017953205361364e":22,"0183419950243036":22,"019539741799235344":7,"020176194526733963":22,"02219381397940736":22,"02400691612424e":22,"0244131953773481":22,"02685451491508291":22,"029539966406591206":22,"03249396304725033":22,"03574335935197537":22,"03931769528717291":22,"043249464815890204":22,"04381":17,"047574411297479226":22,"052331852427227155":22,"0544702849929435e":22,"05756503766994987":22,"06332154143694486":22,"06965369558063936":22,"0766190651387033":22,"0834705943388392e":22,"08428097165257363":22,"091268053287076e":22,"092709068817831":22,"09574":22,"0th":24,"100":[8,24,29],"1000":[17,29],"10000":[3,31],"101":[16,17,18,28],"10197997569961412":22,"1113776745352607e":22,"11217797326957554":22,"1144777789251e":22,"115909044841462e":22,"123":[12,27],"1233957705965331":22,"13573534765618642":22,"1384283767210024e":22,"140274938683989e":22,"1435888100000012e":22,"14930888242180507":22,"152":[17,18],"160px":[12,27,34],"1642397706639856":22,"177248169415655e":22,"1801":17,"18066374773038418":22,"1902":22,"1918176537727232e":22,"19873012250342262":22,"1x1":17,"200":24,"2007":13,"2012":13,"2014":13,"2015":13,"2017":13,"2186031347537649":22,"21e":22,"224":[8,12,17,27,29],"240":17,"2404634482291414":22,"256":17,"25s":6,"260":17,"2645097930520556":22,"289048368510331e":22,"29096077235726114":22,"299":17,"300":[13,17,18,24],"3109994191499957e":22,"3200568495929873":22,"320px":[12,27,34],"322515441988787e":22,"3310000000000003e":22,"3333333333333333":26,"3520625345522861":22,"3579476910000015e":22,"380":17,"38726878800751474":22,"3x2":18,"3x3":17,"40024994425817e":22,"4003948586157844e":22,"4259956668082662":22,"4420993610649954e":22,"452271214393103e":22,"456":17,"4641000000000003e":22,"4685952334890929":22,"4763699237493086e":22,"5154547568380022":22,"52592555681761e":22,"528":17,"554766986187666e":22,"559917313492238e":22,"586309297171495e":22,"5937424601000017e":22,"594972986357221e":22,"600":17,"6105100000000006e":22,"626407607736664e":22,"640":[13,24],"701723378487253e":22,"727499949325609e":22,
"7404343444773634e":22,"7449402268886447e":22,"7715610000000007e":22,"784":12,"7974983358324136e":22,"8102436848064327e":22,"819748525897502e":22,"849732675807628e":22,"853116706110002e":22,"9194342495775094e":22,"948717100000001e":22,"954302432552388e":22,"960px":35,"975":6,"978518112499371e":22,"9997":32,"abstract":[3,4,8,9,18,22,24,26,28,31],"boolean":[3,7,31,33],"break":[24,33],"byte":33,"case":[3,7,8,22,24,31],"class":[3,4,6,7,8,9,11,12,13,16,17,18,21,22,23,24,26,27,28,29,31,32,33,34,37,38],"default":[3,4,6,7,8,9,13,16,17,18,21,22,23,24,28,29,30,31,32,33,34],"enum":[4,12,22,27,34],"export":[1,2,3,10,22,23,25,31,33,35,38],"final":[3,6,17,22,24,29,31,35,37,38],"float":[3,4,6,7,8,9,11,13,17,21,22,24,29,30,31,32,33,38],"function":[3,7,8,9,13,16,17,18,21,22,23,24,26,27,28,31,32,33,34,37,38],"import":[22,37],"int":[1,3,4,6,7,8,9,11,12,13,17,18,21,22,24,26,27,29,30,31,32,33],"long":22,"new":[3,6,7,8,9,11,16,21,22,24,26,28,31,32,33],"null":9,"return":[1,3,4,6,7,8,9,11,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,34,37],"static":[3,6,7,8,9,11,16,17,22,24,26,28,31,32],"switch":[26,31],"true":[3,4,6,7,8,9,12,13,16,17,18,21,22,23,24,27,28,29,30,31,32,33,34,38],"try":[7,33],"var":[8,28,31],"while":[3,7,17,18,21,22,26,31,35,38],Axes:[6,9],For:[3,8,22,24,31,32,35,37,38],Its:22,Not:22,OPS:31,One:29,Ones:[29,30],The:[3,6,7,8,9,12,13,16,17,18,21,22,23,24,27,28,29,30,31,32,33,34,35,37,38],Then:36,There:32,Use:[3,9,22,31],Used:[21,30,31],Useful:[3,21,22,31],Uses:[28,31,32],Will:[7,8,13,18,24,33],With:37,__all__:[3,22,31,38],__loss__:22,__name__:9,_ax:[6,9],_block_shap:[3,22,31],_deepsparsebasemodelrunn:8,_dim:[3,22,31],_map_mask_to_tensor:[3,22,31],abc:[3,4,8,9,18,22,24,28,31],about:[9,18,24,33],abov:8,abs:[7,17,22],absolut:[3,8,22,31,33],accept:[3,9,21,22,31],access:[22,24],accord:[3,8,11,22,24,31],accordingli:7,account:24,accumul:24,accuraci:[22,24,32,35,38],achiev:[6,9],across:[6,9,21,22,24,32,33],act:[21,29,30],act_typ:21,activ:[1,3,7,9,10,22,23,24,29,30,31,32,35,38],adam:38,adapt:[24,32],add:[6,7,8,9,11,22,24,29,30,33,38],add_async:33,add_async_gener:33,add_item:33,add_measur:[6,9],add_model_result:[6,9],add_modifi:[3,22,31],add_ops_cr:31,add_param_group:22,add_quant_dequ:23,add_reduce_to_node_output:7,add_result:[6,9],added:[3,7,17,22,24,31,32,33],addit:[3,4,6,7,8,17,18,21,22,24,26,28,31,33,36],addition:[8,12,22,24,31,35,37,38],addtion:18,adjust:[6,22,24],adjust_current_step:22,affect:[6,9,24],after:[3,6,8,9,17,22,24,27,29,30,31,32,33,37,38],after_optim:[3,31],after_run:31,afterward:[17,18,21],again:3,against:[6,9,22,24,31,32],aggreg:[24,31],aggress:33,aka:[22,31],algorithm:[35,37],alia:[8,23,24,27,31],all:[1,3,4,6,7,8,9,12,13,16,17,18,21,22,23,24,26,27,28,30,31,32,33,34,35,37,38],all_token:[3,22,31],allow:[3,6,8,9,11,22,24,31,32,35],along:[1,3,6,13,22,24,31,33],alongsid:[3,9,22,31],alpha:22,alreadi:[8,13,22,28,34,38],also:[3,6,8,9,17,18,22,24,26,31,33,37,38],altern:35,although:[17,18,21],altogeth:22,alwai:22,among:24,amount:[3,8,17,22,24,31],amp:24,analys:24,analysi:[6,8,9,22,31],analyz:[0,1,6,22,31],analyze_lay:22,analyze_model:8,analyze_modul:31,analyzedlayerdesc:[9,22],analyzer_a:[1,10],analyzer_model:[1,5],analyzer_modul:[1,10,25],analyzer_prun:[1,10],ancestor:8,anchor:[18,24],anchor_group:[18,24],anchors_group:24,ani:[3,4,6,8,9,11,13,16,17,18,22,24,26,28,30,31,32,33,35,36,37,38],annot:[13,24,33],annotatedimagetransform:13,anoth:[3,31],any_str_or_regex_matches_param_nam:24,any_str_or_regex_matches_tensor_nam:32,anyth:[22,37,38],apart:[24,33],api:[8,26,35,37,38],appear:23,append:[24,33],appli
:[3,6,7,8,9,11,12,13,17,21,22,24,26,27,28,29,30,31,32,33,35,37,38],applic:6,applied_learning_r:22,applied_spars:22,apply_op_vars_mask:31,apply_shape_change_mult:6,apply_softmax:28,approach:35,appropri:[22,28,33],approx_ks_loss_sensit:31,approxim:[6,21,22,31],architectur:[16,17,18,28,29],area:33,arg:[3,8,9,11,16,22,26,28,31,32],arg_scop:32,arg_scope_var:32,argument:[3,9,16,17,18,22,24,28,31,37,38],around:[3,24,35],arrai:[4,7,8,24,32,33],art:35,artifici:22,arxiv:[17,22],as_classifi:17,as_default:37,as_ltrb:24,as_xywh:24,as_yolo_backbon:17,ascend:33,asd932:12,asd932_:27,ask:24,aslayertrack:22,aspect:[3,22,24,26,31],aspect_ratio:24,asregmodifi:22,asresulttyp:22,assign:[3,31],associ:[8,24,32],assum:[3,8,24,33],assumpt:24,asymmetr:[23,38],async:33,attach:[8,28],attempt:32,attibut:8,attr:8,attribut:[3,4,6,8,9,11,22,26,31,33],augment:7,augmented_model_path:7,automat:[3,38],automl:37,aux:[17,24],aux_pr:24,aux_weight:24,auxiliari:24,avail:[3,6,8,16,22,24,28,31,37,38],available_kei:[16,28],available_loss:24,averag:[6,9,24,32],avg:30,avoid:[8,32],awai:[26,31],awar:[22,23,38],axes:[6,9],axi:7,back:[8,33],backbon:[17,18],backbone_early_output_idx:18,backbone_out_channel:18,backend:[7,24],backward:[22,24],ball:[8,33],bar:[6,7,8],base:[3,4,6,7,8,9,11,12,13,16,17,18,21,22,23,24,26,27,28,29,31,32,33,34,35],base_name_scop:28,baselin:[6,9,35],baseline_a:22,baseline_averag:[6,9],baseline_loss:22,baseline_measurement_index:[6,9],baseline_measurement_kei:[6,9],basemanag:[3,9,22,31],basemodifi:[3,9,22,31],baseobject:9,baseprop:[3,9,22,31],baseschedul:[3,9,22,31],baseupd:[3,9,22,31],basic:[9,17,29],basic_session_run_hook:28,batch:[3,4,6,7,8,9,13,17,22,24,26,27,29,30,31,32,33,37],batch_cross_entropy_loss:32,batch_forward:[8,24],batch_norm:32,batch_norm_decai:32,batch_norm_epsilon:32,batch_norm_updates_collect:32,batch_siz:[6,8,9,22,24,26,32,37],batchbenchmarkresult:24,batcher:33,batchnorm2d:23,batchnorm:[8,29],batchnormparam:8,becaus:8,been:[3,8,22,24,28,30,31,32],befor:[3,4,6,8,17,22,24,27,30,31,37,38],begin:[4,22,31,33],begin_step:31,behav:22,behavior:[4,8,22],being:[3,9,21,22,24,30,31,32],belong:[16,28,31],below:[3,8,24,38],benchmark:[1,6,8,10],best:22,beta:[29,30],beta_initi:[29,30],better:[1,22],between:[3,6,8,9,11,21,22,23,24,26,31,33,38],bia:[6,8,22,29,30],bias_initi:[29,30],bias_nam:6,bias_shap:[6,8],bin:24,binari:24,binary_cross_entropy_with_logit:24,binarycrossentropylosswrapp:24,bit:[7,37],blob:8,block:[3,7,8,17,22,23,29,31,38],block_shap:[3,22,31],blockpruningmaskcr:[3,22,31],blog:[35,38],bn_node:8,bool:[3,4,6,7,8,9,11,12,13,16,17,18,21,22,23,24,27,28,29,30,31,32,33,34],boost:22,boosted_a:22,boosted_loss:22,booster:22,both:[21,22,38],bottleneck:[17,29],bottom:33,boudn:24,bound:[13,24],bounding_box_and_labels_to_yolo_fmt:13,box:[13,24],box_giou:24,boxes_a:24,boxes_b:24,break_batch:[24,33],broadcast:21,bucket:[24,33],bucket_count:24,bucket_iter:33,bucket_limit:24,buffer:[21,26],bug:35,build:[3,6,22,26,35],build_input_fn:26,build_target:24,built:[3,4,8,26,27,30,35,37],builtin:22,cach:[11,12,13,22,24,27],cacheabl:11,cacheabledataset:11,calcul:[3,6,8,9,17,22,24,31,33],calculate_flop:8,calculate_map:24,calibr:[5,6],calibrate_op_typ:7,calibrationsess:7,call:[3,4,6,9,16,17,18,21,22,24,26,31,32,37],callabl:[3,8,9,16,22,24,26,28,31,32,33],callback:[1,2,3,24,31,37],caller:32,came:24,can:[1,3,6,7,8,9,11,12,13,17,18,21,22,23,24,27,29,30,31,32,33,34,35,37,38],cannot:[3,9,22,31,38],canon:8,canonical_nam:8,cap:33,capabl:3,card:33,care:[17,18,21],cat:12,cent_crop:27,center:[24,26],center_i:24,center_square_crop:[26,27],c
enter_x:24,certain:[3,8,9,22,31,38],chain:13,chan:21,chang:[3,4,6,8,9,16,22,24,31],channel:[3,7,17,18,21,22,24,29,30,31],channel_wis:21,channels_first:30,channels_last:30,chart:[6,9],chauhan:24,check:[3,7,8,9,17,18,21,22,23,24,31,32,33,37],check_feat_lab_inp:24,check_load_model:8,check_lr:22,check_numb:33,check_opset_vers:7,checkpoint:32,checkpoint_path:32,child:8,choos:[8,22,24,38],chosen:22,cifar100:12,cifar100dataset:[12,27],cifar10:[12,29],cifar10dataset:[12,27],cifar:[10,11,25,26],cifardataset:27,class_i:27,class_nam:3,class_typ:[17,29],class_x:27,classif:[10,11,16,19,24,25,26,28,32,34],classifi:[17,18,29],classificationestimatormodelfn:28,classmethod:3,clazz:9,clean:[31,32,33,37],clean_path:33,clean_tensor_nam:32,clear:[22,24],cli:35,client:[28,31,32],clone:36,close:[3,31],closest:[6,9],closur:[22,24],cnn:18,coco:[10,11,18,24],coco_2017_yolo:13,cocodetectiondataset:13,code:[2,3,4,5,6,8,9,10,11,16,22,24,25,26,28,31,32,33,35,37,38],coeffici:[24,32],collat:13,collect:[3,9,22,24,28,31,32,33],collection_nam:31,column:24,com:[8,12,24,27,33,35,38],combin:[8,9,22,24,31],combo:24,common:[21,22,33],commonli:38,commun:35,compar:[3,6,8,9,22,24,31,32,37],compare_index:[6,9],comparison:[6,9,24],compat:[16,22,28],compil:[24,37],complet:[6,8,22,24,31,37],complete_graph:[31,37],compress:[21,33],comput:[3,7,9,12,13,15,17,18,21,24,27,29,31,32,34],compute_output_shap:3,condit:[24,31],confid:24,confidence_threshold:24,config:[3,9,22,23,37],configur:[17,18,24,29,33,37,38],connect:[3,29,30],consid:[6,24],consist:[1,33],consol:22,constant:[3,22,31,32],constant_log:22,constantli:22,constantpruningmodifi:[3,22,31,35],construct:[8,22,24],constructor:[3,9,16,21,22,28,29,31],contain:[3,4,6,8,9,17,21,22,24,26,27,28,31,32,33,35,37,38],content:[0,35],context:[22,24],continu:[3,8,22,24,31,33],contract:[3,31],contradict:24,contradict_hinton:24,control:[3,8,9,22,24,31,38],conv0:38,conv1:[22,31,37,38],conv1d:32,conv2:[37,38],conv2d:[23,30,31,32],conv2d_1:3,conv2d_5:3,conv2d_block:30,conv3:[37,38],conv3d:32,conv:[6,7,8,17,18,22,23,24,29,30,31,32,38],conv__224:7,conv__252:7,conv_net:[22,31],conv_node_param:8,conveni:[3,8,22,24,31,32,37,38],convers:[8,23,37],convert:[3,8,9,23,24,31,33,37,38],convert_kera:4,convert_model_initializers_to_spars:8,convert_relus_to_fat:21,convert_sparse_initializers_to_dens:8,convert_to_bool:33,convinteg:7,convnd:24,convolut:[8,17,18,22,29,30,32],coordin:[18,24],copi:[21,23,24],core:[6,8,9],correct:[8,9,22,24],correct_nm_analyze_model_node_id:8,corrected_lr_info:9,correctli:[4,31,35],correspond:[4,22,24,33],cosineannealingwarmrestart:[9,22],cost:22,could:[4,8,9],couldn:32,count:[22,24,33],counter:[4,24,33],cpu:[6,9,11,22,24,35],creat:[1,2,3,4,5,6,7,8,9,10,11,12,13,16,17,18,21,22,24,25,26,27,28,29,30,31,32,33,35,37,38],create_activ:21,create_dir:33,create_estim:28,create_extra:31,create_graph_ops_prun:31,create_ks_schedule_op:31,create_ks_scheduled_constant_graph_op:31,create_label:8,create_loss:28,create_metr:28,create_metric_update_ops_hook:28,create_modifier_ops_and_update_hook:28,create_op:[3,31,37],create_op_prun:31,create_parent_dir:33,create_predict:28,create_scaffold:28,create_sect:17,create_sparse_tensor:8,create_sparsity_mask:[3,22,31],create_sparsity_mask_from_abs_threshold:22,create_sparsity_mask_from_tensor:22,create_split_iterators_handl:26,create_summaries_prun:31,create_summary_op:28,create_train_summary_hook:28,create_training_op:28,create_unique_dir:33,create_zoo_model:[16,28],creation:[3,31,37,38],creator:[3,22,26,27,28,31],crop:[13,24,26],cross:[24,32],cross_entropi:24,crosse
ntropyloss:22,crossentropylosswrapp:24,csv:9,cubic:[3,22,31,33],cuda:[22,24],cudnn:24,cumul:24,current:[3,4,6,7,8,9,16,21,22,24,26,28,29,30,31,32,33,37,38],curv:33,custom:[21,32,38],custom_op_handl:32,cutoff:22,cwd:[4,24],cycl:[22,31],darknet53:17,darknet:[10,16,18],darknetsectionset:17,data:[1,5,6,7,11,12,13,22,24,26,27,28,33],data_format:30,data_load:[7,8,24],data_loader_kwarg:22,data_shap:8,data_typ:8,dataload:[6,7,8,13,22,24],dataparallel:24,datapararallel:24,dataset:[1,10,16,17,18,22,24,25,28,29,33,35],dataset_op:26,dataset_s:[12,27,34],datasetregistri:[11,26],datasetv1:26,ddp:24,deal:31,debian:36,debug:4,debug_mod:4,decai:[22,31,32,38],decay_r:[3,31],decay_step:[3,31],decim:[8,22,38],decod:24,decode_output_batch:24,deconstruct_tensor:24,decor:[3,9,11,16,22,24,26,28,31,33],decreas:[22,31],deep:35,deepspars:[6,8,24,33,35,37],deepsparseanalyzemodelrunn:8,deepsparsemodelrunn:8,def_ignore_error_tensor:16,def_model_backward:24,default_box:13,default_context:24,default_dataset:[16,28],default_dataset_path:34,default_desc:[16,28],default_devic:24,default_exponential_check_lr:22,default_image_s:32,default_loss_kei:24,default_model_fn_cr:28,default_onnx_opset:32,default_pruning_sparsities_loss:9,default_pruning_sparsities_perf:9,default_qat_qconfig:23,defaultbox:[13,24],defin:[3,4,6,8,17,18,21,22,24,28,31,32,38],definit:37,delet:8,dens:[8,30],dense_block:30,densiti:[8,24,32],depend:[22,32,36,38],deploi:35,deploy:37,depth:[17,33,37],depthwis:[17,18,29,30,32],depthwise_conv2d_block:30,dequantize_nod:8,dequantizelinear:23,deriv:[3,4,6,22,31],desc:[8,9],desc_arg:16,descend:[21,33],descent:38,describ:[9,17,29],descript:[9,16,22,28,31,33],deseri:3,design:[33,37,38],desir:[8,16,22,23,24,26,28,30,31,32,34,37,38],destin:4,detail:6,detect:[8,10,11,16,17,24,28],detector:[18,24],determin:[8,22,32,33],determinist:24,dev:35,deviat:[3,11,22,24,31,32],devic:[4,22,24,32],device_context:24,device_to_name_id:24,dict:[3,6,7,8,9,11,16,17,18,21,22,24,26,27,28,31,32,33],dictionari:[3,4,6,7,8,9,21,22,24,26,28,31,32,33,38],did:[8,22],differ:[6,9,22,23,24,28,31,32,38],dim:[3,22,24,31],dimens:[3,8,9,21,22,24,31,32,33],dimensionpruningmaskcr:[3,31],dimensionsparsitymaskcr:22,dir:[4,24],direct:[8,35],directli:22,directori:[4,7,24,28,32,33],disabl:[22,24,38],disable_bn_fus:24,disable_quantization_observer_epoch:22,disclaim:8,disk:[11,12,33],displai:[6,7,8,9],distanc:8,distil:24,distribut:[6,9,11,12,13,24,27],distributeddataparallel:24,diverg:8,divid:[3,22,31,32],divis:32,doc:[3,4,8,9,22,31,33,35],doc_str:4,document:[35,37],doe:[3,6,7,8,12,13,22,23,24,27,31,32,33,34,38],doesn:[3,11,22,31,33],dog:12,doing:[3,9,22,24,31],domain:[16,28],domainadapt:24,done:[3,24,37,38],doubl:17,down:[17,21,29],download:[12,13,27,33,34,35,37],download_root:34,downsampl:[17,29],downsample_out_channel:17,driven:35,drop:24,dropout:[17,30,32],dropout_r:30,dtype:[3,8,31,32],due:8,dure:[4,7,22,24,28,31,38],dynam:[7,8,21],dynamicquantizelinear:7,e2e_batch_second:24,e2e_batch_tim:24,e2e_batches_per_second:24,e2e_item_second:24,e2e_items_per_second:24,e2e_sec:24,each:[3,4,6,7,8,9,13,17,18,22,24,27,29,31,33,37,38],earli:[11,24],earlier:[17,24],early_stop:11,early_stop_data_load:24,early_stop_step:24,earlystopdataset:11,eas:37,easi:37,easiest:38,easili:[3,9,11,16,22,26,28,31,35,38],edg:[8,33],edge_perc:33,edit:[2,5,8,10,25,31,35,37],editor:32,effect:[3,4,9,22,31,35],efficientnet:[10,16],efficientnet_b0:17,efficientnet_b1:17,efficientnet_b2:17,efficientnet_b3:17,efficientnet_b4:17,efficientnet_b5:17,efficientnet_b6:17,efficientnet_b7:17,efficientnetsections
et:17,either:[3,6,8,23,24,30,32,33,38],element:[3,24,31,33],els:[3,8,9,21,24,30,31,33],empti:[9,21,22,31],emul:[22,38],enabl:[3,9,17,22,23,24,31,35,37,38],enable_aux:17,encapsul:31,enclos:3,encod:[13,24,35,37,38],encode_annotation_bounding_box:13,encode_image_box_label:24,encompass:35,end:[3,4,9,17,22,24,29,31,32,38],end_compar:[3,9,22,31],end_epoch:[3,9,22,31,37,38],end_pend:22,end_point:32,end_step:31,enforc:[3,8,9,21,22,24,31,38],engin:[3,4,6,8,24,35,37],enhanc:3,ensur:7,entir:[3,18,22,31],entri:22,entropi:[8,24,32],enumer:38,environ:36,epoch:[3,4,9,22,24,31,35,37],epoch_end:22,epoch_start:22,epochrangemodifi:[3,22,31,37,38],epsilon:8,equal:[3,8,9,21,22,31,32,33],equat:7,equival:24,err:[3,22,31],error:[3,16,17,18,22,31,33],error_desc:33,estim:[1,25,26,31,35],estimatormodelfn:28,etc:[6,9,16,17,18,22,24,28,30,31,33],eval:[22,31],eval_tensor_dens:32,eval_tensor_spars:32,evalu:[4,8,22,24,28,32],even:32,evenli:[3,22,24,31],event:[8,22,31,33],everi:[6,11,17,18,21,22,24,31,37,38],everyth:[24,35],exactli:[3,21,22,31],exampl:[3,7,8,22,31,32,35,37,38],exce:[3,22,31],except:[7,13,22,24,32],excit:[17,21],exclud:7,exclude_nod:7,execut:[8,9,22,24,31,32],execution_ord:9,exist:[22,24,32,33,34],exp:21,exp_channel:[17,29],exp_count:[4,24],exp_ratio:[17,29],expand:[17,21,29,33],expanded_channel:21,expans:[17,29],expansion_ratio:17,expect:[3,6,8,9,17,18,22,24,26,29,31],explor:[36,37],expon:[3,31],exponenti:[21,22,31],exponential_lr_schedul:31,exponentialdecai:[3,31],exponentiallr:[3,9,22,31,38],export_checkpoint:32,export_dir:[24,33],export_h5:4,export_kera:4,export_named_sampl:32,export_onnx:[4,24,32,37],export_pb:[32,37],export_pytorch:24,export_sampl:[4,24,32],expos:8,ext:27,extend:9,extens:33,extern:[1,10,16],extra:[3,4,6,9,13,21,22,24,28,31,32,37],extra_opset:32,extra_repr:21,extract:[7,8,18,22,24,33,34],extract_node_id:8,extract_node_shap:8,extract_nodes_shapes_ort:8,extract_nodes_shapes_shape_infer:8,extract_shap:8,extracted_root:34,extractor:[17,18],extrat:18,extrem:[6,9],factor:24,fake:23,fall:33,fals:[3,6,7,8,9,12,13,16,17,18,21,22,23,24,27,28,29,30,31,32,33,34],far:24,fast:18,fastai:[12,27],faster:[22,31,35],fat:21,fat_exp_relu:21,fat_pw_relu:21,fat_relu:21,fat_sig_relu:21,fatrelu:[1,10,22],featur:[17,18,24,26,32,35,37],feature_map:24,fed:24,feed:[4,24,26,31,32],feed_dict_cr:31,few:[32,35,37],fft:35,field:[3,8,12,13,14,15,17,18,21,23,24,27,29,31,34],figur:[6,9,22,32],file:[1,3,4,6,7,8,9,16,18,22,23,24,27,28,31,32,33,34,37,38],file_path:[3,9,22,27,31,33],filepath:7,filewrit:32,fill:37,filter:[3,22,31],final_lr:22,final_spars:[3,22,31,37,38],final_v:22,find:[8,12,13,24,27,32],find_weight_data:7,fine:[8,22,38],first:[6,8,9,18,22,24,32,33,37],fit:37,fit_gener:37,fix:24,fix_data_parallel:24,flatten:[12,33],flatten_iter:33,flexibl:37,flip:13,float16:24,float32:[7,24,37],float64:32,flop:[6,8,9,22],flow:[8,31,35],fold:8,fold_conv_bn:8,foldabl:8,foldable_nod:8,folder:[12,13,27],follow:[3,7,8,12,22,24,31,32,33,37,38],footprint:22,forc:[7,21],force_fus:7,form:[12,27,33],format:[1,3,4,6,7,9,13,22,24,31,32,33,35,37,38],format_iterator_batch:[26,27],format_repr:9,format_str:9,former:[17,18,21],formula:[3,22,31],forward:[3,17,18,21,22,23,24,31],found:[3,6,8,9,12,13,16,17,18,21,22,23,24,27,29,31,32,34,35,37],fp32:8,fraction:[3,9,22,24,31,32,38],framework:[0,1,2,3,4,5,6,9,10,18,22,24,25,26,27,28,29,30,31,32,37,38],free:3,freez:24,freeze_bn_stats_epoch:22,frequenc:[3,4,22,31],from:[1,3,4,6,7,8,9,11,12,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,35,37,38],from_config:3,from_dict:[6,9],from_model_random:8,from_r
andom:8,from_sparse_model:22,from_train:32,from_yaml:[3,22,31,37],front:[33,37,38],frozen:[22,38],full:[3,7,9,12,22,24,27,31,32,34,35],full_precis:24,fulli:[29,30,38],func:[6,9,12,22,27,31],func_get:[3,9,22,31],func_set:[3,9,22,31],further:[17,29],fuse:[7,22,23,24],fuse_dynamic_qu:7,fuse_modul:22,fuse_module_conv_bn_relu:[22,23],fusion:[7,23],futur:8,gama:29,gamma:[22,30,31,37,38],gamma_initi:[29,30],gan:32,gather:[9,21],gemm:[6,7,8],gemm_node_param:8,gen:33,gener:[1,4,6,7,8,9,10,18,21,22,24,26,31,32,33,34,35,37,38],generate_augmented_model:7,get:[3,6,8,9,22,24,26,28,31,32,33,34,38],get_anchor_grid:24,get_attr_float_val_for_nod:8,get_available_provid:8,get_batch_norm_param:8,get_config:3,get_conv_lay:24,get_default_boxes_300:24,get_default_graph:[31,32],get_default_sess:32,get_feature_extractor:18,get_gan_network_fn:32,get_grid:24,get_group:31,get_grouping_fn:22,get_grouping_op:[3,31],get_init_by_nam:8,get_input:24,get_kernel_shap:8,get_label:24,get_lay:24,get_layer_name_from_param:3,get_layer_param:24,get_linear_lay:24,get_main_logg:1,get_mask_initi:[3,31],get_model_input_nam:7,get_model_scop:32,get_named_layers_and_params_by_regex:24,get_network_fn:32,get_nm_root_logg:1,get_nod:6,get_node_attribut:8,get_node_by_id:8,get_node_input:8,get_node_input_nod:8,get_node_output:8,get_node_output_nod:8,get_node_param:8,get_nodes_by_input_id:8,get_nodes_by_output_id:8,get_numpy_dtyp:8,get_op_input_var:32,get_op_var_index:32,get_ops_and_inputs_by_name_or_regex:32,get_optim_learning_r:24,get_or_create_global_step:31,get_or_create_graph_ops_prun:31,get_or_create_ks_schedule_op:31,get_or_create_ks_scheduled_graph_op:31,get_output_grid_shap:24,get_pr:24,get_prunable_lay:24,get_prunable_nod:8,get_prunable_node_from_fold:8,get_prunable_op:32,get_qat_qconfig:23,get_quantization_param:23,get_quantization_params_dict:7,get_quantize_parent_for_dequantize_nod:8,get_recall_level:24,get_result:[6,9],get_tensor_var:32,get_terminal_lay:24,get_threshold:21,getter:[3,9,22,31],giou:24,github:[8,12,24,27,35],give:[8,16,18,22,24,27,28,30,35,38],given:[3,4,6,7,8,9,11,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,38],glob:[8,33],global:[4,22,24,31,32],global_avg:30,global_step:[3,31],global_variables_initi:[31,37],glorotuniform:[29,30],gmp:38,gmpruningmodifi:[3,22,31,37],goe:[22,24,31],gpu:[24,35],grab:[8,22,24,32],grad:22,grad_scal:24,gradient:[22,24,38],gradscal:24,gradual:[3,22,31,37,38],gradualparammodifi:22,grain:38,granular:8,graph:[3,6,7,8,22,23,24,26,27,28,29,30,31,32,37],graph_editor:[1,5],graph_optim:[1,5],graphexport:[32,37],graphkei:32,greater:[3,9,22,24,31],grid:24,grid_shap:24,ground:[24,28],ground_truth_annot:24,group:[3,8,9,17,22,24,30,31,33],group_idx:24,group_tensor:[3,22,31],groupedpruningmaskcr:[3,22,31],grouping_fn_nam:22,grouping_op_nam:[3,31],grouplearningratemodifi:31,guarante:[8,22],guid:[24,32],hack:22,had:24,half:[24,29,38],han_mobilenet:17,hand:[37,38],handl:[1,3,4,6,8,9,11,12,22,24,26,27,31,33,37,38],handler:32,happen:[4,22],hard:[24,38],hard_swish:21,hardcod:8,hardswish:21,has:[3,6,7,8,9,11,22,31,32,38],has_baselin:[6,9],have:[3,8,11,16,19,22,23,24,28,30,31,32,38],hdf5:4,head:18,height:[24,26,32],help:[1,4,24,35],helper:[0,1,4,5,6,10,11,22,25],here:[3,12,13,17,18,21,22,27,29,34,37],hidden:17,hidden_channel:17,higher:31,highest:33,hinton:24,his:37,histogram:24,hold:[3,22,31],hook:[17,18,21,22,24,28,31],horizont:13,host:35,how:[3,6,8,9,17,22,24,29,31,35],howev:[3,22,37,38],http:[8,12,17,22,24,27,33,35,38],human:[6,9],hyper:24,id_:[6,9],id_or_nam:[6,9],ident:[7,8],identif:[4,24],identifi:[4,6,
9,24,31],ides:24,ids:[8,24],ignor:[16,17,18,21,24,28,31,33],ignore_error_tensor:[16,17,18,24],iin:8,imag:[12,13,17,24,26,27,29,32,34],image_s:[12,13,24,26,27],imagefold:[10,11,25,26],imagefolderdataset:[12,27],imagenet:[1,10,11,17,18,19,25,26,28,33],imagenet_norm:27,imagenetdataset:[12,27],imagenett:[1,10,11,25,26,33],imagenettedataset:[12,27],imagenettedownload:[12,27,34],imagenettes:[12,27,34],imagewoof:[12,27,34],imagewoofdataset:[12,27],imagewoofdownload:[12,27,34],imagewoofs:[12,27],img:[6,9,27,35],immedi:[3,9,22,31],impl:26,implement:[3,4,6,8,9,12,13,17,18,21,22,23,24,26,27,29,31,33,34,35,37,38],impos:[8,22,31],imposed_k:8,improv:[22,35],in_chan:30,in_channel:17,incept:[17,24],inception_v3:[10,16],inceptioncrossentropylosswrapp:24,inceptionv3:17,inclin:38,includ:[3,6,8,22,23,24,30,31,33,35,38],include_bia:30,include_bn:30,include_modifi:24,include_nod:7,include_target:23,include_valu:8,inclus:24,incom:24,increas:[3,9,21,22,31,33,38],indefinit:[26,33],independ:22,index:[3,4,6,9,11,18,22,24,28,31,32,33,38],indic:[3,17,22,24,30,31,32],individu:[6,8,9,22,31],induc:[3,22,31,35],infer:[6,8,24,29,37],inferencesess:8,infinit:8,infinite_data_load:24,info:[1,4,6,8,9,12,13,17,22,24,27,29,33,34],inform:[3,4,6,8,9,18,21,22,24,35,37],inherit:[3,9,22,31],init:22,init_lr:[3,9,22,31,37,38],init_nam:8,init_op:[29,30],init_sect:[17,29],init_spars:[3,22,31,37,38],init_v:22,initi:[3,7,8,9,17,21,22,23,29,30,31,32,38],initial_learning_r:[3,31],initialize_logg:22,initialize_sess:31,inject:31,inp:[17,18,21,22],inp_dict:32,inp_tensor:32,inp_val:32,inplac:[8,21,23],input1:8,input2:8,input:[3,4,6,7,8,9,12,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,37],input_batch:7,input_fn:26,input_func:22,input_id:8,input_nam:[6,7,37],input_op:31,input_qtyp:7,input_shap:[3,6,8,9,16,24,28],input_tensor:3,inputs_sampl:22,inputs_sample_max:22,inputs_sample_mean:22,inputs_sample_min:22,inputs_sample_s:22,inputs_sample_std:22,inputs_spars:22,inputs_sparsity_max:22,inputs_sparsity_mean:22,inputs_sparsity_min:22,inputs_sparsity_std:22,insid:[37,38],instal:[19,35],instanc:[3,4,6,8,9,17,18,21,22,24,26,28,31,32,33],instanti:[3,11,16,26],instead:[3,7,8,9,17,18,21,22,24,31,32,38],instruct:[37,38],int8:[7,38],integ:[3,4,7,8,9,22,31,32,33],integerop:7,integr:[6,9,19,31,33,35,36,37],intend:37,intens:11,inter_func:[3,22,31,33],interact:24,interfac:33,intermedi:[7,8,32],intern:28,interpol:[3,21,22,31,33],interpolate_list_linear:33,interpolated_integr:33,intersect:24,interv:[22,31,38],intial:7,intput:32,intro:35,introduc:[6,8,9,22],invers:24,inverse_cub:[3,22,31,33],invert:[17,29],invoc:[26,31],invok:37,invoke_batch_backward:24,invoke_batch_end:24,invoke_batch_forward:24,invoke_batch_loss:24,invoke_batch_start:24,iou:24,iou_step:24,iou_threshold:24,irregular:22,is_activ:21,is_after_end_step:31,is_foldable_nod:8,is_parallel_model:24,is_prunable_nod:8,is_prunable_op:32,is_pruning_step:3,is_train:32,is_url:33,issu:[17,18],item:[8,11,13,22,24,33],iter:[3,6,7,8,11,22,24,26,27,31,33],iter_batch:[26,27],iter_step:8,iterations_per_check:6,iters_sleep_tim:6,its:[3,4,7,8,9,13,21,22,24,31,32,33,37,38],itself:22,jekyllrb:33,join:37,json:[6,9],just:[13,24],kd_set:24,kdlosswrapp:24,kdset:24,keep:[3,6,8,9,22,24,28,31,33,36],keep_param:8,kei:[3,6,8,9,11,16,19,21,22,24,26,28,31,33],kept:38,kera:[0,1,35],keras2onnx:37,keraslogg:[3,4],kerasmodifieryaml:3,kernel:[3,6,8,9,17,22,29,30,31],kernel_initi:[29,30],kernel_s:[17,30],kernel_shap:8,keyword:[3,9,16,22,28,31],kl_diverg:[6,8],knowledg:24,known:22,ks_group:31,ks_layer_desc:22,ks_loss_sensitivity_op_var:31,k
slosssensitivityanalysi:[6,9],kslosssensitivityresult:[6,9],ksperfsensitivityanalysi:[6,9],kssensitivityprogress:6,kwarg:[3,4,6,8,9,11,16,21,22,26,28,31],lab:24,label:[4,8,13,24,26,27,28,32,33],label_shap:8,labeled_data:8,larg:[22,35],larger:[22,24,31,38],last:[17,21,22,32],later:[3,9,22,31],latter:[17,18,21],layer1:22,layer2:22,layer:[1,3,4,6,8,9,17,18,21,22,23,24,25,29,31,32,38],layer_desc:22,layer_nam:[3,21,22,24],layer_norm:22,layerboostresult:22,layerwis:8,lead:31,learn:[3,9,22,24,28,31,37],learning_r:[0,1,3,22,31,38],learningr:[3,9,22,31],learningratemodifi:[3,22,31,37],least:[22,28],leav:[13,22],leave_en:[3,22,31,38],left:[24,30],len:37,length:[11,24],less:[3,9,22,31,38],lesser:38,lev:22,level:[1,3,6,8,9,11,17,22,24,31,35,38],librari:[35,37],life:31,lifecycl:[22,24],lifetim:4,like:[3,6,9,21,22,24,31,32,36,37,38],limit:[8,22,35,38],line:[9,21,31,35,37],linear:[3,6,7,22,23,24,31,33],linearli:[7,33],linux:36,list:[3,4,6,7,8,9,11,13,16,17,18,21,22,24,26,28,29,31,32,33,37,38],lite:18,littl:37,load:[3,6,7,8,9,11,16,17,18,22,24,26,27,28,31,32,33],load_desc:9,load_epoch:24,load_framework_list:9,load_framework_obj:9,load_json:[6,9],load_labeled_data:33,load_list:[3,22,31],load_manag:22,load_manager_state_dict:22,load_mask_cr:[3,22,31],load_model:24,load_numpi:33,load_obj:[3,22,31],load_optim:24,load_pretrain:28,load_recipe_yaml_str:33,load_state_dict:[21,22],load_strict:[16,17,18],loader:[8,11,13,24],local:[3,4,12,22,24,27,31,32,33,34],local_rank:24,locat:[22,24,27,37],log:[0,3,4,6,22,24,31,33,35],log_dir:4,log_histogram:24,log_histogram_raw:24,log_hyperparam:24,log_nam:24,log_path:24,log_scalar:[4,24],log_step:24,log_summari:24,log_typ:[3,9,22,31],log_upd:22,logger:[1,2,3,9,10,22,31,33],loggers_initi:22,loggersettingcallback:4,loggingmod:4,logic:[3,33],logit:[17,24,29,32,37],longer:22,look:[24,33,37,38],lookup:31,loop:22,loss:[1,4,5,6,9,10,13,22,25,28,31,38],loss_fn:[22,24],loss_kei:22,loss_measur:9,loss_tensor:31,loss_upd:22,lossesandmetricsloggingcallback:4,losswrapp:[22,24],lower:[8,22],lowest:[8,22,31,33],lr_class:[3,9,22,31,37,38],lr_kwarg:[3,9,22,31,37,38],lr_loss_sensit:22,lr_modifi:31,lr_mult:22,lrelu:21,lrlosssensitivityanalysi:[9,22],lrs:22,ltrb:[13,24],made:[3,8,22,24,31],magic:[2,5,6,8,10,25,35],magnitud:[3,8,22,31,37,38],mai:[17,18,22,24,38],main:1,make:[3,9,16,22,24,31,37],make_one_shot_iter:26,manag:[0,1,2,10,24,25,28,37],manager_state_dict:22,mani:22,manual:[22,24],map:[7,8,9,21,22,24,26,31,33],map_loc:[22,24],mark:[3,9,22,31,38],markdown:[33,37,38],mask:[3,22,24,31],mask_creat:[3,22,31],mask_creator_prun:[1,10,25],mask_differ:24,mask_prun:[1,2,10,25],mask_pruning_cr:[1,2],mask_typ:[3,22,31,37,38],mask_updat:3,masked_lay:3,maskedlay:3,master:8,match:[3,6,7,8,9,11,18,21,22,24,31,32,33,38],matmul:[7,8,31,32],matmul_node_param:8,matmulinteg:7,matplotlib:[6,9],matter:[33,37,38],max:[3,7,22,24,26,30,31,33],max_available_cor:8,max_bin:24,max_detect:24,max_epoch:9,max_node_dist:8,max_source_s:33,max_step:8,max_target_metric_loss:22,max_val:[21,24],maxim:6,maximum:[6,7,8,24,33],mdoel:8,mean:[3,6,8,9,11,12,22,24,27,31],meanaverageprecis:24,meant:[9,33],measur:[6,8,9,22,24,31,33],memori:[8,11,21,22,24,26,33],merg:[9,33],merge_desc:9,meta_canonical_nam:8,metaclass:33,metadata:6,method:[3,8,9,21,22,24,31,37],metric:[4,22,24,28,35],metric_increas:22,metric_kei:22,metrics_dict:28,metrics_initializers_dict:28,metricupdateopshook:28,microsoft:8,middl:38,might:3,mileston:[22,31,37,38],milestone_step:31,min:[3,22,24,26,31,33],min_end:[3,9,22,31],min_epoch:9,min_frequ:[3,9,22,31],min_start:[3,9
,22,31],min_val:[21,24],min_valu:8,mine:24,minim:[6,28],minimum:[3,7,8,9,22,24,31,33],miss:[3,21,22,31],missing_kei:21,mix:24,mnist:[10,11,16,18,25,28,37],mnist_net:[17,29,37,38],mnistdataset:12,mnistnet:17,mobilenet:[10,16,18,25,28,32],mobilenet_const:29,mobilenet_v1_arg_scop:32,mobilenet_v2:[10,16,19,25,28],mobilenet_v2_const:29,mobilenet_v2_width:[17,29],mobilenetsect:29,mobilenetsectionset:17,mobilenetv1:32,mobilenetv2:[17,29],mobilenetv2sect:29,mobilenetv2sectionset:17,mod_extra:[31,37],mod_op:[31,37],mode:[4,7,21,22,28,29,30,31,32],model:[1,2,3,5,6,7,9,10,11,13,21,22,23,25,26,31,32,33,35,37,38],model_aug:7,model_backward:24,model_batch_second:24,model_batch_tim:24,model_batches_per_second:24,model_const:28,model_dir:28,model_fn:31,model_fn_nam:19,model_fn_param:28,model_forward:24,model_fuse_fn_kwarg:22,model_fuse_fn_nam:22,model_input:8,model_item_second:24,model_items_per_second:24,model_nam:32,model_output:[8,24],model_prunability_magnitud:22,model_quantize_qat_export:38,model_sec:24,model_to_devic:24,modelanalyz:6,modelexport:[4,37],modelproto:[6,7,8,23],modelregistri:[16,28],modelrunn:8,moder:[16,28,33],modestli:22,modif:[22,37,38],modifi:[0,1,2,4,8,10,23,24,25,28,32,35,37],modifier_a:[1,10],modifier_epoch:[1,2,10,25],modifier_idx:22,modifier_lr:[1,2,10,25],modifier_manag:28,modifier_param:[1,2,10,25],modifier_prun:[1,2,10,25],modifier_quant:[1,10],modifier_regular:[1,10],modifierprop:[3,9,22,31],modifiers_to_string_lin:[9,31],modifiersessionrunhook:[28,31],modifieryaml:[3,9,22,31],modify_estim:[31,37],modoel:29,modul:[0,35],moduleanalyz:22,moduleasanalyz:22,moduleasoneshootboost:22,modulebenchmark:24,moduledevicecontext:24,moduleexport:[24,37],moduleparampruningmask:22,modulepruninganalyz:22,modulerunfunc:[22,24],modulerunhook:24,modulerunn:24,modulerunresult:[22,24],moduletest:[22,24],moduletrain:[22,24],momentum:[8,22],monitor:[6,22],monitored_sess:28,more:[6,8,12,13,21,22,27,33,34,37,38],most:[22,32,37,38],move:[6,17,22,31,32],much:[6,9,22,24,31],multi:[3,9,17,21,22,29,31,33],multi_step_lr_schedul:31,multibox:24,multipl:[3,7,9,22,24,31,33,38],multipli:[17,22,29,31,38],multisteplr:[3,9,22,31,37,38],must:[3,4,8,9,11,19,21,22,23,24,28,31,33,34,37,38],n_box:24,name:[3,4,6,7,8,9,11,16,18,21,22,24,26,27,28,29,30,31,32,33,34,37,38],name_or_regex_pattern:[24,32],name_prefix:[24,33],name_scop:[26,27],named_modul:[22,23],namedlayerparam:24,namedtupl:21,namespac:1,nativ:[37,38],natur:35,nbit:7,ndarrai:[7,8,24,32,33],nearli:35,necessari:[3,7,8,24,31,37],need:[3,17,18,21,22,31,37,38],neg:[21,24],nest:33,net:[28,30,32],net_output:28,nets_factori:32,nets_util:[1,25],network:[3,6,8,9,17,18,21,22,29,30,32,35,37],network_fn:32,network_input_shap:8,neural:[2,5,6,8,9,10,21,22,25,30,35,37],neuralmag:[35,38],neuralmagicml:38,never:[3,9,22,24],new_mask:24,new_quantized_nam:7,newli:24,next:[4,26],nightli:35,nlp:[16,28],nm_conditional_upd:31,nm_dataset:[12,13,27],nm_k:31,nm_ks_op:31,nm_mask:31,nm_mask_assign:31,nm_mask_upd:31,nm_mask_update_no_op:31,nm_masked_var:31,nm_prune_vars_assign:31,nm_result:8,nm_root:1,nm_save:31,nm_sparsiti:31,nm_threshold:31,nm_update_readi:31,nm_weight_upd:31,nms:24,no_fus:22,no_serialize_v:[3,9,22,31],node:[6,7,8,9,23],node_id:8,node_shap:6,nodeanalyz:6,nodearg:8,nodeparam:8,nodeproto:[7,8,23],nodes_to_exclud:7,nodes_to_quant:7,nodeshap:[6,8],nois:[6,9,11],noisydataset:11,non:[3,8,22,24,31,32],none:[3,4,6,7,8,9,12,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,37],nonzero:[3,31],nor:3,norm:[8,17,22,24,29,30,32],normal:[6,8,9,11,13,22,27,29,32,37],normalizer_fn:32,note:[
3,8,11,21,22,24,28,31,33,37,38],notebook:36,noth:[8,24],now:[7,33],npy:[24,33],npz:[7,24,33],nsdf3:[12,27],nthread:8,num:21,num_accumulated_batch:24,num_anchor:24,num_anchor_grid:24,num_block:[17,29],num_bucket:33,num_channel:21,num_class:[12,17,18,24,27,29,32],num_cor:[6,8,9],num_default_box:24,num_featur:32,num_imag:27,num_iter:8,num_parallel_cal:26,num_recall_level:24,num_sampl:8,num_train_batch:37,num_upd:31,num_val:24,num_warmup_iter:8,num_work:[11,33],number:[3,4,6,7,8,9,11,17,18,21,22,23,24,26,27,29,30,31,32,33,37,38],numer:[3,31],numpi:[4,7,8,24,32,33],numpyarraybatch:33,obj:[3,22,31],object:[3,4,6,7,8,9,11,13,16,17,18,22,23,24,26,28,29,30,31,32,33,34,37],observ:[22,23],obtain:8,occur:[33,35],off:[3,9,12,22,31],offer:[3,31],offici:[35,37],offset:[13,24],old:24,old_mask:24,omit:[17,32],on_epoch_begin:4,on_epoch_end:4,on_predict_batch_begin:4,on_predict_batch_end:4,on_predict_begin:4,on_predict_end:4,on_test_batch_begin:4,on_test_batch_end:4,on_test_begin:4,on_test_end:4,on_train_batch_begin:4,on_train_batch_end:4,on_train_begin:4,on_train_end:4,onc:[3,8,9,22,31,37,38],one:[3,6,7,8,17,18,21,22,24,28,30,31,32,33,38],one_shot_ks_loss_sensit:31,ones:[3,21,22,31],onli:[3,7,8,9,11,13,21,22,23,24,28,31,33,35,37,38],only_serializ:9,onnx:[0,1,4,22,23,24,32,35,38],onnx_fil:[7,8],onnx_nodes_spars:8,onnx_onnx_rel_1_6_ml_pb2:[6,7,8,23],onnx_path:32,onnx_runtime_graph_optim:8,onnxquant:7,onnxruntim:[6,8],onto:[22,24,31],oop:21,op_cond_upd:31,op_input:[31,32],op_mask_assign:31,op_mask_upd:31,op_mask_update_no_op:31,op_masked_var:31,op_nam:31,op_prune_vars_assign:31,op_sav:31,op_spars:31,op_ten:31,op_typ:[6,7,8,31],op_update_readi:31,op_var:31,op_weight_upd:31,openvino:8,openvinomodelrunn:8,oper:[3,6,7,8,9,21,23,24,28,30,31,32],ops:[3,4,7,8,22,23,26,27,28,29,30,31,32,37],ops_input:31,ops_schedul:31,ops_spars:31,ops_summari:31,ops_upd:31,opset:[4,7,24,32],optim:[0,1,2,5,8,10,16,17,18,24,25,28,33,35],optim_categori:33,optim_closur:24,optim_full_nam:33,optim_nam:33,optim_target:33,optimization_level:[6,8],optimization_recip:[3,22,31,33],optimizationrecip:[3,22,31,33],optimizer_post_step:22,optimizer_pre_step:22,optimizer_v2:3,optimizers_post_step:22,optimizerv2:3,option:[3,4,6,7,8,9,12,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,38],order:[6,7,9,22,33,36],ordereddict:33,org:[17,22],org_model:7,orig:[9,26],origin:[3,7,8,9,11,12,13,17,22,24,27,31,32,34],ort:8,ortmodelrunn:8,other:[1,3,6,8,9,18,22,24,31,32,33,38],otherwis:[3,6,8,9,12,13,16,17,21,22,23,24,26,27,29,30,31,32,33,34],ouput:8,out:[3,6,9,17,18,22,24,29,31,32,37],out_chan:30,out_channel:[17,18,29],out_dict:32,out_tensor:32,output:[3,4,6,7,8,9,12,13,17,18,21,22,23,24,26,27,28,29,30,32,33,37],output_block:17,output_dir:[4,24,32,37],output_edg:7,output_file_path:23,output_func:22,output_id:8,output_model_path:7,output_nam:[6,37],output_shap:[6,8,9],outputs_sampl:22,outputs_sample_max:22,outputs_sample_mean:22,outputs_sample_min:22,outputs_sample_s:22,outputs_sample_std:22,outputs_spars:22,outputs_sparsity_max:22,outputs_sparsity_mean:22,outputs_sparsity_min:22,outputs_sparsity_std:22,outsid:[22,31,33],over:[3,8,21,22,24,31,35,38],overal:[6,8,9,22,24],overprecis:35,overrid:[3,8,17,18,22,23,24,28,31,32,37],overridden:[17,18,21,22],override_bn_subclasses_forward:23,override_model_batch_s:8,overwrit:[8,21],overwrite_input_nam:8,overwritten:[22,23,31],own:[4,21,24,33,38],pack:7,packag:[0,35,37],pad:[6,26,30],pair:[24,32],paper:[17,18,21,22,24,29],parallel:[22,24,26,33],parallelize_model:24,parallelwork:33,param:[3,6,8,9,17,18,22,23,24,28,31,32,33,35,
37],param_data:22,param_grad:22,param_group:22,param_init:22,param_mask:22,param_nam:[7,8,22,24],param_spars:22,param_sparsity_dim:22,param_unmask:22,paramet:[1,3,4,6,7,8,9,11,12,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,34,38],parameter:35,params_count:8,params_dim:9,params_strict:[3,22,24,31],params_zero_count:8,parent:[9,33],pars:[23,26,37,38],parse_optimization_str:33,part:[7,24],particular:[8,24],pass:[3,4,6,8,9,17,18,21,22,23,24,28,31,32,33,37],path:[3,4,6,7,8,9,16,17,18,22,23,24,27,28,31,32,33,34,37],path_file_count:33,path_file_s:33,pattern:[3,8,22,24,31,32,33,38],pb_path:32,pb_to_onnx:32,penalti:[22,38],pend:22,per:[3,7,17,18,21,22,24,31,37,38],per_channel:7,percent:33,percentag:[22,33,38],perf:[6,9,16,17,18,28],perform:[1,2,5,6,7,8,9,10,13,17,18,21,22,23,24,25,35,38],period:[3,22,31,38],permiss:32,persist:21,physic:[6,8,9],pick:32,piecewis:21,pil:[13,24],pip:36,pipelin:[13,35,38],pixel:24,place:[3,8,21,22,23,24],placehold:37,plot:[6,9],plot_integr:[6,9],plot_loss_kei:9,plu:35,plugin:[3,9,22,31],png:12,point:[3,7,8,9,13,17,18,22,23,24,31,38],pool2d:30,pool:[30,33],pool_siz:30,portion:38,posit:[8,24,28,33],possibl:[8,33,37],post:[7,24],post_resize_transform:27,postprocess_yolo:24,postprocessing_fn:24,potenti:24,power:38,pre:[7,13,22,28,37,38],pre_resize_transform:27,preced:[3,17,18,22,24,31,33],precis:[22,24,38],preconfigur:[17,18,28],pred:24,predict:[4,8,24,28,31],predicted_box:24,predicted_l:24,predicted_label:24,predictor:24,prefetch:26,prefetch_buffer_s:26,prefix:[3,22,24,31,32,33,38],prelu:21,prepare_qat:22,prepopul:[6,9],preprocess_for_ev:27,preprocess_for_train:27,preprocessing_typ:13,present:[8,33],preserv:[24,38],pretrain:[16,17,18,28,33],pretrained_backbon:18,pretrained_dataset:[16,17,18,28],pretrained_path:[16,17,18,28],pretrained_path_backbon:18,previou:[6,8,9],previous:[6,22,24,31],primit:33,print:[4,6,9,21,24],print_r:[6,9],prior:22,probabl:13,process:[3,6,7,8,9,13,22,24,26,27,31,33,35,37,38],process_batch:7,processor:[26,27],product:35,profil:22,programmat:22,progress:[6,7,22,24,31],proj_channel:[17,29],project:[17,29,33],promot:22,prop:[3,9,22,31],propag:24,proper:[8,22,24,30,31,32],properli:[9,13,33],properti:[3,4,6,7,8,9,12,13,18,21,22,23,24,27,31,32,33,34],proport:24,proto:8,protobuf:37,provid:[3,7,8,13,16,17,18,22,24,28,32,33,37,38],prunabl:[3,6,8,9,22,24,31,32],prunable_equation_sensit:6,prunable_lay:3,prunable_param:[6,9],prunable_params_dim:9,prunable_params_zero:6,prune:[3,6,8,9,16,22,28,31,33,35,37],prune_model_one_shot:8,prune_model_one_shot_it:8,prune_op_var:31,prune_unstructur:8,pruned_lay:3,pruning_loss_sens_approx:6,pruning_loss_sens_magnitud:[6,22,31],pruning_loss_sens_magnitude_it:6,pruning_loss_sens_one_shot:[6,22,31],pruning_loss_sens_one_shot_it:6,pruning_loss_sens_op_var:31,pruning_op_var:31,pruning_perf_sens_one_shot:6,pruning_perf_sens_one_shot_it:6,pruning_schedul:3,pruning_var:3,pruninglosssensitivityanalysi:[6,9,22,31],pruningmaskcr:[3,22,31],pruningopvar:31,pruningperfsensitivityanalysi:[6,9],pruningschedul:3,pruningscop:31,pruningsensitivityresult:[6,9],pth:[22,24],pull:[31,33],push:24,put:[6,9,17,22,24,26,29,31],pypi:35,python:[3,4,8,24,26,27,28,29,30,31,32,33,36],pythonlogg:[4,24],pytorch:[0,1,28,29,35,38],pytorchlogg:[22,24],pytorchmodifieryaml:22,qat:[22,23,38],qconfig:23,qlinear:7,qlinearconv:7,qlinearmatmul:7,qlinearop:7,qtype:7,quantiz:[5,6,8,10,22,35],quantization_mod:7,quantization_param:7,quantizationmod:7,quantizationmodifi:[22,38],quantizationparam:23,quantize_data:7,quantize_model:7,quantize_model_post_train:[5,6],quantiz
e_qat_export:[10,22,38],quantize_rang:7,quantize_resnet_identity_add_input:8,quantize_torch_qat_export:23,quantized_data:7,quantized_model:8,quantized_value_typ:7,quantizediniti:7,quantizedvalu:7,quantizedvaluetyp:7,quantizelinear:23,quantizerd:38,quantwrapp:23,queue:33,quick:35,quickli:38,rais:[3,7,8,9,17,18,22,24,31,32,33],raise_on_error:33,rand_crop:27,rand_tran:[12,13,27],randn:37,randndataset:11,random:[8,11,24,26],random_flip_left_right:27,random_flip_up_down:27,random_horizontal_flip_image_and_annot:13,random_scaling_crop:[26,27],randomcrop:[12,13,27],randomhorizontalflip:[12,13,27],randomli:[13,22,26],rang:[3,6,9,22,24,31,33,38],rank:[3,22,24,31],rate:[3,9,22,24,30,31,35,37],ratio:[17,24,26,29],ratio_rang:26,reach:[3,22,24,31],read:[23,32,37],readabl:[6,9],readi:[3,9,22,31],real:7,reappli:22,reason:[6,9,33],recal:24,recal_upd:31,recalibr:[3,6,9,22,31],receiv:22,recent:22,recip:[3,17,18,21,22,24,31,33,35,37],recipe_typ:[3,17,18,22,24,31,33],recogn:32,recommend:[10,11,16,36],record:[6,9,22,24,31],recov:[35,38],recreat:[3,9,22,31],reduc:[3,7,22,31,32],reduce_rang:23,reducemax:7,reducemin:7,redund:35,ref:[26,27],refer:[16,24,28],referenc:22,reg:22,reg_func:22,reg_ten:22,regex:[3,22,24,31,32,38],region:21,regist:[11,16,17,18,19,21,26,28],register_batch_backward_hook:24,register_batch_end_hook:24,register_batch_forward_hook:24,register_batch_loss_hook:24,register_batch_start_hook:24,register_wrapped_model_constructor:16,registri:[1,10,13,19,25],regular:[22,32],regularize_depthwis:32,relat:[3,6,9,11,12,13,14,15,16,17,18,20,21,22,24,26,27,28,29,31,33],relev:8,reli:4,relu6:[21,30],relu:[7,8,21,22,23,29,30],relu_1:7,relu_2:7,remain:[32,38],remov:[3,8,22,24,28,31,32,35,37],removablehandl:24,remove_dynamic_tl_var:28,remove_node_and_params_from_graph:8,remove_pruning_mask:3,reorder:31,repeat:[24,26,37],repeat_count:26,replac:[8,21],replace_activ:21,repo:[16,19,28],repo_sourc:[16,28],report:[6,22,24],repositori:[35,36],repr:9,repres:[3,6,7,9,13,18,22,24,26,31,32,33],represent:[3,6,8,9,21,22,24,31,33,37],request:[22,24,35],requir:[3,8,22,28,31,36,37,38],reset:[6,22,24,28,31],reshap:[8,27],residu:17,resiz:[12,26,27,34],resnet101:[17,29],resnet101_2xwidth:17,resnet152:[17,29],resnet18:[17,29],resnet20:29,resnet34:[17,29],resnet50:[17,29],resnet50_2xwidth:17,resnet:[7,8,10,16,18,25,28],resnet_const:29,resnet_model:7,resnetsect:29,resnetsectionset:17,resnetv2_101:17,resnetv2_152:17,resnetv2_18:17,resnetv2_34:17,resnetv2_50:17,resnext101:17,resnext152:17,resnext50:17,resnext:17,respect:[8,24],respons:24,rest:[33,37,38],restor:28,restrict:[3,9,22,31],restrict_en:[3,9,22,31],restrict_extra:[3,9,22,31],restrict_initi:[3,9,22,31],result:[3,6,8,9,22,24,28,31,35,37],result_list_tensor:24,result_mean:24,result_std:24,result_typ:22,results_max:22,results_mean:22,results_min:22,results_model:[6,9],results_std:22,retrain:[6,8,22,31],retriev:[3,4,8,16,28,31,38],reus:31,revers:3,revert:22,rewrit:8,right:[3,24],rmax:7,rmin:7,root:[1,12,13,27,34],round:24,routin:7,rule:38,run:[3,4,6,7,8,9,11,17,18,21,22,24,26,28,29,30,31,32,33,34,37,38],run_batches_on_devic:24,run_config:28,run_context:31,run_extra_opt:7,run_func:24,run_it:8,run_lay:22,run_valu:31,runconfig:28,runner:8,runtim:8,runtimeerror:22,s160:[12,27,34],s320:[12,27,34],same:[3,8,22,23,24,30,32,35],sampl:[3,4,8,17,22,24,29,31,32,37,38],sample_batch:[4,24,37],sample_inputs_path:32,sample_label:[4,24],sample_outputs_path:32,sample_s:24,save:[4,6,7,9,23,24,28,31,32,33,34,37],save_desc:9,save_json:[6,9],save_model:[24,37],save_numpi:33,saver:[28,32],scaffold:[28,31
],scale:[7,8,12,22,23,24,26],scale_nam:7,scale_rang:26,scale_wh:24,scale_xi:24,scaler:24,schedul:[3,9,22,31,38],schedule_lr:[1,25],schedule_op:31,scheduled_log_upd:22,scheduled_upd:22,scheduledmodif:22,scheduledmodifi:[3,9,22,31],scheduledmodifiermanag:[3,22,24,28,31,37],scheduledoptim:[22,24,37],scheduledupdatemodifi:[3,22,31],scope:[26,27,29,30,31,32],score:24,score_threhsold:24,script:[1,35,36,38],se_mod:17,se_ratio:17,seamless:35,seamlessli:37,search:8,sec_set:[17,29],second:[6,8,9,24,33,38],section:[17,29,37,38],see:[4,12,27,32],seed:24,segment:13,select:[3,22,31],self:[3,22,27,31],sensit:[0,1,6,22,24,31],sensitivity_a:[1,10],sensitivity_lr:[1,10],sensitivity_prun:[1,5,10,25],separ:[17,21,22,29],sequenc:31,sequenti:[17,18,23],serial:[3,9,22,24,31,32],serializ:[3,9,22,31],sess:[28,31,32,37],session:[23,28,31,32,35],session_run_hook:31,sessionrunhook:[28,31],sessionrunvalu:31,set:[1,3,4,6,7,8,9,17,21,22,23,24,26,29,30,31,32,37,38],set_deterministic_se:24,set_logging_level:1,set_optim_learning_r:24,set_param_data:22,set_param_mask:22,set_param_mask_from_abs_threshold:22,set_param_mask_from_spars:22,set_param_mask_from_weight:22,set_relu_to_fat:21,set_threshold:21,set_to_non:22,set_weight:3,setlearningr:[3,9,22,31],setlearningratemodifi:[3,22,31],setparammodifi:22,setter:[3,9,22,31],setup:[1,8,22,24,37,38],setweightdecaymodifi:22,shall:4,shape:[3,6,8,9,11,16,17,18,22,24,28,29,31,32,33],shape_overrid:32,share:[3,8,9,24],shift:[8,24],shot:[6,8,18,22,31],should:[3,4,6,7,8,9,11,12,16,17,18,21,22,24,26,27,28,29,31,32,33,38],should_prun:3,show:8,show_progress:[6,7,8,22,31],shuffl:26,shuffle_buffer_s:26,shutdown:33,side:26,sigmoid:[21,29,30],sign:7,signal:31,signatur:32,significantli:35,silent:[17,18,21],similarli:24,simpl:[3,17,22,24,29,31,32,35],simpler:37,simplif:35,simplifi:29,simplified_arch:29,sinc:[17,18,21],singl:[4,8,17,18,21,22,24,29,33],singleton:[0,1],size:[6,8,9,12,13,17,18,22,24,26,27,29,30,32,33,34],size_i:24,size_x:24,skip:22,slash:31,sleep:6,slice:33,slightli:24,slim:32,slope:21,small:[22,32],smaller:[35,38],smallest:22,smoother:17,softmax:[24,28,29,30],solut:[3,22,31],some:[3,4,8,22,24,31,37],someth:24,somewher:38,sort:[22,33],sort_highest:33,sort_kei:33,sourc:[1,3,4,6,7,8,9,11,12,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,34,35],space:24,sparisti:31,spars:[3,6,8,9,22,31,35,38],sparse_averag:[6,9],sparse_comparison:[6,9],sparse_integr:[6,9],sparse_measur:[6,9],sparse_tensor:[1,5],sparse_tensor_to_dens:8,sparseml:[36,37,38],sparsepruningopvar:31,sparsetensorproto:8,sparsezoo:[3,16,17,18,22,24,28,31,33,35,37,38],sparsif:22,sparsifi:[22,35,37,38],sparsiti:[3,4,6,8,9,21,22,24,31,32,35,38],sparsity_level:[6,22,31],sparsity_mask:22,sparsity_op:31,sparsity_threshold:8,sparsitymaskcr:[3,22,31],sparsitymeasur:8,sparsti:9,sparstii:9,spec:[8,28],special:[7,24],specif:[3,6,9,16,17,18,21,22,24,28,31,34,38],specifi:[3,7,8,11,16,22,24,26,28,29,31,38],specific_result_typ:22,split:[8,24,26,34],split_canonical_nam:8,split_dataset:26,split_root:34,splitstransform:27,spp:18,squar:[24,26],squeez:[17,21],squeezed_channel:21,squeezeexcit:21,src:[24,35],ssd300:[18,24],ssd300_resnet101:18,ssd300_resnet152:18,ssd300_resnet18:18,ssd300_resnet34:18,ssd300_resnet50:18,ssd300lite:18,ssd300lite_mobilenetv2:18,ssd300mobilenetbackbon:18,ssd300resnetbackbon:18,ssd:[10,13,16,24],ssd_collate_fn:13,ssd_helper:[1,10,13],ssd_lite:[10,16],ssd_mobilenet:[10,16],ssd_random_crop:[13,24],ssd_random_crop_image_and_annot:13,ssd_resnet:[10,16],ssdbackbon:18,ssdlite:18,ssdlosswrapp:24,ssummarysaverhook:28,stabl:35,stack:
[13,24,33],stage:24,standard:[1,3,9,11,12,13,17,18,21,22,24,27,29,31,32,33,38],start:[3,4,9,22,24,31,33,38],start_end_step:[3,31],start_epoch:[3,9,22,31,37,38],start_pend:22,start_step:[4,31],startup:38,stat:22,state:[3,16,17,18,22,23,24,31,33,35],state_dict:[21,22],std:[6,9,12,27],stddev:32,stdev:11,step:[3,4,6,8,9,22,24,31,32,37,38],step_count:24,step_lr_schedul:31,step_siz:31,steplr:[3,9,22,31,38],steps_per_epoch:[3,9,22,31,37],steps_per_measur:[6,22,31],still:37,stochast:38,stop:[3,9,11,22,24,31,33,38],storag:31,store:[3,6,7,8,9,11,22,24,31,33,37,38],store_init:22,store_unmask:22,str:[3,4,6,7,8,9,11,12,13,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33,34],strict:[21,24],strictli:[21,22],stride:[6,9,17,29,30],string:[3,4,8,9,16,17,18,21,22,24,28,30,31,32,33,38],strip:8,strip_first_dim:8,structur:[3,6,22,27,31,38],strucur:[3,31],stub:[3,17,18,22,24,31,33],student:24,style:[32,37],sub:[16,18,26,28,32],sub_arch:18,sub_architectur:[16,28],sub_domain:[16,28],subarrai:33,subclass:[3,8,17,18,21,22,23,31],submodul:[0,2,5,10,25,35],subpackag:[0,35],subsect:38,subsequ:[22,24,31],subset:9,suggest:24,suit:35,sum:24,sum_squar:24,sum_val:24,summari:[1,24,25,28,31,37],summary_op:31,summarysaverhook:28,summarywrit:[4,24],suppli:[4,6,8,9,16,17,18,22,24,28,29,31,32],support:[3,7,8,9,13,17,22,24,29,30,31,33,35,37,38],suppress:24,sure:[3,9,16,22,31,37],surround:23,svg:35,swap_node_output:8,swish:21,symmetr:[7,23,38],symmetric_activ:7,symmetric_pad2d:30,symmetric_weight:7,syntax:[3,9,22,31],system:[2,5,8,10,22,24,25,31,32,33,36,37,38],tag:[4,22,24,32],take:[3,4,6,8,13,17,18,21,22,24,26,31,33,35,37],taken:[3,4,9,22,24,26,31],tar:[8,33],target:[3,9,18,22,23,24,31,33,38],target_spars:3,task:[24,33],teacher:24,techniqu:35,temp_stud:24,temp_teach:24,temperatur:24,ten:[18,21,22,24,31,32],tensor:[3,4,8,9,13,16,17,18,21,22,24,26,27,28,29,30,31,32,33],tensor_dens:24,tensor_export:[24,33],tensor_nam:32,tensor_sampl:24,tensor_spars:24,tensorboard:[4,22,24,31,32,37],tensorboardlogg:[4,24],tensorflow:[3,4,24,26,27,28,29,30,31,32,35,38],tensorflow_estim:[28,31],tensorflow_path:32,tensorflow_v1:[0,1,37],tensorflowmodifieryaml:31,tensorproto:[7,8],tensors_batch_s:24,tensors_export:[24,33],tensors_module_forward:24,tensors_to_devic:24,tensors_to_precis:24,tensorshap:3,termin:[9,24],terminolog:24,test:[1,4,6,9,22,24,36],test_siz:24,tester_logg:22,tester_run_func:22,tf2onnx:37,tf_compat:37,tf_compat_div:32,than:[3,9,22,24,31,38],thei:[3,8,9,22,24,31,38],them:[3,8,17,18,21,22,24,31,33],themselv:[3,31,38],therefor:[3,8],thi:[3,4,6,7,8,9,11,12,13,17,18,21,22,23,24,27,31,32,33,35,36,37,38],thing:[3,6,9,22,31],those:[8,13,24,31,38],thread:[6,8,33],three:[13,24],threshold:[7,8,21,22,24,31],through:[3,4,6,7,8,9,11,17,22,24,31,32,37,38],throughout:33,til:31,time:[3,4,6,8,9,11,22,24,26,31],titl:[6,9],tl_ignore_ten:28,to_devic:24,to_string_lin:9,togeth:[3,17,22,29,31,33],token:[3,22,31,33],too:[6,9],took:24,tool:[1,7,23,37],toolkit:35,top1:24,top1acc:22,top5:24,top5acc:22,top:[11,22,24,33,35,37],topk:24,topkaccuraci:24,topmost:32,torch:[11,13,16,17,18,21,22,23,24,37],torch_distributed_zero_first:24,torchvis:[10,12,13,16],total:[8,9,11,24,33],total_flop:9,tour:35,toward:[6,38],tqdm:[6,7,8],track:[9,22,24,31],track_grad_mom:22,track_input:22,track_inputs_spars:22,track_output:22,track_outputs_spars:22,tracked_input:22,tracked_output:22,trail:31,trailing_slash:31,train:[1,3,4,7,9,12,13,17,18,21,22,23,24,26,27,28,29,30,31,32,34,35,37],train_data:37,train_on_batch:37,trainabl:[3,22,31,32,38],trainable_vari:32,trainableparamsmodifi:[3,22,31],tr
ainer_logg:22,trainer_run_func:22,transfer:[3,22,24,28,31,33,38],transform:[3,7,12,13,22,27,31],trasnform:7,travers:8,traverse_previ:8,treat:[24,32,33],treatment:32,tri:22,truncat:8,trunctat:32,truth:[24,28],truthi:[3,9,22,31],tune:22,tupl:[3,6,7,8,9,11,13,16,17,18,22,23,24,26,27,28,30,31,32,33],twice:[29,38],two:[8,13,22,24,31],type:[3,4,6,7,8,9,13,17,18,21,22,23,24,27,30,31,33,34],type_:[9,30],typic:[3,8,24],uint8:7,unchang:38,under:[9,24,26,27,28,29,30,31,32,33,37],unexpect:21,unexpected_kei:21,union:[3,4,6,7,8,9,11,12,16,17,18,21,22,23,24,26,27,28,29,30,31,32,33],uniqu:[8,33],unit:[6,9],unless:22,unmask:[22,24],unset:[26,30],unsign:7,unstructur:[3,8,22,31,37,38],unstructuredpruningmaskcr:[3,22,31],until:[3,22,31,33,38],unus:[3,9,22,30,31],updat:[3,4,6,7,8,9,22,24,28,31,32,37,38],update_freq:4,update_frequ:[3,9,22,31,37,38],update_frequency_step:[3,31],update_model_param:8,update_op:[31,32],update_readi:[3,22,31],update_step_freq:31,upper:24,url:33,use:[3,4,6,7,8,9,12,16,17,18,21,22,24,26,27,28,29,30,31,32,33,37,38],use_batchnorm:29,use_deepsparse_infer:6,use_mixed_precis:24,use_s:17,use_zipfile_serialization_if_avail:24,used:[1,3,4,6,7,8,9,11,13,16,18,22,24,26,28,31,32,33,37,38],useful:[22,38],user:[17,22,29,33,38],uses:[17,18,22,24,29,30,31],using:[3,4,7,8,13,17,21,22,24,26,27,28,29,31,32,35,36,37,38],util:[0,1,2,5,6,7,10,11,12,13,22,25,26,27,37],utk:24,val:[8,22,27,32,33],valid:[3,4,6,8,9,12,13,22,27,31,33,34],validate_learning_r:9,validate_lr_info:9,validate_onnx_fil:8,validate_schedul:9,validate_str_iter:33,validate_upd:9,valu:[3,4,6,7,8,9,12,13,21,22,23,24,27,31,32,33,34,38],valueerror:[3,8,9,31,32,33],valueinfoproto:7,var_index:32,var_index_from_train:32,var_mask:31,var_nam:[31,32],var_ten:32,var_threshold:31,variabl:[1,3,22,23,25,28,29,30,31,35],variablev1:[31,32],varianc:32,variou:18,verif:7,version:[6,7,9,16,17,18,22,24,28,29,32,33,37,38],vgg11:[17,29],vgg11bn:[17,29],vgg13:[17,29],vgg13bn:[17,29],vgg16:[17,29],vgg16bn:[17,29],vgg19:[17,29],vgg19bn:[17,29],vgg:[10,16,25,28],vgg_const:29,vggsection:29,vggsectionset:17,via:35,video:[10,11],view:[4,24],virtual:36,vision:[12,13,15,17,18,27,29,34],visual:[4,22,24],voc:[10,11,24],vocdetect:13,vocdetectiondataset:13,vocsegment:13,vocsegmentationdataset:13,wai:[8,23,24,38],wait:24,wait_between_it:6,wall:[4,24],wall_tim:[4,24],want:7,warmup:8,warmup_iterations_per_check:6,warmup_s:24,warn:33,wasn:33,websit:35,weight:[3,6,7,8,16,17,18,22,23,24,28,31,32,37,38],weight_decai:[22,32,38],weight_nam:6,weight_qtyp:7,weight_shap:[6,8],well:[4,8,24,26,32],were:[8,24],what:[3,8,9,22,31,33],when:[3,4,6,7,8,9,11,13,22,23,24,28,31,33,35,38],where:[3,6,7,8,9,17,22,24,31,33,34],whether:[4,21,22,29,32],which:[3,7,21,22,26,31,32,34,37,38],whole:11,whose:[8,18,24,33],width:[17,24,26,29,32,35],width_mult:[17,29],wildcard:32,window:30,winograd:35,wise:17,within:[3,7,8,9,17,18,21,22,24,31,32,33,35],without:[3,7,22,24,31,33],won:22,word:[3,9,22,31],work:[2,5,9,10,13,21,24,25,26,28,31,32,33,38],worker:[0,1],worker_func:33,world:24,world_siz:24,wors:22,would:[8,36],wrap:[3,9,13,16,22,23,24,31,33,37],wrapped_constructor:16,wrapper:[0,1,3,7,12,13,16,21,22,24,27],wrapper_decor:33,write:[32,37],write_simple_summari:32,writer:[4,24,32],written:[37,38],x_cur:33,x_ten:[17,21,29,30],x_val:33,xavier:32,xml:8,xxx:[12,27],xxy:[12,27],xxz:[12,27],xywh:24,yaml:[3,9,22,31,33,37,38],yaml_kei:9,yaml_str:[3,9,22,31],year:13,yeild:6,yet:22,yield:[6,24],yolo:[13,17,18,24],yolo_collate_fn:13,yolo_grid:24,yolo_help:[1,10],yolo_v3:[10,16],yolo_v3_anchor_group:24,yologrid:24,yololoss
wrapp:24,yolov3:18,you:[21,35,36,37,38],your:[21,35,36,37,38],zero:[3,6,7,8,9,21,22,23,24,29,30,31,32,33,38],zero_grad:22,zero_point:[7,8,23],zero_point_nam:7,zeroed_param:9,zeroth:24,zipfil:24,zoo:[3,16,17,18,22,24,28,31,33]},titles:["sparseml","sparseml package","sparseml.keras package","sparseml.keras.optim package","sparseml.keras.utils package","sparseml.onnx package","sparseml.onnx.optim package","sparseml.onnx.optim.quantization package","sparseml.onnx.utils package","sparseml.optim package","sparseml.pytorch package","sparseml.pytorch.datasets package","sparseml.pytorch.datasets.classification package","sparseml.pytorch.datasets.detection package","sparseml.pytorch.datasets.recommendation package","sparseml.pytorch.datasets.video package","sparseml.pytorch.models package","sparseml.pytorch.models.classification package","sparseml.pytorch.models.detection package","sparseml.pytorch.models.external package","sparseml.pytorch.models.recommendation package","sparseml.pytorch.nn package","sparseml.pytorch.optim package","sparseml.pytorch.optim.quantization package","sparseml.pytorch.utils package","sparseml.tensorflow_v1 package","sparseml.tensorflow_v1.datasets package","sparseml.tensorflow_v1.datasets.classification package","sparseml.tensorflow_v1.models package","sparseml.tensorflow_v1.models.classification package","sparseml.tensorflow_v1.nn package","sparseml.tensorflow_v1.optim package","sparseml.tensorflow_v1.utils package","sparseml.utils package","sparseml.utils.datasets package","SparseML 0.1","Installation","Quick Tour","Sparsification Recipes"],titleterms:{"export":[4,24,32,37],activ:21,analyz:9,analyzer_a:22,analyzer_model:6,analyzer_modul:[22,31],analyzer_prun:22,base:37,benchmark:24,calibr:7,callback:4,cifar:[12,27],classif:[12,17,27,29],coco:13,constantpruningmodifi:38,content:[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34],darknet:17,data:8,dataset:[11,12,13,14,15,26,27,34],detect:[13,18],efficientnet:17,epoch:38,estim:[28,37],extern:19,fatrelu:21,framework:33,gener:11,gmpruningmodifi:38,graph_editor:8,graph_optim:8,helper:[8,13,23,24,26,32,33,34],histori:35,imagefold:[12,27],imagenet:[12,27,34],imagenett:[12,27,34],inception_v3:17,instal:36,intro:38,kera:[2,3,4,37],layer:30,learn:[35,38],learning_r:9,learningratemodifi:38,log:1,logger:[4,24],loss:[8,24,32],manag:[3,9,22,31],mask_creator_prun:[22,31],mask_prun:[3,22,31],mask_pruning_cr:3,mnist:[12,17,29],mobilenet:[17,29],mobilenet_v2:[17,29],model:[4,8,16,17,18,19,20,24,28,29],modifi:[3,9,22,31,38],modifier_a:22,modifier_epoch:[3,22,31],modifier_lr:[3,22,31],modifier_param:[3,22,31],modifier_prun:[3,22,31],modifier_quant:22,modifier_regular:22,modul:[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34],more:35,nets_util:32,onnx:[5,6,7,8,37],optim:[3,6,7,9,22,23,31,37,38],overview:35,packag:[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34],param:38,pipelin:37,prune:38,pytorch:[10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,37],quantiz:[7,23,38],quantize_model_post_train:7,quantize_qat_export:23,quick:37,rate:38,recip:38,recommend:[14,20],registri:[11,16,26,28],releas:35,resnet:[17,29],resourc:35,schedule_lr:31,sensit:9,sensitivity_a:22,sensitivity_lr:22,sensitivity_prun:[6,22,31],session:37,setlearningratemodifi:38,setweightdecaymodifi:38,singleton:33,sparse_tensor:8,sparseml:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35],sparsif:[35,37,38
],ssd:18,ssd_helper:24,ssd_lite:18,ssd_mobilenet:18,ssd_resnet:18,submodul:[1,3,4,6,7,8,9,11,12,13,16,17,18,19,21,22,23,24,26,27,28,29,30,31,32,33,34],subpackag:[1,2,5,6,10,11,16,22,25,26,28,33],summari:32,tensorflow:37,tensorflow_v1:[25,26,27,28,29,30,31,32],torchvis:19,tour:37,train:38,trainableparamsmodifi:38,util:[3,4,8,24,32,33,34],variabl:[32,38],vgg:[17,29],video:15,voc:13,worker:33,wrapper:33,yolo_help:24,yolo_v3:18}}) \ No newline at end of file diff --git a/sparsezoo/_modules/index.html b/sparsezoo/_modules/index.html index c35724d1fab..4b5dbac8773 100644 --- a/sparsezoo/_modules/index.html +++ b/sparsezoo/_modules/index.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/main.html b/sparsezoo/_modules/sparsezoo/main.html index 7cdaea29ed1..1e46f3d05f4 100644 --- a/sparsezoo/_modules/sparsezoo/main.html +++ b/sparsezoo/_modules/sparsezoo/main.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              @@ -356,7 +357,6 @@

              Source code for sparsezoo.main

               DOWNLOAD_COMMAND = "download"
               SEARCH_COMMAND = "search"
               
              -logging.basicConfig(level=logging.INFO)
               LOGGER = logging.getLogger()
               
               
              @@ -507,13 +507,7 @@ 

              Source code for sparsezoo.main

                       if hasattr(model, field) and getattr(model, field) is not None
                   ]
               
              -    command_string = download_command + " ".join(
              -        # [
              -        #     " \\ \n ".join(command_strings[i : i + 2])
              -        #     for i in range(0, len(command_strings), 2)
              -        # ]
              -        command_strings
              -    )
              +    command_string = download_command + " ".join(command_strings)
                   return command_string
               
               
              diff --git a/sparsezoo/_modules/sparsezoo/models/classification/efficientnet.html b/sparsezoo/_modules/sparsezoo/models/classification/efficientnet.html
              index 0b53e33f43f..892691ce615 100644
              --- a/sparsezoo/_modules/sparsezoo/models/classification/efficientnet.html
              +++ b/sparsezoo/_modules/sparsezoo/models/classification/efficientnet.html
              @@ -105,10 +105,11 @@
               
              -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/models/classification/inception.html b/sparsezoo/_modules/sparsezoo/models/classification/inception.html index 1cf3f992b0d..73d03bcf6cf 100644 --- a/sparsezoo/_modules/sparsezoo/models/classification/inception.html +++ b/sparsezoo/_modules/sparsezoo/models/classification/inception.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/models/classification/mobilenet.html b/sparsezoo/_modules/sparsezoo/models/classification/mobilenet.html index 3c796b024bc..b56e1ad0dfd 100644 --- a/sparsezoo/_modules/sparsezoo/models/classification/mobilenet.html +++ b/sparsezoo/_modules/sparsezoo/models/classification/mobilenet.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/models/classification/resnet.html b/sparsezoo/_modules/sparsezoo/models/classification/resnet.html index debc43656ff..c1e12d2c9b6 100644 --- a/sparsezoo/_modules/sparsezoo/models/classification/resnet.html +++ b/sparsezoo/_modules/sparsezoo/models/classification/resnet.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/models/classification/vgg.html b/sparsezoo/_modules/sparsezoo/models/classification/vgg.html index c51300fc21b..5701e9048d4 100644 --- a/sparsezoo/_modules/sparsezoo/models/classification/vgg.html +++ b/sparsezoo/_modules/sparsezoo/models/classification/vgg.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/models/detection/ssd.html b/sparsezoo/_modules/sparsezoo/models/detection/ssd.html index 5c40b241f4e..fb24dff8d6f 100644 --- a/sparsezoo/_modules/sparsezoo/models/detection/ssd.html +++ b/sparsezoo/_modules/sparsezoo/models/detection/ssd.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/models/detection/yolo.html b/sparsezoo/_modules/sparsezoo/models/detection/yolo.html index 0372bc1f642..f295391c327 100644 --- a/sparsezoo/_modules/sparsezoo/models/detection/yolo.html +++ b/sparsezoo/_modules/sparsezoo/models/detection/yolo.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/models/zoo.html b/sparsezoo/_modules/sparsezoo/models/zoo.html index 5f47a54ff3b..abfad6089d6 100644 --- a/sparsezoo/_modules/sparsezoo/models/zoo.html +++ b/sparsezoo/_modules/sparsezoo/models/zoo.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              @@ -194,14 +195,62 @@

              Source code for sparsezoo.models.zoo

               """
               
               
              -from typing import List, Union
              +import warnings
              +from typing import Dict, List, Tuple, Union
               
               from sparsezoo.objects.model import Model
              -from sparsezoo.objects.optimization_recipe import OptimizationRecipe
              +from sparsezoo.objects.optimization_recipe import (
              +    OptimizationRecipe,
              +    OptimizationRecipeTypes,
              +)
               from sparsezoo.requests import ModelArgs, download_get_request, search_get_request
               
               
              -__all__ = ["Zoo"]
              +__all__ = [
              +    "ZOO_STUB_PREFIX",
              +    "parse_zoo_stub",
              +    "Zoo",
              +]
              +
              +
              +# optional prefix for stubs
              +ZOO_STUB_PREFIX = "zoo:"
              +
              +
              +
[docs]def parse_zoo_stub( + stub: str, valid_params: Union[List[str], None] = None +) -> Tuple[str, Dict[str, str]]: + """ + :param stub: A SparseZoo model stub. i.e. 'model/stub/path', + 'zoo:model/stub/path', 'zoo:model/stub/path?param1=value1&param2=value2' + :param valid_params: list of expected parameter names to be encoded in the + stub. Will issue a warning if any unexpected param names are given. Leave + as None to not raise any warnings. Default is None + :return: the parsed base stub and a dictionary of parameter names and their values + """ + # strip optional zoo stub prefix + if stub.startswith(ZOO_STUB_PREFIX): + stub = stub[len(ZOO_STUB_PREFIX) :] + + if "?" not in stub: + return stub, {} + + stub_parts = stub.split("?") + if len(stub_parts) > 2: + raise ValueError( + "Invalid SparseZoo stub, query string may contain only one '?', " + f"given {stub}" + ) + stub, params = stub_parts + params = dict(param.split("=") for param in params.split("&")) + + if valid_params is not None and any(param not in valid_params for param in params): + warnings.warn( + f"Invalid query string for stub {stub} valid params include {valid_params}," + f" given {list(params.keys())}" + ) + + return stub, params
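A minimal usage sketch of the stub parsing added above (an illustration only, assuming the `sparsezoo.models.zoo` module path shown in this diff; the stubs are the placeholder and example strings from the docstrings and the model table, not an exhaustive list):

```python
from sparsezoo.models.zoo import parse_zoo_stub

# A plain stub with no query string is returned unchanged with an empty param dict.
stub, params = parse_zoo_stub(
    "cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-moderate"
)
# stub == "cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-moderate"
# params == {}

# The optional 'zoo:' prefix is stripped and the query string becomes a dict;
# passing valid_params suppresses the unexpected-parameter warning for known keys.
stub, params = parse_zoo_stub(
    "zoo:model/stub/path?recipe_type=transfer", valid_params=["recipe_type"]
)
# stub == "model/stub/path"
# params == {"recipe_type": "transfer"}
```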
              [docs]class Zoo: @@ -296,6 +345,9 @@

              Source code for sparsezoo.models.zoo

                       :param force_token_refresh: True to refresh the auth token, False otherwise
                       :return: The requested Model instance
                       """
              +        if isinstance(stub, str):
              +            stub, _ = parse_zoo_stub(stub, valid_params=[])
              +
                       response_json = download_get_request(
                           args=stub,
                           file_name=None,
              @@ -619,7 +671,101 @@ 

              Source code for sparsezoo.models.zoo

                           match_dataset=match_dataset,
                           match_training_scheme=match_training_scheme,
                       )
              -        return [recipe for model in optimized_models for recipe in model.recipes]
              + return [recipe for model in optimized_models for recipe in model.recipes]
              + +
              [docs] @staticmethod + def download_recipe_from_stub( + stub: str, + ) -> str: + """ + :param stub: a string model stub that points to a SparseZoo model. + recipe_type may be added as a stub parameter. i.e. + "model/stub/path", "zoo:model/stub/path", + "zoo:model/stub/path?recipe_type=original" + :return: file path of the downloaded recipe for that model + """ + stub, args = parse_zoo_stub(stub, valid_params=["recipe_type"]) + recipe_type = _get_stub_args_recipe_type(args) + model = Zoo.load_model_from_stub(stub) + + for recipe in model.recipes: + if recipe.recipe_type == recipe_type: + return recipe.downloaded_path() + + found_recipe_types = [recipe.recipe_type for recipe in model.recipes] + raise RuntimeError( + f"No recipe with recipe_type {recipe_type} found for model {model}. " + f"Found {len(model.recipes)} recipes with recipe types {found_recipe_types}" + )
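For context, a hedged example of calling the new `download_recipe_from_stub` helper above (a sketch only; the stub is one of the ResNet-50 stubs listed later in this diff, and the returned path depends on the local SparseZoo download cache):

```python
from sparsezoo.models.zoo import Zoo

# recipe_type defaults to 'original' when the stub carries no query string;
# append '?recipe_type=transfer' to fetch a transfer-learning recipe instead.
recipe_path = Zoo.download_recipe_from_stub(
    "zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-moderate"
)
print(recipe_path)  # local file path of the downloaded recipe
```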
              + +
              [docs] @staticmethod + def download_recipe_base_framework_files( + stub: str, + extensions: Union[List[str], None] = None, + ) -> List[str]: + """ + :param stub: a string model stub that points to a SparseZoo model. + recipe_type may be added as a stub parameter. i.e. + "model/stub/path", "zoo:model/stub/path", + "zoo:model/stub/path?recipe_type=transfer" + :param extensions: List of file extensions to filter for. ex ['.pth', '.ptc']. + If None or empty list, all framework files are downloaded. Default is None + :return: file path to the downloaded framework checkpoint files for the + base weights of this recipe + """ + stub, args = parse_zoo_stub(stub, valid_params=["recipe_type"]) + recipe_type = _get_stub_args_recipe_type(args) + model = Zoo.load_model_from_stub(stub) + + if recipe_type == OptimizationRecipeTypes.TRANSFER_LEARN.value: + # return final model's optimized weights for sparse transfer learning + framework_files = model.download_framework_files(extensions=extensions) + + # download only pre-quantized weights if available + checkpoint_framework_files = [ + framework_file + for framework_file in framework_files + if ".ckpt" in framework_file + ] + + # return non-empty list, preferring filtered list + return checkpoint_framework_files or framework_files + else: + # search for base model, and return those weights as a starting checkpoint + base_model = [ + result + for result in Zoo.search_optimized_models(model) + if result.optim_name == "base" + ] + if not base_model: + raise ValueError(f"Could not find base model for model {model}") + framework_files = base_model[0].download_framework_files( + extensions=extensions + ) + + # filter out checkpoint weights if any exist + base_framework_files = [ + framework_file + for framework_file in framework_files + if ".ckpt" not in framework_file + ] + + # return non-empty list, preferring filtered list + return base_framework_files or framework_files
              + + +def _get_stub_args_recipe_type(stub_args: Dict[str, str]) -> str: + # check recipe type, default to original, and validate + recipe_type = stub_args.get("recipe_type", OptimizationRecipeTypes.ORIGINAL.value) + + # validate + valid_recipe_types = list(map(lambda typ: typ.value, OptimizationRecipeTypes)) + if recipe_type not in valid_recipe_types: + raise ValueError( + f"Invalid recipe_type: '{recipe_type}'. " + f"Valid recipe types: {valid_recipe_types}" + ) + return recipe_type
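Similarly, a sketch of fetching starting weights for a recipe with `download_recipe_base_framework_files` (illustrative stub and extensions taken from the docstring above; the actual files returned depend on what the model publishes):

```python
from sparsezoo.models.zoo import Zoo

# With recipe_type=transfer the model's own sparsified weights are returned,
# preferring '.ckpt' checkpoint files when available; for other recipe types
# the corresponding base (dense) model's weights are located and returned.
framework_files = Zoo.download_recipe_base_framework_files(
    "zoo:model/stub/path?recipe_type=transfer", extensions=[".pth"]
)
for file_path in framework_files:
    print(file_path)
```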
              diff --git a/sparsezoo/_modules/sparsezoo/nbutils/utils.html b/sparsezoo/_modules/sparsezoo/nbutils/utils.html index 9a86ff0127c..61456b650d9 100644 --- a/sparsezoo/_modules/sparsezoo/nbutils/utils.html +++ b/sparsezoo/_modules/sparsezoo/nbutils/utils.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/objects/base.html b/sparsezoo/_modules/sparsezoo/objects/base.html index 99852e407df..04fd44bddf1 100644 --- a/sparsezoo/_modules/sparsezoo/objects/base.html +++ b/sparsezoo/_modules/sparsezoo/objects/base.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/objects/data.html b/sparsezoo/_modules/sparsezoo/objects/data.html index 7507580d66a..969b05b1278 100644 --- a/sparsezoo/_modules/sparsezoo/objects/data.html +++ b/sparsezoo/_modules/sparsezoo/objects/data.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/objects/downloadable.html b/sparsezoo/_modules/sparsezoo/objects/downloadable.html index 92ad560792f..37dacf1ebe5 100644 --- a/sparsezoo/_modules/sparsezoo/objects/downloadable.html +++ b/sparsezoo/_modules/sparsezoo/objects/downloadable.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/objects/file.html b/sparsezoo/_modules/sparsezoo/objects/file.html index bf15d8fb54a..6a6b857da23 100644 --- a/sparsezoo/_modules/sparsezoo/objects/file.html +++ b/sparsezoo/_modules/sparsezoo/objects/file.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/objects/metadata.html b/sparsezoo/_modules/sparsezoo/objects/metadata.html index e32a072fc67..f1d6feed82d 100644 --- a/sparsezoo/_modules/sparsezoo/objects/metadata.html +++ b/sparsezoo/_modules/sparsezoo/objects/metadata.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/objects/model.html b/sparsezoo/_modules/sparsezoo/objects/model.html index b603e3960d6..2138741dcea 100644 --- a/sparsezoo/_modules/sparsezoo/objects/model.html +++ b/sparsezoo/_modules/sparsezoo/objects/model.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/objects/optimization_recipe.html b/sparsezoo/_modules/sparsezoo/objects/optimization_recipe.html index 0df9b903971..17d81fe5d92 100644 --- a/sparsezoo/_modules/sparsezoo/objects/optimization_recipe.html +++ b/sparsezoo/_modules/sparsezoo/objects/optimization_recipe.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/objects/release_version.html b/sparsezoo/_modules/sparsezoo/objects/release_version.html index 2745c54535b..7dcbcced2c0 100644 --- a/sparsezoo/_modules/sparsezoo/objects/release_version.html +++ b/sparsezoo/_modules/sparsezoo/objects/release_version.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/objects/result.html b/sparsezoo/_modules/sparsezoo/objects/result.html index 9a9dc86bee4..3f7c99d8f24 100644 --- a/sparsezoo/_modules/sparsezoo/objects/result.html +++ b/sparsezoo/_modules/sparsezoo/objects/result.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/objects/tag.html b/sparsezoo/_modules/sparsezoo/objects/tag.html index 392a2097073..5571fcb6990 100644 --- a/sparsezoo/_modules/sparsezoo/objects/tag.html +++ b/sparsezoo/_modules/sparsezoo/objects/tag.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/objects/user.html b/sparsezoo/_modules/sparsezoo/objects/user.html index 0de603222a2..5b83066db6f 100644 --- a/sparsezoo/_modules/sparsezoo/objects/user.html +++ b/sparsezoo/_modules/sparsezoo/objects/user.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/requests/authentication.html b/sparsezoo/_modules/sparsezoo/requests/authentication.html index a8b0ee195c9..489fc497051 100644 --- a/sparsezoo/_modules/sparsezoo/requests/authentication.html +++ b/sparsezoo/_modules/sparsezoo/requests/authentication.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              @@ -231,7 +232,7 @@

              Source code for sparsezoo.requests.authentication

              def __init__(self): if os.path.exists(CREDENTIALS_YAML): - _LOGGER.info(f"Loading sparse zoo credentials from {CREDENTIALS_YAML}") + _LOGGER.debug(f"Loading sparse zoo credentials from {CREDENTIALS_YAML}") with open(CREDENTIALS_YAML) as credentials_file: credentials_yaml = yaml.safe_load(credentials_file) if credentials_yaml and CREDENTIALS_YAML_TOKEN_KEY in credentials_yaml: @@ -243,7 +244,9 @@

              Source code for sparsezoo.requests.authentication

              self._token = None self._created = None else: - _LOGGER.info(f"No sparse zoo credentials files found at {CREDENTIALS_YAML}") + _LOGGER.debug( + f"No sparse zoo credentials files found at {CREDENTIALS_YAML}" + ) self._token = None self._created = None @@ -255,7 +258,7 @@

              Source code for sparsezoo.requests.authentication

              :param token: the jwt for accessing sparse zoo APIs :param created: the approximate time the token was created """ - _LOGGER.info(f"Saving sparse zoo credentials at {CREDENTIALS_YAML}") + _LOGGER.debug(f"Saving sparse zoo credentials at {CREDENTIALS_YAML}") if not os.path.exists(CREDENTIALS_YAML): create_parent_dirs(CREDENTIALS_YAML) with open(CREDENTIALS_YAML, "w+") as credentials_file: @@ -276,17 +279,17 @@

              Source code for sparsezoo.requests.authentication

              """ :return: obtain the token if under 1 day old, else return None """ - _LOGGER.info(f"Obtaining sparse zoo credentials from {CREDENTIALS_YAML}") + _LOGGER.debug(f"Obtaining sparse zoo credentials from {CREDENTIALS_YAML}") if self._token and self._created is not None: creation_date = datetime.fromtimestamp(self._created, tz=timezone.utc) creation_difference = datetime.now(tz=timezone.utc) - creation_date - if creation_difference.days == 0: + if creation_difference.days < 30: return self._token else: - _LOGGER.warning(f"Expired sparse zoo credentials at {CREDENTIALS_YAML}") + _LOGGER.debug(f"Expired sparse zoo credentials at {CREDENTIALS_YAML}") return None else: - _LOGGER.warning(f"No sparse zoo credentials found at {CREDENTIALS_YAML}") + _LOGGER.debug(f"No sparse zoo credentials found at {CREDENTIALS_YAML}") return None @@ -315,7 +318,7 @@

              Source code for sparsezoo.requests.authentication

              if token and not force_token_refresh: return {NM_TOKEN_HEADER: token} elif authentication_type.lower() == PUBLIC_AUTH_TYPE: - _LOGGER.warning("Obtaining new sparse zoo credentials token") + _LOGGER.info("Obtaining new sparse zoo credentials token") created = time.time() response = requests.post( url=AUTH_API, data=json.dumps({"authentication_type": PUBLIC_AUTH_TYPE}) diff --git a/sparsezoo/_modules/sparsezoo/requests/base.html b/sparsezoo/_modules/sparsezoo/requests/base.html index e6795623f4d..0b449cb1c2a 100644 --- a/sparsezoo/_modules/sparsezoo/requests/base.html +++ b/sparsezoo/_modules/sparsezoo/requests/base.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/requests/download.html b/sparsezoo/_modules/sparsezoo/requests/download.html index 5b54825ae9a..abda564e707 100644 --- a/sparsezoo/_modules/sparsezoo/requests/download.html +++ b/sparsezoo/_modules/sparsezoo/requests/download.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/requests/search.html b/sparsezoo/_modules/sparsezoo/requests/search.html index d4b7aea510b..6398446f30d 100644 --- a/sparsezoo/_modules/sparsezoo/requests/search.html +++ b/sparsezoo/_modules/sparsezoo/requests/search.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              @@ -241,7 +242,7 @@

              Source code for sparsezoo.requests.search

                   search_args = "&".join(search_args)
                   url = f"{BASE_API_URL}/{SEARCH_PATH}/{args.model_url_root}?{search_args}"
               
              -    _LOGGER.debug(f"Searching objects from {url}")
              +    _LOGGER.info(f"Searching objects from {url}")
                   response_json = requests.get(url=url, headers=header).json()
               
                   return response_json
              diff --git a/sparsezoo/_modules/sparsezoo/utils/data.html b/sparsezoo/_modules/sparsezoo/utils/data.html index d770d51c11a..029a8dc2d9b 100644 --- a/sparsezoo/_modules/sparsezoo/utils/data.html +++ b/sparsezoo/_modules/sparsezoo/utils/data.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/utils/downloader.html b/sparsezoo/_modules/sparsezoo/utils/downloader.html index f051cd8c9c3..3a5ba4b882e 100644 --- a/sparsezoo/_modules/sparsezoo/utils/downloader.html +++ b/sparsezoo/_modules/sparsezoo/utils/downloader.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/utils/helpers.html b/sparsezoo/_modules/sparsezoo/utils/helpers.html index 23e5d584dd0..3d1d51c8726 100644 --- a/sparsezoo/_modules/sparsezoo/utils/helpers.html +++ b/sparsezoo/_modules/sparsezoo/utils/helpers.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/sparsezoo/utils/numpy.html b/sparsezoo/_modules/sparsezoo/utils/numpy.html index 6b22b735153..efbc9558562 100644 --- a/sparsezoo/_modules/sparsezoo/utils/numpy.html +++ b/sparsezoo/_modules/sparsezoo/utils/numpy.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_modules/tqdm/asyncio.html b/sparsezoo/_modules/tqdm/asyncio.html index 13491f2650f..47ddbafcdda 100644 --- a/sparsezoo/_modules/tqdm/asyncio.html +++ b/sparsezoo/_modules/tqdm/asyncio.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/_sources/index.rst.txt b/sparsezoo/_sources/index.rst.txt index 835d31be66e..1918a9a67fe 100644 --- a/sparsezoo/_sources/index.rst.txt +++ b/sparsezoo/_sources/index.rst.txt @@ -17,7 +17,7 @@ SparseZoo |version| =================== -Neural network model repository for highly sparse models and optimization recipes +Neural network model repository for highly sparse and sparse-quantized models with matching sparsification recipes .. raw:: html @@ -48,51 +48,53 @@ Neural network model repository for highly sparse models and optimization recipe Overview ======== -SparseZoo is a constantly-growing repository of optimized models and optimization recipes for neural networks. -It simplifies and accelerates your time-to-value in building performant deep learning models with a -collection of inference-optimized models and recipes to prototype from. +SparseZoo is a constantly-growing repository of highly sparse and sparse-quantized models with matching sparsification recipes for neural networks. +It simplifies and accelerates your time-to-value in building performant deep learning models with a collection of inference-optimized models and recipes to prototype from. -Available via API and hosted in the cloud, the SparseZoo contains both baseline and models optimized -to different degrees of inference performance vs baseline loss recovery. -Optimizations on neural networks include approaches such as -`pruning `_ and `quantization `_ -allowing for significantly faster models with limited to no effect on their baseline metrics such as accuracy. -Recipe-driven approaches built around these optimizations allow you to take the models as given, -transfer learn from the models onto private datasets, or transfer the recipes to your architectures. +Available via API and hosted in the cloud, the SparseZoo contains both baseline and models optimized to different degrees of inference performance vs. baseline loss recovery. +Recipe-driven approaches built around sparsification algorithms allow you to take the models as given, transfer-learn from the models onto private datasets, or transfer the recipes to your architectures. -This repository contains the Python API code to handle the connection and authentication to the cloud. +`This repository `_ contains the Python API code to handle the connection and authentication to the cloud. -Related Products -================ +Sparsification +============== -- `DeepSparse `_: - CPU inference engine that delivers unprecedented performance for sparse models -- `SparseML `_: - Libraries for state-of-the-art deep neural network optimization algorithms, - enabling simple pipelines integration with a few lines of code -- `Sparsify `_: - Easy-to-use autoML interface to optimize deep neural networks for - better inference performance and a smaller footprint +Sparsification is the process of taking a trained deep learning model and removing redundant information from the overprecise and over-parameterized network resulting in a faster and smaller model. +Techniques for sparsification are all encompassing including everything from inducing sparsity using `pruning `_ and `quantization `_ to enabling naturally occurring sparsity using `activation sparsity `_ or `winograd/FFT `_. +When implemented correctly, these techniques result in significantly more performant and smaller models with limited to no effect on the baseline metrics. 
+For example, pruning plus quantization can give over `7x improvements in performance `_ while recovering to nearly the same baseline accuracy. + +The Deep Sparse product suite builds on top of sparsification enabling you to easily apply the techniques to your datasets and models using recipe-driven approaches. +Recipes encode the directions for how to sparsify a model into a simple, easily editable format. +- Download a sparsification recipe and sparsified model from the `SparseZoo `_. +- Alternatively, create a recipe for your model using `Sparsify `_. +- Apply your recipe with only a few lines of code using `SparseML `_. +- Finally, for GPU-level performance on CPUs, deploy your sparse-quantized model with the `DeepSparse Engine `_. + + +**Full Deep Sparse product flow:** + + Resources and Learning More =========================== -- `SparseML Documentation `_ -- `Sparsify Documentation `_ -- `DeepSparse Documentation `_ -- `Neural Magic Blog `_, - `Resources `_, - `Website `_ +- `SparseML Documentation `_ +- `Sparsify Documentation `_ +- `DeepSparse Documentation `_ +- `Neural Magic Blog `_, + `Resources `_, + `Website `_ Release History =============== Official builds are hosted on PyPi -- stable: `sparsezoo `_ -- nightly (dev): `sparsezoo-nightly `_ +- stable: `sparsezoo `_ +- nightly (dev): `sparsezoo-nightly `_ Additionally, more information can be found via -`GitHub Releases `_. +`GitHub Releases `_. .. toctree:: :maxdepth: 3 @@ -110,8 +112,9 @@ Additionally, more information can be found via api/sparsezoo .. toctree:: - :maxdepth: 2 - :caption: Help and Support + :maxdepth: 3 + :caption: Help Bugs, Feature Requests - Support, General Q&A \ No newline at end of file + Support, General Q&A + Neural Magic Docs diff --git a/sparsezoo/_sources/models.md.txt b/sparsezoo/_sources/models.md.txt index 3ddec0ee931..6de95d243b0 100644 --- a/sparsezoo/_sources/models.md.txt +++ b/sparsezoo/_sources/models.md.txt @@ -18,33 +18,34 @@ limitations under the License. Each model in the SparseZoo has a specific stub that identifies it. 
The stubs are made up of the following structure: -`DOMAIN/SUB_DOMAIN/ARCHITECTURE{-SUB_ARCHITECTURE}/FRAMEWORK/REPO/DATASET{-TRAINING_SCHEME}/OPTIM_NAME-OPTIM_CATEGORY-{OPTIM_TARGET}` +`DOMAIN/SUB_DOMAIN/ARCHITECTURE{-SUB_ARCHITECTURE}/FRAMEWORK/REPO/DATASET{-TRAINING_SCHEME}/SPARSE_NAME-SPARSE_CATEGORY-{SPARSE_TARGET}` The properties within each model stub are defined as the following: -| Model Property | Definition | Examples | -|:----------------:|:---------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------:| -| DOMAIN | The type of solution the model is architected and trained for | cv, nlp | -| SUB_DOMAIN | The sub type of solution the model is architected and trained for | classification, segmentation | -| ARCHITECTURE | The name of the guiding setup for the network's graph | resnet_v1, mobilenet_v1 | -| SUB_ARCHITECTURE | (optional) The scaled version of the architecture such as width or depth | 50, 101, 152 | -| FRAMEWORK | The machine learning framework the model was defined and trained in | pytorch, tensorflow_v1 | -| REPO | The model repository the model and baseline weights originated from | sparseml, torchvision | -| DATASET | The dataset the model was trained on | imagenet, cifar10 | -| TRAINING_SCHEME | (optional) A description on how the model was trained | augmented, lower_lr | -| OPTIM_NAME | An overview of what was done to optimize the model | base, pruned, quant (quantized), pruned_quant, arch (architecture modified) | -| OPTIM_CATEGORY | Descriptor on the degree to which the model is optimized as compared with the baseline metric | none, conservative (100% baseline), moderate (>= 99% baseline), aggressive (< 99%) | -| OPTIM_TARGET | (optional) Descriptor for the target environment the model was optimized for | disk, edge, deepsparse, gpu | +| Model Property | Definition | Examples | +|:----------------:|:----------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------:| +| DOMAIN | The type of solution the model is architected and trained for | cv, nlp | +| SUB_DOMAIN | The sub type of solution the model is architected and trained for | classification, segmentation | +| ARCHITECTURE | The name of the guiding setup for the network's graph | resnet_v1, mobilenet_v1 | +| SUB_ARCHITECTURE | (optional) The scaled version of the architecture such as width or depth | 50, 101, 152 | +| FRAMEWORK | The machine learning framework the model was defined and trained in | pytorch, tensorflow_v1 | +| REPO | The model repository the model and baseline weights originated from | sparseml, torchvision | +| DATASET | The dataset the model was trained on | imagenet, cifar10 | +| TRAINING_SCHEME | (optional) A description on how the model was trained | augmented, lower_lr | +| SPARSE_NAME | An overview of what was done to sparsify the model | base, pruned, quant (quantized), pruned_quant, arch (architecture modified) | +| SPARSE_CATEGORY | Descriptor on the degree to which the model is sparsified as compared with the baseline metric | none, conservative (100% baseline), moderate (>= 99% baseline), aggressive (< 99%) | +| SPARSE_TARGET | (optional) Descriptor for the target environment the model was sparsified for | disk, edge, deepsparse, gpu | The contents of each model are made up of the following: + - model.md: The model card containing metadata, 
descriptions, and information for the model. - model.onnx: The [ONNX](https://onnx.ai/) representation of the model's graph. - model.onnx.tar.gz: A compressed format for the ONNX file. Currently ONNX does not support sparse tensors and quantized sparse tensors well for compression. - [FRAMEWORK]/model.[EXTENSION]: The native ML framework file(s) for the model in which it was originally trained. Such as PyTorch, Keras, TensorFlow V1 -- recipes/optimization.[md|yaml]: The original optimization recipe used to create the model. -- recipes/[NAME].[md|yaml]: Additional optimization recipes that can be used with the model such as transfer learning. +- recipes/original.[md|yaml]: The original sparsification recipe used to create the model. +- recipes/[NAME].[md|yaml]: Additional sparsification recipes that can be used with the model such as transfer learning. - sample-originals: The original sample data without any preprocessing for use with the model. - sample-inputs: The sample data after pre processing for use with the model. - sample-outputs: The outputs after running the sample inputs through the model. @@ -76,14 +77,15 @@ The contents of each model are made up of the following: | cv/classification/resnet_v1-152/pytorch/sparseml/imagenet/pruned-moderate | 77.5% top1 accuracy | | cv/classification/resnet_v1-152/pytorch/torchvision/imagenet/base-none | 77.5% top1 accuracy | | cv/classification/resnet_v1-18/pytorch/sparseml/imagenet/base-none | 69.8% top1 accuracy | -| cv/classification/resnet_v1-18/pytorch/sparseml/imagenet/sparse-conservative | 69.8% top1 accuracy | +| cv/classification/resnet_v1-18/pytorch/sparseml/imagenet/pruned-conservative | 69.8% top1 accuracy | | cv/classification/resnet_v1-18/pytorch/torchvision/imagenet/base-none | 69.8% top1 accuracy | | cv/classification/resnet_v1-34/pytorch/sparseml/imagenet/base-none | 73.3% top1 accuracy | -| cv/classification/resnet_v1-34/pytorch/sparseml/imagenet/sparse-conservative | 73.3% top1 accuracy | +| cv/classification/resnet_v1-34/pytorch/sparseml/imagenet/pruned-conservative | 73.3% top1 accuracy | | cv/classification/resnet_v1-34/pytorch/torchvision/imagenet/base-none | 73.3% top1 accuracy | | cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/base-none | 76.1% top1 accuracy | | cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-conservative | 76.1% top1 accuracy | | cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-moderate | 75.3% top1 accuracy | +| cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned_quant-moderate | 75.4% top1 accuracy | | cv/classification/resnet_v1-50/pytorch/sparseml/imagenet-augmented/pruned_quant-aggressive | 76.1% top1 accuracy | | cv/classification/resnet_v1-50/pytorch/sparseml/imagenette/base-none | 99.9% top1 accuracy | | cv/classification/resnet_v1-50/pytorch/sparseml/imagenette/pruned-conservative | 99.9% top1 accuracy | diff --git a/sparsezoo/_sources/quicktour.md.txt b/sparsezoo/_sources/quicktour.md.txt index 78cbc63874a..62025650f6c 100644 --- a/sparsezoo/_sources/quicktour.md.txt +++ b/sparsezoo/_sources/quicktour.md.txt @@ -18,33 +18,34 @@ limitations under the License. Each model in the SparseZoo has a specific stub that identifies it. 
The stubs are made up of the following structure: -`DOMAIN/SUB_DOMAIN/ARCHITECTURE{-SUB_ARCHITECTURE}/FRAMEWORK/REPO/DATASET{-TRAINING_SCHEME}/OPTIM_NAME-OPTIM_CATEGORY-{OPTIM_TARGET}` +`DOMAIN/SUB_DOMAIN/ARCHITECTURE{-SUB_ARCHITECTURE}/FRAMEWORK/REPO/DATASET{-TRAINING_SCHEME}/SPARSE_NAME-SPARSE_CATEGORY-{SPARSE_TARGET}` The properties within each model stub are defined as the following: -| Model Property | Definition | Examples | -|:----------------:|:---------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------:| -| DOMAIN | The type of solution the model is architected and trained for | cv, nlp | -| SUB_DOMAIN | The sub type of solution the model is architected and trained for | classification, segmentation | -| ARCHITECTURE | The name of the guiding setup for the network's graph | resnet_v1, mobilenet_v1 | -| SUB_ARCHITECTURE | (optional) The scaled version of the architecture such as width or depth | 50, 101, 152 | -| FRAMEWORK | The machine learning framework the model was defined and trained in | pytorch, tensorflow_v1 | -| REPO | The model repository the model and baseline weights originated from | sparseml, torchvision | -| DATASET | The dataset the model was trained on | imagenet, cifar10 | -| TRAINING_SCHEME | (optional) A description on how the model was trained | augmented, lower_lr | -| OPTIM_NAME | An overview of what was done to optimize the model | base, pruned, quant (quantized), pruned_quant, arch (architecture modified) | -| OPTIM_CATEGORY | Descriptor on the degree to which the model is optimized as compared with the baseline metric | none, conservative (100% baseline), moderate (>= 99% baseline), aggressive (< 99%) | -| OPTIM_TARGET | (optional) Descriptor for the target environment the model was optimized for | disk, edge, deepsparse, gpu | +| Model Property | Definition | Examples | +|:----------------:|:----------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------:| +| DOMAIN | The type of solution the model is architected and trained for | cv, nlp | +| SUB_DOMAIN | The sub type of solution the model is architected and trained for | classification, segmentation | +| ARCHITECTURE | The name of the guiding setup for the network's graph | resnet_v1, mobilenet_v1 | +| SUB_ARCHITECTURE | (optional) The scaled version of the architecture such as width or depth | 50, 101, 152 | +| FRAMEWORK | The machine learning framework the model was defined and trained in | pytorch, tensorflow_v1 | +| REPO | The model repository the model and baseline weights originated from | sparseml, torchvision | +| DATASET | The dataset the model was trained on | imagenet, cifar10 | +| TRAINING_SCHEME | (optional) A description on how the model was trained | augmented, lower_lr | +| SPARSE_NAME | An overview of what was done to sparsify the model | base, pruned, quant (quantized), pruned_quant, arch (architecture modified) | +| SPARSE_CATEGORY | Descriptor on the degree to which the model is sparsified as compared with the baseline metric | none, conservative (100% baseline), moderate (>= 99% baseline), aggressive (< 99%) | +| SPARSE_TARGET | (optional) Descriptor for the target environment the model was sparsified for | disk, edge, deepsparse, gpu | The contents of each model are made up of the following: + - model.md: The model card containing metadata, 
descriptions, and information for the model. - model.onnx: The [ONNX](https://onnx.ai/) representation of the model's graph. - model.onnx.tar.gz: A compressed format for the ONNX file. Currently ONNX does not support sparse tensors and quantized sparse tensors well for compression. - [FRAMEWORK]/model.[EXTENSION]: The native ML framework file(s) for the model in which it was originally trained. Such as PyTorch, Keras, TensorFlow V1 -- recipes/optimization.[md|yaml]: The original optimization recipe used to create the model. -- recipes/[NAME].[md|yaml]: Additional optimization recipes that can be used with the model such as transfer learning. +- recipes/original.[md|yaml]: The original sparsification recipe used to create the model. +- recipes/[NAME].[md|yaml]: Additional sparsification recipes that can be used with the model such as transfer learning. - sample-originals: The original sample data without any preprocessing for use with the model. - sample-inputs: The sample data after pre processing for use with the model. - sample-outputs: The outputs after running the sample inputs through the model. @@ -52,8 +53,7 @@ The contents of each model are made up of the following: ### Python APIS -The Python APIs respect this format enabling you to search and download models. -Some code examples are given below. +The Python APIs respect this format enabling you to search and download models. Some code examples are given below. #### Searching the Zoo @@ -92,7 +92,8 @@ print(optimized_models) In addition to the Python APIs, a console script entry point is installed with the package `sparsezoo`. This enables easy interaction straight from your console/terminal. Note, for some environments the console scripts cannot install properly. -If this happens for your system and the sparsezoo command is not available, `scripts/sparsezoo.py` may be used in its place. +If this happens for your system and the sparsezoo command is not available, +`https://github.com/neuralmagic/sparsezoo/blob/main/scripts/sparsezoo.py` may be used in its place. ```shell script sparsezoo -h diff --git a/sparsezoo/_sources/recipes.md.txt b/sparsezoo/_sources/recipes.md.txt index b059773ba31..7f65acf3c9a 100644 --- a/sparsezoo/_sources/recipes.md.txt +++ b/sparsezoo/_sources/recipes.md.txt @@ -19,55 +19,56 @@ limitations under the License. Each recipe in the SparseZoo is stored under the model created with it and has a specific stub that identifies it. 
The stubs are made up of the following structure: -`DOMAIN/SUB_DOMAIN/ARCHITECTURE{-SUB_ARCHITECTURE}/FRAMEWORK/REPO/DATASET{-TRAINING_SCHEME}/OPTIM_NAME-OPTIM_CATEGORY-{OPTIM_TARGET}/RECIPE_NAME.[md|yaml]` +`DOMAIN/SUB_DOMAIN/ARCHITECTURE{-SUB_ARCHITECTURE}/FRAMEWORK/REPO/DATASET{-TRAINING_SCHEME}/SPARSE_NAME-SPARSE_CATEGORY-{SPARSE_TARGET}?recipe-type=RECIPE_TYPE` The properties within each model stub are defined as the following: -| Model Property | Definition | Examples | -|:----------------:|:---------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------:| -| DOMAIN | The type of solution the model is architected and trained for | cv, nlp | -| SUB_DOMAIN | The sub type of solution the model is architected and trained for | classification, segmentation | -| ARCHITECTURE | The name of the guiding setup for the network's graph | resnet_v1, mobilenet_v1 | -| SUB_ARCHITECTURE | (optional) The scaled version of the architecture such as width or depth | 50, 101, 152 | -| FRAMEWORK | The machine learning framework the model was defined and trained in | pytorch, tensorflow_v1 | -| REPO | The model repository the model and baseline weights originated from | sparseml, torchvision | -| DATASET | The dataset the model was trained on | imagenet, cifar10 | -| TRAINING_SCHEME | (optional) A description on how the model was trained | augmented, lower_lr | -| OPTIM_NAME | An overview of what was done to optimize the model | base, pruned, quant (quantized), pruned_quant, arch (architecture modified) | -| OPTIM_CATEGORY | Descriptor on the degree to which the model is optimized as compared with the baseline metric | none, conservative (100% baseline), moderate (>= 99% baseline), aggressive (< 99%) | -| OPTIM_TARGET | (optional) Descriptor for the target environment the model was optimized for | disk, edge, deepsparse, gpu | -| RECIPE_NAME | A named descriptor for the recipe signifying what the recipe is for | optimization, transfer_learning +| Model Property | Definition | Examples | +|:----------------:|:----------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------:| +| DOMAIN | The type of solution the model is architected and trained for | cv, nlp | +| SUB_DOMAIN | The sub type of solution the model is architected and trained for | classification, segmentation | +| ARCHITECTURE | The name of the guiding setup for the network's graph | resnet_v1, mobilenet_v1 | +| SUB_ARCHITECTURE | (optional) The scaled version of the architecture such as width or depth | 50, 101, 152 | +| FRAMEWORK | The machine learning framework the model was defined and trained in | pytorch, tensorflow_v1 | +| REPO | The model repository the model and baseline weights originated from | sparseml, torchvision | +| DATASET | The dataset the model was trained on | imagenet, cifar10 | +| TRAINING_SCHEME | (optional) A description on how the model was trained | augmented, lower_lr | +| SPARSE_NAME | An overview of what was done to sparsify the model | base, pruned, quant (quantized), pruned_quant, arch (architecture modified) | +| SPARSE_CATEGORY | Descriptor on the degree to which the model is sparsified as compared with the baseline metric | none, conservative (100% baseline), moderate (>= 99% baseline), aggressive (< 99%) | +| SPARSE_TARGET | (optional) Descriptor for the target environment the model was 
sparsified for | disk, edge, deepsparse, gpu | +| RECIPE_TYPE | A named descriptor for the recipe signifying what the recipe is for | original, transfer_learn | ### Image Classification -| Model Tag | Validation Baseline Metric | -| ------------------------------------------------------------------------------------------------------------ | -------------------------- | -| cv/classification/efficientnet-b0/pytorch/sparseml/imagenet/arch-moderate/optimization.md | 76.5% top1 accuracy | -| cv/classification/efficientnet-b4/pytorch/sparseml/imagenet/arch-moderate/optimization.md | 82.1% top1 accuracy | -| cv/classification/inception_v3/pytorch/sparseml/imagenet/pruned-conservative/optimization.md | 77.4% top1 accuracy | -| cv/classification/inception_v3/pytorch/sparseml/imagenet/pruned-moderate/optimization.md | 76.6% top1 accuracy | -| cv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/base-none/optimization.md | 70.9% top1 accuracy | -| cv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/pruned-conservative/optimization.md | 70.9% top1 accuracy | -| cv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/pruned-moderate/optimization.md | 70.1% top1 accuracy | -| cv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/pruned_quant-moderate/optimization.md | 70.1% top1 accuracy | -| cv/classification/resnet_v1-101/pytorch/sparseml/imagenet/pruned-moderate/optimization.md | 76.6% top1 accuracy | -| cv/classification/resnet_v1-152/pytorch/sparseml/imagenet/pruned-moderate/optimization.md | 77.5% top1 accuracy | -| cv/classification/resnet_v1-18/pytorch/sparseml/imagenet/sparse-conservative/optimization.md | 69.8% top1 accuracy | -| cv/classification/resnet_v1-34/pytorch/sparseml/imagenet/sparse-conservative/optimization.md | 73.3% top1 accuracy | -| cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-conservative/optimization.md | 76.1% top1 accuracy | -| cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-moderate/optimization.md | 75.3% top1 accuracy | -| cv/classification/resnet_v1-50/pytorch/sparseml/imagenet-augmented/pruned_quant-aggressive/optimization.md | 76.1% top1 accuracy | -| cv/classification/resnet_v1-50/pytorch/sparseml/imagenette/pruned-conservative/optimization.md | 99.9% top1 accuracy | -| cv/classification/resnet_v1-50/pytorch/torchvision/imagenette/pruned-conservative/optimization.md | 99.9% top1 accuracy | -| cv/classification/vgg-11/pytorch/sparseml/imagenet/pruned-moderate/optimization.md | 68.3% top1 accuracy | -| cv/classification/vgg-16/pytorch/sparseml/imagenet/pruned-conservative/optimization.md | 71.6% top1 accuracy | -| cv/classification/vgg-16/pytorch/sparseml/imagenet/pruned-moderate/optimization.md | 70.8% top1 accuracy | -| cv/classification/vgg-19/pytorch/sparseml/imagenet/pruned-moderate/optimization.md | 71.7% top1 accuracy | +| Model Tag | Validation Baseline Metric | +| ----------------------------------------------------------------------------------------------------------------- | -------------------------- | +| cv/classification/efficientnet-b0/pytorch/sparseml/imagenet/arch-moderate?recipe_type=original | 76.5% top1 accuracy | +| cv/classification/efficientnet-b4/pytorch/sparseml/imagenet/arch-moderate?recipe_type=original | 82.1% top1 accuracy | +| cv/classification/inception_v3/pytorch/sparseml/imagenet/pruned-conservative?recipe_type=original | 77.4% top1 accuracy | +| cv/classification/inception_v3/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original | 76.6% top1 accuracy | +| 
cv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/base-none?recipe_type=original | 70.9% top1 accuracy | +| cv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/pruned-conservative?recipe_type=original | 70.9% top1 accuracy | +| cv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original | 70.1% top1 accuracy | +| cv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/pruned_quant-moderate?recipe_type=original | 70.1% top1 accuracy | +| cv/classification/resnet_v1-101/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original | 76.6% top1 accuracy | +| cv/classification/resnet_v1-152/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original | 77.5% top1 accuracy | +| cv/classification/resnet_v1-18/pytorch/sparseml/imagenet/pruned-conservative?recipe_type=original | 69.8% top1 accuracy | +| cv/classification/resnet_v1-34/pytorch/sparseml/imagenet/pruned-conservative?recipe_type=original | 73.3% top1 accuracy | +| cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-conservative?recipe_type=original | 76.1% top1 accuracy | +| cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original | 75.3% top1 accuracy | +| cv/classification/resnet_v1-50/pytorch/sparseml/imagenet-augmented/pruned_quant-aggressive?recipe_type=original | 76.1% top1 accuracy | +| cv/classification/resnet_v1-50/pytorch/sparseml/imagenette/pruned-conservative?recipe_type=original | 99.9% top1 accuracy | +| cv/classification/resnet_v1-50/pytorch/torchvision/imagenette/pruned-conservative?recipe_type=original | 99.9% top1 accuracy | +| cv/classification/vgg-11/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original | 68.3% top1 accuracy | +| cv/classification/vgg-16/pytorch/sparseml/imagenet/pruned-conservative?recipe_type=original | 71.6% top1 accuracy | +| cv/classification/vgg-16/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original | 70.8% top1 accuracy | +| cv/classification/vgg-19/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original | 71.7% top1 accuracy | ### Object Detection -| Model Tag | Validation Baseline Metric | -| ------------------------------------------------------------------------------------------------------------ | -------------------------- | -| cv/detection/ssd-resnet50_300/pytorch/sparseml/coco/pruned-moderate/optimization.md | 41.8 mAP@0.5 | -| cv/detection/ssd-resnet50_300/pytorch/sparseml/voc/pruned-moderate/optimization.md | 51.5 mAP@0.5 | -| cv/detection/yolo_v3-spp/pytorch/ultralytics/coco/pruned-aggressive/optimization.md | 62.1 mAP@0.5 | +| Model Tag | Validation Baseline Metric | +| ----------------------------------------------------------------------------------------------------------------- | -------------------------- | +| cv/detection/ssd-resnet50_300/pytorch/sparseml/coco/pruned-moderate?recipe_type=original | 41.8 mAP@0.5 | +| cv/detection/ssd-resnet50_300/pytorch/sparseml/voc/pruned-moderate?recipe_type=original | 51.5 mAP@0.5 | +| cv/detection/yolo_v3-spp/pytorch/ultralytics/coco/pruned-aggressive?recipe_type=original | 62.1 mAP@0.5 | diff --git a/sparsezoo/api/modules.html index 0de595cdd45..7edd2ca5538 100644 --- a/sparsezoo/api/modules.html +++ b/sparsezoo/api/modules.html @@ -105,10 +105,11 @@ -

              Help and Support

              +

              Help

              diff --git a/sparsezoo/api/sparsezoo.html b/sparsezoo/api/sparsezoo.html index d96542e171b..f18251c77fe 100644 --- a/sparsezoo/api/sparsezoo.html +++ b/sparsezoo/api/sparsezoo.html @@ -120,10 +120,11 @@
          -

          Help and Support

          +

          Help

          diff --git a/sparsezoo/api/sparsezoo.models.classification.html b/sparsezoo/api/sparsezoo.models.classification.html index 4e3856023f2..d299131e5a3 100644 --- a/sparsezoo/api/sparsezoo.models.classification.html +++ b/sparsezoo/api/sparsezoo.models.classification.html @@ -126,10 +126,11 @@

      -

      Help and Support

      +

      Help

      diff --git a/sparsezoo/api/sparsezoo.models.detection.html b/sparsezoo/api/sparsezoo.models.detection.html index 4592f9438e2..f8e75cb0b96 100644 --- a/sparsezoo/api/sparsezoo.models.detection.html +++ b/sparsezoo/api/sparsezoo.models.detection.html @@ -126,10 +126,11 @@
    -

    Help and Support

    +

    Help

    diff --git a/sparsezoo/api/sparsezoo.models.html b/sparsezoo/api/sparsezoo.models.html index 352ae94c2e9..796b6353a30 100644 --- a/sparsezoo/api/sparsezoo.models.html +++ b/sparsezoo/api/sparsezoo.models.html @@ -126,10 +126,11 @@ -

    Help and Support

    +

    Help

    @@ -236,6 +237,43 @@

Submodules class sparsezoo.models.zoo.Zoo[source]

    Bases: object

    Provides static functions for loading and searching SparseZoo models and recipes

    +
    +
    +static download_recipe_base_framework_files(stub: str, extensions: Optional[List[str]] = None) → List[str][source]
    +
    +
    Parameters
    +
      +
    • stub – a string model stub that points to a SparseZoo model. +recipe_type may be added as a stub parameter. i.e. +“model/stub/path”, “zoo:model/stub/path”, +“zoo:model/stub/path?recipe_type=transfer”

    • +
    • extensions – List of file extensions to filter for. ex [‘.pth’, ‘.ptc’]. +If None or empty list, all framework files are downloaded. Default is None

    • +
    +
    +
    Returns
    +

file paths to the downloaded framework checkpoint files for the +base weights of this recipe
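A minimal usage sketch for this method, based only on the signature and parameter descriptions above; the stub and the `.pth` extension filter are illustrative, and the import follows the `sparsezoo.models.zoo` module path documented on this page:

```python
from sparsezoo.models.zoo import Zoo

# Download the framework checkpoint(s) holding the base weights behind a recipe.
# The stub below is only an example; extensions=None would download every
# framework file for the model instead of just the PyTorch ".pth" checkpoints.
checkpoint_paths = Zoo.download_recipe_base_framework_files(
    "zoo:cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original",
    extensions=[".pth"],
)
print(checkpoint_paths)  # list of local paths to the downloaded checkpoint files
```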

    +
    +
    +
    + +
    +
    +static download_recipe_from_stub(stub: str) → str[source]
    +
    +
    Parameters
    +

    stub – a string model stub that points to a SparseZoo model. +recipe_type may be added as a stub parameter. i.e. +“model/stub/path”, “zoo:model/stub/path”, +“zoo:model/stub/path?recipe_type=original”

    +
    +
    Returns
    +

    file path of the downloaded recipe for that model
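A minimal usage sketch, assuming the stub form documented above; the stub itself is taken from the recipe tables earlier on this page and is only an example:

```python
from sparsezoo.models.zoo import Zoo

# Download the original sparsification recipe for a SparseZoo model by its stub.
recipe_path = Zoo.download_recipe_from_stub(
    "zoo:cv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original"
)
print(recipe_path)  # local path of the downloaded recipe file
```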

    +
    +
    +
    +
    static load_model(domain: str, sub_domain: str, architecture: str, sub_architecture: Optional[str], framework: str, repo: str, dataset: str, training_scheme: Optional[str], optim_name: str, optim_category: str, optim_target: Optional[str], release_version: Optional[str] = None, override_folder_name: Optional[str] = None, override_parent_path: Optional[str] = None, force_token_refresh: bool = False)sparsezoo.objects.model.Model[source]
    @@ -504,6 +542,25 @@

    Submodules +
    +sparsezoo.models.zoo.parse_zoo_stub(stub: str, valid_params: Optional[List[str]] = None) → Tuple[str, Dict[str, str]][source]
    +
    +
    Parameters
    +
      +
    • stub – A SparseZoo model stub. i.e. ‘model/stub/path’, +‘zoo:model/stub/path’, ‘zoo:model/stub/path?param1=value1&param2=value2’

    • +
    • valid_params – list of expected parameter names to be encoded in the +stub. Will raise a warning if any unexpected param names are given. Leave +as None to not raise any warnings. Default is None

    • +
    +
    +
    Returns
    +

    the parsed base stub and a dictionary of parameter names and their values
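A minimal sketch of how this helper splits a stub, based on the signature above; the stub and the `valid_params` list are illustrative:

```python
from sparsezoo.models.zoo import parse_zoo_stub

# Separate a stub's base path from its query parameters; valid_params only
# controls whether unexpected parameter names trigger a warning.
stub, params = parse_zoo_stub(
    "zoo:cv/classification/resnet_v1-18/pytorch/sparseml/imagenet/pruned-conservative?recipe_type=original",
    valid_params=["recipe_type"],
)
print(stub)    # base stub with the query string removed
print(params)  # {'recipe_type': 'original'}
```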

    +
    +
    +

    +

    Module contents

    diff --git a/sparsezoo/api/sparsezoo.nbutils.html b/sparsezoo/api/sparsezoo.nbutils.html index 7ec0d2e72dd..313417d3292 100644 --- a/sparsezoo/api/sparsezoo.nbutils.html +++ b/sparsezoo/api/sparsezoo.nbutils.html @@ -125,10 +125,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsezoo/api/sparsezoo.objects.html b/sparsezoo/api/sparsezoo.objects.html index 2c4fbfca539..da897c1c8ab 100644 --- a/sparsezoo/api/sparsezoo.objects.html +++ b/sparsezoo/api/sparsezoo.objects.html @@ -135,10 +135,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsezoo/api/sparsezoo.requests.html b/sparsezoo/api/sparsezoo.requests.html index de3c8cdb55e..84dec86bb74 100644 --- a/sparsezoo/api/sparsezoo.requests.html +++ b/sparsezoo/api/sparsezoo.requests.html @@ -128,10 +128,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsezoo/api/sparsezoo.utils.html b/sparsezoo/api/sparsezoo.utils.html index c81bd5e5d09..12f1d830a9b 100644 --- a/sparsezoo/api/sparsezoo.utils.html +++ b/sparsezoo/api/sparsezoo.utils.html @@ -127,10 +127,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsezoo/genindex.html b/sparsezoo/genindex.html index bf4b7ee7985..2bcb82991f6 100644 --- a/sparsezoo/genindex.html +++ b/sparsezoo/genindex.html @@ -105,10 +105,11 @@ -

    Help and Support

    +

    Help

    @@ -348,6 +349,10 @@

    D

  • download_framework_files() (sparsezoo.objects.model.Model method)
  • download_get_request() (in module sparsezoo.requests.download) +
  • +
  • download_recipe_base_framework_files() (sparsezoo.models.zoo.Zoo static method) +
  • +
  • download_recipe_from_stub() (sparsezoo.models.zoo.Zoo static method)
  • Downloadable (class in sparsezoo.objects.downloadable)
  • @@ -654,6 +659,8 @@

    O

    P

    - + - + - + - + - + - + - + - + - + + + + + - + - + - + - + - + - + - + - + - + - + - + - + @@ -379,15 +384,15 @@

    Object Detectionsparsezoo package -

    Help and Support

    +

    Help

    diff --git a/sparsezoo/searchindex.js b/sparsezoo/searchindex.js index ed611f222ff..e2d64d101c9 100644 --- a/sparsezoo/searchindex.js +++ b/sparsezoo/searchindex.js @@ -1 +1 @@ -Search.setIndex({docnames:["api/modules","api/sparsezoo","api/sparsezoo.models","api/sparsezoo.models.classification","api/sparsezoo.models.detection","api/sparsezoo.nbutils","api/sparsezoo.objects","api/sparsezoo.requests","api/sparsezoo.utils","index","installation","models","quicktour","recipes"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":3,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["api/modules.rst","api/sparsezoo.rst","api/sparsezoo.models.rst","api/sparsezoo.models.classification.rst","api/sparsezoo.models.detection.rst","api/sparsezoo.nbutils.rst","api/sparsezoo.objects.rst","api/sparsezoo.requests.rst","api/sparsezoo.utils.rst","index.rst","installation.md","models.md","quicktour.md","recipes.md"],objects:{"":{sparsezoo:[1,0,0,"-"]},"sparsezoo.main":{main:[1,1,1,""]},"sparsezoo.models":{classification:[3,0,0,"-"],detection:[4,0,0,"-"],zoo:[2,0,0,"-"]},"sparsezoo.models.classification":{efficientnet:[3,0,0,"-"],inception:[3,0,0,"-"],mobilenet:[3,0,0,"-"],resnet:[3,0,0,"-"],vgg:[3,0,0,"-"]},"sparsezoo.models.classification.efficientnet":{efficientnet_b0:[3,1,1,""],efficientnet_b4:[3,1,1,""]},"sparsezoo.models.classification.inception":{inception_v3:[3,1,1,""]},"sparsezoo.models.classification.mobilenet":{mobilenet_v1:[3,1,1,""],mobilenet_v2:[3,1,1,""]},"sparsezoo.models.classification.resnet":{resnet_101:[3,1,1,""],resnet_101_2x:[3,1,1,""],resnet_152:[3,1,1,""],resnet_18:[3,1,1,""],resnet_34:[3,1,1,""],resnet_50:[3,1,1,""],resnet_50_2x:[3,1,1,""]},"sparsezoo.models.classification.vgg":{vgg_11:[3,1,1,""],vgg_11bn:[3,1,1,""],vgg_13:[3,1,1,""],vgg_13bn:[3,1,1,""],vgg_16:[3,1,1,""],vgg_16bn:[3,1,1,""],vgg_19:[3,1,1,""],vgg_19bn:[3,1,1,""]},"sparsezoo.models.detection":{ssd:[4,0,0,"-"],yolo:[4,0,0,"-"]},"sparsezoo.models.detection.ssd":{ssd_resnet50_300:[4,1,1,""]},"sparsezoo.models.detection.yolo":{yolo_v3:[4,1,1,""]},"sparsezoo.models.zoo":{Zoo:[2,2,1,""]},"sparsezoo.models.zoo.Zoo":{load_model:[2,3,1,""],load_model_from_stub:[2,3,1,""],search_models:[2,3,1,""],search_optimized_models:[2,3,1,""],search_optimized_recipes:[2,3,1,""],search_recipes:[2,3,1,""],search_similar_models:[2,3,1,""]},"sparsezoo.nbutils":{utils:[5,0,0,"-"]},"sparsezoo.nbutils.utils":{ModelSelectWidgetContainer:[5,2,1,""],SelectDomainWidgetContainer:[5,2,1,""]},"sparsezoo.nbutils.utils.ModelSelectWidgetContainer":{create:[5,3,1,""],selected_framework:[5,3,1,""],selected_model:[5,3,1,""]},"sparsezoo.nbutils.utils.SelectDomainWidgetContainer":{create:[5,3,1,""],selected_domain_info:[5,3,1,""]},"sparsezoo.objects":{base:[6,0,0,"-"],data:[6,0,0,"-"],downloadable:[6,0,0,"-"],file:[6,0,0,"-"],metadata:[6,0,0,"-"],model:[6,0,0,"-"],optimization_recipe:[6,0,0,"-"],release_version:[6,0,0,"-"],result:[6,0,0,"-"],tag:[6,0,0,"-"],user:[6,0,0,"-"]},"sparsezoo.objects.base":{BaseObject:[6,2,1,""]},"sparsezoo.objects.base.BaseObject":{created:[6,3,1,""],dict:[6,3,1,""],modified:[6,3,1,""]},"sparsezoo.objects.data":{Data:[6,2,1,""]},"sparsezoo.objects.data.Data":{dataset:[6,3,1,""],loader:[6,3,1,""],name:[6,3,1,""],sample_batch:[6,3,1,""]},"sparsezoo.objects.downloadable":{Downloadable:[6,2,1,""]},"sparsezoo.o
bjects.downloadable.Downloadable":{dir_path:[6,3,1,""],download:[6,3,1,""],folder_name:[6,3,1,""],override_parent_path:[6,3,1,""]},"sparsezoo.objects.file":{File:[6,2,1,""],FileTypes:[6,2,1,""]},"sparsezoo.objects.file.File":{check_download:[6,3,1,""],checkpoint:[6,3,1,""],display_name:[6,3,1,""],download:[6,3,1,""],downloaded:[6,3,1,""],downloaded_path:[6,3,1,""],downloads:[6,3,1,""],file_id:[6,3,1,""],file_size:[6,3,1,""],file_type:[6,3,1,""],file_type_card:[6,3,1,""],file_type_data:[6,3,1,""],file_type_data_inputs:[6,3,1,""],file_type_data_labels:[6,3,1,""],file_type_data_originals:[6,3,1,""],file_type_data_outputs:[6,3,1,""],file_type_framework:[6,3,1,""],file_type_onnx:[6,3,1,""],file_type_onnx_gz:[6,3,1,""],file_type_recipe:[6,3,1,""],md5:[6,3,1,""],model_metadata:[6,3,1,""],operator_version:[6,3,1,""],path:[6,3,1,""],url:[6,3,1,""]},"sparsezoo.objects.file.FileTypes":{CARD:[6,4,1,""],DATA_INPUTS:[6,4,1,""],DATA_LABELS:[6,4,1,""],DATA_ORIGINALS:[6,4,1,""],DATA_OUTPUTS:[6,4,1,""],FRAMEWORK:[6,4,1,""],ONNX:[6,4,1,""],ONNX_GZ:[6,4,1,""],RECIPE:[6,4,1,""]},"sparsezoo.objects.metadata":{ModelMetadata:[6,2,1,""]},"sparsezoo.objects.metadata.ModelMetadata":{base_model:[6,3,1,""],model_id:[6,3,1,""],user_id:[6,3,1,""]},"sparsezoo.objects.model":{Model:[6,2,1,""]},"sparsezoo.objects.model.Model":{card_file:[6,3,1,""],data:[6,3,1,""],data_inputs:[6,3,1,""],data_labels:[6,3,1,""],data_loader:[6,3,1,""],data_originals:[6,3,1,""],data_outputs:[6,3,1,""],display_description:[6,3,1,""],display_name:[6,3,1,""],download:[6,3,1,""],download_framework_files:[6,3,1,""],framework_files:[6,3,1,""],onnx_file:[6,3,1,""],onnx_file_gz:[6,3,1,""],onnx_files:[6,3,1,""],original_recipe:[6,3,1,""],recipes:[6,3,1,""],release_version:[6,3,1,""],results:[6,3,1,""],sample_batch:[6,3,1,""],tags:[6,3,1,""],transfer_learning_recipe:[6,3,1,""],user:[6,3,1,""]},"sparsezoo.objects.optimization_recipe":{OptimizationRecipe:[6,2,1,""],OptimizationRecipeTypes:[6,2,1,""]},"sparsezoo.objects.optimization_recipe.OptimizationRecipe":{display_description:[6,3,1,""],display_name:[6,3,1,""],recipe_id:[6,3,1,""],recipe_type:[6,3,1,""],recipe_type_original:[6,3,1,""],recipe_type_transfer_learn:[6,3,1,""]},"sparsezoo.objects.optimization_recipe.OptimizationRecipeTypes":{ORIGINAL:[6,4,1,""],TRANSFER_LEARN:[6,4,1,""]},"sparsezoo.objects.release_version":{ReleaseVersion:[6,2,1,""]},"sparsezoo.objects.release_version.ReleaseVersion":{major_version:[6,3,1,""],minor_version:[6,3,1,""],patch_version:[6,3,1,""],published:[6,3,1,""],release_version_id:[6,3,1,""]},"sparsezoo.objects.result":{Result:[6,2,1,""]},"sparsezoo.objects.result.Result":{display_name:[6,3,1,""],model_id:[6,3,1,""],recorded_format:[6,3,1,""],recorded_units:[6,3,1,""],recorded_value:[6,3,1,""],result_category:[6,3,1,""],result_id:[6,3,1,""],result_type:[6,3,1,""]},"sparsezoo.objects.tag":{Tag:[6,2,1,""]},"sparsezoo.objects.tag.Tag":{display_name:[6,3,1,""],model_id:[6,3,1,""],name:[6,3,1,""],recipe_id:[6,3,1,""],tag_id:[6,3,1,""]},"sparsezoo.objects.user":{User:[6,2,1,""]},"sparsezoo.objects.user.User":{email:[6,3,1,""],name:[6,3,1,""],trusted:[6,3,1,""],user_id:[6,3,1,""]},"sparsezoo.requests":{authentication:[7,0,0,"-"],base:[7,0,0,"-"],download:[7,0,0,"-"],search:[7,0,0,"-"]},"sparsezoo.requests.authentication":{get_auth_header:[7,1,1,""]},"sparsezoo.requests.base":{ModelArgs:[7,2,1,""]},"sparsezoo.requests.base.ModelArgs":{architecture:[7,3,1,""],architecture_id:[7,3,1,""],dataset:[7,3,1,""],domain:[7,3,1,""],framework:[7,3,1,""],model_url_args:[7,3,1,""],model_url_root:[
7,3,1,""],optim_category:[7,3,1,""],optim_name:[7,3,1,""],optim_target:[7,3,1,""],optimization_id:[7,3,1,""],release_version:[7,3,1,""],repo:[7,3,1,""],stub:[7,3,1,""],sub_architecture:[7,3,1,""],sub_domain:[7,3,1,""],training_id:[7,3,1,""],training_scheme:[7,3,1,""]},"sparsezoo.requests.download":{download_get_request:[7,1,1,""]},"sparsezoo.requests.search":{search_get_request:[7,1,1,""]},"sparsezoo.utils":{data:[8,0,0,"-"],downloader:[8,0,0,"-"],helpers:[8,0,0,"-"],numpy:[8,0,0,"-"]},"sparsezoo.utils.data":{DataLoader:[8,2,1,""],Dataset:[8,2,1,""],RandomDataset:[8,2,1,""]},"sparsezoo.utils.data.DataLoader":{batch_as_list:[8,3,1,""],batch_size:[8,3,1,""],datasets:[8,3,1,""],get_batch:[8,3,1,""],infinite:[8,3,1,""],iter_steps:[8,3,1,""],num_items:[8,3,1,""]},"sparsezoo.utils.data.Dataset":{data:[8,3,1,""],name:[8,3,1,""]},"sparsezoo.utils.downloader":{DownloadProgress:[8,2,1,""],PreviouslyDownloadedError:[8,5,1,""],download_file:[8,1,1,""],download_file_iter:[8,1,1,""]},"sparsezoo.utils.downloader.DownloadProgress":{chunk_size:[8,3,1,""],content_length:[8,3,1,""],downloaded:[8,3,1,""],path:[8,3,1,""]},"sparsezoo.utils.helpers":{clean_path:[8,1,1,""],create_dirs:[8,1,1,""],create_parent_dirs:[8,1,1,""],create_tqdm_auto_constructor:[8,1,1,""],tqdm_auto:[8,4,1,""]},"sparsezoo.utils.numpy":{NumpyArrayBatcher:[8,2,1,""],load_numpy:[8,1,1,""],load_numpy_list:[8,1,1,""],save_numpy:[8,1,1,""],tensor_export:[8,1,1,""],tensors_export:[8,1,1,""]},"sparsezoo.utils.numpy.NumpyArrayBatcher":{append:[8,3,1,""],stack:[8,3,1,""]},sparsezoo:{main:[1,0,0,"-"],models:[2,0,0,"-"],nbutils:[5,0,0,"-"],objects:[6,0,0,"-"],requests:[7,0,0,"-"],utils:[8,0,0,"-"]}},objnames:{"0":["py","module","Python module"],"1":["py","function","Python function"],"2":["py","class","Python class"],"3":["py","method","Python method"],"4":["py","attribute","Python attribute"],"5":["py","exception","Python 
exception"]},objtypes:{"0":"py:module","1":"py:function","2":"py:class","3":"py:method","4":"py:attribute","5":"py:exception"},terms:{"00567":3,"02325":4,"02767":4,"03385":3,"04381":3,"04861":3,"100":[1,2,3,4,6,7,11,12,13],"101":[1,2,3,6,7,11,12,13],"101_2x":11,"11946":3,"11_bn":11,"13_bn":11,"1409":3,"1512":[3,4],"152":[1,2,3,6,7,11,12,13],"1556":3,"16_bn":11,"1704":3,"1801":3,"1804":4,"1905":3,"19_bn":11,"224":8,"300":4,"50_2x":11,"break":8,"class":[2,5,6,7,8],"default":[1,2,3,4,6,8],"enum":6,"export":8,"float":6,"function":[2,3,4,7,8],"import":12,"int":[2,6,7,8],"new":[7,8],"public":7,"return":[2,3,4,5,6,7,8],"static":2,"true":[2,3,4,6,7,8],"try":8,Such:[11,12],The:[1,2,3,4,6,7,8,11,12,13],Then:10,Will:6,about:[6,8],abs:[3,4],absolut:8,acceler:9,accuraci:[6,9,11,13],add:8,addit:[8,10,11,12],addition:9,after:[11,12],aggress:[1,2,3,4,6,7,11,12,13],algorithm:9,alia:8,all:[5,6,8,12],allow:[5,9],along:8,alreadi:[6,8],amount:[1,6],ani:[1,2,3,4,6,7,8,10,11,12],apart:8,api:[7,9],app:7,app_id:7,append:[6,8],appli:6,approach:9,arch:[11,12,13],architect:[11,12,13],architectur:[1,2,4,6,7,9,11,12,13],architecture_id:7,arg:[7,8],argument:[1,7],around:[7,9],arrai:8,art:9,arxiv:[3,4],as_list:8,associ:6,asyncio:8,augment:[1,2,3,4,6,7,11,12,13],auth:[2,3,4,6,7],authent:[0,1,9],authentication_typ:7,auto:8,automat:6,automl:9,avail:[2,6,9,12],bar:8,base:[0,1,2,3,4,5,8,11,12,13],base_model:6,baselin:[1,2,3,4,6,7,9,11,12,13],baseobject:6,batch:[3,6,8],batch_as_list:[6,8],batch_index:6,batch_siz:[6,8],batcher:8,bath_index:8,been:[6,8],befor:6,belong:[1,2,3,4,6,7],below:12,benchmark:6,better:9,blog:9,bool:[2,3,4,6,7,8],both:[7,9],break_batch:8,bug:9,build:9,built:9,cach:6,call:[6,7],can:[2,6,7,8,9,11,12],cannot:12,card:[6,11,12],card_fil:6,categori:[1,6,12],chang:7,check:6,check_download:6,checkpoint:6,child:6,child_folder_nam:6,chunk_siz:8,cifar10:[1,2,3,4,6,7,11,12,13],classif:[1,2,5,6,7,9,12],classifi:[11,12],clean:8,clean_path:8,clone:10,cloud:[6,7,9],coco:[4,11,13],code:[2,5,6,7,8,9,12],collect:[8,9],com:7,command:12,common:[2,9],compar:[11,12,13],compress:[8,11,12],comput:12,connect:9,conserv:[1,2,3,4,6,7,11,12,13],consol:9,constantli:9,constructor:2,contact:6,contain:[6,7,8,9,11,12],content:[0,9,11,12],content_length:8,continu:8,conveni:[2,3,4],convert:6,counter:8,cpu:[6,9],creat:[3,4,5,6,8,11,12,13],create_dir:8,create_parent_dir:8,create_tqdm_auto_constructor:8,creation:2,credenti:7,current:[2,7,8,11,12],cwd:1,dai:7,data:[0,1,11,12],data_input:6,data_label:6,data_load:6,data_origin:6,data_output:6,dataload:[6,8],dataset:[1,2,3,4,5,6,7,8,9,11,12,13],date:6,debian:10,deep:9,deepspars:[1,2,3,4,6,7,9,11,12,13],deepsparse_throughput:[1,2,3,4,6,7],defin:[11,12,13],definit:[11,12,13],degre:[1,2,3,4,6,7,9,11,12,13],deliv:9,depend:10,deploy:[1,2,3,4,6,7],depth:[11,12,13],describ:[1,2,3,4,6,7],descript:[1,6,11,12,13],descriptor:[11,12,13],desir:[6,8],dest_path:8,detect:[1,2,9],dev:9,dict:[5,6,7,8],dictionari:[6,8],differ:[2,9],dimens:8,dir:1,dir_path:6,directori:[1,8],disk:[6,8,11,12,13],displai:[5,6],display_descript:6,display_nam:6,document:9,doe:[11,12],domain:[1,2,5,6,7,11,12,13],done:[11,12,13],download:[0,1,5,9],download_fil:8,download_file_it:8,download_framework_fil:6,download_get_request:7,downloaded_path:[6,12],downloadprogress:8,driven:9,dtype:8,each:[5,6,8,11,12,13],easi:[9,12],edg:[1,2,3,4,6,7,11,12,13],effect:9,effici:8,efficientnet:[1,2,11,13],efficientnet_b0:3,efficientnet_b4:3,either:[7,8],email:6,empti:6,enabl:[9,12],engin:9,entri:12,env:6,environ:[7,10,11,12,13],error:8,etc:6,exampl:[11,12,13],
except:8,exist:[6,8],exit:1,expand:8,explor:10,export_dir:8,extens:[6,8,11,12],factor:[1,2,6,7],fail:8,fals:[2,3,4,6,7,8],faster:9,featur:9,few:9,field:8,file:[0,1,2,3,4,7,8,11,12],file_id:6,file_nam:7,file_path:8,file_s:6,file_typ:6,file_type_card:6,file_type_data:6,file_type_data_input:6,file_type_data_label:6,file_type_data_origin:6,file_type_data_output:6,file_type_framework:6,file_type_onnx:6,file_type_onnx_gz:6,file_type_recip:6,filetyp:6,filter:6,filter_dataset:5,filter_framework:5,float32:8,flow:6,folder:[1,2,3,4,6,8],folder_nam:6,follow:[6,11,12,13],footprint:9,forc:[5,7],force_token_refresh:[2,3,4,7],format:[1,6,8,11,12],found:[6,9],framework:[1,2,3,4,5,6,7,11,12,13],framework_fil:6,from:[1,2,6,7,8,9,11,12,13],full:7,gener:[7,9],get:[2,3,4,6,7,8],get_auth_head:7,get_batch:8,github:9,given:[2,6,8,9,12],gpu:[1,2,3,4,6,7,11,12,13],graph:[11,12,13],grow:9,guid:[11,12,13],gzip:6,gzipe:6,handl:[8,9],happen:12,has:[6,8,11,12,13],hash:6,hasn:6,header:7,help:[1,9,12],helper:[0,1],highli:9,host:9,how:[7,11,12,13],http:[3,4],identifi:[11,12,13],imag:[3,4,9],imagenet:[1,2,3,4,6,7,11,12,13],imagenett:[11,13],implement:8,incept:[1,2],inception_v3:[3,11,13],includ:9,increas:8,index:[6,8],infer:9,infinit:[6,8],info:[6,7,8],inform:[6,9,11,12],inp:8,input:[6,11,12],instal:[8,9,12],instanc:[2,8],integ:[6,8],integr:9,interact:[6,12],interfac:[6,9],ipywidget:[5,8],item:[6,8],iter:[6,8],iter_step:[6,8],its:12,json:7,jupyt:5,keep:10,kei:[7,8],kera:[11,12],kwarg:[6,7],label:[6,11,12],latest:6,learn:[6,11,12,13],length:[1,2,7],librari:9,like:10,limit:9,line:9,linux:10,list:[2,5,6,7,8],load:[2,6,8],load_model:2,load_model_from_stub:2,load_numpi:8,load_numpy_list:8,loader:[6,8],local:[6,8],locat:7,look:5,loss:9,lower_lr:[11,12,13],machin:[11,12,13],made:[11,12,13],magic:9,mai:12,main:[0,9],major:6,major_vers:6,make:7,manag:2,map:[5,8,11,13],markdown:6,match:[2,6,7,8],match_architectur:2,match_dataset:2,match_domain:2,match_framework:2,match_optim_categori:2,match_optim_nam:2,match_optim_target:2,match_repo:2,match_sub_architectur:2,match_sub_domain:2,match_training_schem:2,max:1,md5:6,memori:8,messag:1,metadata:[0,1,11,12],metric:[1,2,3,4,6,7,9,11,12,13],minor:6,minor_vers:6,mnist:11,mnistnet:11,mobilenet:[1,2],mobilenet_v1:[1,2,3,6,7,11,12,13],mobilenet_v2:[3,11],model:[0,1,5,7,8,9,13],model_id:6,model_metadata:6,model_url_arg:7,model_url_root:7,modelarg:[2,6,7],modelmetadata:6,modelselectwidgetcontain:5,moder:[1,2,3,4,6,7,11,12,13],modif:6,modifi:[6,11,12,13],modul:[0,9],multipl:[6,8],must:8,nad:8,name:[1,2,3,4,6,7,8,11,12,13],name_prefix:8,nativ:[11,12],nbutil:[0,1],ndarrai:[6,8],network:[9,11,12,13],neural:9,neuralmag:7,nightli:9,nlp:[1,2,6,7,11,12,13],nm_sparse_zoo_credenti:7,none:[1,2,3,4,6,7,8,11,12,13],normal:3,note:[8,12],notebook:[5,8,10],npy:8,npz:8,num_item:8,num_retri:8,num_sampl:8,number:[6,8],numpi:[0,1,6],numpyarraybatch:8,object:[0,1,2,3,4,5,7,8,9],obtain:[2,7],off:2,offici:[6,9],old:7,onc:[6,8],one:[2,5,6,8],onli:[7,8],onnx:[6,8,11,12],onnx_fil:[6,12],onnx_file_gz:6,onnx_gz:6,onto:9,operator_vers:6,opset:6,optim:[1,2,3,4,6,7,9,11,13],optim_categori:[1,2,3,4,6,7,11,12,13],optim_nam:[1,2,3,4,6,7,11,12,13],optim_target:[1,2,3,4,6,7,11,12,13],optimization_id:7,optimization_recip:[0,1,2],optimizationrecip:[2,6],optimizationrecipetyp:6,optimized_model:12,option:[1,2,3,4,6,7,8,11,12,13],order:[6,10],ordereddict:[6,8],org:[3,4],origin:[6,8,11,12,13],original_recip:6,otherwis:[2,3,4,6,7,8],output:[6,11,12],over:7,overrid:[2,3,4,6],override_folder_nam:[2,3,4,6],override_parent_path:[2,3,4,6],overvie
w:[11,12,13],overwrit:[6,8],packag:[0,9,12],page:[1,2,7],page_length:[1,2,7],parallel:8,param:8,paramet:[2,3,4,5,6,7,8],parent:[2,3,4,6,8],patch:6,patch_vers:6,patchvers:6,path:[2,3,4,6,7,8],per:1,perform:[6,9],pip:10,pipelin:9,place:12,point:12,posit:[1,6,8],pre:[6,11,12],prefix:8,preprocess:[11,12],previou:[6,8],previouslydownloadederror:8,print:[6,12],privat:9,process:[6,11,12],progress:[6,8],progress_titl:8,properli:12,properti:[5,6,7,8,11,12,13],prototyp:9,provid:[2,5],prune:[1,2,3,4,6,7,9,11,12,13],pruned_qu:[1,2,3,4,6,7,11,12,13],ptc:6,pth:6,publish:6,pypi:9,python:[9,10],pytorch:[1,2,3,4,6,7,11,12,13],quant:[11,12,13],quantiz:[9,11,12,13],quick:9,rais:8,random:8,randomdataset:8,recip:[2,6,9,11,12],recipe_id:6,recipe_nam:13,recipe_typ:6,recipe_type_origin:6,recipe_type_transfer_learn:6,recommend:10,record:6,recorded_format:6,recorded_unit:6,recorded_valu:6,recoveri:9,refresh:[2,3,4,6,7],refresh_token:6,relat:[5,6,7,8],releas:[1,2,6,7],release_vers:[0,1,2,7],release_version_id:6,releasevers:6,repo:[1,2,3,4,5,6,7,11,12,13],repositori:[9,10,11,12,13],repres:[2,6,8],represent:[11,12],request:[0,1,2,6,9],requir:[7,10],resnet50:4,resnet50_300:[11,13],resnet:[1,2,12],resnet_101:3,resnet_101_2x:3,resnet_152:3,resnet_18:3,resnet_34:3,resnet_50:[3,12],resnet_50_2x:3,resnet_v1:[1,2,6,7,11,12,13],resolut:6,respect:12,respons:7,result:[0,1],result_categori:6,result_id:6,result_typ:6,retri:8,retriev:6,root:7,run:[6,8,11,12],same:6,sampl:[6,8,11,12],sample_batch:6,save:[1,2,3,4,6,8],save_dir:1,save_numpi:8,scale:[1,2,6,7,11,12,13],scheme:[1,2,3,4,6,7,12],script:[1,9,10],search:[0,1,2,5,9],search_get_request:7,search_model:[2,12],search_optimized_model:[2,12],search_optimized_recip:2,search_recip:2,search_similar_model:2,segment:[1,2,6,7,11,12,13],select:5,selectdomainwidgetcontain:5,selected_domain_info:5,selected_framework:5,selected_model:5,semant:[1,6],set:[6,7,8],setup:[8,11,12,13],shape:8,should:7,show:[1,6,8],show_progress:[6,8],sign:[2,6],signifi:13,significantli:9,similar:2,simpl:9,simplifi:9,singl:8,size:[6,8],smaller:9,solut:[11,12,13],some:12,sourc:[1,2,3,4,5,6,7,8],spars:[1,6,9,11,12,13],sparseml:[1,2,3,4,6,7,9,11,12,13],sparsezoo:[10,11,12,13],sparsezoo_models_path:6,sparsifi:9,sparszoo:6,specif:[1,11,12,13],spp:[4,11,13],ssd:[1,2,11,13],ssd_resnet50_300:4,stabl:9,stack:8,standard:8,state:[8,9],std:8,step:[6,8],store:[6,13],str:[2,3,4,5,6,7,8],straight:12,string:2,structur:[11,12,13],stub:[2,7,11,12,13],sub:[1,2,4,5,6,7,11,12,13],sub_architectur:[1,2,4,6,7,11,12,13],sub_domain:[1,2,5,6,7,11,12,13],subdomain:5,submodul:[0,9],subpackag:[0,9],support:[7,8,9,11,12],system:[8,10,12],tag:[0,1,11,13],tag_id:6,take:[8,9],tar:[11,12],target:[1,2,3,4,6,7,11,12,13],tensor:[8,11,12],tensor_export:8,tensorflow:[1,2,3,4,6,7,11,12],tensorflow_v1:[11,12,13],tensors_export:8,termin:12,test:10,thei:8,them:[6,8],thi:[1,2,3,4,6,9,10,12],through:[5,6,8,11,12],time:[6,8,9],titl:8,token:[2,3,4,6,7],top1:[11,13],torchvis:[1,2,3,4,6,7,11,12,13],tour:9,tqdm:[6,8],tqdm_asyncio:8,tqdm_auto:8,tqdm_notebook:8,train:[1,2,3,4,5,6,7,11,12,13],training_id:7,training_schem:[1,2,3,4,6,7,11,12,13],transfer:[6,9,11,12],transfer_learn:[6,13],transfer_learning_recip:6,treat:8,trust:6,tupl:8,type:[5,6,7,8,11,12,13],typed_shap:8,ultralyt:[4,11,13],under:[2,3,4,6,7,13],union:[2,6,7,8],uniqu:7,unit:6,unpreced:9,upload:6,url:[6,8],url_path:8,use:[6,8,9,11,12],used:[1,2,3,4,5,6,7,8,11,12],user:[0,1,7,8],user_id:[6,7],using:[5,10],util:[0,1,6],valid:[11,13],valu:[2,6,7,8,9],variabl:[6,7],version:[1,2,6,7,8,9,11,13],vgg:[1,2,11,13],
vgg_11:3,vgg_11bn:3,vgg_13:3,vgg_13bn:3,vgg_16:3,vgg_16bn:3,vgg_19:3,vgg_19bn:3,via:9,view:1,virtual:10,vision:12,voc:[11,13],websit:9,weight:[11,12,13],well:[6,11,12],what:[7,8,11,12,13],when:8,where:[2,3,4,6,7],whether:6,which:[11,12,13],who:6,widget:5,width:[3,11,12,13],within:[5,6,11,12,13],without:[8,11,12],work:[6,8],worker:8,would:10,wrap:7,yaml:[11,12,13],yolo:[1,2],yolo_v3:[4,11,13],you:[9,10,12],your:[9,10,12],zero:8,zoo:[0,1,6,8,9]},titles:["sparsezoo","sparsezoo package","sparsezoo.models package","sparsezoo.models.classification package","sparsezoo.models.detection package","sparsezoo.nbutils package","sparsezoo.objects package","sparsezoo.requests package","sparsezoo.utils package","SparseZoo 0.1","Installation","Models","Quick Tour","Recipes"],titleterms:{api:12,authent:7,base:[6,7],classif:[3,11,13],common:12,consol:12,content:[1,2,3,4,5,6,7,8],data:[6,8],detect:[4,11,13],download:[6,7,8,12],efficientnet:3,file:6,helper:8,histori:9,imag:[11,13],incept:3,instal:10,learn:9,main:1,metadata:6,mobilenet:3,model:[2,3,4,6,11,12],modul:[1,2,3,4,5,6,7,8],more:9,nbutil:5,numpi:8,object:[6,11,13],optim:12,optimization_recip:6,overview:9,packag:[1,2,3,4,5,6,7,8],product:9,python:12,quick:12,recip:13,relat:9,releas:9,release_vers:6,request:7,resnet:3,resourc:9,result:6,script:12,search:[7,12],sparsezoo:[0,1,2,3,4,5,6,7,8,9],ssd:4,submodul:[1,2,3,4,5,6,7,8],subpackag:[1,2],tag:6,tour:12,user:6,util:[5,8],version:12,vgg:3,yolo:4,zoo:[2,12]}}) \ No newline at end of file +Search.setIndex({docnames:["api/modules","api/sparsezoo","api/sparsezoo.models","api/sparsezoo.models.classification","api/sparsezoo.models.detection","api/sparsezoo.nbutils","api/sparsezoo.objects","api/sparsezoo.requests","api/sparsezoo.utils","index","installation","models","quicktour","recipes"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":3,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["api/modules.rst","api/sparsezoo.rst","api/sparsezoo.models.rst","api/sparsezoo.models.classification.rst","api/sparsezoo.models.detection.rst","api/sparsezoo.nbutils.rst","api/sparsezoo.objects.rst","api/sparsezoo.requests.rst","api/sparsezoo.utils.rst","index.rst","installation.md","models.md","quicktour.md","recipes.md"],objects:{"":{sparsezoo:[1,0,0,"-"]},"sparsezoo.main":{main:[1,1,1,""]},"sparsezoo.models":{classification:[3,0,0,"-"],detection:[4,0,0,"-"],zoo:[2,0,0,"-"]},"sparsezoo.models.classification":{efficientnet:[3,0,0,"-"],inception:[3,0,0,"-"],mobilenet:[3,0,0,"-"],resnet:[3,0,0,"-"],vgg:[3,0,0,"-"]},"sparsezoo.models.classification.efficientnet":{efficientnet_b0:[3,1,1,""],efficientnet_b4:[3,1,1,""]},"sparsezoo.models.classification.inception":{inception_v3:[3,1,1,""]},"sparsezoo.models.classification.mobilenet":{mobilenet_v1:[3,1,1,""],mobilenet_v2:[3,1,1,""]},"sparsezoo.models.classification.resnet":{resnet_101:[3,1,1,""],resnet_101_2x:[3,1,1,""],resnet_152:[3,1,1,""],resnet_18:[3,1,1,""],resnet_34:[3,1,1,""],resnet_50:[3,1,1,""],resnet_50_2x:[3,1,1,""]},"sparsezoo.models.classification.vgg":{vgg_11:[3,1,1,""],vgg_11bn:[3,1,1,""],vgg_13:[3,1,1,""],vgg_13bn:[3,1,1,""],vgg_16:[3,1,1,""],vgg_16bn:[3,1,1,""],vgg_19:[3,1,1,""],vgg_19bn:[3,1,1,""]},"sparsezoo.models.detection":{ssd:[4,0,0,"-"],yolo:[4,0,0,"-"]},"sparsezoo.models.detection.ssd":{ssd_resnet50_300:[4,1,1,""]},"sparsezoo.m
odels.detection.yolo":{yolo_v3:[4,1,1,""]},"sparsezoo.models.zoo":{Zoo:[2,2,1,""],parse_zoo_stub:[2,1,1,""]},"sparsezoo.models.zoo.Zoo":{download_recipe_base_framework_files:[2,3,1,""],download_recipe_from_stub:[2,3,1,""],load_model:[2,3,1,""],load_model_from_stub:[2,3,1,""],search_models:[2,3,1,""],search_optimized_models:[2,3,1,""],search_optimized_recipes:[2,3,1,""],search_recipes:[2,3,1,""],search_similar_models:[2,3,1,""]},"sparsezoo.nbutils":{utils:[5,0,0,"-"]},"sparsezoo.nbutils.utils":{ModelSelectWidgetContainer:[5,2,1,""],SelectDomainWidgetContainer:[5,2,1,""]},"sparsezoo.nbutils.utils.ModelSelectWidgetContainer":{create:[5,3,1,""],selected_framework:[5,3,1,""],selected_model:[5,3,1,""]},"sparsezoo.nbutils.utils.SelectDomainWidgetContainer":{create:[5,3,1,""],selected_domain_info:[5,3,1,""]},"sparsezoo.objects":{base:[6,0,0,"-"],data:[6,0,0,"-"],downloadable:[6,0,0,"-"],file:[6,0,0,"-"],metadata:[6,0,0,"-"],model:[6,0,0,"-"],optimization_recipe:[6,0,0,"-"],release_version:[6,0,0,"-"],result:[6,0,0,"-"],tag:[6,0,0,"-"],user:[6,0,0,"-"]},"sparsezoo.objects.base":{BaseObject:[6,2,1,""]},"sparsezoo.objects.base.BaseObject":{created:[6,3,1,""],dict:[6,3,1,""],modified:[6,3,1,""]},"sparsezoo.objects.data":{Data:[6,2,1,""]},"sparsezoo.objects.data.Data":{dataset:[6,3,1,""],loader:[6,3,1,""],name:[6,3,1,""],sample_batch:[6,3,1,""]},"sparsezoo.objects.downloadable":{Downloadable:[6,2,1,""]},"sparsezoo.objects.downloadable.Downloadable":{dir_path:[6,3,1,""],download:[6,3,1,""],folder_name:[6,3,1,""],override_parent_path:[6,3,1,""]},"sparsezoo.objects.file":{File:[6,2,1,""],FileTypes:[6,2,1,""]},"sparsezoo.objects.file.File":{check_download:[6,3,1,""],checkpoint:[6,3,1,""],display_name:[6,3,1,""],download:[6,3,1,""],downloaded:[6,3,1,""],downloaded_path:[6,3,1,""],downloads:[6,3,1,""],file_id:[6,3,1,""],file_size:[6,3,1,""],file_type:[6,3,1,""],file_type_card:[6,3,1,""],file_type_data:[6,3,1,""],file_type_data_inputs:[6,3,1,""],file_type_data_labels:[6,3,1,""],file_type_data_originals:[6,3,1,""],file_type_data_outputs:[6,3,1,""],file_type_framework:[6,3,1,""],file_type_onnx:[6,3,1,""],file_type_onnx_gz:[6,3,1,""],file_type_recipe:[6,3,1,""],md5:[6,3,1,""],model_metadata:[6,3,1,""],operator_version:[6,3,1,""],path:[6,3,1,""],url:[6,3,1,""]},"sparsezoo.objects.file.FileTypes":{CARD:[6,4,1,""],DATA_INPUTS:[6,4,1,""],DATA_LABELS:[6,4,1,""],DATA_ORIGINALS:[6,4,1,""],DATA_OUTPUTS:[6,4,1,""],FRAMEWORK:[6,4,1,""],ONNX:[6,4,1,""],ONNX_GZ:[6,4,1,""],RECIPE:[6,4,1,""]},"sparsezoo.objects.metadata":{ModelMetadata:[6,2,1,""]},"sparsezoo.objects.metadata.ModelMetadata":{base_model:[6,3,1,""],model_id:[6,3,1,""],user_id:[6,3,1,""]},"sparsezoo.objects.model":{Model:[6,2,1,""]},"sparsezoo.objects.model.Model":{card_file:[6,3,1,""],data:[6,3,1,""],data_inputs:[6,3,1,""],data_labels:[6,3,1,""],data_loader:[6,3,1,""],data_originals:[6,3,1,""],data_outputs:[6,3,1,""],display_description:[6,3,1,""],display_name:[6,3,1,""],download:[6,3,1,""],download_framework_files:[6,3,1,""],framework_files:[6,3,1,""],onnx_file:[6,3,1,""],onnx_file_gz:[6,3,1,""],onnx_files:[6,3,1,""],original_recipe:[6,3,1,""],recipes:[6,3,1,""],release_version:[6,3,1,""],results:[6,3,1,""],sample_batch:[6,3,1,""],tags:[6,3,1,""],transfer_learning_recipe:[6,3,1,""],user:[6,3,1,""]},"sparsezoo.objects.optimization_recipe":{OptimizationRecipe:[6,2,1,""],OptimizationRecipeTypes:[6,2,1,""]},"sparsezoo.objects.optimization_recipe.OptimizationRecipe":{display_description:[6,3,1,""],display_name:[6,3,1,""],recipe_id:[6,3,1,""],recipe_type:[6,3,1,""],re
cipe_type_original:[6,3,1,""],recipe_type_transfer_learn:[6,3,1,""]},"sparsezoo.objects.optimization_recipe.OptimizationRecipeTypes":{ORIGINAL:[6,4,1,""],TRANSFER_LEARN:[6,4,1,""]},"sparsezoo.objects.release_version":{ReleaseVersion:[6,2,1,""]},"sparsezoo.objects.release_version.ReleaseVersion":{major_version:[6,3,1,""],minor_version:[6,3,1,""],patch_version:[6,3,1,""],published:[6,3,1,""],release_version_id:[6,3,1,""]},"sparsezoo.objects.result":{Result:[6,2,1,""]},"sparsezoo.objects.result.Result":{display_name:[6,3,1,""],model_id:[6,3,1,""],recorded_format:[6,3,1,""],recorded_units:[6,3,1,""],recorded_value:[6,3,1,""],result_category:[6,3,1,""],result_id:[6,3,1,""],result_type:[6,3,1,""]},"sparsezoo.objects.tag":{Tag:[6,2,1,""]},"sparsezoo.objects.tag.Tag":{display_name:[6,3,1,""],model_id:[6,3,1,""],name:[6,3,1,""],recipe_id:[6,3,1,""],tag_id:[6,3,1,""]},"sparsezoo.objects.user":{User:[6,2,1,""]},"sparsezoo.objects.user.User":{email:[6,3,1,""],name:[6,3,1,""],trusted:[6,3,1,""],user_id:[6,3,1,""]},"sparsezoo.requests":{authentication:[7,0,0,"-"],base:[7,0,0,"-"],download:[7,0,0,"-"],search:[7,0,0,"-"]},"sparsezoo.requests.authentication":{get_auth_header:[7,1,1,""]},"sparsezoo.requests.base":{ModelArgs:[7,2,1,""]},"sparsezoo.requests.base.ModelArgs":{architecture:[7,3,1,""],architecture_id:[7,3,1,""],dataset:[7,3,1,""],domain:[7,3,1,""],framework:[7,3,1,""],model_url_args:[7,3,1,""],model_url_root:[7,3,1,""],optim_category:[7,3,1,""],optim_name:[7,3,1,""],optim_target:[7,3,1,""],optimization_id:[7,3,1,""],release_version:[7,3,1,""],repo:[7,3,1,""],stub:[7,3,1,""],sub_architecture:[7,3,1,""],sub_domain:[7,3,1,""],training_id:[7,3,1,""],training_scheme:[7,3,1,""]},"sparsezoo.requests.download":{download_get_request:[7,1,1,""]},"sparsezoo.requests.search":{search_get_request:[7,1,1,""]},"sparsezoo.utils":{data:[8,0,0,"-"],downloader:[8,0,0,"-"],helpers:[8,0,0,"-"],numpy:[8,0,0,"-"]},"sparsezoo.utils.data":{DataLoader:[8,2,1,""],Dataset:[8,2,1,""],RandomDataset:[8,2,1,""]},"sparsezoo.utils.data.DataLoader":{batch_as_list:[8,3,1,""],batch_size:[8,3,1,""],datasets:[8,3,1,""],get_batch:[8,3,1,""],infinite:[8,3,1,""],iter_steps:[8,3,1,""],num_items:[8,3,1,""]},"sparsezoo.utils.data.Dataset":{data:[8,3,1,""],name:[8,3,1,""]},"sparsezoo.utils.downloader":{DownloadProgress:[8,2,1,""],PreviouslyDownloadedError:[8,5,1,""],download_file:[8,1,1,""],download_file_iter:[8,1,1,""]},"sparsezoo.utils.downloader.DownloadProgress":{chunk_size:[8,3,1,""],content_length:[8,3,1,""],downloaded:[8,3,1,""],path:[8,3,1,""]},"sparsezoo.utils.helpers":{clean_path:[8,1,1,""],create_dirs:[8,1,1,""],create_parent_dirs:[8,1,1,""],create_tqdm_auto_constructor:[8,1,1,""],tqdm_auto:[8,4,1,""]},"sparsezoo.utils.numpy":{NumpyArrayBatcher:[8,2,1,""],load_numpy:[8,1,1,""],load_numpy_list:[8,1,1,""],save_numpy:[8,1,1,""],tensor_export:[8,1,1,""],tensors_export:[8,1,1,""]},"sparsezoo.utils.numpy.NumpyArrayBatcher":{append:[8,3,1,""],stack:[8,3,1,""]},sparsezoo:{main:[1,0,0,"-"],models:[2,0,0,"-"],nbutils:[5,0,0,"-"],objects:[6,0,0,"-"],requests:[7,0,0,"-"],utils:[8,0,0,"-"]}},objnames:{"0":["py","module","Python module"],"1":["py","function","Python function"],"2":["py","class","Python class"],"3":["py","method","Python method"],"4":["py","attribute","Python attribute"],"5":["py","exception","Python 
exception"]},objtypes:{"0":"py:module","1":"py:function","2":"py:class","3":"py:method","4":"py:attribute","5":"py:exception"},terms:{"00567":3,"02325":4,"02767":4,"03385":3,"04381":3,"04861":3,"100":[1,2,3,4,6,7,11,12,13],"101":[1,2,3,6,7,11,12,13],"101_2x":11,"11946":3,"11_bn":11,"13_bn":11,"1409":3,"1512":[3,4],"152":[1,2,3,6,7,11,12,13],"1556":3,"16_bn":11,"1704":3,"1801":3,"1804":4,"1905":3,"19_bn":11,"224":8,"300":4,"50_2x":11,"960px":9,"break":8,"class":[2,5,6,7,8],"default":[1,2,3,4,6,8],"enum":6,"export":8,"final":9,"float":6,"function":[2,3,4,7,8],"import":12,"int":[2,6,7,8],"new":[7,8],"public":7,"return":[2,3,4,5,6,7,8],"static":2,"true":[2,3,4,6,7,8],"try":8,"while":9,For:9,Such:[11,12],The:[1,2,3,4,6,7,8,9,11,12,13],Then:10,Will:[2,6],about:[6,8],abs:[3,4],absolut:8,acceler:9,accuraci:[6,9,11,13],activ:9,add:8,added:2,addit:[8,10,11,12],addition:9,after:[11,12],aggress:[1,2,3,4,6,7,11,12,13],algorithm:9,alia:8,all:[2,5,6,8,9,12],allow:[5,9],along:8,alreadi:[6,8],altern:9,amount:[1,6],ani:[1,2,3,4,6,7,8,10,11,12],apart:8,api:[7,9],app:7,app_id:7,append:[6,8],appli:[6,9],approach:9,arch:[11,12,13],architect:[11,12,13],architectur:[1,2,4,6,7,9,11,12,13],architecture_id:7,arg:[7,8],argument:[1,7],around:[7,9],arrai:8,arxiv:[3,4],as_list:8,associ:6,asyncio:8,augment:[1,2,3,4,6,7,11,12,13],auth:[2,3,4,6,7],authent:[0,1,9],authentication_typ:7,auto:8,automat:6,avail:[2,6,9,12],bar:8,base:[0,1,2,3,4,5,8,11,12,13],base_model:6,baselin:[1,2,3,4,6,7,9,11,12,13],baseobject:6,batch:[3,6,8],batch_as_list:[6,8],batch_index:6,batch_siz:[6,8],batcher:8,bath_index:8,been:[6,8],befor:6,belong:[1,2,3,4,6,7],below:12,benchmark:6,blob:12,blog:9,bool:[2,3,4,6,7,8],both:[7,9],break_batch:8,bug:9,build:9,built:9,cach:6,call:[6,7],can:[2,6,7,8,9,11,12],cannot:12,card:[6,11,12],card_fil:6,categori:[1,6,12],chang:7,check:6,check_download:6,checkpoint:[2,6],child:6,child_folder_nam:6,chunk_siz:8,cifar10:[1,2,3,4,6,7,11,12,13],classif:[1,2,5,6,7,9,12],classifi:[11,12],clean:8,clean_path:8,clone:10,cloud:[6,7,9],coco:[4,11,13],code:[2,5,6,7,8,9,12],collect:[8,9],com:[7,9,12],command:12,common:[2,9],compar:[11,12,13],compress:[8,11,12],comput:12,connect:9,conserv:[1,2,3,4,6,7,11,12,13],consol:9,constantli:9,constructor:2,contact:6,contain:[6,7,8,9,11,12],content:[0,9,11,12],content_length:8,continu:8,conveni:[2,3,4],convert:6,correctli:9,counter:8,cpu:[6,9],creat:[3,4,5,6,8,9,11,12,13],create_dir:8,create_parent_dir:8,create_tqdm_auto_constructor:8,creation:2,credenti:7,current:[2,7,8,11,12],cwd:1,dai:7,data:[0,1,11,12],data_input:6,data_label:6,data_load:6,data_origin:6,data_output:6,dataload:[6,8],dataset:[1,2,3,4,5,6,7,8,9,11,12,13],date:6,debian:10,deep:9,deepspars:[1,2,3,4,6,7,9,11,12,13],deepsparse_throughput:[1,2,3,4,6,7],defin:[11,12,13],definit:[11,12,13],degre:[1,2,3,4,6,7,9,11,12,13],depend:10,deploi:9,deploy:[1,2,3,4,6,7],depth:[11,12,13],describ:[1,2,3,4,6,7],descript:[1,6,11,12,13],descriptor:[11,12,13],desir:[6,8],dest_path:8,detect:[1,2,9],dev:9,dict:[2,5,6,7,8],dictionari:[2,6,8],differ:[2,9],dimens:8,dir:1,dir_path:6,direct:9,directori:[1,8],disk:[6,8,11,12,13],displai:[5,6],display_descript:6,display_nam:6,doc:9,document:9,doe:[11,12],domain:[1,2,5,6,7,11,12,13],done:[11,12,13],download:[0,1,2,5,9],download_fil:8,download_file_it:8,download_framework_fil:6,download_get_request:7,download_recipe_base_framework_fil:2,download_recipe_from_stub:2,downloaded_path:[6,12],downloadprogress:8,driven:9,dtype:8,each:[5,6,8,11,12,13],easi:12,easili:9,edg:[1,2,3,4,6,7,11,12,13],edit:9,effect:9,effici:8
,efficientnet:[1,2,11,13],efficientnet_b0:3,efficientnet_b4:3,either:[7,8],email:6,empti:[2,6],enabl:[9,12],encod:[2,9],encompass:9,engin:9,entri:12,env:6,environ:[7,10,11,12,13],error:8,etc:6,everyth:9,exampl:[9,11,12,13],except:8,exist:[6,8],exit:1,expand:8,expect:2,explor:10,export_dir:8,extens:[2,6,8,11,12],factor:[1,2,6,7],fail:8,fals:[2,3,4,6,7,8],faster:9,featur:9,few:9,fft:9,field:8,file:[0,1,2,3,4,7,8,11,12],file_id:6,file_nam:7,file_path:8,file_s:6,file_typ:6,file_type_card:6,file_type_data:6,file_type_data_input:6,file_type_data_label:6,file_type_data_origin:6,file_type_data_output:6,file_type_framework:6,file_type_onnx:6,file_type_onnx_gz:6,file_type_recip:6,filetyp:6,filter:[2,6],filter_dataset:5,filter_framework:5,float32:8,flow:[6,9],folder:[1,2,3,4,6,8],folder_nam:6,follow:[6,11,12,13],forc:[5,7],force_token_refresh:[2,3,4,7],format:[1,6,8,9,11,12],found:[6,9],framework:[1,2,3,4,5,6,7,11,12,13],framework_fil:6,from:[1,2,6,7,8,9,11,12,13],full:[7,9],gener:[7,9],get:[2,3,4,6,7,8],get_auth_head:7,get_batch:8,github:[9,12],give:9,given:[2,6,8,9,12],gpu:[1,2,3,4,6,7,9,11,12,13],graph:[11,12,13],grow:9,guid:[11,12,13],gzip:6,gzipe:6,handl:[8,9],happen:12,has:[6,8,11,12,13],hash:6,hasn:6,header:7,help:[1,9,12],helper:[0,1],highli:9,host:9,how:[7,9,11,12,13],http:[3,4,9,12],identifi:[11,12,13],imag:[3,4,9],imagenet:[1,2,3,4,6,7,11,12,13],imagenett:[11,13],img:9,implement:[8,9],improv:9,incept:[1,2],inception_v3:[3,11,13],includ:9,increas:8,index:[6,8],induc:9,infer:9,infinit:[6,8],info:[6,7,8],inform:[6,9,11,12],inp:8,input:[6,11,12],instal:[8,9,12],instanc:[2,8],integ:[6,8],interact:[6,12],interfac:6,ipywidget:[5,8],item:[6,8],iter:[6,8],iter_step:[6,8],its:12,json:7,jupyt:5,keep:10,kei:[7,8],kera:[11,12],kwarg:[6,7],label:[6,11,12],latest:6,learn:[6,11,12,13],leav:2,length:[1,2,7],level:9,like:10,limit:9,line:9,linux:10,list:[2,5,6,7,8],load:[2,6,8],load_model:2,load_model_from_stub:2,load_numpi:8,load_numpy_list:8,loader:[6,8],local:[6,8],locat:7,look:5,loss:9,lower_lr:[11,12,13],machin:[11,12,13],made:[11,12,13],magic:9,mai:[2,12],main:[0,9,12],major:6,major_vers:6,make:7,manag:2,map:[5,8,11,13],markdown:6,match:[2,6,7,8,9],match_architectur:2,match_dataset:2,match_domain:2,match_framework:2,match_optim_categori:2,match_optim_nam:2,match_optim_target:2,match_repo:2,match_sub_architectur:2,match_sub_domain:2,match_training_schem:2,max:1,md5:6,memori:8,messag:1,metadata:[0,1,11,12],metric:[1,2,3,4,6,7,9,11,12,13],minor:6,minor_vers:6,mnist:11,mnistnet:11,mobilenet:[1,2],mobilenet_v1:[1,2,3,6,7,11,12,13],mobilenet_v2:[3,11],model:[0,1,5,7,8,9,13],model_id:6,model_metadata:6,model_url_arg:7,model_url_root:7,modelarg:[2,6,7],modelmetadata:6,modelselectwidgetcontain:5,moder:[1,2,3,4,6,7,11,12,13],modif:6,modifi:[6,11,12,13],modul:[0,9],multipl:[6,8],must:8,nad:8,name:[1,2,3,4,6,7,8,11,12,13],name_prefix:8,nativ:[11,12],natur:9,nbutil:[0,1],ndarrai:[6,8],nearli:9,network:[9,11,12,13],neural:9,neuralmag:[7,9,12],nightli:9,nlp:[1,2,6,7,11,12,13],nm_sparse_zoo_credenti:7,none:[1,2,3,4,6,7,8,11,12,13],normal:3,note:[8,12],notebook:[5,8,10],npy:8,npz:8,num_item:8,num_retri:8,num_sampl:8,number:[6,8],numpi:[0,1,6],numpyarraybatch:8,object:[0,1,2,3,4,5,7,8,9],obtain:[2,7],occur:9,off:2,offici:[6,9],old:7,onc:[6,8],one:[2,5,6,8],onli:[7,8,9],onnx:[6,8,11,12],onnx_fil:[6,12],onnx_file_gz:6,onnx_gz:6,onto:9,operator_vers:6,opset:6,optim:[1,2,3,4,6,7,9],optim_categori:[1,2,3,4,6,7],optim_nam:[1,2,3,4,6,7],optim_target:[1,2,3,4,6,7],optimization_id:7,optimization_recip:[0,1,2],optimizationrecip:[2
,6],optimizationrecipetyp:6,optimized_model:12,option:[1,2,3,4,6,7,8,11,12,13],order:[6,10],ordereddict:[6,8],org:[3,4],origin:[2,6,8,11,12,13],original_recip:6,otherwis:[2,3,4,6,7,8],output:[6,11,12],over:[7,9],overprecis:9,overrid:[2,3,4,6],override_folder_nam:[2,3,4,6],override_parent_path:[2,3,4,6],overview:[11,12,13],overwrit:[6,8],packag:[0,9,12],page:[1,2,7],page_length:[1,2,7],parallel:8,param1:2,param2:2,param:[2,8],paramet:[2,3,4,5,6,7,8],parameter:9,parent:[2,3,4,6,8],pars:2,parse_zoo_stub:2,patch:6,patch_vers:6,patchvers:6,path:[2,3,4,6,7,8],per:1,perform:[6,9],pip:10,place:12,plu:9,point:[2,12],posit:[1,6,8],pre:[6,11,12],prefix:8,preprocess:[11,12],previou:[6,8],previouslydownloadederror:8,print:[6,12],privat:9,process:[6,9,11,12],product:9,progress:[6,8],progress_titl:8,properli:12,properti:[5,6,7,8,11,12,13],prototyp:9,provid:[2,5],prune:[1,2,3,4,6,7,9,11,12,13],pruned_qu:[1,2,3,4,6,7,11,12,13],ptc:[2,6],pth:[2,6],publish:6,pypi:9,python:[9,10],pytorch:[1,2,3,4,6,7,11,12,13],quant:[11,12,13],quantiz:[9,11,12,13],quick:9,rais:[2,8],random:8,randomdataset:8,recip:[2,6,9,11,12],recipe_id:6,recipe_typ:[2,6,13],recipe_type_origin:6,recipe_type_transfer_learn:6,recommend:10,record:6,recorded_format:6,recorded_unit:6,recorded_valu:6,recov:9,recoveri:9,redund:9,refresh:[2,3,4,6,7],refresh_token:6,relat:[5,6,7,8],releas:[1,2,6,7],release_vers:[0,1,2,7],release_version_id:6,releasevers:6,remov:9,repo:[1,2,3,4,5,6,7,11,12,13],repositori:[9,10,11,12,13],repres:[2,6,8],represent:[11,12],request:[0,1,2,6,9],requir:[7,10],resnet50:4,resnet50_300:[11,13],resnet:[1,2,12],resnet_101:3,resnet_101_2x:3,resnet_152:3,resnet_18:3,resnet_34:3,resnet_50:[3,12],resnet_50_2x:3,resnet_v1:[1,2,6,7,11,12,13],resolut:6,respect:12,respons:7,result:[0,1,9],result_categori:6,result_id:6,result_typ:6,retri:8,retriev:6,root:7,run:[6,8,11,12],same:[6,9],sampl:[6,8,11,12],sample_batch:6,save:[1,2,3,4,6,8],save_dir:1,save_numpi:8,scale:[1,2,6,7,11,12,13],scheme:[1,2,3,4,6,7,12],script:[1,9,10],search:[0,1,2,5,9],search_get_request:7,search_model:[2,12],search_optimized_model:[2,12],search_optimized_recip:2,search_recip:2,search_similar_model:2,segment:[1,2,6,7,11,12,13],select:5,selectdomainwidgetcontain:5,selected_domain_info:5,selected_framework:5,selected_model:5,semant:[1,6],set:[6,7,8],setup:[8,11,12,13],shape:8,should:7,show:[1,6,8],show_progress:[6,8],sign:[2,6],signifi:13,significantli:9,similar:2,simpl:9,simplifi:9,singl:8,size:[6,8],smaller:9,solut:[11,12,13],some:12,sourc:[1,2,3,4,5,6,7,8,9],spars:[1,6,9,11,12],sparse_categori:[11,12,13],sparse_nam:[11,12,13],sparse_target:[11,12,13],sparseml:[1,2,3,4,6,7,9,11,12,13],sparsezoo:[10,11,12,13],sparsezoo_models_path:6,sparsif:[11,12],sparsifi:[9,11,12,13],sparsiti:9,sparszoo:6,specif:[1,11,12,13],spp:[4,11,13],src:9,ssd:[1,2,11,13],ssd_resnet50_300:4,stabl:9,stack:8,standard:8,state:8,std:8,step:[6,8],store:[6,13],str:[2,3,4,5,6,7,8],straight:12,string:2,structur:[11,12,13],stub:[2,7,11,12,13],sub:[1,2,4,5,6,7,11,12,13],sub_architectur:[1,2,4,6,7,11,12,13],sub_domain:[1,2,5,6,7,11,12,13],subdomain:5,submodul:[0,9],subpackag:[0,9],suit:9,support:[7,8,9,11,12],svg:9,system:[8,10,12],tag:[0,1,11,13],tag_id:6,take:[8,9],tar:[11,12],target:[1,2,3,4,6,7,11,12,13],techniqu:9,tensor:[8,11,12],tensor_export:8,tensorflow:[1,2,3,4,6,7,11,12],tensorflow_v1:[11,12,13],tensors_export:8,termin:12,test:10,thei:8,them:[6,8],thi:[1,2,3,4,6,9,10,12],through:[5,6,8,11,12],time:[6,8,9],titl:8,token:[2,3,4,6,7],top1:[11,13],top:9,torchvis:[1,2,3,4,6,7,11,12,13],tour:9,tqdm:[6,8
],tqdm_asyncio:8,tqdm_auto:8,tqdm_notebook:8,train:[1,2,3,4,5,6,7,9,11,12,13],training_id:7,training_schem:[1,2,3,4,6,7,11,12,13],transfer:[2,6,9,11,12],transfer_learn:[6,13],transfer_learning_recip:6,treat:8,trust:6,tupl:[2,8],type:[5,6,7,8,11,12,13],typed_shap:8,ultralyt:[4,11,13],under:[2,3,4,6,7,13],unexpect:2,union:[2,6,7,8],uniqu:7,unit:6,upload:6,url:[6,8],url_path:8,use:[6,8,11,12],used:[1,2,3,4,5,6,7,8,11,12],user:[0,1,7,8],user_id:[6,7],using:[5,9,10],util:[0,1,6],valid:[11,13],valid_param:2,valu:[2,6,7,8,9],value1:2,value2:2,variabl:[6,7],version:[1,2,6,7,8,9,11,13],vgg:[1,2,11,13],vgg_11:3,vgg_11bn:3,vgg_13:3,vgg_13bn:3,vgg_16:3,vgg_16bn:3,vgg_19:3,vgg_19bn:3,via:9,view:1,virtual:10,vision:12,voc:[11,13],warn:2,websit:9,weight:[2,11,12,13],well:[6,11,12],what:[7,8,11,12,13],when:[8,9],where:[2,3,4,6,7],whether:6,which:[11,12,13],who:6,widget:5,width:[3,9,11,12,13],winograd:9,within:[5,6,11,12,13],without:[8,11,12],work:[6,8],worker:8,would:10,wrap:7,yaml:[11,12],yolo:[1,2],yolo_v3:[4,11,13],you:[9,10,12],your:[9,10,12],zero:8,zoo:[0,1,6,8,9]},titles:["sparsezoo","sparsezoo package","sparsezoo.models package","sparsezoo.models.classification package","sparsezoo.models.detection package","sparsezoo.nbutils package","sparsezoo.objects package","sparsezoo.requests package","sparsezoo.utils package","SparseZoo 0.1","Installation","Models","Quick Tour","Recipes"],titleterms:{api:12,authent:7,base:[6,7],classif:[3,11,13],common:12,consol:12,content:[1,2,3,4,5,6,7,8],data:[6,8],detect:[4,11,13],download:[6,7,8,12],efficientnet:3,file:6,helper:8,histori:9,imag:[11,13],incept:3,instal:10,learn:9,main:1,metadata:6,mobilenet:3,model:[2,3,4,6,11,12],modul:[1,2,3,4,5,6,7,8],more:9,nbutil:5,numpi:8,object:[6,11,13],optim:12,optimization_recip:6,overview:9,packag:[1,2,3,4,5,6,7,8],python:12,quick:12,recip:13,releas:9,release_vers:6,request:7,resnet:3,resourc:9,result:6,script:12,search:[7,12],sparsezoo:[0,1,2,3,4,5,6,7,8,9],sparsif:9,ssd:4,submodul:[1,2,3,4,5,6,7,8],subpackag:[1,2],tag:6,tour:12,user:6,util:[5,8],version:12,vgg:3,yolo:4,zoo:[2,12]}}) \ No newline at end of file diff --git a/sparsify/userguide/images/image_0.jpg b/sparsify/_images/image_0.jpg similarity index 100% rename from sparsify/userguide/images/image_0.jpg rename to sparsify/_images/image_0.jpg diff --git a/sparsify/userguide/images/image_10.jpg b/sparsify/_images/image_10.jpg similarity index 100% rename from sparsify/userguide/images/image_10.jpg rename to sparsify/_images/image_10.jpg diff --git a/sparsify/userguide/images/image_11.jpg b/sparsify/_images/image_11.jpg similarity index 100% rename from sparsify/userguide/images/image_11.jpg rename to sparsify/_images/image_11.jpg diff --git a/sparsify/userguide/images/image_12.jpg b/sparsify/_images/image_12.jpg similarity index 100% rename from sparsify/userguide/images/image_12.jpg rename to sparsify/_images/image_12.jpg diff --git a/sparsify/userguide/images/image_13.jpg b/sparsify/_images/image_13.jpg similarity index 100% rename from sparsify/userguide/images/image_13.jpg rename to sparsify/_images/image_13.jpg diff --git a/sparsify/userguide/images/image_15.jpg b/sparsify/_images/image_15.jpg similarity index 100% rename from sparsify/userguide/images/image_15.jpg rename to sparsify/_images/image_15.jpg diff --git a/sparsify/userguide/images/image_15a.jpg b/sparsify/_images/image_15a.jpg similarity index 100% rename from sparsify/userguide/images/image_15a.jpg rename to sparsify/_images/image_15a.jpg diff --git a/sparsify/userguide/images/image_15b.jpg 
b/sparsify/_images/image_15b.jpg similarity index 100% rename from sparsify/userguide/images/image_15b.jpg rename to sparsify/_images/image_15b.jpg diff --git a/sparsify/userguide/images/image_16.jpg b/sparsify/_images/image_16.jpg similarity index 100% rename from sparsify/userguide/images/image_16.jpg rename to sparsify/_images/image_16.jpg diff --git a/sparsify/userguide/images/image_17.jpg b/sparsify/_images/image_17.jpg similarity index 100% rename from sparsify/userguide/images/image_17.jpg rename to sparsify/_images/image_17.jpg diff --git a/sparsify/userguide/images/image_18.jpg b/sparsify/_images/image_18.jpg similarity index 100% rename from sparsify/userguide/images/image_18.jpg rename to sparsify/_images/image_18.jpg diff --git a/sparsify/userguide/images/image_19.jpg b/sparsify/_images/image_19.jpg similarity index 100% rename from sparsify/userguide/images/image_19.jpg rename to sparsify/_images/image_19.jpg diff --git a/sparsify/userguide/images/image_2.jpg b/sparsify/_images/image_2.jpg similarity index 100% rename from sparsify/userguide/images/image_2.jpg rename to sparsify/_images/image_2.jpg diff --git a/sparsify/userguide/images/image_21.jpg b/sparsify/_images/image_21.jpg similarity index 100% rename from sparsify/userguide/images/image_21.jpg rename to sparsify/_images/image_21.jpg diff --git a/sparsify/userguide/images/image_21a.jpg b/sparsify/_images/image_21a.jpg similarity index 100% rename from sparsify/userguide/images/image_21a.jpg rename to sparsify/_images/image_21a.jpg diff --git a/sparsify/userguide/images/image_21b.jpg b/sparsify/_images/image_21b.jpg similarity index 100% rename from sparsify/userguide/images/image_21b.jpg rename to sparsify/_images/image_21b.jpg diff --git a/sparsify/userguide/images/image_22.jpg b/sparsify/_images/image_22.jpg similarity index 100% rename from sparsify/userguide/images/image_22.jpg rename to sparsify/_images/image_22.jpg diff --git a/sparsify/userguide/images/image_23.jpg b/sparsify/_images/image_23.jpg similarity index 100% rename from sparsify/userguide/images/image_23.jpg rename to sparsify/_images/image_23.jpg diff --git a/sparsify/userguide/images/image_24.jpg b/sparsify/_images/image_24.jpg similarity index 100% rename from sparsify/userguide/images/image_24.jpg rename to sparsify/_images/image_24.jpg diff --git a/sparsify/userguide/images/image_25.jpg b/sparsify/_images/image_25.jpg similarity index 100% rename from sparsify/userguide/images/image_25.jpg rename to sparsify/_images/image_25.jpg diff --git a/sparsify/userguide/images/image_27.jpg b/sparsify/_images/image_27.jpg similarity index 100% rename from sparsify/userguide/images/image_27.jpg rename to sparsify/_images/image_27.jpg diff --git a/sparsify/userguide/images/image_29.jpg b/sparsify/_images/image_29.jpg similarity index 100% rename from sparsify/userguide/images/image_29.jpg rename to sparsify/_images/image_29.jpg diff --git a/sparsify/userguide/images/image_29a.jpg b/sparsify/_images/image_29a.jpg similarity index 100% rename from sparsify/userguide/images/image_29a.jpg rename to sparsify/_images/image_29a.jpg diff --git a/sparsify/userguide/images/image_29b.jpg b/sparsify/_images/image_29b.jpg similarity index 100% rename from sparsify/userguide/images/image_29b.jpg rename to sparsify/_images/image_29b.jpg diff --git a/sparsify/userguide/images/image_29c.jpg b/sparsify/_images/image_29c.jpg similarity index 100% rename from sparsify/userguide/images/image_29c.jpg rename to sparsify/_images/image_29c.jpg diff --git 
a/sparsify/userguide/images/image_29d.jpg b/sparsify/_images/image_29d.jpg similarity index 100% rename from sparsify/userguide/images/image_29d.jpg rename to sparsify/_images/image_29d.jpg diff --git a/sparsify/userguide/images/image_29e.jpg b/sparsify/_images/image_29e.jpg similarity index 100% rename from sparsify/userguide/images/image_29e.jpg rename to sparsify/_images/image_29e.jpg diff --git a/sparsify/userguide/images/image_3.jpg b/sparsify/_images/image_3.jpg similarity index 100% rename from sparsify/userguide/images/image_3.jpg rename to sparsify/_images/image_3.jpg diff --git a/sparsify/userguide/images/image_30.jpg b/sparsify/_images/image_30.jpg similarity index 100% rename from sparsify/userguide/images/image_30.jpg rename to sparsify/_images/image_30.jpg diff --git a/sparsify/userguide/images/image_31.jpg b/sparsify/_images/image_31.jpg similarity index 100% rename from sparsify/userguide/images/image_31.jpg rename to sparsify/_images/image_31.jpg diff --git a/sparsify/userguide/images/image_31a.jpg b/sparsify/_images/image_31a.jpg similarity index 100% rename from sparsify/userguide/images/image_31a.jpg rename to sparsify/_images/image_31a.jpg diff --git a/sparsify/userguide/images/image_32.jpg b/sparsify/_images/image_32.jpg similarity index 100% rename from sparsify/userguide/images/image_32.jpg rename to sparsify/_images/image_32.jpg diff --git a/sparsify/userguide/images/image_33.jpg b/sparsify/_images/image_33.jpg similarity index 100% rename from sparsify/userguide/images/image_33.jpg rename to sparsify/_images/image_33.jpg diff --git a/sparsify/userguide/images/image_35.jpg b/sparsify/_images/image_35.jpg similarity index 100% rename from sparsify/userguide/images/image_35.jpg rename to sparsify/_images/image_35.jpg diff --git a/sparsify/userguide/images/image_36.jpg b/sparsify/_images/image_36.jpg similarity index 100% rename from sparsify/userguide/images/image_36.jpg rename to sparsify/_images/image_36.jpg diff --git a/sparsify/userguide/images/image_36a.jpg b/sparsify/_images/image_36a.jpg similarity index 100% rename from sparsify/userguide/images/image_36a.jpg rename to sparsify/_images/image_36a.jpg diff --git a/sparsify/userguide/images/image_37.jpg b/sparsify/_images/image_37.jpg similarity index 100% rename from sparsify/userguide/images/image_37.jpg rename to sparsify/_images/image_37.jpg diff --git a/sparsify/userguide/images/image_38.jpg b/sparsify/_images/image_38.jpg similarity index 100% rename from sparsify/userguide/images/image_38.jpg rename to sparsify/_images/image_38.jpg diff --git a/sparsify/userguide/images/image_39.jpg b/sparsify/_images/image_39.jpg similarity index 100% rename from sparsify/userguide/images/image_39.jpg rename to sparsify/_images/image_39.jpg diff --git a/sparsify/userguide/images/image_4.jpg b/sparsify/_images/image_4.jpg similarity index 100% rename from sparsify/userguide/images/image_4.jpg rename to sparsify/_images/image_4.jpg diff --git a/sparsify/userguide/images/image_40.jpg b/sparsify/_images/image_40.jpg similarity index 100% rename from sparsify/userguide/images/image_40.jpg rename to sparsify/_images/image_40.jpg diff --git a/sparsify/userguide/images/image_41.jpg b/sparsify/_images/image_41.jpg similarity index 100% rename from sparsify/userguide/images/image_41.jpg rename to sparsify/_images/image_41.jpg diff --git a/sparsify/userguide/images/image_42.jpg b/sparsify/_images/image_42.jpg similarity index 100% rename from sparsify/userguide/images/image_42.jpg rename to sparsify/_images/image_42.jpg diff --git 
a/sparsify/userguide/images/image_43.jpg b/sparsify/_images/image_43.jpg similarity index 100% rename from sparsify/userguide/images/image_43.jpg rename to sparsify/_images/image_43.jpg diff --git a/sparsify/userguide/images/image_44.jpg b/sparsify/_images/image_44.jpg similarity index 100% rename from sparsify/userguide/images/image_44.jpg rename to sparsify/_images/image_44.jpg diff --git a/sparsify/userguide/images/image_45.jpg b/sparsify/_images/image_45.jpg similarity index 100% rename from sparsify/userguide/images/image_45.jpg rename to sparsify/_images/image_45.jpg diff --git a/sparsify/userguide/images/image_46.jpg b/sparsify/_images/image_46.jpg similarity index 100% rename from sparsify/userguide/images/image_46.jpg rename to sparsify/_images/image_46.jpg diff --git a/sparsify/userguide/images/image_47.jpg b/sparsify/_images/image_47.jpg similarity index 100% rename from sparsify/userguide/images/image_47.jpg rename to sparsify/_images/image_47.jpg diff --git a/sparsify/userguide/images/image_48.jpg b/sparsify/_images/image_48.jpg similarity index 100% rename from sparsify/userguide/images/image_48.jpg rename to sparsify/_images/image_48.jpg diff --git a/sparsify/userguide/images/image_49.jpg b/sparsify/_images/image_49.jpg similarity index 100% rename from sparsify/userguide/images/image_49.jpg rename to sparsify/_images/image_49.jpg diff --git a/sparsify/userguide/images/image_5.jpg b/sparsify/_images/image_5.jpg similarity index 100% rename from sparsify/userguide/images/image_5.jpg rename to sparsify/_images/image_5.jpg diff --git a/sparsify/userguide/images/image_50.jpg b/sparsify/_images/image_50.jpg similarity index 100% rename from sparsify/userguide/images/image_50.jpg rename to sparsify/_images/image_50.jpg diff --git a/sparsify/userguide/images/image_51.jpg b/sparsify/_images/image_51.jpg similarity index 100% rename from sparsify/userguide/images/image_51.jpg rename to sparsify/_images/image_51.jpg diff --git a/sparsify/userguide/images/image_52.jpg b/sparsify/_images/image_52.jpg similarity index 100% rename from sparsify/userguide/images/image_52.jpg rename to sparsify/_images/image_52.jpg diff --git a/sparsify/userguide/images/image_53.jpg b/sparsify/_images/image_53.jpg similarity index 100% rename from sparsify/userguide/images/image_53.jpg rename to sparsify/_images/image_53.jpg diff --git a/sparsify/userguide/images/image_54.jpg b/sparsify/_images/image_54.jpg similarity index 100% rename from sparsify/userguide/images/image_54.jpg rename to sparsify/_images/image_54.jpg diff --git a/sparsify/userguide/images/image_55.jpg b/sparsify/_images/image_55.jpg similarity index 100% rename from sparsify/userguide/images/image_55.jpg rename to sparsify/_images/image_55.jpg diff --git a/sparsify/userguide/images/image_56.jpg b/sparsify/_images/image_56.jpg similarity index 100% rename from sparsify/userguide/images/image_56.jpg rename to sparsify/_images/image_56.jpg diff --git a/sparsify/userguide/images/image_57.jpg b/sparsify/_images/image_57.jpg similarity index 100% rename from sparsify/userguide/images/image_57.jpg rename to sparsify/_images/image_57.jpg diff --git a/sparsify/userguide/images/image_58.jpg b/sparsify/_images/image_58.jpg similarity index 100% rename from sparsify/userguide/images/image_58.jpg rename to sparsify/_images/image_58.jpg diff --git a/sparsify/userguide/images/image_59.jpg b/sparsify/_images/image_59.jpg similarity index 100% rename from sparsify/userguide/images/image_59.jpg rename to sparsify/_images/image_59.jpg diff --git 
a/sparsify/userguide/images/image_6.jpg b/sparsify/_images/image_6.jpg similarity index 100% rename from sparsify/userguide/images/image_6.jpg rename to sparsify/_images/image_6.jpg diff --git a/sparsify/userguide/images/image_61.jpg b/sparsify/_images/image_61.jpg similarity index 100% rename from sparsify/userguide/images/image_61.jpg rename to sparsify/_images/image_61.jpg diff --git a/sparsify/userguide/images/image_62.jpg b/sparsify/_images/image_62.jpg similarity index 100% rename from sparsify/userguide/images/image_62.jpg rename to sparsify/_images/image_62.jpg diff --git a/sparsify/userguide/images/image_63.jpg b/sparsify/_images/image_63.jpg similarity index 100% rename from sparsify/userguide/images/image_63.jpg rename to sparsify/_images/image_63.jpg diff --git a/sparsify/userguide/images/image_64.jpg b/sparsify/_images/image_64.jpg similarity index 100% rename from sparsify/userguide/images/image_64.jpg rename to sparsify/_images/image_64.jpg diff --git a/sparsify/userguide/images/image_65.jpg b/sparsify/_images/image_65.jpg similarity index 100% rename from sparsify/userguide/images/image_65.jpg rename to sparsify/_images/image_65.jpg diff --git a/sparsify/userguide/images/image_8.jpg b/sparsify/_images/image_8.jpg similarity index 100% rename from sparsify/userguide/images/image_8.jpg rename to sparsify/_images/image_8.jpg diff --git a/sparsify/userguide/images/image_9.jpg b/sparsify/_images/image_9.jpg similarity index 100% rename from sparsify/userguide/images/image_9.jpg rename to sparsify/_images/image_9.jpg diff --git a/sparsify/_modules/index.html b/sparsify/_modules/index.html index 9d8026a5198..2413d853a07 100644 --- a/sparsify/_modules/index.html +++ b/sparsify/_modules/index.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    @@ -206,8 +207,7 @@

    All modules for which code is available

  • sparsify.schemas.system
  • sparsify.utils.system
  • sparsify.workers.base
- • sparsify.workers.base_manager
- • sparsify.workers.base_wrapper
+ • sparsify.workers.manager
  • sparsify.workers.projects_benchmark
  • sparsify.workers.projects_data
  • sparsify.workers.projects_model
diff --git a/sparsify/_modules/marshmallow/schema.html index 75611710301..dcb1bd5f4b3 100644 --- a/sparsify/_modules/marshmallow/schema.html +++ b/sparsify/_modules/marshmallow/schema.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/peewee.html b/sparsify/_modules/peewee.html index b06be7dd779..9b2b7eb8b65 100644 --- a/sparsify/_modules/peewee.html +++ b/sparsify/_modules/peewee.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/playhouse/sqlite_ext.html b/sparsify/_modules/playhouse/sqlite_ext.html index 9c0e88e6848..d84b29f09f8 100644 --- a/sparsify/_modules/playhouse/sqlite_ext.html +++ b/sparsify/_modules/playhouse/sqlite_ext.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/app.html b/sparsify/_modules/sparsify/app.html index f0a72be5dcb..8443dff85d6 100644 --- a/sparsify/_modules/sparsify/app.html +++ b/sparsify/_modules/sparsify/app.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    @@ -189,6 +190,7 @@

    Source code for sparsify.app

     # limitations under the License.
     
     import argparse
    +import atexit
     import logging
     import os
     from typing import Any, Union
    @@ -272,7 +274,13 @@ 

    Source code for sparsify.app

     
     
     def _worker_setup():
    -    JobWorkerManager().app_startup()
    +    manager = JobWorkerManager()
    +
    +    def _interrupt():
    +        manager.shutdown()
    +
    +    atexit.register(_interrupt)
    +    manager.start()
     
     
     
    [docs]def run( diff --git a/sparsify/_modules/sparsify/blueprints/code_samples/pytorch__training.html b/sparsify/_modules/sparsify/blueprints/code_samples/pytorch__training.html index e7d41feb42f..6070a36c257 100644 --- a/sparsify/_modules/sparsify/blueprints/code_samples/pytorch__training.html +++ b/sparsify/_modules/sparsify/blueprints/code_samples/pytorch__training.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/blueprints/utils/helpers.html b/sparsify/_modules/sparsify/blueprints/utils/helpers.html index 47d0d2adf08..cd7f8c6b57a 100644 --- a/sparsify/_modules/sparsify/blueprints/utils/helpers.html +++ b/sparsify/_modules/sparsify/blueprints/utils/helpers.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/blueprints/utils/projects.html b/sparsify/_modules/sparsify/blueprints/utils/projects.html index 7236065a0b4..186f776f2d9 100644 --- a/sparsify/_modules/sparsify/blueprints/utils/projects.html +++ b/sparsify/_modules/sparsify/blueprints/utils/projects.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/blueprints/utils/projects_benchmark.html b/sparsify/_modules/sparsify/blueprints/utils/projects_benchmark.html index 0f093f2c3ec..d577af0bc29 100644 --- a/sparsify/_modules/sparsify/blueprints/utils/projects_benchmark.html +++ b/sparsify/_modules/sparsify/blueprints/utils/projects_benchmark.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/blueprints/utils/projects_data.html b/sparsify/_modules/sparsify/blueprints/utils/projects_data.html index 043aefb666b..ffc095c789c 100644 --- a/sparsify/_modules/sparsify/blueprints/utils/projects_data.html +++ b/sparsify/_modules/sparsify/blueprints/utils/projects_data.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/blueprints/utils/projects_optimizations.html b/sparsify/_modules/sparsify/blueprints/utils/projects_optimizations.html index 99d88d4dbed..3746f9287b4 100644 --- a/sparsify/_modules/sparsify/blueprints/utils/projects_optimizations.html +++ b/sparsify/_modules/sparsify/blueprints/utils/projects_optimizations.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    @@ -423,7 +424,7 @@

    Source code for sparsify.blueprints.utils.projects_optimizations

    sparsity = 0.85 # TODO: dynamically choose sparsity level balance_perf_loss = 1.0 filter_min_sparsity = 0.4 - filter_min_perf_gain = 0.75 + filter_min_perf_gain = 0.6 filter_min_recovery = -1.0 return PruningSettings( diff --git a/sparsify/_modules/sparsify/blueprints/utils/projects_optimizations_pruning.html b/sparsify/_modules/sparsify/blueprints/utils/projects_optimizations_pruning.html index 1757a3ab27b..972f95047a8 100644 --- a/sparsify/_modules/sparsify/blueprints/utils/projects_optimizations_pruning.html +++ b/sparsify/_modules/sparsify/blueprints/utils/projects_optimizations_pruning.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    @@ -195,8 +196,7 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    import logging -from collections import OrderedDict -from enum import Enum +from collections import OrderedDict, defaultdict from typing import Any, Dict, List, NamedTuple, Tuple, Union import numpy @@ -206,7 +206,6 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    __all__ = [ "PruningSettings", - "PruningNodeEvaluator", "PruningModelEvaluator", ] @@ -227,290 +226,244 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    ) -class ValueRescaler(object): +class _PruningPointRescaler(object): """ Convenience class for normalizing / rescaling values """ def __init__(self): - self._data = [] # type: List[Tuple[float, float, float]] - self._avg_mins = None - self._avg_ranges = None - - def add_rescale_point(self, values: List[float]): - """ - :param values: a list of values to add a point (min, max) for later rescaling - """ - minimum = numpy.min(values).item() if values else 0.0 - maximum = numpy.max(values).item() if values else 0.0 - self._data.append((minimum, maximum, maximum - minimum)) - - def rescale(self, val: float) -> float: - """ - :param val: the value to rescale - :return: the rescaled / normalized value based off of previously added points - """ - if self._avg_mins is None or self._avg_ranges is None: - self._set_averages() - - rescaled = val - self._avg_mins - if self._avg_ranges: - rescaled = rescaled / self._avg_ranges + self.min_val = None # type: Union[None, float] + self.max_val = None # type: Union[None, float] + + def __repr__(self): + return f"_ValueRescaler(min_val={self.min_val}, max_val={self.max_val})" + + def __call__(self, val: Union[None, float]) -> Union[None, float]: + if val is None: + return val + + # normalize the value such that it will fall in the range [0, max] + # by subtracting the min + rescaled = val - self.min_val if self.min_val else val + + # normalize the value such that it will fall in the range [0, 1.0] + # by dividing by the max range + max_range = ( + self.max_val - self.min_val + if self.max_val is not None and self.min_val is not None + else 0.0 + ) + rescaled = rescaled / max_range if max_range else rescaled return rescaled - def _set_averages(self): - self._avg_mins = ( - numpy.average([d[0] for d in self._data]).item() if self._data else 0.0 - ) - self._avg_ranges = ( - numpy.average([d[2] for d in self._data]).item() if self._data else 0.0 - ) + def add_rescale_series(self, min_series_val: float, max_series_val: float): + if self.min_val is None or min_series_val < self.min_val: + self.min_val = min_series_val + if self.max_val is None or max_series_val > self.max_val: + self.max_val = max_series_val -class PruningNodeSeriesSmoothingType(Enum): + +class _PruningNodeSeriesValue: """ - Enum for how to smooth a node's pruning estimations / measurements + Simple data object to map the number of sparse params and the defined sparsity + at that level to a series value """ - none = "none" - maximum = "maximum" - minimum = "minimum" + def __init__( + self, + sparse_params: int, + sparsity: float, + value: float, + node_id: Union[str, None] = None, + ): + self.sparse_params = sparse_params + self.sparsity = sparsity + self.value = value + self.node_id = node_id + + def __repr__(self): + return ( + f"_PruningNodeSeriesValue(sparse_params={self.sparse_params}, " + f"sparsity={self.sparsity}, value={self.value}, node_id={self.node_id})" + ) -class PruningNodeSeries(object): +class _PruningNodeSeries(object): """ - Series of measurements / estimations for a pruning node - - :param measurements: a dictionary containing the measurements for the series - :param baseline_measurement_key: the baseline key that should be used - for series comparisons - :param smoothing_type: the smoothing type to apply to the measurements; - useful for smoothing out sensitivity measurements for pruning - :param invert_sensitivity: True to invert the sensitivity values, - False otherwise + Object to contain a series of data that allows estimations of values + and cleansing of the data for 
optimization tasks. """ def __init__( self, - measurements: Union[None, Dict[str, float]], - baseline_measurement_key: str, - smoothing_type: PruningNodeSeriesSmoothingType, - invert_sensitivity: bool, + sparsity_measurements: Union[None, Dict[str, float]], + baseline_key: str, + num_params: int, + increasing: bool, ): - self._baseline = None # type: Union[None, float] - self._measurements = [] # type: List[Tuple[float, float]] - self._measurements_smoothed = [] # type: List[Tuple[float, float]] - self._smoothing_type = smoothing_type - self._set_measurements(measurements, baseline_measurement_key) - self._invert_sensitivity = invert_sensitivity - - @property - def baseline(self) -> Union[None, float]: - """ - :return: the baseline measurement value - """ - return self._baseline - - @property - def measurements(self) -> List[Tuple[float, float]]: - """ - :return: the list of measurement tuples (sparsity, measurement) - """ - return self._measurements - - @property - def measurements_smoothed(self) -> List[Tuple[float, float]]: - """ - :return: the list of measurement tuples (sparsity, measurement) - after applying the smoothing type - """ - return self._measurements_smoothed - - @property - def smoothing_type(self) -> PruningNodeSeriesSmoothingType: - """ - :return: the smoothing type to apply to the measurements; - useful for smoothing out sensitivity measurements for pruning - """ - return self._smoothing_type - - @property - def invert_sensitivity(self) -> bool: - """ - :return: True to invert the sensitivity values, - False otherwise - """ - return self._invert_sensitivity - - def sparse( - self, sparsity: Union[None, float], smooth: bool = False - ) -> Union[None, float]: - """ - :param sparsity: the sparsity to get a measurement for - :param smooth: True to pull from the measurements_smoothed, - False otherwise - :return: the measurement at the given sparsity - """ - if not self._measurements: + self.sparsity_measurements = sparsity_measurements + self.baseline_key = baseline_key + self.num_params = num_params + self.increasing = increasing + + self.value_baseline = None # type: Union[None, float] + self.value_min = None # type: Union[None, float] + self.value_max = None # type: Union[None, float] + self.value_smoothed_min = None # type: Union[None, float] + self.value_smoothed_max = None # type: Union[None, float] + self.value_optimized_min = None # type: Union[None, float] + self.value_optimized_max = None # type: Union[None, float] + + self.data = [] # type: List[_PruningNodeSeriesValue] + self.data_smoothed = [] # type: List[_PruningNodeSeriesValue] + self.data_optimization = [] # type: List[_PruningNodeSeriesValue] + self._set_data() + + def estimated_value(self, sparsity: Union[None, float]) -> Union[None, float]: + if not self.data: return None if not sparsity: - return self.baseline + return self.value_baseline - _, interpolated = interpolate_list_linear( - self._measurements if not smooth else self._measurements_smoothed, sparsity - )[0] + measurements = [(val.sparsity, val.value) for val in self.data] + _, interpolated = interpolate_list_linear(measurements, sparsity)[0] return interpolated - def sparse_measurements( - self, smooth: bool = False - ) -> List[Tuple[float, Union[None, float]]]: - """ - :param smooth: True to pull from the measurements_smoothed, - False otherwise - :return: a list of tuples containing the sparsity from - 0 to 99% at increments of 1% and the associated measurements - """ - sparsities = [v / 100.0 for v in range(100)] - - if not 
self._measurements: - return [v for v in zip(sparsities, [None for _ in range(len(sparsities))])] - - interpolated = interpolate_list_linear( - self._measurements if not smooth else self._measurements_smoothed, - sparsities, - ) - - return interpolated - - def sparse_gain( - self, sparsity: Union[None, float], smooth: bool = False - ) -> Union[None, float]: + def estimated_gain(self, sparsity: Union[None, float]) -> Union[None, float]: """ :param sparsity: the sparsity to get the gain value for - :param smooth: True to pull from the measurements_smoothed, - False otherwise :return: the ratio of the predicted value at the given sparsity as compared with the baseline value """ - if not self._measurements: + + if not self.data: return None if not sparsity: return 1.0 - sparse = self.sparse(sparsity, smooth) + value = self.estimated_value(sparsity) - if not sparse or not self._baseline: + if not value or not self.value_baseline: return 0.0 - return self._baseline / sparse + return self.value_baseline / value - def sparse_sensitivity( - self, sparsity: Union[None, float], smooth: bool = False - ) -> Union[None, float]: + def estimated_sensitivity(self, sparsity: Union[None, float]) -> Union[None, float]: """ :param sparsity: the sparsity to get the sensitivity value for - :param smooth: True to pull from the measurements_smoothed, - False otherwise :return: the sensitivity comparison (difference) of the measurement at the given sparsity compared with the baseline """ - sparse = self.sparse(sparsity, smooth) - baseline = self.baseline - return PruningNodeSeries._sensitivity(sparse, baseline, self.invert_sensitivity) + if not self.data: + return None - def sparse_sensitivities( - self, smooth: bool = False - ) -> List[Tuple[float, Union[None, float]]]: - """ - :param smooth: True to pull from the measurements_smoothed, - False otherwise - :return: a list of tuples containing the sparsity from - 0 to 99% at increments of 1% and the associated sensitivity value - """ - measurements = self.sparse_measurements(smooth) - baseline = self.baseline + if not sparsity: + return None - return PruningNodeSeries._sensitivities( - measurements, baseline, self.invert_sensitivity - ) + value = self.estimated_value(sparsity) - def _set_measurements( - self, - measurements: Dict[str, float], - baseline_measurement_key: str, - ): - if not measurements: - return + if value is None or self.value_baseline is None: + return None + + # subtract from baseline if decreasing so lower values = more sensitive: perf + # subtract from sparse val if increasing so higher values = more sensitive: loss - meas_min = None - meas_max = None + return ( + (self.value_baseline - value) + if not self.increasing + else (value - self.value_baseline) + ) - for key, meas in measurements.items(): - meas_smoothed = meas + def costs( + self, rescaler: _PruningPointRescaler, use_max: bool + ) -> List[Tuple[float, Union[None, float]]]: + """ + :param rescaler: the rescaler to use to rescale the optimized values + to the [0.0, 1.0] range + :param use_max: True to use the max value for all measurements, + False to interpolate between. 
Max value is used for FLOPS performance + because the slopes for smaller convs are less and therefore prioritized + improperly by optimization + :return: a list of tuples containing (sparsity, cost) + """ + if not self.data_optimization: + return [(float(index) / 100.0, None) for index in range(100)] - if key == baseline_measurement_key: - self._baseline = meas - else: - if meas_min is None or meas < meas_min: - meas_min = meas + def _get_val(v: float) -> float: + return rescaler(v if not use_max else self.value_optimized_max) - if meas_max is None or meas < meas_max: - meas_max = meas + measurements = [ + (val.sparsity, _get_val(val.value)) for val in self.data_optimization + ] - if ( - self._smoothing_type == PruningNodeSeriesSmoothingType.minimum - and meas > meas_min - ): - meas_smoothed = meas_min + # creates the data at increments of 1% levels from 0 to 99 + costs = interpolate_list_linear( + measurements, [float(index) / 100.0 for index in range(100)] + ) - if ( - self._smoothing_type == PruningNodeSeriesSmoothingType.maximum - and meas < meas_max - ): - meas_smoothed = meas_max + return costs - self._measurements.append((float(key), meas)) - self._measurements_smoothed.append((float(key), meas_smoothed)) + def _set_data(self): + if not self.sparsity_measurements: + return - self._measurements.sort(key=lambda x: x[0]) - self._measurements_smoothed.sort(key=lambda x: x[0]) + # smoothed values will always keep with the trend given + # if set as increasing, value will always be equal to or greater than last value + # if set as decreasing, value will always be equal to or less than last value + smoothed_val = None - @staticmethod - def _sensitivity( - sparse: Union[float, None], baseline: Union[float, None], invert: bool - ) -> Union[float, None]: - if sparse is None or baseline is None: - return None + for key, meas in self.sparsity_measurements.items(): + if key == self.baseline_key: + self.value_baseline = meas - sensitivity = (baseline - sparse) if invert else (sparse - baseline) + sparsity = float(key) + sparse_params = int(sparsity * self.num_params) + self.data.append(_PruningNodeSeriesValue(sparse_params, sparsity, meas)) - return sensitivity + if ( + smoothed_val is None + or (self.increasing and meas >= smoothed_val) + or (not self.increasing and meas <= smoothed_val) + ): + smoothed_val = meas - @staticmethod - def _sensitivities( - measurements: List[Tuple[float, Union[float, None]]], - baseline: Union[float, None], - invert: bool, - ): - sensitivities = [] + self.data_smoothed.append( + _PruningNodeSeriesValue(sparse_params, sparsity, smoothed_val) + ) - for (sparsity, measurement) in measurements: - sensitivities.append( - ( - sparsity, - PruningNodeSeries._sensitivity(measurement, baseline, invert), - ) + self.data.sort(key=lambda x: x.sparse_params) + self.data_smoothed.sort(key=lambda x: x.sparse_params) + self.value_min = min([val.value for val in self.data]) + self.value_max = max([val.value for val in self.data]) + self.value_smoothed_min = min([val.value for val in self.data_smoothed]) + self.value_smoothed_max = max([val.value for val in self.data_smoothed]) + + for val in self.data_smoothed: + optimize_value = val.value + + if not self.increasing: + # need to invert the graph so the values are always increasing + # for the optimization algorithms. 
+ # do this by reformulating as the difference from the smoothed max + # ex: -x^2 + 5 => 5 - (-x^2 + 5) + optimize_value = self.value_smoothed_max - optimize_value + + self.data_optimization.append( + _PruningNodeSeriesValue(val.sparse_params, val.sparsity, optimize_value) ) - return sensitivities + self.data_optimization.sort(key=lambda x: x.sparse_params) + self.value_optimized_min = min([val.value for val in self.data_optimization]) + self.value_optimized_max = max([val.value for val in self.data_optimization]) -
    [docs]class PruningNodeEvaluator(object): +class _PruningNodeEvaluator(object): """ Evaluator for a model's node for pruning. Able to estimate the effect of pruning on the node for performance, loss, etc @@ -528,135 +481,95 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    perf_analysis: Union[None, Dict], loss_analysis: Union[None, Dict], ): - self._node_id = node_id - self._analysis = PruningNodeEvaluator._extract_node_analysis( + self.node_id = node_id + self.analysis = _PruningNodeEvaluator._extract_node_analysis( node_id, model_analysis ) - self._perf_analysis = PruningNodeEvaluator._extract_node_perf_analysis( + self.num_params = self.analysis["params"] + self.num_prunable_params = self.analysis["prunable_params"] + self.num_flops = self.analysis["flops"] + + self.analysis_perf = _PruningNodeEvaluator._extract_node_perf_analysis( node_id, perf_analysis ) - self._loss_analysis = PruningNodeEvaluator._extract_node_loss_analysis( + self.analysis_loss = _PruningNodeEvaluator._extract_node_loss_analysis( node_id, loss_analysis ) - self._params = PruningNodeSeries( - measurements=OrderedDict( + self.series_params = _PruningNodeSeries( + sparsity_measurements=OrderedDict( [ - ("0.0", self._analysis["params"]), - ( - "1.0", - self._analysis["params"] - self._analysis["prunable_params"], - ), + ("0.0", self.num_params), + ("1.0", self.num_params - self.num_prunable_params), ] ), - baseline_measurement_key="0.0", - smoothing_type=PruningNodeSeriesSmoothingType.none, - invert_sensitivity=False, + baseline_key="0.0", + num_params=self.num_params, + increasing=False, ) - self._flops = PruningNodeSeries( - measurements=OrderedDict([("0.0", self._analysis["flops"]), ("1.0", 0.0)]) - if self._analysis["flops"] - else None, - baseline_measurement_key="0.0", - smoothing_type=PruningNodeSeriesSmoothingType.none, - invert_sensitivity=True, + self.series_flops = _PruningNodeSeries( + sparsity_measurements=( + OrderedDict([("0.0", self.num_flops), ("1.0", 0.0)]) + if self.num_flops + else None + ), + baseline_key="0.0", + num_params=self.num_params, + increasing=False, ) - self._performance = PruningNodeSeries( - measurements=self._perf_analysis["measurements"] - if self._perf_analysis - else None, - baseline_measurement_key=self._perf_analysis["baseline_measurement_key"] - if self._perf_analysis - else None, - smoothing_type=PruningNodeSeriesSmoothingType.minimum, - invert_sensitivity=True, + self.series_perf = _PruningNodeSeries( + sparsity_measurements=( + self.analysis_perf["measurements"] if self.analysis_perf else None + ), + baseline_key=( + self.analysis_perf["baseline_measurement_key"] + if self.analysis_perf + else None + ), + num_params=self.num_params, + increasing=False, ) - self._loss = PruningNodeSeries( - measurements=self._loss_analysis["measurements"] - if self._loss_analysis - else None, - baseline_measurement_key=self._loss_analysis["baseline_measurement_key"] - if self._loss_analysis - else None, - smoothing_type=PruningNodeSeriesSmoothingType.maximum, - invert_sensitivity=False, + self.series_loss = _PruningNodeSeries( + sparsity_measurements=( + self.analysis_loss["measurements"] if self.analysis_loss else None + ), + baseline_key=( + self.analysis_loss["baseline_measurement_key"] + if self.analysis_loss + else None + ), + num_params=self.num_params, + increasing=True, ) - self._loss_estimated = PruningNodeSeries( - OrderedDict( + self.series_loss_est = _PruningNodeSeries( + sparsity_measurements=OrderedDict( [ ("0.0", 0.0), - ("1.0", self._analysis["prunable_equation_sensitivity"]), + ("1.0", self.analysis["prunable_equation_sensitivity"]), ] ), - baseline_measurement_key="0.0", - smoothing_type=PruningNodeSeriesSmoothingType.none, - invert_sensitivity=False, + baseline_key="0.0", + num_params=self.num_params, + increasing=True, ) @property - def 
node_id(self) -> str: - """ - :return: id of the node the evaluator is created for - """ - return self._node_id - - @property - def prunable_params(self) -> Union[int, None]: - """ - :return: number of prunable params in the node - """ - return self._analysis["prunable_params"] - - @property - def params(self) -> PruningNodeSeries: + def available_series_perf(self) -> _PruningNodeSeries: """ - :return: the params pruning series for the node - """ - return self._params - - @property - def flops(self) -> PruningNodeSeries: - """ - :return: the flops pruning series for the node - """ - return self._flops - - @property - def performance(self) -> PruningNodeSeries: - """ - :return: the performance timings pruning series for the node - """ - return self._performance - - @property - def performance_metric(self) -> PruningNodeSeries: - """ - :return: the available performance metric, + :return: the available performance series, falls back on flops if perf sensitivity is not available """ - return self.performance if self._perf_analysis is not None else self.flops + return self.series_perf if self.analysis_perf is not None else self.series_flops @property - def loss(self) -> PruningNodeSeries: + def available_series_loss(self) -> _PruningNodeSeries: """ - :return: the loss measurements pruning series for the node - """ - return self._loss - - @property - def loss_estimated(self) -> PruningNodeSeries: - """ - :return: the estimated loss measurements pruning series for the node - """ - return self._loss_estimated - - @property - def loss_metric(self) -> PruningNodeSeries: - """ - :return: the available loss metric, + :return: the available loss series, falls back on estimated loss if loss sensitivity is not available """ - return self.loss if self._loss_analysis is not None else self.loss_estimated + return ( + self.series_loss if self.analysis_loss is not None else self.series_loss_est + ) @property def structurally_pruned(self) -> bool: @@ -665,12 +578,12 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    False otherwise """ attributes = ( - self._analysis["attributes"] if "attributes" in self._analysis else None + self.analysis["attributes"] if "attributes" in self.analysis else None ) return attributes and "group" in attributes and attributes["group"] > 1 -
    [docs] def eval_dict( + def eval_dict( self, sparsity: Union[float, None], baseline_sparsity: Union[float, None], @@ -678,11 +591,11 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    ) -> Dict[str, Any]: sensitivity_sparsities = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9, 0.95, 0.99] perf_sensitivities = [ - (sparsity, self.performance_metric.sparse_sensitivity(sparsity)) + (sparsity, self.available_series_perf.estimated_sensitivity(sparsity)) for sparsity in sensitivity_sparsities ] loss_sensitivities = [ - (sparsity, self.loss_metric.sparse_sensitivity(sparsity)) + (sparsity, self.available_series_loss.estimated_sensitivity(sparsity)) for sparsity in sensitivity_sparsities ] @@ -693,24 +606,24 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    "perf_sensitivities": perf_sensitivities, "loss_sensitivities": loss_sensitivities, "est_recovery": self.recovery(sparsity, baseline_sparsity), - "est_loss_sensitivity": self.loss_metric.sparse_sensitivity( + "est_loss_sensitivity": self.available_series_loss.estimated_sensitivity( PruningModelEvaluator.EVAL_SENSITIVITY_SPARSITY, ), - "est_perf_sensitivity": self.performance_metric.sparse_sensitivity( + "est_perf_sensitivity": self.available_series_perf.estimated_sensitivity( PruningModelEvaluator.EVAL_SENSITIVITY_SPARSITY, ), - "est_time": self.performance.sparse(sparsity), - "est_time_baseline": self.performance.baseline, - "est_time_gain": self.performance.sparse_gain(sparsity), - "params_baseline": self.params.baseline, - "params": self.params.sparse(sparsity), - "compression": self.params.sparse_gain(sparsity), - "flops_baseline": self.flops.baseline, - "flops": self.flops.sparse(sparsity), - "flops_gain": self.flops.sparse_gain(sparsity), - }
    - -
    [docs] def recovery( + "est_time": self.series_perf.estimated_value(sparsity), + "est_time_baseline": self.series_perf.value_baseline, + "est_time_gain": self.series_perf.estimated_gain(sparsity), + "params_baseline": self.series_params.value_baseline, + "params": self.series_params.estimated_value(sparsity), + "compression": self.series_params.estimated_gain(sparsity), + "flops_baseline": self.series_flops.value_baseline, + "flops": self.series_flops.estimated_value(sparsity), + "flops_gain": self.series_flops.estimated_gain(sparsity), + } + + def recovery( self, sparsity: Union[float, None], baseline_sparsity: Union[float, None], @@ -721,8 +634,9 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    :return: the estimated confidence of recovery for the given sparsity as compared to the baseline """ - baseline = self.loss_metric.sparse_sensitivity(baseline_sparsity) - estimated = self.loss_metric.sparse_sensitivity(sparsity) + + baseline = self.available_series_loss.estimated_sensitivity(baseline_sparsity) + estimated = self.available_series_loss.estimated_sensitivity(sparsity) if baseline == estimated or not sparsity: # baseline equals estimated or layer is not pruned @@ -740,14 +654,14 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    # less than 1.0 gives a worse chance of recovery # greater than 1.0 gives a better chance of recovery - return (baseline - estimated) / baseline + 1.0
    + return (baseline - estimated) / baseline + 1.0 -
    [docs] def sparse_costs( + def optimization_costs( self, balance_perf_loss: float, - perf_rescaler: ValueRescaler, - loss_rescaler: ValueRescaler, - ) -> List[Tuple[str, float, Union[float, None]]]: + perf_rescaler: _PruningPointRescaler, + loss_rescaler: _PruningPointRescaler, + ) -> List[_PruningNodeSeriesValue]: """ :param balance_perf_loss: the weight [0.0, 1.0] for balancing perf vs loss; 0.0 for all performance, 1.0 for all loss @@ -758,38 +672,22 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    :return: a list of tuples containing the sparsities from 0% to 99% and their associated cost for pruning the node to that sparsity """ - perfs = self.performance_metric.sparse_sensitivities(True) - losses = self.loss_metric.sparse_sensitivities(True) + loss_costs = self.available_series_loss.costs(loss_rescaler, use_max=False) + perf_costs = self.available_series_perf.costs( + perf_rescaler, + use_max=self.available_series_perf == self.series_flops, + ) costs = [] - for ((sparsity, perf), (_, loss)) in zip(perfs, losses): - perf = ( - perf_rescaler.rescale(perf) - if perf is not None and perf_rescaler - else perf - ) - loss = ( - loss_rescaler.rescale(loss) - if loss is not None and loss_rescaler - else loss - ) - - if balance_perf_loss <= 0.0: - # all performance - cost = perf - elif balance_perf_loss >= 1.0: - # all loss - cost = loss - else: - cost = ( - balance_perf_loss * loss + (1.0 - balance_perf_loss) * perf - if loss is not None and perf is not None - else None + for ((sparsity, loss_cost), (_, perf_cost)) in zip(loss_costs, perf_costs): + cost = balance_perf_loss * loss_cost + (1.0 - balance_perf_loss) * perf_cost + costs.append( + _PruningNodeSeriesValue( + round(sparsity * self.num_params), sparsity, cost, self.node_id ) + ) - costs.append((self.node_id, sparsity, cost)) - - return costs
    + return costs @staticmethod def _extract_node_analysis(node_id: str, model_analysis: Dict) -> Dict: @@ -829,7 +727,7 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    if op["id"] == node_id: analysis = op - return analysis
    + return analysis class _PruningNodeSetting(object): @@ -850,7 +748,7 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    :param loss_analysis: loss analysis of the model, if any """ - MAX_NODE_SPARSITY = 0.975 + MAX_NODE_SPARSITY = 0.95 EVAL_SENSITIVITY_SPARSITY = 0.95 def __init__( @@ -873,35 +771,31 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    else None ) - self._nodes = [] # type: List[PruningNodeEvaluator] + self._nodes = [] # type: List[_PruningNodeEvaluator] self._nodes_settings = {} # type: Dict[str, _PruningNodeSetting] - self._perf_rescaler = ValueRescaler() - self._loss_rescaler = ValueRescaler() + self._perf_rescaler = _PruningPointRescaler() + self._loss_rescaler = _PruningPointRescaler() for node in model_analysis["nodes"]: if not node["prunable"]: continue - pruning_node = PruningNodeEvaluator( + pruning_node = _PruningNodeEvaluator( node["id"], model_analysis, perf_analysis, loss_analysis ) self._nodes.append(pruning_node) self._nodes_settings[pruning_node.node_id] = _PruningNodeSetting() - if pruning_node.performance_metric.measurements: - self._perf_rescaler.add_rescale_point( - [ - pruning_node.performance_metric.sparse_sensitivity(0.0, True), - pruning_node.performance_metric.sparse_sensitivity(0.95, True), - ] + if pruning_node.available_series_perf.data: + self._perf_rescaler.add_rescale_series( + pruning_node.available_series_perf.value_optimized_min, + pruning_node.available_series_perf.value_optimized_max, ) - if pruning_node.loss_metric.measurements: - self._loss_rescaler.add_rescale_point( - [ - pruning_node.loss_metric.sparse_sensitivity(0.0, True), - pruning_node.loss_metric.sparse_sensitivity(0.95, True), - ] + if pruning_node.available_series_loss.data: + self._loss_rescaler.add_rescale_series( + pruning_node.available_series_loss.value_optimized_min, + pruning_node.available_series_loss.value_optimized_max, )
    [docs] def eval_baseline(self, baseline_sparsity: float): @@ -911,12 +805,8 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    :param baseline_sparsity: the baseline_sparsity to use and evaluate with """ - node_sparsities = PruningModelEvaluator._optimize_nodes_sparsity( - self._nodes, - baseline_sparsity, - balance_perf_loss=1.0, - perf_rescaler=self._perf_rescaler, - loss_rescaler=self._loss_rescaler, + node_sparsities = self._get_nodes_optimized_sparsities( + baseline_sparsity, balance_perf_loss=1.0, settings=None ) for node_id, sparsity in node_sparsities.items(): @@ -929,40 +819,12 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    :param settings: the pruning settings to use and evaluate with """ - node_sparsities = PruningModelEvaluator._optimize_nodes_sparsity( - self._nodes, - settings.sparsity, - balance_perf_loss=settings.balance_perf_loss, - perf_rescaler=self._perf_rescaler, - loss_rescaler=self._loss_rescaler, + node_sparsities = self._get_nodes_optimized_sparsities( + settings.sparsity, settings.balance_perf_loss, settings ) for node in self._nodes: - sparsity = node_sparsities[node.node_id] - est_perf_gain = node.performance_metric.sparse_gain(sparsity) - est_recovery = node.recovery( - sparsity, self._nodes_settings[node.node_id].baseline_sparsity - ) - - if sparsity is None or ( - ( - settings.filter_min_sparsity - and sparsity < settings.filter_min_sparsity - ) - or ( - settings.filter_min_perf_gain - and est_perf_gain is not None - and est_perf_gain < settings.filter_min_perf_gain - ) - or ( - settings.filter_min_recovery - and est_recovery is not None - and est_recovery < settings.filter_min_recovery - ) - ): - sparsity = None - - self._nodes_settings[node.node_id].sparsity = sparsity + self._nodes_settings[node.node_id].sparsity = node_sparsities[node.node_id] self._nodes_settings[node.node_id].overridden = False
    [docs] def apply_node_overrides(self, node_overrides: List[Dict[str, Any]]): @@ -1051,12 +913,12 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    model_values = { "est_recovery": numpy.average(recoveries).item() if recoveries else None, - "est_loss_sensitivity": numpy.average(loss_sensitivities).item() - if loss_sensitivities - else None, - "est_perf_sensitivity": numpy.average(perf_sensitivities).item() - if perf_sensitivities - else None, + "est_loss_sensitivity": ( + numpy.average(loss_sensitivities).item() if loss_sensitivities else None + ), + "est_perf_sensitivity": ( + numpy.average(perf_sensitivities).item() if perf_sensitivities else None + ), "est_time": est_time, "est_time_baseline": self._baseline_time, "est_time_gain": est_time_gain, @@ -1070,62 +932,119 @@

    Source code for sparsify.blueprints.utils.projects_optimizations_pruning

    return node_values, model_values
    - @staticmethod - def _optimize_nodes_sparsity( - nodes: List[PruningNodeEvaluator], + def _get_nodes_optimized_sparsities( + self, sparsity: float, balance_perf_loss: float, - perf_rescaler: ValueRescaler, - loss_rescaler: ValueRescaler, - ) -> Dict[str, float]: - sparsities = {} - nodes_costs = {} - costs = [] + settings: Union[None, PruningSettings], + ) -> Dict[str, Union[float, None]]: + sparsities = {node.node_id: None for node in self._nodes} + nodes_costs = self._optimize_sparsity_get_costs(balance_perf_loss) - for index, node in enumerate(nodes): - sparsities[node.node_id] = None + if not nodes_costs: + return sparsities - if index == 0 or node.structurally_pruned: - # skip the first node in a graph since this is almost always ' - # one of the most sensitive for loss. - # additionally skip any structurally pruned nodes (group convolutions) - # since those already have removed connections - continue + self._optimize_sparsity_update_from_costs(sparsities, nodes_costs, sparsity) + self._optimize_sparsity_update_from_restrictions(sparsities, settings) - costs = node.sparse_costs(balance_perf_loss, perf_rescaler, loss_rescaler) + return sparsities - if costs and costs[0][2] is not None: - nodes_costs[node.node_id] = costs + def _optimize_sparsity_get_costs( + self, balance_perf_loss: float + ) -> List[_PruningNodeSeriesValue]: + nodes_costs = [] - if not nodes_costs: - return sparsities + for index, node in enumerate(self._nodes): + costs = node.optimization_costs( + balance_perf_loss, + self._perf_rescaler, + self._loss_rescaler, + ) + + # make sure we have sensitivity data for the node to add for consideration + if costs and costs[-1].value is not None: + nodes_costs.extend(costs) - nodes_costs_indices = {node_id: 0 for node_id in nodes_costs.keys()} - available_steps = len(nodes_costs) * len(costs) - num_optim_steps = round(available_steps * sparsity) + return nodes_costs - for step in range(num_optim_steps): - smallest_id = None - smallest_cost = None + def _optimize_sparsity_update_from_costs( + self, + sparsities: Dict[str, Union[float, None]], + costs: List[_PruningNodeSeriesValue], + sparsity: float, + ): + # all costs are assumed to be on the same scale across layers, + # normalized, and always increasing. 
+ # therefore we can simply sort by the values to get the desired sparsity dist + costs.sort(key=lambda c: c.value) + total_params = sum([node.num_params for node in self._nodes]) + target_sparse_params = sparsity * total_params + sparse_params = defaultdict(lambda: 0) + + for index, cost in enumerate(costs): + sparse_params[cost.node_id] = cost.sparse_params + current_sparse_params = sum(sparse_params.values()) + + if current_sparse_params > target_sparse_params: + break - for node_id, cost_index in nodes_costs_indices.items(): - _, cost_sparsity, cost = nodes_costs[node_id][cost_index + 1] + # if we're not above our sparse param target, set the sparsity for the node + sparsities[cost.node_id] = cost.sparsity - if cost_sparsity < PruningModelEvaluator.MAX_NODE_SPARSITY and ( - smallest_cost is None or cost < smallest_cost - ): - smallest_id = node_id - smallest_cost = cost + def _optimize_sparsity_update_from_restrictions( + self, + sparsities: Dict[str, Union[float, None]], + settings: Union[None, PruningSettings], + ): + for index, node in enumerate(self._nodes): + node_id = node.node_id + sparsity = sparsities[node_id] - if smallest_id is None: - break + if sparsity is None: + continue + + # clip the max sparsity for everything + if sparsity > PruningModelEvaluator.MAX_NODE_SPARSITY: + sparsities[node_id] = PruningModelEvaluator.MAX_NODE_SPARSITY + + # if there aren't any pruning settings provided, then don't filter + # the desired sparsities + if settings is None: + continue - nodes_costs_indices[smallest_id] += 1 + if index == 0 or node.structurally_pruned: + # skip the first node in a graph since this is almost always ' + # one of the most sensitive for loss. + # additionally skip any structurally pruned nodes (group convolutions) + # since those already have removed connections + sparsities[node.node_id] = None + continue - for node_id, cost_index in nodes_costs_indices.items(): - sparsities[node_id] = nodes_costs[node_id][cost_index][1] + # if the desired sparsity is too low for any of the filters metrics + # (minimum sparsity, minimum perf gain, minimum recovery) + # then set to None so we don't prune the layer that didn't reach + # a high enough sparsity for the desired effects + baseline_sparsity = self._nodes_settings[node_id].baseline_sparsity + est_perf_gain = node.available_series_perf.estimated_gain(sparsity) + est_recovery = node.recovery(sparsity, baseline_sparsity) - return sparsities
    + if ( + ( + settings.filter_min_sparsity + and sparsities[node.node_id] < settings.filter_min_sparsity + ) + or ( + settings.filter_min_perf_gain + and est_perf_gain is not None + and est_perf_gain < settings.filter_min_perf_gain + ) + or ( + settings.filter_min_recovery + and est_recovery is not None + and est_recovery < settings.filter_min_recovery + ) + ): + sparsities[node_id] = None
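The comment in `_optimize_sparsity_update_from_costs` above carries the key idea: per-node costs are assumed to be normalized to a shared scale and monotonically increasing, so one global sort over all (node, sparsity) steps yields the sparsity distribution. A minimal, self-contained sketch of that allocation follows; the names and data structures are illustrative, not the module's API:

```python
from collections import defaultdict
from typing import Dict, List, NamedTuple, Optional


class Cost(NamedTuple):
    node_id: str        # prunable node the step belongs to
    sparsity: float     # candidate sparsity level for that node
    sparse_params: int  # params removed at that level
    value: float        # normalized perf/loss cost of pruning to that level


def allocate_sparsity(
    costs: List[Cost], num_params: Dict[str, int], target: float
) -> Dict[str, Optional[float]]:
    """Greedily accept the cheapest steps until the target fraction of all
    parameters is sparse; nodes that never get a step remain at None."""
    sparsities: Dict[str, Optional[float]] = {node: None for node in num_params}
    target_sparse = target * sum(num_params.values())
    sparse_by_node: Dict[str, int] = defaultdict(int)

    for cost in sorted(costs, key=lambda c: c.value):
        sparse_by_node[cost.node_id] = cost.sparse_params
        if sum(sparse_by_node.values()) > target_sparse:
            break
        sparsities[cost.node_id] = cost.sparsity

    return sparsities
```

Restrictions such as MAX_NODE_SPARSITY and the filter_min_* settings are then applied to the resulting dictionary afterwards, as `_optimize_sparsity_update_from_restrictions` does above.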
    diff --git a/sparsify/_modules/sparsify/log.html b/sparsify/_modules/sparsify/log.html index 5612d2dfa4b..48900613306 100644 --- a/sparsify/_modules/sparsify/log.html +++ b/sparsify/_modules/sparsify/log.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/models/base.html b/sparsify/_modules/sparsify/models/base.html index 11b20f45689..7b18b70800b 100644 --- a/sparsify/_modules/sparsify/models/base.html +++ b/sparsify/_modules/sparsify/models/base.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    @@ -265,7 +266,13 @@

    Source code for sparsify.models.base

     
         class Meta(object):
             database = database
    -        storage = storage
    + storage=storage + +
    [docs] def refresh(self): + """ + Refresh the data for the model instance from the DB + """ + return type(self).get_by_id(self._pk)
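A short usage note on the new refresh() helper: because it reloads the row by primary key, it returns a fresh model instance rather than mutating the current one, so callers are expected to rebind the result (the reworked worker manager later in this diff does exactly that). A hypothetical sketch:

```python
from sparsify.models import Job  # peewee model shown later in this diff

job = Job.get_by_id(job_id)  # job_id: primary key of an existing row (hypothetical)
# ... another thread may update the row's status in the meantime ...
job = job.refresh()          # rebind: refresh() returns a new instance read from the DB
```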
    [docs]class BaseCreatedModifiedModel(BaseModel): diff --git a/sparsify/_modules/sparsify/models/jobs.html b/sparsify/_modules/sparsify/models/jobs.html index ca01d5fe8ae..e3b8c3fab24 100644 --- a/sparsify/_modules/sparsify/models/jobs.html +++ b/sparsify/_modules/sparsify/models/jobs.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    @@ -197,7 +198,7 @@

    Source code for sparsify.models.jobs

     import uuid
     from enum import Enum
     
    -from peewee import CharField, DateTimeField, Field, TextField
    +from peewee import BooleanField, CharField, DateTimeField, Field, TextField
     from playhouse.sqlite_ext import JSONField
     
     from sparsify.models.base import BaseModel
    @@ -247,6 +248,7 @@ 

    Source code for sparsify.models.jobs

         modified = DateTimeField(default=datetime.datetime.now)
         type_ = CharField()
         worker_args = JSONField(null=True, default=None)
    +    worker_ack = BooleanField(default=False)
         status = JobStatusField(default=JobStatus.pending)
         progress = JSONField(null=True, default=None)
         error = TextField(null=True)
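The new worker_ack field lets the worker manager claim each pending job exactly once across refreshes. A rough sketch of the intended claim-and-acknowledge pattern, mirroring the JobWorkerManager.refresh code further down in this diff (note that peewee compound filters are combined with `&` rather than Python's `and`):

```python
from sparsify.models import Job, JobStatus

# select pending jobs that no worker has acknowledged yet, oldest first
pending = (
    Job.select()
    .where((Job.status == JobStatus.pending) & (Job.worker_ack == False))  # noqa: E712
    .order_by(Job.created)
)
claimed_ids = [job.job_id for job in pending]

# ... submit each claimed job to the background pool here ...

# mark the claimed jobs as acknowledged so the next refresh skips them
Job.update(worker_ack=True).where(Job.job_id.in_(claimed_ids)).execute()
```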
    diff --git a/sparsify/_modules/sparsify/models/projects.html b/sparsify/_modules/sparsify/models/projects.html
    index 8d4f82c23a5..202ae300edb 100644
    --- a/sparsify/_modules/sparsify/models/projects.html
    +++ b/sparsify/_modules/sparsify/models/projects.html
    @@ -104,10 +104,11 @@
     
    -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/models/projects_benchmark.html b/sparsify/_modules/sparsify/models/projects_benchmark.html index 39c04fca8bd..84de54753ea 100644 --- a/sparsify/_modules/sparsify/models/projects_benchmark.html +++ b/sparsify/_modules/sparsify/models/projects_benchmark.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/models/projects_data.html b/sparsify/_modules/sparsify/models/projects_data.html index 7449add7bfc..b00adec9331 100644 --- a/sparsify/_modules/sparsify/models/projects_data.html +++ b/sparsify/_modules/sparsify/models/projects_data.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/models/projects_model.html b/sparsify/_modules/sparsify/models/projects_model.html index c04483d29ec..135a108b741 100644 --- a/sparsify/_modules/sparsify/models/projects_model.html +++ b/sparsify/_modules/sparsify/models/projects_model.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/models/projects_optimizations.html b/sparsify/_modules/sparsify/models/projects_optimizations.html index 5e9963da907..cb36247b05b 100644 --- a/sparsify/_modules/sparsify/models/projects_optimizations.html +++ b/sparsify/_modules/sparsify/models/projects_optimizations.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/models/projects_profiles.html b/sparsify/_modules/sparsify/models/projects_profiles.html index f979679b87c..515aec45168 100644 --- a/sparsify/_modules/sparsify/models/projects_profiles.html +++ b/sparsify/_modules/sparsify/models/projects_profiles.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/models/utils.html b/sparsify/_modules/sparsify/models/utils.html index 3e1a4aed630..8fb24c0990e 100644 --- a/sparsify/_modules/sparsify/models/utils.html +++ b/sparsify/_modules/sparsify/models/utils.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/schemas/errors.html b/sparsify/_modules/sparsify/schemas/errors.html index da9126a4420..d7af42bca68 100644 --- a/sparsify/_modules/sparsify/schemas/errors.html +++ b/sparsify/_modules/sparsify/schemas/errors.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/schemas/helpers.html b/sparsify/_modules/sparsify/schemas/helpers.html index 245dd98149b..d7c797fa882 100644 --- a/sparsify/_modules/sparsify/schemas/helpers.html +++ b/sparsify/_modules/sparsify/schemas/helpers.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/schemas/jobs.html b/sparsify/_modules/sparsify/schemas/jobs.html index c58271bf111..7b690582215 100644 --- a/sparsify/_modules/sparsify/schemas/jobs.html +++ b/sparsify/_modules/sparsify/schemas/jobs.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/schemas/model_repo.html b/sparsify/_modules/sparsify/schemas/model_repo.html index d16d4041168..ca210f5a83f 100644 --- a/sparsify/_modules/sparsify/schemas/model_repo.html +++ b/sparsify/_modules/sparsify/schemas/model_repo.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/schemas/projects.html b/sparsify/_modules/sparsify/schemas/projects.html index 54d1d73d819..91a2e1fdcef 100644 --- a/sparsify/_modules/sparsify/schemas/projects.html +++ b/sparsify/_modules/sparsify/schemas/projects.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/schemas/projects_benchmarks.html b/sparsify/_modules/sparsify/schemas/projects_benchmarks.html index 8eca52664e9..496a90f7202 100644 --- a/sparsify/_modules/sparsify/schemas/projects_benchmarks.html +++ b/sparsify/_modules/sparsify/schemas/projects_benchmarks.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/schemas/projects_data.html b/sparsify/_modules/sparsify/schemas/projects_data.html index 7da5ae068d5..31cb38429fe 100644 --- a/sparsify/_modules/sparsify/schemas/projects_data.html +++ b/sparsify/_modules/sparsify/schemas/projects_data.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/schemas/projects_model.html b/sparsify/_modules/sparsify/schemas/projects_model.html index d22608f6d7a..cedd303d456 100644 --- a/sparsify/_modules/sparsify/schemas/projects_model.html +++ b/sparsify/_modules/sparsify/schemas/projects_model.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/schemas/projects_optimizations.html b/sparsify/_modules/sparsify/schemas/projects_optimizations.html index 6eb7f668a83..248111a8960 100644 --- a/sparsify/_modules/sparsify/schemas/projects_optimizations.html +++ b/sparsify/_modules/sparsify/schemas/projects_optimizations.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/schemas/projects_profiles.html b/sparsify/_modules/sparsify/schemas/projects_profiles.html index 22f501bb1f4..46eea9c08c6 100644 --- a/sparsify/_modules/sparsify/schemas/projects_profiles.html +++ b/sparsify/_modules/sparsify/schemas/projects_profiles.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/schemas/system.html b/sparsify/_modules/sparsify/schemas/system.html index 6b47432a5d1..8e0415a307c 100644 --- a/sparsify/_modules/sparsify/schemas/system.html +++ b/sparsify/_modules/sparsify/schemas/system.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/_modules/sparsify/utils/system.html b/sparsify/_modules/sparsify/utils/system.html index 10d95ef9ce8..904afc67cd7 100644 --- a/sparsify/_modules/sparsify/utils/system.html +++ b/sparsify/_modules/sparsify/utils/system.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    @@ -267,8 +268,12 @@

    Source code for sparsify.utils.system

         :return: a dictionary containing the version information of sparseml,
             deepsparse, onnxruntime, and onnx if installed.
         """
    -    sparseml_version = _get_package_version("sparseml")
    -    deepsparse_version = _get_package_version("deepsparse")
    +    sparseml_version = _get_package_version("sparseml") or _get_package_version(
    +        "sparseml-nightly"
    +    )
    +    deepsparse_version = _get_package_version("deepsparse") or _get_package_version(
    +        "deepsparse-nightly"
    +    )
         onnxruntime_version = _get_package_version("onnxruntime")
         onnx_version = _get_package_version("onnx")
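`_get_package_version` is referenced but not shown in this hunk; a plausible sketch of such a helper, assuming it returns None when a package is not installed, which is what makes the `or` fallback to the `-nightly` package names work:

```python
from typing import Optional

import pkg_resources


def _get_package_version(name: str) -> Optional[str]:
    # return the installed version string, or None so callers can chain fallbacks with `or`
    try:
        return pkg_resources.get_distribution(name).version
    except pkg_resources.DistributionNotFound:
        return None
```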
     
    diff --git a/sparsify/_modules/sparsify/workers/base.html b/sparsify/_modules/sparsify/workers/base.html
    index 36a407eb1f4..f4a41aa305c 100644
    --- a/sparsify/_modules/sparsify/workers/base.html
    +++ b/sparsify/_modules/sparsify/workers/base.html
    @@ -104,10 +104,11 @@
     
    -

    Help and Support

    +

    Help

    @@ -196,10 +197,10 @@

    Source code for sparsify.workers.base

     from typing import Any, Dict, Iterator
     
     
    -__all__ = ["JobWorkerRegistryHolder", "BaseJobWorker"]
    +__all__ = ["JobWorkerRegistry", "JobWorker"]
     
     
    -
    [docs]class JobWorkerRegistryHolder(type): +
[docs]class JobWorkerRegistry(type): """ Registry class for handling and storing JobWorker subclass instances. All subclasses are added to the REGISTRY property @@ -211,10 +212,17 @@

    Source code for sparsify.workers.base

             new_cls = type.__new__(cls, name, bases, attrs)
             cls.REGISTRY[new_cls.__name__] = new_cls
     
    -        return new_cls
    + return new_cls +
    [docs] @staticmethod + def create_worker(job): + cls = JobWorkerRegistry.REGISTRY[job.type_] + worker = cls(job.job_id, job.project_id, **job.worker_args) -
    [docs]class BaseJobWorker(object, metaclass=JobWorkerRegistryHolder): + return worker
    + + +
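A hedged sketch of how the renamed registry is meant to be used: any subclass of JobWorker is recorded in JobWorkerRegistry.REGISTRY under its class name, a Job row stores that same name in type_ (via get_type) plus the output of format_args in worker_args, and create_worker rebuilds the worker from the row. The EchoWorker below is hypothetical:

```python
from typing import Any, Dict, Iterator

from sparsify.workers.base import JobWorker, JobWorkerRegistry


class EchoWorker(JobWorker):
    # subclassing JobWorker registers this class under "EchoWorker" via the metaclass
    @classmethod
    def format_args(cls, message: str, **kwargs) -> Dict[str, Any]:
        return {"message": message}

    def __init__(self, job_id: str, project_id: str, message: str):
        super().__init__(job_id, project_id)
        self._message = message

    def run(self) -> Iterator[Dict[str, Any]]:
        yield {"message": self._message, "progress": 1.0}


# a Job row created with type_=EchoWorker.get_type() and
# worker_args=EchoWorker.format_args(message="hi") can later be revived with:
# worker = JobWorkerRegistry.create_worker(job)
```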
    [docs]class JobWorker(object, metaclass=JobWorkerRegistry): """ The base job worker instance all job workers must extend @@ -222,14 +230,14 @@

    Source code for sparsify.workers.base

         :param project_id: the id of the project the job belongs to
         """
     
    -
    [docs] @classmethod +
    [docs] @classmethod def get_type(cls) -> str: """ :return: the type of job worker """ return cls.__name__
    -
    [docs] @classmethod +
    [docs] @classmethod @abstractmethod def format_args(cls, **kwargs) -> Dict[str, Any]: """ @@ -259,7 +267,7 @@

    Source code for sparsify.workers.base

             """
             return self._project_id
     
    -
    [docs] @abstractmethod +
    [docs] @abstractmethod def run(self) -> Iterator[Dict[str, Any]]: """ Perform the work for the job. diff --git a/sparsify/_modules/sparsify/workers/base_manager.html b/sparsify/_modules/sparsify/workers/base_manager.html deleted file mode 100644 index dcae6baf591..00000000000 --- a/sparsify/_modules/sparsify/workers/base_manager.html +++ /dev/null @@ -1,445 +0,0 @@ - - - - - - - - - - sparsify.workers.base_manager — Sparsify 0.1.0 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - -
    - - - - - -
    - -
    - - - - - - - - - - - - - - - - - - - -
    - -
      - -
    • »
    • - -
    • Module code »
    • - -
    • sparsify.workers.base_manager
    • - - -
    • - -
    • - -
    - - -
    -
    -
    -
    - -

    Source code for sparsify.workers.base_manager

    -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
    -#
    -# Licensed under the Apache License, Version 2.0 (the "License");
    -# you may not use this file except in compliance with the License.
    -# You may obtain a copy of the License at
    -#
    -#    http://www.apache.org/licenses/LICENSE-2.0
    -#
    -# Unless required by applicable law or agreed to in writing,
    -# software distributed under the License is distributed on an "AS IS" BASIS,
    -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -# See the License for the specific language governing permissions and
    -# limitations under the License.
    -
    -"""
    -Code related to managing jobs in the server
    -"""
    -
    -import logging
    -import threading
    -from typing import Union
    -
    -from sparseml.utils import Singleton
    -from sparsify.models import Job, JobStatus, database
    -from sparsify.workers.base import JobWorkerRegistryHolder
    -from sparsify.workers.base_wrapper import JobWorkerWrapper
    -
    -
    -__all__ = ["JobNotFoundError", "JobCancelationFailureError", "JobWorkerManager"]
    -
    -
    -_LOGGER = logging.getLogger(__name__)
    -
    -
    -
    [docs]class JobNotFoundError(Exception): - """ - Error raised if a job is not found in the database - """ - - def __init__(self, *args: object) -> None: - super().__init__(*args)
    - - -
    [docs]class JobCancelationFailureError(Exception): - """ - Error raised if a job could not be canceled - """ - - def __init__(self, *args: object) -> None: - super().__init__(*args)
    - - -
    [docs]class JobWorkerManager(object, metaclass=Singleton): - """ - Manager class for handling running job workers in the background. - Only one job worker can run at once. - Once one completes, the next oldest one marked as pending in the db is launched. - """ - - def __init__(self): - self._lock = threading.Lock() - self._current = None # type: Union[None, JobWorkerWrapper] - -
    [docs] def app_startup(self): - """ - Handle app startup to clear uncompleted state for jobs and begin running - """ - - # cancel any jobs that were left in an uncompleted state - with database.connection_context(): - Job.update(status=JobStatus.canceled).where(Job.status == JobStatus.started) - - self.refresh()
    - -
    [docs] def refresh(self): - """ - Refresh the available jobs. - If a new job is marked as pending and no current job is running, - will start the new job. - - Otherwise will exit out without doing anything and - subsequent jobs will be launched after the current one completes. - """ - refresh_thread = threading.Thread(target=self._refresh_worker) - refresh_thread.start()
    - -
    [docs] def cancel_job(self, job_id: str): - """ - Cancel a job with the given job_id so it won't be run. - Blocks until the job can be canceled. - - :param job_id: the job_id to cancel - :raise JobNotFoundError: if the job could not be found in the database - :raise JobCancelationFailureError: if the job could not be canceled - """ - _LOGGER.info("Canceling job with id {}".format(job_id)) - - with self._lock: - if self._current is not None and self._current.job_id == job_id: - self._current.cancel() - - return - - with database.connection_context(): - job = Job.get_or_none(Job.job_id == job_id) - - if job is None: - _LOGGER.error("Could not find job with id {}".format(job_id)) - - raise JobNotFoundError( - "Could not find job with id {}".format(job_id) - ) - - if ( - job.status == JobStatus.error - or job.status == JobStatus.completed - or job.status == JobStatus.canceled - ): - _LOGGER.error( - "Could not cancel job with status {}".format(job.status) - ) - - raise JobCancelationFailureError( - "Job with status {} cannot be canceled".format(job.status) - ) - - job.status = JobStatus.canceled - job.save()
    - - def _refresh_worker(self): - _LOGGER.info("refreshing JobWorkerManager state") - - with self._lock: - if ( - self._current is not None - and not self._current.completed - and not self._current.canceled - and not self._current.errored - ): - return - - self._current = JobWorkerManager._load_next_pending() - - if self._current is not None: - _LOGGER.info( - ( - "found pending job with job_id {} " - "and project_id {}, starting" - ).format( - self._current.worker.job_id, self._current.worker.project_id - ) - ) - self._current.start(self.refresh) - else: - _LOGGER.info("no pending jobs found") - - @staticmethod - def _load_next_pending() -> Union[None, JobWorkerWrapper]: - _LOGGER.debug("loading next pending job for JobWorkerManager") - err_count = 0 - - while err_count < 5: - try: - worker = JobWorkerManager._load_next_pending_helper() - - return worker - except Exception as err: - _LOGGER.error( - ( - "error while loading next pending job " - "for JobWorkerManager {}" - ).format(err) - ) - err_count += 1 - - @staticmethod - def _load_next_pending_helper() -> Union[None, JobWorkerWrapper]: - with database.connection_context(): - next_job = None # type: Union[None, Job] - query = ( - Job.select() - .where(Job.status == JobStatus.pending) - .order_by(Job.created) - .limit(1) - ) - - for job in query: - next_job = job - break - - if next_job is None: - return None - - try: - if next_job.type_ not in JobWorkerRegistryHolder.REGISTRY: - raise ValueError( - "Cannot find job of type {}".format(next_job.type_) - ) - - cls = JobWorkerRegistryHolder.REGISTRY[next_job.type_] - worker = cls(job.job_id, job.project_id, **job.worker_args) - wrapper = JobWorkerWrapper(worker) - - return wrapper - except Exception as err: - next_job.error = str(err) - next_job.status = JobStatus.error - next_job.save() - - raise err
    -
    - -
    - -
    -
    - -
    - -
    -

    - © Copyright 2021 - present / Neuralmagic, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). - -

    -
    - - - - Built with Sphinx using a - - theme - - provided by Read the Docs. - -
    -
    -
    - -
    - -
    - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/sparsify/_modules/sparsify/workers/base_wrapper.html b/sparsify/_modules/sparsify/workers/base_wrapper.html deleted file mode 100644 index 3992a473eec..00000000000 --- a/sparsify/_modules/sparsify/workers/base_wrapper.html +++ /dev/null @@ -1,541 +0,0 @@ - - - - - - - - - - sparsify.workers.base_wrapper — Sparsify 0.1.0 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - -
    - - - - - -
    - -
    - - - - - - - - - - - - - - - - - - - -
    - -
      - -
    • »
    • - -
    • Module code »
    • - -
    • sparsify.workers.base_wrapper
    • - - -
    • - -
    • - -
    - - -
    -
    -
    -
    - -

    Source code for sparsify.workers.base_wrapper

    -# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
    -#
    -# Licensed under the Apache License, Version 2.0 (the "License");
    -# you may not use this file except in compliance with the License.
    -# You may obtain a copy of the License at
    -#
    -#    http://www.apache.org/licenses/LICENSE-2.0
    -#
    -# Unless required by applicable law or agreed to in writing,
    -# software distributed under the License is distributed on an "AS IS" BASIS,
    -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -# See the License for the specific language governing permissions and
    -# limitations under the License.
    -
    -"""
    -Code related to wrappers for the job worker to handle running them
    -through the proper flow and update state to the manager and the dataabase.
    -"""
    -
    -import logging
    -import threading
    -import time
    -from typing import Any, Callable, Dict, Union
    -
    -from sparsify.models import Job, JobStatus, database
    -from sparsify.workers.base import BaseJobWorker
    -
    -
    -__all__ = ["JobCancelError", "JobWorkerWrapper"]
    -
    -
    -_LOGGER = logging.getLogger(__name__)
    -
    -
    -
    [docs]class JobCancelError(Exception): - """ - Error raised if a job was canceled - """ - - def __init__(self, *args: object): - super().__init__(*args)
    - - -
    [docs]class JobWorkerWrapper(object): - """ - The wrapper for a job worker to handle running an instance - through the proper flow and update state to the manager and the database. - - :param worker: the worker instance to run - """ - - def __init__(self, worker: BaseJobWorker): - self._worker = worker - self._done_callback = None # type: Union[Callable[[], None], None] - self._lock = threading.Lock() - - self._started = False - self._progress = None - self._progress_time = None - self._completed = False - self._canceling = False - self._canceled = False - self._errored = False - self._error = None - - @property - def job_id(self) -> str: - """ - :return: the job id - """ - return self._worker.job_id - - @property - def worker(self) -> BaseJobWorker: - """ - :return: the worker instance to run - """ - return self._worker - - @property - def started(self) -> bool: - """ - :return: True if start has been called, False otherwise - """ - with self._lock: - return self._started - - @property - def progress(self) -> Union[None, Dict[str, Any]]: - """ - :return: current progress, if any, for the running job worker - """ - with self._lock: - return self._progress - - @property - def completed(self) -> bool: - """ - :return: True if the job is completed, False otherwise - """ - with self._lock: - return self._completed - - @property - def canceling(self) -> bool: - """ - :return: True if the job is being canceled, False otherwise - """ - with self._lock: - return self._canceling - - @property - def canceled(self) -> bool: - """ - :return: True if the job is canceled, False otherwise - """ - with self._lock: - return self._canceled - - @property - def errored(self) -> bool: - """ - :return: True if the job has errored, False otherwise - """ - with self._lock: - return self._errored - - @property - def error(self) -> Union[str, None]: - """ - :return: The error, if any, encountered while running the job worker - """ - with self._lock: - return self._error - -
    [docs] def start(self, done_callback: Callable[[], None]): - """ - Start running the contained job worker in a separate thread - - :param done_callback: the callback to invoke once completed running - """ - _LOGGER.info( - "starting job worker for job_id {} and project_id {}".format( - self._worker.job_id, self._worker.project_id - ) - ) - assert done_callback is not None - - with self._lock: - if self._started: - raise RuntimeError("start can only be called once") - - self._started = True - self._done_callback = done_callback - worker_thread = threading.Thread(target=self._worker_thread) - worker_thread.start() - - _LOGGER.debug( - "started job worker for job_id {} and project_id {}".format( - self._worker.job_id, self._worker.project_id - ) - )
    - -
    [docs] def cancel(self): - """ - Cancel the running job. start must have been called first - """ - _LOGGER.info( - "canceling job worker for job_id {} and project_id {}".format( - self._worker.job_id, self._worker.project_id - ) - ) - - with self._lock: - if self._completed: - return - - self._canceling = True - - # freeze the caller thread until canceled, completed, or error - freeze = True - - while freeze: - # don't hammer the CPU with constant checks - time.sleep(0.01) - - with self._lock: - freeze = not (self._errored or self._canceled or self._completed) - - _LOGGER.debug( - "canceled job worker for job_id {} and project_id {}".format( - self._worker.job_id, self._worker.project_id - ) - )
    - - def _worker_thread(self): - _LOGGER.debug( - "job worker for job_id {} and project_id {} thead init".format( - self._worker.job_id, self._worker.project_id - ) - ) - - with database.connection_context(): - with self._lock: - job = Job.get(Job.job_id == self._worker.job_id) - self._report_started(job) - - canceled = False - error = None - - try: - # initial check to see if job was canceled before it started - if self._should_cancel(): - raise JobCancelError() - - for progress in self._worker.run(): - with self._lock: - if self._should_report_progress(): - self._report_progress(job, progress) - - if self._should_cancel(): - raise JobCancelError() - except JobCancelError: - canceled = True - - _LOGGER.debug( - "cancel job worker for job_id {} and project_id {} received".format( - self._worker.job_id, self._worker.project_id - ) - ) - except Exception as err: - _LOGGER.info( - ( - "job worker for job_id {} and project_id {} " - "encountered error: {}" - ).format(self._worker.job_id, self._worker.project_id, err) - ) - error = err - - with self._lock: - self._start_completed = True - - if canceled: - self._report_canceled(job) - _LOGGER.info( - "canceled job worker for job_id {} and project_id {}".format( - self._worker.job_id, self._worker.project_id - ) - ) - elif error is not None: - self._report_error(job, str(error)) - _LOGGER.info( - "errored job worker for job_id {} and project_id {}".format( - self._worker.job_id, self._worker.project_id - ) - ) - else: - self._report_completed(job) - _LOGGER.info( - "completed job worker for job_id {} and project_id {}".format( - self._worker.job_id, self._worker.project_id - ) - ) - - self._done_callback() - - def _report_started(self, job: Job): - self._started = True - job.status = JobStatus.started - job.save() - - def _should_report_progress(self) -> bool: - # let's not hammer the database, limit progress saves to 10 per second - return self._progress_time is None or time.time() - self._progress_time >= 0.1 - - def _report_progress(self, job: Job, progress: Dict[str, Any]): - self._progress = progress - self._progress_time = time.time() - job.progress = progress - job.save() - - def _report_completed(self, job: Job): - self._completed = True - job.status = JobStatus.completed - job.progress = None - job.save() - - def _should_cancel(self) -> bool: - if self._canceling: - return True - - return not threading.main_thread().is_alive() - - def _report_canceled(self, job: Job): - self._canceled = True - job.status = JobStatus.canceled - job.progress = None - job.save() - - def _report_error(self, job: Job, error: str): - self._errored = True - self._error = error - job.status = JobStatus.error - job.error = error - job.progress = None - job.save()
    -
    - -
    - -
    -
    - -
    - -
    -

    - © Copyright 2021 - present / Neuralmagic, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). - -

    -
    - - - - Built with Sphinx using a - - theme - - provided by Read the Docs. - -
    -
    -
    - -
    - -
    - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/sparsify/_modules/sparsify/workers/manager.html b/sparsify/_modules/sparsify/workers/manager.html new file mode 100644 index 00000000000..8d7922f1a3d --- /dev/null +++ b/sparsify/_modules/sparsify/workers/manager.html @@ -0,0 +1,554 @@ + + + + + + + + + + sparsify.workers.manager — Sparsify 0.1.0 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + + + +
    + + + + + +
    + +
    + + + + + + + + + + + + + + + + + + + +
    + +
      + +
    • »
    • + +
    • Module code »
    • + +
    • sparsify.workers.manager
    • + + +
    • + +
    • + +
    + + +
    +
    +
    +
    + +

    Source code for sparsify.workers.manager

    +# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
    +#
    +# Licensed under the Apache License, Version 2.0 (the "License");
    +# you may not use this file except in compliance with the License.
    +# You may obtain a copy of the License at
    +#
    +#    http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing,
    +# software distributed under the License is distributed on an "AS IS" BASIS,
    +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +# See the License for the specific language governing permissions and
    +# limitations under the License.
    +
    +"""
    +Code related to managing jobs in the server
    +"""
    +
    +import logging
    +import threading
    +import time
    +from concurrent.futures import ThreadPoolExecutor
    +from typing import Any, Dict, Generic, TypeVar
    +
    +from sparseml.utils import Singleton
    +from sparsify.models import Job, JobStatus, database
    +from sparsify.workers.base import JobWorker, JobWorkerRegistry
    +
    +
    +__all__ = ["JobNotFoundError", "JobCancelationFailureError", "JobWorkerManager"]
    +
    +
    +_LOGGER = logging.getLogger(__name__)
    +
    +
    +
    [docs]class JobNotFoundError(Exception): + """ + Error raised if a job is not found in the database + """ + + def __init__(self, *args: object) -> None: + super().__init__(*args)
    + + +
    [docs]class JobCancelationFailureError(Exception): + """ + Error raised if a job could not be canceled + """ + + def __init__(self, *args: object) -> None: + super().__init__(*args)
    + + +class JobCancelError(Exception): + """ + Error raised if a job was canceled + """ + + def __init__(self, *args: object): + super().__init__(*args) + + +_T = TypeVar("_T") + + +class _LockedVar(Generic[_T]): + def __init__(self, initial: _T): + self._lock = threading.Lock() + self._value = initial + + def get(self) -> _T: + with self._lock: + return self._value + + def set(self, value: _T): + with self._lock: + self._value = value + + +class _JobExecutionState(object): + def __init__(self): + self._db_canceled_check_time = None + self._db_progress_saved_time = None + + @property + def db_canceled_check_time(self) -> float: + return self._db_canceled_check_time + + @db_canceled_check_time.setter + def db_canceled_check_time(self, value: float): + self._db_canceled_check_time = value + + @property + def db_progress_saved_time(self) -> float: + return self._db_progress_saved_time + + @db_progress_saved_time.setter + def db_progress_saved_time(self, value: float): + self._db_progress_saved_time = value + + +
    [docs]class JobWorkerManager(object, metaclass=Singleton): + """ + Manager class for handling running job workers in the background. + Only one job worker can run at once. + Once one completes, the next oldest one marked as pending in the db is launched. + + :param max_workers: The maximum number of workers to allow the ThreadPoolExecutor + to work with in parallel + """ + + def __init__(self, max_workers: int = 1): + self._pool = ThreadPoolExecutor(max_workers=max_workers) + self._canceled = _LockedVar(False) + self._refresh_lock = threading.Lock() + +
    [docs] def start(self): + """ + Start the JobWorkerManager to begin processing any background jobs present + """ + _LOGGER.info("Starting JobWorkerManager") + self._cancel_pending_jobs() + self.refresh()
    + +
    [docs] def shutdown(self): + """ + Shutdown the JobWorkerManager to stop processing any background jobs + """ + _LOGGER.info("Canceling JobWorkerManager") + self._canceled.set(True) + self._pool.shutdown() + _LOGGER.info("Canceled JobWorkerManager")
    + +
[docs] @database.connection_context() + def refresh(self): + """ + Refresh the available jobs and put any pending ones since the last refresh + onto the ThreadPoolExecutor. + + If nothing new is pending, exits without doing anything; + previously submitted jobs continue running on the executor. + """ + # lock to make sure the queries are safe across threads and jobs are unique + # should be done with a DB transaction, but current setup with + # peewee and the connection pooling does not support transactions + with self._refresh_lock: + _LOGGER.info("Refreshing JobWorkerManager") + query = ( + Job.select() + .where((Job.status == JobStatus.pending) & (Job.worker_ack == False)) + .order_by(Job.created) + ) + job_ids = [job.job_id for job in query] + _LOGGER.info(f"Found {len(job_ids)} pending jobs, adding to threadpool") + + for job in query: + _LOGGER.debug(f"Adding job {job.job_id} to threadpool") + self._pool.submit(self._execute_job, str(job.job_id), self._canceled) + + _LOGGER.debug(f"Updating jobs in db to ack that worker received them") + Job.update(worker_ack=True).where(Job.job_id.in_(job_ids)).execute()
    + +
    [docs] @database.connection_context() + def cancel_job(self, job_id: str): + """ + Cancel a job with the given job_id so it won't be run. + + :param job_id: the job_id to cancel + :raise JobNotFoundError: if the job could not be found in the database + :raise JobCancelationFailureError: if the job could not be canceled + """ + _LOGGER.info("Canceling job {}".format(job_id)) + + try: + _LOGGER.debug(f"Getting job {job_id} from DB") + job = Job.get_or_none(Job.job_id == job_id) + + if job is None: + _LOGGER.error(f"Could not find job {job_id} to cancel") + raise JobNotFoundError() + + if ( + job.status == JobStatus.error + or job.status == JobStatus.completed + or job.status == JobStatus.canceled + ): + _LOGGER.error(f"Could not cancel job {job_id} with status {job.status}") + + raise JobCancelationFailureError( + "Job with status {} cannot be canceled".format(job.status) + ) + + self._update_job_canceled(job) + except (JobNotFoundError, JobCancelationFailureError) as passthrough_err: + raise passthrough_err + except Exception as err: + _LOGGER.warning(f"Error while canceling job {job_id} in db: {err}")
    + + @database.connection_context() + def _cancel_pending_jobs(self): + _LOGGER.debug("Canceling any pending jobs") + query = Job.update( + status=JobStatus.canceled, + error=( + "Job was left in a stranded state and did not complete on last run, " + "canceled on server startup" + ), + ).where(Job.status == JobStatus.started or Job.status == JobStatus.pending) + row_count = query.execute() + + if row_count > 0: + _LOGGER.info(f"Canceled {row_count} stranded jobs") + + @database.connection_context() + def _execute_job(self, job_id: str, canceled: _LockedVar[bool]): + _LOGGER.info(f"Starting job {job_id} in JobWorkerManager") + state = _JobExecutionState() + job = None + + try: + _LOGGER.debug(f"Getting job {job_id} from DB") + job = Job.get(Job.job_id == job_id) + + if self._check_cancel_job(job, canceled, state): + _LOGGER.debug( + f"Job {job_id} cancel requested before starting, canceling" + ) + raise JobCancelError() + + self._update_job_started(job) + + _LOGGER.debug(f"Creating worker for job {job_id}") + worker = JobWorkerRegistry.create_worker(job) # type: JobWorker + + _LOGGER.debug(f"Starting worker run for job {job_id}") + for progress in worker.run(): + if self._check_cancel_job(job, canceled, state): + _LOGGER.debug(f"Job {job_id} cancel requested, canceling") + raise JobCancelError() + + self._update_job_progress(job, state, progress) + + self._update_job_completed(job) + except JobCancelError: + if not job: + raise RuntimeError("job is None after JobCancelError") + + self._update_job_canceled(job) + _LOGGER.info(f"Job {job_id} canceled in JobWorkerManager") + except Exception as err: + # try to update the job in the DB in case the job doesn't exist + # or the job was deleted from the DB + try: + self._update_job_error(job, err) + except Exception as save_err: + _LOGGER.warning( + f"Could not update job state in db to errored " + f"for job {job_id}: {save_err}: for error {err}" + ) + + def _check_cancel_job( + self, job: Job, canceled: _LockedVar[bool], state: _JobExecutionState + ) -> bool: + # cancel if overall system is being shutdown + if canceled.get() or not threading.main_thread().is_alive(): + return True + + # refresh job state at maximum one second intervals to see if job was canceled + if ( + state.db_canceled_check_time + and time.time() - state.db_canceled_check_time < 1.0 + ): + return False + + job = job.refresh() + state.db_canceled_check_time = time.time() + + return job.status == JobStatus.canceled + + def _update_job_started(self, job: Job): + _LOGGER.debug(f"Updating job {job.job_id} to started status") + job.status = JobStatus.started + job.save() + + def _update_job_progress(self, job: Job, state: _JobExecutionState, progress: Dict): + # update the progress max 5 times a second to not hammer the DB + if ( + state.db_progress_saved_time + and time.time() - state.db_progress_saved_time < 0.2 + ): + return + + _LOGGER.debug(f"Job {job.job_id} saving progress to DB") + job.progress = progress + job.save() + state.db_progress_saved_time = time.time() + + def _update_job_completed(self, job: Job): + _LOGGER.debug(f"Job {job.job_id} completed, saving DB state") + job.status = JobStatus.completed + job.progress = None + job.save() + _LOGGER.info(f"Job {job.job_id} completed in JobWorkerManager") + + def _update_job_canceled(self, job: Job): + _LOGGER.debug(f"Job {job.job_id} cancel requested, saving in DB") + job.status = JobStatus.canceled + job.progress = None + job.save() + _LOGGER.info(f"Job {job.job_id} canceled in DB") + + def _update_job_error(self, job: 
Job, err: Any): + _LOGGER.debug(f"Job {job.job_id} errored, saving to DB") + job.status = JobStatus.error + job.error = str(err) + job.progress = None + job.save() + _LOGGER.warning(f"Job {job.job_id} errored out {err}")
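A brief, hedged sketch of the manager's lifecycle as implied by the code above: it is constructed as a Singleton, start() cancels stranded jobs and submits pending ones, refresh() is called whenever new Job rows are inserted, and shutdown() drains the thread pool. The surrounding server wiring is hypothetical:

```python
from sparsify.workers.manager import JobWorkerManager

manager = JobWorkerManager(max_workers=1)  # Singleton: later constructions return the same instance
manager.start()                            # cancel stranded jobs, then submit any pending ones

# ... a request handler inserts a Job row (status=pending, worker_ack=False) ...
manager.refresh()                          # claim and submit the newly pending job

# manager.cancel_job(job_id)               # flag a queued or running job as canceled
manager.shutdown()                         # stop accepting work and drain the pool
```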
    +
    + +
    + +
    +
    + +
    + +
    +

    + © Copyright 2021 - present / Neuralmagic, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). + +

    +
    + + + + Built with Sphinx using a + + theme + + provided by Read the Docs. + +
    +
    +
    + +
    + +
    + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/sparsify/_modules/sparsify/workers/projects_benchmark.html b/sparsify/_modules/sparsify/workers/projects_benchmark.html index 2ff90365db1..7ab044fea2d 100644 --- a/sparsify/_modules/sparsify/workers/projects_benchmark.html +++ b/sparsify/_modules/sparsify/workers/projects_benchmark.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    @@ -215,7 +216,7 @@

    Source code for sparsify.workers.projects_benchmark

    data_dump_and_validation, ) from sparsify.utils import get_ml_sys_info -from sparsify.workers.base import BaseJobWorker +from sparsify.workers.base import JobWorker _LOGGER = logging.getLogger(__name__) @@ -227,7 +228,7 @@

    Source code for sparsify.workers.projects_benchmark

    ORT_GPU_ENGINE = "ort_gpu" -
    [docs]class CreateBenchmarkJobWorker(BaseJobWorker): +
    [docs]class CreateBenchmarkJobWorker(JobWorker): """ A job worker for running and saving a benchmark for a given project and configuration. diff --git a/sparsify/_modules/sparsify/workers/projects_data.html b/sparsify/_modules/sparsify/workers/projects_data.html index 7458380448e..b3af497e8d3 100644 --- a/sparsify/_modules/sparsify/workers/projects_data.html +++ b/sparsify/_modules/sparsify/workers/projects_data.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    @@ -204,7 +205,7 @@

    Source code for sparsify.workers.projects_data

    from sparsify.blueprints.utils import validate_model_data from sparsify.models import ProjectData, ProjectModel from sparsify.schemas import JobProgressSchema -from sparsify.workers.base import BaseJobWorker +from sparsify.workers.base import JobWorker _LOGGER = logging.getLogger(__name__) @@ -212,7 +213,7 @@

    Source code for sparsify.workers.projects_data

    __all__ = ["DataFromPathJobWorker", "DataFromRepoJobWorker"] -class _DataLoaderJobWorker(BaseJobWorker): +class _DataLoaderJobWorker(JobWorker): @classmethod def format_args( cls, data_id: str, uri: str, **kwargs diff --git a/sparsify/_modules/sparsify/workers/projects_model.html b/sparsify/_modules/sparsify/workers/projects_model.html index ec74478590a..8dedf6080f5 100644 --- a/sparsify/_modules/sparsify/workers/projects_model.html +++ b/sparsify/_modules/sparsify/workers/projects_model.html @@ -104,10 +104,11 @@

    -

    Help and Support

    +

    Help

    @@ -203,7 +204,7 @@

    Source code for sparsify.workers.projects_model

    < from sparsezoo.utils import download_file_iter from sparsify.models import ProjectModel from sparsify.schemas import JobProgressSchema -from sparsify.workers.base import BaseJobWorker +from sparsify.workers.base import JobWorker __all__ = ["ModelFromPathJobWorker", "ModelFromRepoJobWorker"] @@ -212,7 +213,7 @@

    Source code for sparsify.workers.projects_model

    < _LOGGER = logging.getLogger(__name__) -class _ModelLoaderJobWorker(BaseJobWorker): +class _ModelLoaderJobWorker(JobWorker): """ A base job worker for retrieving a model from a given uri. diff --git a/sparsify/_modules/sparsify/workers/projects_profiles.html b/sparsify/_modules/sparsify/workers/projects_profiles.html index 5427a66d619..3f41980dc41 100644 --- a/sparsify/_modules/sparsify/workers/projects_profiles.html +++ b/sparsify/_modules/sparsify/workers/projects_profiles.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    @@ -212,7 +213,7 @@

    Source code for sparsify.workers.projects_profiles

    ProjectProfileModelOpsBaselineMeasurementsSchema, ProjectProfileModelOpsMeasurementsSchema, ) -from sparsify.workers.base import BaseJobWorker +from sparsify.workers.base import JobWorker __all__ = [ @@ -224,7 +225,7 @@

    Source code for sparsify.workers.projects_profiles

    _LOGGER = logging.getLogger(__name__) -class BaseProfileJobWorker(BaseJobWorker): +class BaseProfileJobWorker(JobWorker): """ Base job worker for working with profiles for projects diff --git a/sparsify/_sources/api/sparsify.workers.rst.txt b/sparsify/_sources/api/sparsify.workers.rst.txt index f47ae2eae1a..9b1295a3acc 100644 --- a/sparsify/_sources/api/sparsify.workers.rst.txt +++ b/sparsify/_sources/api/sparsify.workers.rst.txt @@ -12,18 +12,10 @@ sparsify.workers.base module :undoc-members: :show-inheritance: -sparsify.workers.base\_manager module -------------------------------------- +sparsify.workers.manager module +------------------------------- -.. automodule:: sparsify.workers.base_manager - :members: - :undoc-members: - :show-inheritance: - -sparsify.workers.base\_wrapper module -------------------------------------- - -.. automodule:: sparsify.workers.base_wrapper +.. automodule:: sparsify.workers.manager :members: :undoc-members: :show-inheritance: diff --git a/sparsify/_sources/index.rst.txt b/sparsify/_sources/index.rst.txt index d2b1aefd3e6..80bca3602a8 100644 --- a/sparsify/_sources/index.rst.txt +++ b/sparsify/_sources/index.rst.txt @@ -17,7 +17,7 @@ Sparsify |version| =================== -Neural network model repository for highly sparse models and optimization recipes +Easy-to-use UI for automatically sparsifying neural networks and creating sparsification recipes for better inference performance and a smaller footprint .. raw:: html @@ -48,40 +48,52 @@ Neural network model repository for highly sparse models and optimization recipe Overview ======== -Sparsify is a deep learning autoML tool that simplifies the model optimization process to rapidly achieve the best combination of size, speed, and accuracy on any deep learning model. Sparsify optimizes and benchmarks models informed by industry research insights for ML practitioners, including ML engineers and operators, who need to deploy performant deep learning models fast and at scale. Sparsify shows visual performance potential for your model, including a sliding scale between performance and loss sensitivity, ultimately speeding up the model optimization process from weeks to minutes. +Sparsify is an easy-to-use UI tool that simplifies the deep learning model optimization process to rapidly achieve the best combination of size, speed, and accuracy. +Sparsify sparsifies and benchmarks models informed by industry research insights for ML practitioners, including ML engineers and operators, who need to deploy performant deep learning models fast and at scale. +Sparsify shows visual performance potential for your model, including a sliding scale between performance and recovery, ultimately speeding up the model sparsification process from weeks to minutes. -This repository contains the package to locally launch Sparsify where you can create projects to load and optimize your deep learning models. At the end, you can export optimization recipes to integrate with your training workflow. +`This repository `_ contains the package to locally launch Sparsify where you can create projects to load and sparsify your deep learning models. +At the end, you can export sparsification recipes to integrate with your training workflow. 
-Related Products -================ +Sparsification +============== -- `DeepSparse `_: - CPU inference engine that delivers unprecedented performance for sparse models -- `SparseZoo `_: - Neural network model repository for highly sparse models and optimization recipes -- `SparseML `_: - Libraries for state-of-the-art deep neural network optimization algorithms, - enabling simple pipelines integration with a few lines of code +Sparsification is the process of taking a trained deep learning model and removing redundant information from the overprecise and over-parameterized network resulting in a faster and smaller model. +Techniques for sparsification are all encompassing including everything from inducing sparsity using `pruning `_ and `quantization `_ to enabling naturally occurring sparsity using `activation sparsity `_ or `winograd/FFT `_. +When implemented correctly, these techniques result in significantly more performant and smaller models with limited to no effect on the baseline metrics. +For example, pruning plus quantization can give over `7x improvements in performance `_ while recovering to nearly the same baseline accuracy. + +The Deep Sparse product suite builds on top of sparsification enabling you to easily apply the techniques to your datasets and models using recipe-driven approaches. +Recipes encode the directions for how to sparsify a model into a simple, easily editable format. +- Download a sparsification recipe and sparsified model from the `SparseZoo `_. +- Alternatively, create a recipe for your model using `Sparsify `_. +- Apply your recipe with only a few lines of code using `SparseML `_. +- Finally, for GPU-level performance on CPUs, deploy your sparse-quantized model with the `DeepSparse Engine `_. + + +**Full Deep Sparse product flow:** + + Resources and Learning More =========================== -- `DeepSparse Documentation `_ -- `SparseZoo Documentation `_ -- `SparseML Documentation `_ -- `Neural Magic Blog `_, - `Resources `_, - `Website `_ +- `DeepSparse Documentation `_ +- `SparseZoo Documentation `_ +- `SparseML Documentation `_ +- `Neural Magic Blog `_, + `Resources `_, + `Website `_ Release History =============== Official builds are hosted on PyPi -- stable: `sparsify `_ -- nightly (dev): `sparsify-nightly `_ +- stable: `sparsify `_ +- nightly (dev): `sparsify-nightly `_ Additionally, more information can be found via -`GitHub Releases `_. +`GitHub Releases `_. .. toctree:: :maxdepth: 3 @@ -98,8 +110,9 @@ Additionally, more information can be found via api/sparsify .. toctree:: - :maxdepth: 2 - :caption: Help and Support + :maxdepth: 3 + :caption: Help Bugs, Feature Requests Support, General Q&A + Neural Magic Docs diff --git a/sparsify/_sources/quicktour.md.txt b/sparsify/_sources/quicktour.md.txt index bbbb2481375..89b73b26b2b 100644 --- a/sparsify/_sources/quicktour.md.txt +++ b/sparsify/_sources/quicktour.md.txt @@ -39,16 +39,18 @@ then you will need to substitute in the proper IP address for that server in pla Additionally, confirm that the networking rules on your server allow for access to port 5543. After visiting `http://0.0.0.0:5543` in a web browser, the home page for Sparsify will load if configured correctly: -![SparseZoo main page Icon](userguide/images/image_1.jpg) -A quick start flow is given below. For a more in-depth read, check out the [Sparsify User Guide](userguide/index). +
    + +A quick start flow is given below. For a more in-depth read, check out [Sparsify documentation](https://docs.neuralmagic.com/sparsify/). ### New Project -To begin optimizing a model, a new project must be created. +To begin sparsifying a model, a new project must be created. The New Project button is located in the lower right of Sparsify's home screen. After clicking, the create project popup will be displayed: -![Sparsify new project popup](userguide/images/image_7.jpg) + +
Sparsify only accepts [ONNX](https://onnx.ai/) model formats currently. To easily convert to ONNX from common ML frameworks, see the [SparseML repository](https://github.com/neuralmagic/sparseml). @@ -63,40 +65,45 @@ Continue through the popup and fill in information as specified to finish creati ### Analyzing a Model -After model creation, optimization sensitivity analysis for the model are shown under the `Performance Profiles` and `Loss Profiles` in the left navigation. +After model creation, sensitivity analyses for the model are shown under the `Performance Profiles` and `Loss Profiles` in the left navigation. -The profiles will show the effects that different types of optimizations and degrees of those optimizations have on both the models inference speed and the baseline loss. +The profiles will show the effects that different types of algorithms and degrees of those algorithms have on both the model's inference speed and the baseline loss. Performance Profiles: -![Sparsify performance profiles](userguide/images/image_14.jpg) + +
    Loss Profiles: -![Sparsify loss profiles](userguide/images/image_20.jpg) + +
    ### Optimizing a Model -Click on the `Optimization` in the left navigation or the `Start Optimizing` button on the analyzing pages to begin optimizing your model. After clicking, the optimization creation popup will be displayed: -![Sparsify loss profiles](userguide/images/image_26.jpg) +Click on the `Optimization` in the left navigation or the `Start Optimizing` button on the analyzing pages to begin sparsifying your model. +After clicking, the sparsification creation popup will be displayed: + +
    Fill in the information as required in the modal. Once completed, Sparsify's autoML algorithms will choose the best settings it can find for optimizing your model. The resulting recipe will be displayed along with estimated metrics for the optimized model. The recipe can then be further edited if desired: -![Sparsify loss profiles](userguide/images/image_28.jpg) + +
    ### Exporting a Recipe -Currently Sparsify is focused on training-aware optimizations; -these allow much better loss recovery for a given target performance. -A future release will enable the option of one-shot optimizations with limited to no retraining. +Currently Sparsify is focused on training-aware methods; these allow much better loss recovery for a given target performance. +A future release will enable the option of one-shot sparsification with limited to no retraining. -Given that the optimization recipe is created with training-aware optimizations, it must be exported for inclusion in your original training pipeline using [SparseML](https://github.com/neuralmagic/sparseml). +Given that the recipe is created with training-aware algorithms, it must be exported for inclusion in your original training pipeline using [SparseML](https://github.com/neuralmagic/sparseml). SparseML enables this inclusion with only a few lines of code for most training workflows. On the optimization page, click the `Export` button in the bottom right. This will open up the export popup: -![Sparsify loss profiles](userguide/images/image_60.jpg) + +
    Select the framework the model was originally trained in on the upper right of the popup. Once selected, either copy or download the recipe for use with SparseML. -In addition, some sample code using SparseML is given to integrate the exported optimization recipe. \ No newline at end of file +In addition, some sample code using SparseML is given to integrate the exported sparsification recipe. diff --git a/sparsify/api/modules.html b/sparsify/api/modules.html index 0fde0a09252..0214c2f4b5f 100644 --- a/sparsify/api/modules.html +++ b/sparsify/api/modules.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    @@ -237,8 +238,7 @@

    sparsifysparsify.workers package -

    Help and Support

    +

    Help

    diff --git a/sparsify/api/sparsify.blueprints.html b/sparsify/api/sparsify.blueprints.html index b49271c2dfc..9ae170f8ea0 100644 --- a/sparsify/api/sparsify.blueprints.html +++ b/sparsify/api/sparsify.blueprints.html @@ -136,10 +136,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/api/sparsify.blueprints.utils.html b/sparsify/api/sparsify.blueprints.utils.html index 32c53b3c5b7..a4c0a972083 100644 --- a/sparsify/api/sparsify.blueprints.utils.html +++ b/sparsify/api/sparsify.blueprints.utils.html @@ -136,10 +136,11 @@ -

    Help and Support

    +

    Help

    @@ -650,7 +651,7 @@

    Submodules
    -MAX_NODE_SPARSITY = 0.975
    +MAX_NODE_SPARSITY = 0.95
    @@ -706,180 +707,6 @@

    Submodules -
    -class sparsify.blueprints.utils.projects_optimizations_pruning.PruningNodeEvaluator(node_id: str, model_analysis: Dict, perf_analysis: Union[None, Dict], loss_analysis: Union[None, Dict])[source]
    -

    Bases: object

    -

    Evaluator for a model’s node for pruning. -Able to estimate the effect of pruning on the node for performance, loss, etc

    -
    -
    Parameters
    -
      -
    • node_id – id of the node to create the evaluator for

    • -
    • model_analysis – analysis of the model

    • -
    • perf_analysis – performance analysis of the model, if any

    • -
    • loss_analysis – loss analysis of the model, if any

    • -
    -
    -
    -
    -
    -eval_dict(sparsity: Optional[float], baseline_sparsity: Optional[float], overridden: bool) → Dict[str, Any][source]
    -
    - -
    -
    -property flops
    -

    the flops pruning series for the node

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -property loss
    -

    the loss measurements pruning series for the node

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -property loss_estimated
    -

    the estimated loss measurements pruning series for the node

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -property loss_metric
    -

    the available loss metric, -falls back on estimated loss if loss sensitivity is not available

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -property node_id
    -

    id of the node the evaluator is created for

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -property params
    -

    the params pruning series for the node

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -property performance
    -

    the performance timings pruning series for the node

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -property performance_metric
    -

    the available performance metric, -falls back on flops if perf sensitivity is not available

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -property prunable_params
    -

    number of prunable params in the node

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -recovery(sparsity: Optional[float], baseline_sparsity: Optional[float]) → Optional[float][source]
    -
    -
    Parameters
    -
      -
    • sparsity – the sparsity to get recovery for

    • -
    • baseline_sparsity – the baseline sparsity to use for recovery

    • -
    -
    -
    Returns
    -

    the estimated confidence of recovery for the given sparsity -as compared to the baseline

    -
    -
    -
    - -
    -
    -sparse_costs(balance_perf_loss: float, perf_rescaler: sparsify.blueprints.utils.projects_optimizations_pruning.ValueRescaler, loss_rescaler: sparsify.blueprints.utils.projects_optimizations_pruning.ValueRescaler) → List[Tuple[str, float, Optional[float]]][source]
    -
    -
    Parameters
    -
      -
    • balance_perf_loss – the weight [0.0, 1.0] for balancing perf vs loss; -0.0 for all performance, 1.0 for all loss

    • -
    • perf_rescaler – rescaler to use to rescale vales for performance -before calculating cost

    • -
• loss_rescaler – rescaler to use to rescale values for loss -before calculating cost

    • -
    -
    -
    Returns
    -

    a list of tuples containing the sparsities from 0% to 99% and -their associated cost for pruning the node to that sparsity

    -
    -
    -
    - -
    -
    -property structurally_pruned
    -

    True if the node is structurally pruned (group convolutions), -False otherwise

    -
    -
    Type
    -

    return

    -
    -
    -
    - -

    -
    class sparsify.blueprints.utils.projects_optimizations_pruning.PruningSettings(mask_type, sparsity, balance_perf_loss, filter_min_sparsity, filter_min_perf_gain, filter_min_recovery)
    diff --git a/sparsify/api/sparsify.html b/sparsify/api/sparsify.html index ef5c8446e9c..4dbadd236e7 100644 --- a/sparsify/api/sparsify.html +++ b/sparsify/api/sparsify.html @@ -120,10 +120,11 @@ -

    Help and Support

    +

    Help

    @@ -274,8 +275,7 @@

    Subpackagessparsify.workers package -

    Help and Support

    +

    Help

    @@ -263,6 +264,12 @@

    Submodulesid = <AutoField: BaseModel.id>

    +
    +
    +refresh()[source]
    +

    Refresh the data for the model instance from the DB

    +
    +
    @@ -446,6 +453,11 @@

    Submodulestype_ = <CharField: Job.type_>

    +
    +
    +worker_ack = <BooleanField: Job.worker_ack>
    +
    +
    worker_args = <JSONField: Job.worker_args>
    diff --git a/sparsify/api/sparsify.schemas.html b/sparsify/api/sparsify.schemas.html index eaacd2dfd1c..08ffd2d704c 100644 --- a/sparsify/api/sparsify.schemas.html +++ b/sparsify/api/sparsify.schemas.html @@ -135,10 +135,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/api/sparsify.utils.html b/sparsify/api/sparsify.utils.html index 0f8c5cda912..bd542b32f33 100644 --- a/sparsify/api/sparsify.utils.html +++ b/sparsify/api/sparsify.utils.html @@ -125,10 +125,11 @@ -

    Help and Support

    +

    Help

    diff --git a/sparsify/api/sparsify.workers.html b/sparsify/api/sparsify.workers.html index d61fa7a5340..385b9e1a824 100644 --- a/sparsify/api/sparsify.workers.html +++ b/sparsify/api/sparsify.workers.html @@ -112,8 +112,7 @@
  • sparsify.workers package
  • -

    Help and Support

    +

    Help

    @@ -212,8 +212,8 @@

    Submodules

    sparsify.workers.base module

    Code related to the base implementations for job workers

    -
    -class sparsify.workers.base.BaseJobWorker(job_id: str, project_id: str)[source]
    +
    +class sparsify.workers.base.JobWorker(job_id: str, project_id: str)[source]

    Bases: object

    The base job worker instance all job workers must extend

    @@ -225,8 +225,8 @@

    Submodules -
    -abstract classmethod format_args(**kwargs) → Dict[str, Any][source]
    +
    +abstract classmethod format_args(**kwargs) → Dict[str, Any][source]

Format the given args into the proper args to be stored for later use in the constructor for the job worker.

    @@ -240,8 +240,8 @@

    Submodules -
    -classmethod get_type() → str[source]
    +
    +classmethod get_type() → str[source]
    Returns

    the type of job worker

    @@ -250,8 +250,8 @@

    Submodules -
    -property job_id
    +
    +property job_id

    the id of the job the worker is being run for

    Type
    @@ -261,8 +261,8 @@

    Submodules -
    -property project_id
    +
    +property project_id

    the id of the project the job belongs to

    Type
    @@ -272,8 +272,8 @@

    Submodules -
    -abstract run() → Iterator[Dict[str, Any]][source]
    +
    +abstract run() → Iterator[Dict[str, Any]][source]

Perform the work for the job. Must be implemented as an iterator that yields a dictionary containing the progress object on each progress step.
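To make the contract above concrete, the sketch below shows a minimal, hypothetical subclass; only JobWorker, format_args, and run come from the documented API, while the worker name, its num_steps argument, and the exact keys of the yielded progress dictionary are illustrative assumptions.

    from typing import Any, Dict, Iterator

    from sparsify.workers.base import JobWorker


    class SleepJobWorker(JobWorker):
        """Illustrative worker that reports progress over a fixed number of steps."""

        def __init__(self, job_id: str, project_id: str, num_steps: int = 5):
            super().__init__(job_id, project_id)
            self._num_steps = num_steps

        @classmethod
        def format_args(cls, **kwargs) -> Dict[str, Any]:
            # keep only the kwargs the constructor needs so they can be stored with the job
            return {"num_steps": kwargs.get("num_steps", 5)}

        def run(self) -> Iterator[Dict[str, Any]]:
            # yield a progress dictionary on each step; the exact keys are assumed here
            for step in range(self._num_steps):
                yield {"step": step + 1, "total": self._num_steps}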

    @@ -287,216 +287,90 @@

    Submodules -
    -class sparsify.workers.base.JobWorkerRegistryHolder(name, bases, attrs)[source]
    +
    +class sparsify.workers.base.JobWorkerRegistry(name, bases, attrs)[source]

    Bases: type

Registry class for handling and storing BaseJobWorker subclass instances. All subclasses are added to the REGISTRY property

    -
    -REGISTRY = {'BaseJobWorker': <class 'sparsify.workers.base.BaseJobWorker'>, 'BaseProfileJobWorker': <class 'sparsify.workers.projects_profiles.BaseProfileJobWorker'>, 'CreateBenchmarkJobWorker': <class 'sparsify.workers.projects_benchmark.CreateBenchmarkJobWorker'>, 'CreateLossProfileJobWorker': <class 'sparsify.workers.projects_profiles.CreateLossProfileJobWorker'>, 'CreatePerfProfileJobWorker': <class 'sparsify.workers.projects_profiles.CreatePerfProfileJobWorker'>, 'DataFromPathJobWorker': <class 'sparsify.workers.projects_data.DataFromPathJobWorker'>, 'DataFromRepoJobWorker': <class 'sparsify.workers.projects_data.DataFromRepoJobWorker'>, 'ModelFromPathJobWorker': <class 'sparsify.workers.projects_model.ModelFromPathJobWorker'>, 'ModelFromRepoJobWorker': <class 'sparsify.workers.projects_model.ModelFromRepoJobWorker'>, '_DataLoaderJobWorker': <class 'sparsify.workers.projects_data._DataLoaderJobWorker'>, '_ModelLoaderJobWorker': <class 'sparsify.workers.projects_model._ModelLoaderJobWorker'>}
    +
    +REGISTRY = {'BaseProfileJobWorker': <class 'sparsify.workers.projects_profiles.BaseProfileJobWorker'>, 'CreateBenchmarkJobWorker': <class 'sparsify.workers.projects_benchmark.CreateBenchmarkJobWorker'>, 'CreateLossProfileJobWorker': <class 'sparsify.workers.projects_profiles.CreateLossProfileJobWorker'>, 'CreatePerfProfileJobWorker': <class 'sparsify.workers.projects_profiles.CreatePerfProfileJobWorker'>, 'DataFromPathJobWorker': <class 'sparsify.workers.projects_data.DataFromPathJobWorker'>, 'DataFromRepoJobWorker': <class 'sparsify.workers.projects_data.DataFromRepoJobWorker'>, 'JobWorker': <class 'sparsify.workers.base.JobWorker'>, 'ModelFromPathJobWorker': <class 'sparsify.workers.projects_model.ModelFromPathJobWorker'>, 'ModelFromRepoJobWorker': <class 'sparsify.workers.projects_model.ModelFromRepoJobWorker'>, '_DataLoaderJobWorker': <class 'sparsify.workers.projects_data._DataLoaderJobWorker'>, '_ModelLoaderJobWorker': <class 'sparsify.workers.projects_model._ModelLoaderJobWorker'>}
    +
    + +
    +
    +static create_worker(job)[source]

    -
    -

    sparsify.workers.base_manager module

    +
    +

    sparsify.workers.manager module

    Code related to managing jobs in the server

    -
    -exception sparsify.workers.base_manager.JobCancelationFailureError(*args: object)[source]
    +
    +exception sparsify.workers.manager.JobCancelationFailureError(*args: object)[source]

    Bases: Exception

    Error raised if a job could not be canceled

    -
    -exception sparsify.workers.base_manager.JobNotFoundError(*args: object)[source]
    +
    +exception sparsify.workers.manager.JobNotFoundError(*args: object)[source]

    Bases: Exception

    Error raised if a job is not found in the database

    -
    -class sparsify.workers.base_manager.JobWorkerManager(*args, **kwargs)[source]
    +
    +class sparsify.workers.manager.JobWorkerManager(*args, **kwargs)[source]

    Bases: object

    Manager class for handling running job workers in the background. Only one job worker can run at once. Once one completes, the next oldest one marked as pending in the db is launched.

    +
    +
    Parameters
    +

    max_workers – The maximum number of workers to allow the ThreadPoolExecutor +to work with in parallel

    +
    +
    -
    -app_startup()[source]
    -

    Handle app startup to clear uncompleted state for jobs and begin running

    -
    - -
    -
    -cancel_job(job_id: str)[source]
    -

    Cancel a job with the given job_id so it won’t be run. -Blocks until the job can be canceled.

    +
    +cancel_job(job_id: str)[source]
    +

    Cancel a job with the given job_id so it won’t be run.

    Parameters

    job_id – the job_id to cancel

    Raises
    -
    -refresh()[source]
    -

    Refresh the available jobs. -If a new job is marked as pending and no current job is running, -will start the new job.

    +
    +refresh()[source]
    +

    Refresh the available jobs and put any pending ones since last refresh +onto the ThreadPoolExecutor.

    Otherwise will exit out without doing anything and subsequent jobs will be launched after the current one completes.

    -
    - -
    -
    -

    sparsify.workers.base_wrapper module

    -

Code related to wrappers for the job worker to handle running them -through the proper flow and update state to the manager and the database.

    -
    -
    -exception sparsify.workers.base_wrapper.JobCancelError(*args: object)[source]
    -

    Bases: Exception

    -

    Error raised if a job was canceled

    -
    - -
    -
    -class sparsify.workers.base_wrapper.JobWorkerWrapper(worker: sparsify.workers.base.BaseJobWorker)[source]
    -

    Bases: object

    -

    The wrapper for a job worker to handle running an instance -through the proper flow and update state to the manager and the database.

    -
    -
    Parameters
    -

    worker – the worker instance to run

    -
    -
    -
    -
    -cancel()[source]
    -

    Cancel the running job. start must have been called first

    -
    -
    -
    -property canceled
    -

    True if the job is canceled, False otherwise

    -
    -
    Type
    -

    return

    -
    -
    +
    +shutdown()[source]
    +

    Shutdown the JobWorkerManager to stop processing any background jobs

    -
    -property canceling
    -

    True if the job is being canceled, False otherwise

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -property completed
    -

    True if the job is completed, False otherwise

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -property error
    -

    The error, if any, encountered while running the job worker

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -property errored
    -

    True if the job has errored, False otherwise

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -property job_id
    -

    the job id

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -property progress
    -

    current progress, if any, for the running job worker

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -start(done_callback: Callable[None])[source]
    -

    Start running the contained job worker in a separate thread

    -
    -
    Parameters
    -

    done_callback – the callback to invoke once completed running

    -
    -
    -
    - -
    -
    -property started
    -

    True if start has been called, False otherwise

    -
    -
    Type
    -

    return

    -
    -
    -
    - -
    -
    -property worker
    -

    the worker instance to run

    -
    -
    Type
    -

    return

    -
    -
    +
    +start()[source]
    +

    Start the JobWorkerManager to begin processing any background jobs present
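A rough usage sketch for the manager documented above, assuming it is constructed once at application startup; the max_workers value, the job id string, and the shutdown timing are illustrative, and only the constructor, start, refresh, cancel_job, and shutdown calls come from the listing above.

    from sparsify.workers.manager import JobWorkerManager

    manager = JobWorkerManager(max_workers=1)  # one background job at a time
    manager.start()    # begin processing any jobs already pending in the db
    manager.refresh()  # pick up jobs created since the last refresh
    # manager.cancel_job("some-job-id")  # cancel a job by id if needed
    manager.shutdown()  # stop processing background jobs, e.g. on app teardown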

    @@ -508,7 +382,7 @@

    Submodules
    class sparsify.workers.projects_benchmark.CreateBenchmarkJobWorker(job_id: str, project_id: str, model_id: str, benchmark_id: str, core_counts: List[int], batch_sizes: List[int], instruction_sets: List[str], inference_models: List[Dict[str, Optional[str]]], warmup_iterations_per_check: int, iterations_per_check: int)[source]
    -

    Bases: sparsify.workers.base.BaseJobWorker

    +

    Bases: sparsify.workers.base.JobWorker

    A job worker for running and saving a benchmark for a given project and configuration.

    diff --git a/sparsify/genindex.html b/sparsify/genindex.html index 111fe6c712c..5ebcc01bb0c 100644 --- a/sparsify/genindex.html +++ b/sparsify/genindex.html @@ -104,10 +104,11 @@ -

    Help and Support

    +

    Help

    @@ -216,8 +217,6 @@

    A

    - +
    cv/classification/efficientnet-b0/pytorch/sparseml/imagenet/arch-moderate/optimization.mdcv/classification/efficientnet-b0/pytorch/sparseml/imagenet/arch-moderate?recipe_type=original 76.5% top1 accuracy
    cv/classification/efficientnet-b4/pytorch/sparseml/imagenet/arch-moderate/optimization.mdcv/classification/efficientnet-b4/pytorch/sparseml/imagenet/arch-moderate?recipe_type=original 82.1% top1 accuracy
    cv/classification/inception_v3/pytorch/sparseml/imagenet/pruned-conservative/optimization.mdcv/classification/inception_v3/pytorch/sparseml/imagenet/pruned-conservative?recipe_type=original 77.4% top1 accuracy
    cv/classification/inception_v3/pytorch/sparseml/imagenet/pruned-moderate/optimization.mdcv/classification/inception_v3/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original 76.6% top1 accuracy
    cv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/base-none/optimization.mdcv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/base-none?recipe_type=original 70.9% top1 accuracy
    cv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/pruned-conservative/optimization.mdcv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/pruned-conservative?recipe_type=original 70.9% top1 accuracy
    cv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/pruned-moderate/optimization.mdcv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original 70.1% top1 accuracy
    cv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/pruned_quant-moderate/optimization.mdcv/classification/mobilenet_v1-1.0/pytorch/sparseml/imagenet/pruned_quant-moderate?recipe_type=original 70.1% top1 accuracy
cv/classification/resnet_v1-101/pytorch/sparseml/imagenet/pruned-moderate/optimization.mdcv/classification/resnet_v1-101/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original 76.6% top1 accuracy
    cv/classification/resnet_v1-152/pytorch/sparseml/imagenet/pruned-moderate/optimization.mdcv/classification/resnet_v1-152/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original 77.5% top1 accuracy
    cv/classification/resnet_v1-18/pytorch/sparseml/imagenet/sparse-conservative/optimization.mdcv/classification/resnet_v1-18/pytorch/sparseml/imagenet/pruned-conservative?recipe_type=original 69.8% top1 accuracy
    cv/classification/resnet_v1-34/pytorch/sparseml/imagenet/sparse-conservative/optimization.mdcv/classification/resnet_v1-34/pytorch/sparseml/imagenet/pruned-conservative?recipe_type=original 73.3% top1 accuracy
    cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-conservative/optimization.mdcv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-conservative?recipe_type=original 76.1% top1 accuracy
    cv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-moderate/optimization.mdcv/classification/resnet_v1-50/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original 75.3% top1 accuracy
    cv/classification/resnet_v1-50/pytorch/sparseml/imagenet-augmented/pruned_quant-aggressive/optimization.mdcv/classification/resnet_v1-50/pytorch/sparseml/imagenet-augmented/pruned_quant-aggressive?recipe_type=original 76.1% top1 accuracy
    cv/classification/resnet_v1-50/pytorch/sparseml/imagenette/pruned-conservative/optimization.mdcv/classification/resnet_v1-50/pytorch/sparseml/imagenette/pruned-conservative?recipe_type=original 99.9% top1 accuracy
    cv/classification/resnet_v1-50/pytorch/torchvision/imagenette/pruned-conservative/optimization.mdcv/classification/resnet_v1-50/pytorch/torchvision/imagenette/pruned-conservative?recipe_type=original 99.9% top1 accuracy
    cv/classification/vgg-11/pytorch/sparseml/imagenet/pruned-moderate/optimization.mdcv/classification/vgg-11/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original 68.3% top1 accuracy
    cv/classification/vgg-16/pytorch/sparseml/imagenet/pruned-conservative/optimization.mdcv/classification/vgg-16/pytorch/sparseml/imagenet/pruned-conservative?recipe_type=original 71.6% top1 accuracy
    cv/classification/vgg-16/pytorch/sparseml/imagenet/pruned-moderate/optimization.mdcv/classification/vgg-16/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original 70.8% top1 accuracy
    cv/classification/vgg-19/pytorch/sparseml/imagenet/pruned-moderate/optimization.mdcv/classification/vgg-19/pytorch/sparseml/imagenet/pruned-moderate?recipe_type=original 71.7% top1 accuracy
    -
  • project_id() (sparsify.workers.base.BaseJobWorker property) +
  • project_id() (sparsify.workers.base.JobWorker property)
  • ProjectAvailableModelModificationsSchema (class in sparsify.schemas.projects_optimizations)
  • @@ -1405,14 +1364,14 @@

    P

  • ProjectOptimization (class in sparsify.models.projects_optimizations)
  • -
    • ProjectOptimizationModifierLRExponentialArgsSchema (class in sparsify.schemas.projects_optimizations)
    • ProjectOptimizationModifierLRMultiStepArgsSchema (class in sparsify.schemas.projects_optimizations) @@ -1472,8 +1431,6 @@

      P

    • ProjectProfileSchema (class in sparsify.schemas.projects_profiles)
    • ProjectSchema (class in sparsify.schemas.projects) -
    • -
    • prunable_params() (sparsify.blueprints.utils.projects_optimizations_pruning.PruningNodeEvaluator property)
    • pruning_end_epoch() (sparsify.blueprints.utils.projects_optimizations.OptimEpochs property)
    • @@ -1506,8 +1463,6 @@

      P

    • pruning_update_frequency() (sparsify.blueprints.utils.projects_optimizations.OptimEpochs property)
    • PruningModelEvaluator (class in sparsify.blueprints.utils.projects_optimizations_pruning) -
    • -
    • PruningNodeEvaluator (class in sparsify.blueprints.utils.projects_optimizations_pruning)
    • PruningSettings (class in sparsify.blueprints.utils.projects_optimizations_pruning)
    • @@ -1553,11 +1508,13 @@

      Q

      R

      + - - - - diff --git a/sparsify/quicktour.html b/sparsify/quicktour.html index 6800c62adbd..cf8c0dcd964 100644 --- a/sparsify/quicktour.html +++ b/sparsify/quicktour.html @@ -112,10 +112,11 @@ -

      Help and Support

      +

      Help

      @@ -215,15 +216,15 @@

      Quick Tour0.0.0.0. Additionally, confirm that the networking rules on your server allow for access to port 5543.

      -

      After visiting http://0.0.0.0:5543 in a web browser, the home page for Sparsify will load if configured correctly: -SparseZoo main page Icon

      -

      A quick start flow is given below. For a more in-depth read, check out the Sparsify User Guide.

      +

      After visiting http://0.0.0.0:5543 in a web browser, the home page for Sparsify will load if configured correctly:

      +


      +

      A quick start flow is given below. For a more in-depth read, check out Sparsify documentation.

      New Project

      -

      To begin optimizing a model, a new project must be created. +

      To begin sparsifying a model, a new project must be created. The New Project button is located in the lower right of Sparsify’s home screen. -After clicking, the create project popup will be displayed: -Sparsify new project popup

      +After clicking, the create project popup will be displayed:

      +


      Sparsify only accepts ONNX model formats currently. To easily convert to ONNX from common ML frameworks, see the SparseML repository.
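As a rough illustration of that conversion step, the snippet below sketches a PyTorch-to-ONNX export before uploading the file to Sparsify; the ResNet-50 model, input shape, and file name are placeholder assumptions, and SparseML's own exporters can be used instead.

    import torch
    import torchvision

    # any trained torch.nn.Module can be exported; ResNet-50 is only an example
    model = torchvision.models.resnet50(pretrained=True).eval()
    dummy_input = torch.randn(1, 3, 224, 224)  # one 224x224 RGB image

    # writes model.onnx, which can then be uploaded when creating a Sparsify project
    torch.onnx.export(
        model,
        dummy_input,
        "model.onnx",
        input_names=["input"],
        output_names=["output"],
        opset_version=11,
    )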

      To begin creating a project use one of the following flows:

      @@ -236,36 +237,36 @@

      New Project

      Analyzing a Model

      -

      After model creation, optimization sensitivity analysis for the model are shown under the Performance Profiles and Loss Profiles in the left navigation.

      -

      The profiles will show the effects that different types of optimizations and degrees of those optimizations have on both the models inference speed and the baseline loss.

      -

      Performance Profiles: -Sparsify performance profiles

      -

      Loss Profiles: -Sparsify loss profiles

      +

After model creation, sensitivity analyses for the model are shown under the Performance Profiles and Loss Profiles in the left navigation.

      +

The profiles will show the effects that different types of algorithms and degrees of those algorithms have on both the model’s inference speed and the baseline loss.

      +

      Performance Profiles:

      +


      +

      Loss Profiles:

      +


      Optimizing a Model

      -

      Click on the Optimization in the left navigation or the Start Optimizing button on the analyzing pages to begin optimizing your model. After clicking, the optimization creation popup will be displayed: -Sparsify loss profiles

      +

      Click on the Optimization in the left navigation or the Start Optimizing button on the analyzing pages to begin sparsifying your model. +After clicking, the sparsification creation popup will be displayed:

      +


Fill in the information as required in the modal. Once completed, Sparsify’s autoML algorithms will choose the best settings they can find for optimizing your model. The resulting recipe will be displayed along with estimated metrics for the optimized model. -The recipe can then be further edited if desired: -Sparsify loss profiles

      +The recipe can then be further edited if desired:

      +


      Exporting a Recipe

      -

      Currently Sparsify is focused on training-aware optimizations; -these allow much better loss recovery for a given target performance. -A future release will enable the option of one-shot optimizations with limited to no retraining.

      -

      Given that the optimization recipe is created with training-aware optimizations, it must be exported for inclusion in your original training pipeline using SparseML. +

      Currently Sparsify is focused on training-aware methods; these allow much better loss recovery for a given target performance. +A future release will enable the option of one-shot sparsification with limited to no retraining.

      +

      Given that the recipe is created with training-aware algorithms, it must be exported for inclusion in your original training pipeline using SparseML. SparseML enables this inclusion with only a few lines of code for most training workflows.
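For orientation, a minimal sketch of what that integration typically looks like in a PyTorch training loop is shown below; exact module paths and call signatures vary by SparseML version, and the recipe path, model, optimizer, loss, and data loader here are placeholder assumptions rather than Sparsify's generated sample code.

    import torch
    from sparseml.pytorch.optim import ScheduledModifierManager

    # placeholder model and data; in practice these come from your existing training setup
    model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(784, 10))
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    criterion = torch.nn.CrossEntropyLoss()
    train_loader = [(torch.randn(8, 1, 28, 28), torch.randint(0, 10, (8,)))] * 10

    # recipe.yaml is the file copied or downloaded from Sparsify's export popup
    manager = ScheduledModifierManager.from_yaml("recipe.yaml")
    optimizer = manager.modify(model, optimizer, steps_per_epoch=len(train_loader))

    for epoch in range(int(manager.max_epochs)):
        for inputs, labels in train_loader:
            optimizer.zero_grad()
            loss = criterion(model(inputs), labels)
            loss.backward()
            optimizer.step()

    manager.finalize(model)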

      On the optimization page, click the Export button in the bottom right. -This will open up the export popup: -Sparsify loss profiles

      +This will open up the export popup:

      +


      Select the framework the model was originally trained in on the upper right of the popup. Once selected, either copy or download the recipe for use with SparseML. -In addition, some sample code using SparseML is given to integrate the exported optimization recipe.

      +In addition, some sample code using SparseML is given to integrate the exported sparsification recipe.

      diff --git a/sparsify/search.html b/sparsify/search.html index 488302c6c12..cebb3d82c76 100644 --- a/sparsify/search.html +++ b/sparsify/search.html @@ -107,10 +107,11 @@ -

      Help and Support

      +

      Help

      diff --git a/sparsify/searchindex.js b/sparsify/searchindex.js index ff33cc25f18..63b2d88f1ab 100644 --- a/sparsify/searchindex.js +++ b/sparsify/searchindex.js @@ -1 +1 @@ -Search.setIndex({docnames:["api/modules","api/sparsify","api/sparsify.blueprints","api/sparsify.blueprints.code_samples","api/sparsify.blueprints.utils","api/sparsify.models","api/sparsify.schemas","api/sparsify.utils","api/sparsify.workers","index","installation","quicktour","userguide/01-intro","userguide/02-install-sparsify","userguide/03-sparsify-overview","userguide/04-analyze","userguide/04a-profiling-your-model","userguide/04b-reviewing-performance-profiles","userguide/04c-reviewing-loss-profiles","userguide/05-optimize","userguide/05a-benchmark","userguide/06-integrate","userguide/06a-optimize-config","userguide/07-settings","userguide/08-key-terms","userguide/index"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":3,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["api/modules.rst","api/sparsify.rst","api/sparsify.blueprints.rst","api/sparsify.blueprints.code_samples.rst","api/sparsify.blueprints.utils.rst","api/sparsify.models.rst","api/sparsify.schemas.rst","api/sparsify.utils.rst","api/sparsify.workers.rst","index.rst","installation.md","quicktour.md","userguide/01-intro.md","userguide/02-install-sparsify.md","userguide/03-sparsify-overview.md","userguide/04-analyze.md","userguide/04a-profiling-your-model.md","userguide/04b-reviewing-performance-profiles.md","userguide/04c-reviewing-loss-profiles.md","userguide/05-optimize.md","userguide/05a-benchmark.md","userguide/06-integrate.md","userguide/06a-optimize-config.md","userguide/07-settings.md","userguide/08-key-terms.md","userguide/index.rst"],objects:{"":{sparsify:[1,0,0,"-"]},"sparsify.app":{main:[1,1,1,""],run:[1,1,1,""]},"sparsify.blueprints":{code_samples:[3,0,0,"-"],errors:[2,0,0,"-"],jobs:[2,0,0,"-"],model_repo:[2,0,0,"-"],projects:[2,0,0,"-"],projects_benchmarks:[2,0,0,"-"],projects_data:[2,0,0,"-"],projects_model:[2,0,0,"-"],projects_optimizations:[2,0,0,"-"],projects_profiles:[2,0,0,"-"],system:[2,0,0,"-"],ui:[2,0,0,"-"],utils:[4,0,0,"-"]},"sparsify.blueprints.code_samples":{pytorch__training:[3,0,0,"-"]},"sparsify.blueprints.code_samples.pytorch__training":{train:[3,1,1,""],train_setup:[3,1,1,""]},"sparsify.blueprints.utils":{helpers:[4,0,0,"-"],projects:[4,0,0,"-"],projects_benchmark:[4,0,0,"-"],projects_data:[4,0,0,"-"],projects_optimizations:[4,0,0,"-"],projects_optimizations_pruning:[4,0,0,"-"]},"sparsify.blueprints.utils.helpers":{HTTPNotFoundError:[4,2,1,""]},"sparsify.blueprints.utils.projects":{get_project_by_id:[4,1,1,""],get_project_model_by_project_id:[4,1,1,""]},"sparsify.blueprints.utils.projects_benchmark":{get_project_benchmark_by_ids:[4,1,1,""]},"sparsify.blueprints.utils.projects_data":{get_project_data_by_ids:[4,1,1,""],validate_model_data:[4,1,1,""]},"sparsify.blueprints.utils.projects_optimizations":{OptimEpochs:[4,3,1,""],create_config:[4,1,1,""],default_epochs_distribution:[4,1,1,""],default_pruning_settings:[4,1,1,""],get_profiles_by_id:[4,1,1,""],get_project_optimizer_by_ids:[4,1,1,""],optim_lr_sched_default_mods:[4,1,1,""],optim_lr_sched_updater:[4,1,1,""],optim_pruning_updater:[4,1,1,""],optim_trainable_default_nodes:[4,1,1,""],optim_trainable_updater:[4,1,1,""],optim_updater:[4,1,1
,""],optim_validate_and_get_project_by_id:[4,1,1,""],sparse_training_available:[4,1,1,""],validate_pruning_nodes:[4,1,1,""]},"sparsify.blueprints.utils.projects_optimizations.OptimEpochs":{end_epoch:[4,4,1,""],fine_tuning_epochs:[4,4,1,""],fine_tuning_start_epoch:[4,4,1,""],pruning_end_epoch:[4,4,1,""],pruning_epochs:[4,4,1,""],pruning_start_epoch:[4,4,1,""],pruning_update_frequency:[4,4,1,""],stabilization_epochs:[4,4,1,""],start_epoch:[4,4,1,""],training_epochs:[4,4,1,""]},"sparsify.blueprints.utils.projects_optimizations_pruning":{PruningModelEvaluator:[4,3,1,""],PruningNodeEvaluator:[4,3,1,""],PruningSettings:[4,3,1,""]},"sparsify.blueprints.utils.projects_optimizations_pruning.PruningModelEvaluator":{EVAL_SENSITIVITY_SPARSITY:[4,5,1,""],MAX_NODE_SPARSITY:[4,5,1,""],apply_node_overrides:[4,4,1,""],eval_baseline:[4,4,1,""],eval_pruning:[4,4,1,""],to_dict_values:[4,4,1,""]},"sparsify.blueprints.utils.projects_optimizations_pruning.PruningNodeEvaluator":{eval_dict:[4,4,1,""],flops:[4,4,1,""],loss:[4,4,1,""],loss_estimated:[4,4,1,""],loss_metric:[4,4,1,""],node_id:[4,4,1,""],params:[4,4,1,""],performance:[4,4,1,""],performance_metric:[4,4,1,""],prunable_params:[4,4,1,""],recovery:[4,4,1,""],sparse_costs:[4,4,1,""],structurally_pruned:[4,4,1,""]},"sparsify.blueprints.utils.projects_optimizations_pruning.PruningSettings":{balance_perf_loss:[4,4,1,""],filter_min_perf_gain:[4,4,1,""],filter_min_recovery:[4,4,1,""],filter_min_sparsity:[4,4,1,""],mask_type:[4,4,1,""],sparsity:[4,4,1,""]},"sparsify.log":{get_main_logger:[1,1,1,""],get_root_logger:[1,1,1,""],set_logging_level:[1,1,1,""]},"sparsify.models":{base:[5,0,0,"-"],jobs:[5,0,0,"-"],projects:[5,0,0,"-"],projects_benchmark:[5,0,0,"-"],projects_data:[5,0,0,"-"],projects_model:[5,0,0,"-"],projects_optimizations:[5,0,0,"-"],projects_profiles:[5,0,0,"-"],utils:[5,0,0,"-"]},"sparsify.models.base":{BaseCreatedModifiedModel:[5,3,1,""],BaseModel:[5,3,1,""],CSVField:[5,3,1,""],CSVFloatField:[5,3,1,""],CSVIntField:[5,3,1,""],FileStorage:[5,3,1,""],ListObjField:[5,3,1,""]},"sparsify.models.base.BaseCreatedModifiedModel":{DoesNotExist:[5,5,1,""],created:[5,5,1,""],id:[5,5,1,""],modified:[5,5,1,""],save:[5,4,1,""]},"sparsify.models.base.BaseModel":{DoesNotExist:[5,5,1,""],id:[5,5,1,""]},"sparsify.models.base.CSVField":{db_value:[5,4,1,""],python_value:[5,4,1,""]},"sparsify.models.base.CSVFloatField":{python_value:[5,4,1,""]},"sparsify.models.base.CSVIntField":{python_value:[5,4,1,""]},"sparsify.models.base.FileStorage":{init:[5,4,1,""],root_path:[5,4,1,""]},"sparsify.models.base.ListObjField":{db_value:[5,4,1,""],python_value:[5,4,1,""]},"sparsify.models.jobs":{Job:[5,3,1,""],JobStatus:[5,3,1,""],JobStatusField:[5,3,1,""]},"sparsify.models.jobs.Job":{DoesNotExist:[5,5,1,""],baseprojectprofile_set:[5,5,1,""],created:[5,5,1,""],error:[5,5,1,""],job_id:[5,5,1,""],modified:[5,5,1,""],progress:[5,5,1,""],project_id:[5,5,1,""],projectbenchmark_set:[5,5,1,""],projectdata_set:[5,5,1,""],projectlossprofile_set:[5,5,1,""],projectmodel_set:[5,5,1,""],projectperfprofile_set:[5,5,1,""],save:[5,4,1,""],status:[5,5,1,""],type_:[5,5,1,""],worker_args:[5,5,1,""]},"sparsify.models.jobs.JobStatus":{canceled:[5,5,1,""],canceling:[5,5,1,""],completed:[5,5,1,""],error:[5,5,1,""],pending:[5,5,1,""],started:[5,5,1,""]},"sparsify.models.jobs.JobStatusField":{db_value:[5,4,1,""],field_type:[5,5,1,""],python_value:[5,4,1,""]},"sparsify.models.projects":{BaseProjectModel:[5,3,1,""],Project:[5,3,1,""]},"sparsify.models.projects.BaseProjectModel":{DoesNotExist:[5,5,1,""],delete_filesy
stem:[5,4,1,""],id:[5,5,1,""],setup_filesystem:[5,4,1,""],validate_filesystem:[5,4,1,""]},"sparsify.models.projects.Project":{DoesNotExist:[5,5,1,""],benchmarks:[5,5,1,""],created:[5,5,1,""],data:[5,5,1,""],delete_filesystem:[5,4,1,""],description:[5,5,1,""],dir_path:[5,4,1,""],dir_size:[5,4,1,""],models:[5,5,1,""],modified:[5,5,1,""],name:[5,5,1,""],optims:[5,5,1,""],profiles_loss:[5,5,1,""],profiles_perf:[5,5,1,""],project_id:[5,5,1,""],save:[5,4,1,""],setup_filesystem:[5,4,1,""],training_epochs:[5,5,1,""],training_lr_final:[5,5,1,""],training_lr_init:[5,5,1,""],training_optimizer:[5,5,1,""],validate_filesystem:[5,4,1,""]},"sparsify.models.projects_benchmark":{ProjectBenchmark:[5,3,1,""]},"sparsify.models.projects_benchmark.ProjectBenchmark":{DoesNotExist:[5,5,1,""],batch_sizes:[5,5,1,""],benchmark_id:[5,5,1,""],core_counts:[5,5,1,""],created:[5,5,1,""],inference_models:[5,5,1,""],instruction_sets:[5,5,1,""],iterations_per_check:[5,5,1,""],job:[5,5,1,""],job_id:[5,5,1,""],modified:[5,5,1,""],name:[5,5,1,""],project:[5,5,1,""],project_id:[5,5,1,""],result:[5,5,1,""],source:[5,5,1,""],warmup_iterations_per_check:[5,5,1,""]},"sparsify.models.projects_data":{ProjectData:[5,3,1,""]},"sparsify.models.projects_data.ProjectData":{DoesNotExist:[5,5,1,""],created:[5,5,1,""],data_id:[5,5,1,""],delete_filesystem:[5,4,1,""],dir_path:[5,4,1,""],file:[5,5,1,""],file_path:[5,4,1,""],job:[5,5,1,""],job_id:[5,5,1,""],project:[5,5,1,""],project_id:[5,5,1,""],setup_filesystem:[5,4,1,""],source:[5,5,1,""],validate_filesystem:[5,4,1,""]},"sparsify.models.projects_model":{ProjectModel:[5,3,1,""]},"sparsify.models.projects_model.ProjectModel":{DoesNotExist:[5,5,1,""],analysis:[5,5,1,""],created:[5,5,1,""],delete_filesystem:[5,4,1,""],dir_path:[5,4,1,""],file:[5,5,1,""],file_path:[5,4,1,""],job:[5,5,1,""],job_id:[5,5,1,""],model_id:[5,5,1,""],project:[5,5,1,""],project_id:[5,5,1,""],setup_filesystem:[5,4,1,""],source:[5,5,1,""],validate_filesystem:[5,4,1,""]},"sparsify.models.projects_optimizations":{ProjectOptimization:[5,3,1,""],ProjectOptimizationModifierLRSchedule:[5,3,1,""],ProjectOptimizationModifierPruning:[5,3,1,""],ProjectOptimizationModifierQuantization:[5,3,1,""],ProjectOptimizationModifierTrainable:[5,3,1,""]},"sparsify.models.projects_optimizations.ProjectOptimization":{DoesNotExist:[5,5,1,""],created:[5,5,1,""],end_epoch:[5,5,1,""],lr_schedule_modifiers:[5,5,1,""],modified:[5,5,1,""],name:[5,5,1,""],notes:[5,5,1,""],optim_id:[5,5,1,""],profile_loss:[5,5,1,""],profile_loss_id:[5,5,1,""],profile_perf:[5,5,1,""],profile_perf_id:[5,5,1,""],project:[5,5,1,""],project_id:[5,5,1,""],pruning_modifiers:[5,5,1,""],quantization_modifiers:[5,5,1,""],start_epoch:[5,5,1,""],trainable_modifiers:[5,5,1,""]},"sparsify.models.projects_optimizations.ProjectOptimizationModifierLRSchedule":{DoesNotExist:[5,5,1,""],created:[5,5,1,""],end_epoch:[5,5,1,""],final_lr:[5,5,1,""],init_lr:[5,5,1,""],lr_mods:[5,5,1,""],modified:[5,5,1,""],modifier_id:[5,5,1,""],optim:[5,5,1,""],optim_id:[5,5,1,""],start_epoch:[5,5,1,""]},"sparsify.models.projects_optimizations.ProjectOptimizationModifierPruning":{DoesNotExist:[5,5,1,""],balance_perf_loss:[5,5,1,""],compression:[5,5,1,""],created:[5,5,1,""],end_epoch:[5,5,1,""],est_loss_sensitivity:[5,5,1,""],est_perf_sensitivity:[5,5,1,""],est_recovery:[5,5,1,""],est_time:[5,5,1,""],est_time_baseline:[5,5,1,""],est_time_gain:[5,5,1,""],filter_min_perf_gain:[5,5,1,""],filter_min_recovery:[5,5,1,""],filter_min_sparsity:[5,5,1,""],flops:[5,5,1,""],flops_baseline:[5,5,1,""],flops_gain:[5,5,1,""],mas
k_type:[5,5,1,""],modified:[5,5,1,""],modifier_id:[5,5,1,""],nodes:[5,5,1,""],optim:[5,5,1,""],optim_id:[5,5,1,""],params:[5,5,1,""],params_baseline:[5,5,1,""],sparsity:[5,5,1,""],start_epoch:[5,5,1,""],update_frequency:[5,5,1,""]},"sparsify.models.projects_optimizations.ProjectOptimizationModifierQuantization":{DoesNotExist:[5,5,1,""],balance_perf_loss:[5,5,1,""],compression:[5,5,1,""],created:[5,5,1,""],end_epoch:[5,5,1,""],est_loss_sensitivity:[5,5,1,""],est_perf_sensitivity:[5,5,1,""],est_recovery:[5,5,1,""],est_time:[5,5,1,""],est_time_baseline:[5,5,1,""],est_time_gain:[5,5,1,""],filter_min_perf_gain:[5,5,1,""],filter_min_recovery:[5,5,1,""],flops:[5,5,1,""],flops_baseline:[5,5,1,""],flops_gain:[5,5,1,""],level:[5,5,1,""],modified:[5,5,1,""],modifier_id:[5,5,1,""],nodes:[5,5,1,""],optim:[5,5,1,""],optim_id:[5,5,1,""],params:[5,5,1,""],params_baseline:[5,5,1,""],start_epoch:[5,5,1,""]},"sparsify.models.projects_optimizations.ProjectOptimizationModifierTrainable":{DoesNotExist:[5,5,1,""],created:[5,5,1,""],end_epoch:[5,5,1,""],modified:[5,5,1,""],modifier_id:[5,5,1,""],nodes:[5,5,1,""],optim:[5,5,1,""],optim_id:[5,5,1,""],start_epoch:[5,5,1,""]},"sparsify.models.projects_profiles":{BaseProjectProfile:[5,3,1,""],ProjectLossProfile:[5,3,1,""],ProjectPerfProfile:[5,3,1,""]},"sparsify.models.projects_profiles.BaseProjectProfile":{DoesNotExist:[5,5,1,""],analysis:[5,5,1,""],created:[5,5,1,""],job:[5,5,1,""],job_id:[5,5,1,""],name:[5,5,1,""],profile_id:[5,5,1,""],source:[5,5,1,""]},"sparsify.models.projects_profiles.ProjectLossProfile":{DoesNotExist:[5,5,1,""],analysis:[5,5,1,""],created:[5,5,1,""],job:[5,5,1,""],job_id:[5,5,1,""],name:[5,5,1,""],profile_id:[5,5,1,""],project:[5,5,1,""],project_id:[5,5,1,""],projectoptimization_set:[5,5,1,""],pruning_estimation_type:[5,5,1,""],pruning_estimations:[5,5,1,""],pruning_structure:[5,5,1,""],quantized_estimation_type:[5,5,1,""],quantized_estimations:[5,5,1,""],source:[5,5,1,""]},"sparsify.models.projects_profiles.ProjectPerfProfile":{DoesNotExist:[5,5,1,""],analysis:[5,5,1,""],batch_size:[5,5,1,""],core_count:[5,5,1,""],created:[5,5,1,""],instruction_sets:[5,5,1,""],iterations_per_check:[5,5,1,""],job:[5,5,1,""],job_id:[5,5,1,""],name:[5,5,1,""],profile_id:[5,5,1,""],project:[5,5,1,""],project_id:[5,5,1,""],projectoptimization_set:[5,5,1,""],pruning_estimations:[5,5,1,""],quantized_estimations:[5,5,1,""],source:[5,5,1,""],warmup_iterations_per_check:[5,5,1,""]},"sparsify.models.utils":{database_setup:[5,1,1,""]},"sparsify.schemas":{errors:[6,0,0,"-"],helpers:[6,0,0,"-"],jobs:[6,0,0,"-"],model_repo:[6,0,0,"-"],projects:[6,0,0,"-"],projects_benchmarks:[6,0,0,"-"],projects_data:[6,0,0,"-"],projects_model:[6,0,0,"-"],projects_optimizations:[6,0,0,"-"],projects_profiles:[6,0,0,"-"],system:[6,0,0,"-"]},"sparsify.schemas.errors":{ErrorSchema:[6,3,1,""]},"sparsify.schemas.errors.ErrorSchema":{opts:[6,5,1,""]},"sparsify.schemas.helpers":{EnumField:[6,3,1,""],data_dump_and_validation:[6,1,1,""]},"sparsify.schemas.helpers.EnumField":{deserialize:[6,4,1,""]},"sparsify.schemas.jobs":{JobProgressSchema:[6,3,1,""],JobSchema:[6,3,1,""],ResponseJobSchema:[6,3,1,""],ResponseJobsSchema:[6,3,1,""],SearchJobsSchema:[6,3,1,""]},"sparsify.schemas.jobs.JobProgressSchema":{opts:[6,5,1,""]},"sparsify.schemas.jobs.JobSchema":{opts:[6,5,1,""]},"sparsify.schemas.jobs.ResponseJobSchema":{opts:[6,5,1,""]},"sparsify.schemas.jobs.ResponseJobsSchema":{opts:[6,5,1,""]},"sparsify.schemas.jobs.SearchJobsSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo":{ModelRepoArchitectureSchem
a:[6,3,1,""],ModelRepoDatasetSchema:[6,3,1,""],ModelRepoDomainSchema:[6,3,1,""],ModelRepoModelDescSchema:[6,3,1,""],ModelRepoModelMetricSchema:[6,3,1,""],ModelRepoModelPerfSchema:[6,3,1,""],ModelRepoModelSchema:[6,3,1,""],ResponseModelRepoModels:[6,3,1,""],SearchModelRepoModels:[6,3,1,""]},"sparsify.schemas.model_repo.ModelRepoArchitectureSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.ModelRepoDatasetSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.ModelRepoDomainSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.ModelRepoModelDescSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.ModelRepoModelMetricSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.ModelRepoModelPerfSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.ModelRepoModelSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.ResponseModelRepoModels":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.SearchModelRepoModels":{opts:[6,5,1,""]},"sparsify.schemas.projects":{CreateUpdateProjectSchema:[6,3,1,""],DeleteProjectSchema:[6,3,1,""],ProjectExtSchema:[6,3,1,""],ProjectSchema:[6,3,1,""],ResponseProjectDeletedSchema:[6,3,1,""],ResponseProjectExtSchema:[6,3,1,""],ResponseProjectSchema:[6,3,1,""],ResponseProjectsSchema:[6,3,1,""],SearchProjectsSchema:[6,3,1,""]},"sparsify.schemas.projects.CreateUpdateProjectSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.DeleteProjectSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.ProjectExtSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.ProjectSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.ResponseProjectDeletedSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.ResponseProjectExtSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.ResponseProjectSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.ResponseProjectsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.SearchProjectsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks":{CreateProjectBenchmarkSchema:[6,3,1,""],ProjectBenchmarkResultSchema:[6,3,1,""],ProjectBenchmarkResultsSchema:[6,3,1,""],ProjectBenchmarkSchema:[6,3,1,""],ResponseProjectBenchmarkDeletedSchema:[6,3,1,""],ResponseProjectBenchmarkSchema:[6,3,1,""],ResponseProjectBenchmarksSchema:[6,3,1,""],SearchProjectBenchmarksSchema:[6,3,1,""]},"sparsify.schemas.projects_benchmarks.CreateProjectBenchmarkSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks.ProjectBenchmarkResultSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks.ProjectBenchmarkResultsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks.ProjectBenchmarkSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks.ResponseProjectBenchmarkDeletedSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks.ResponseProjectBenchmarkSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks.ResponseProjectBenchmarksSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks.SearchProjectBenchmarksSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_data":{CreateUpdateProjectDataSchema:[6,3,1,""],ProjectDataSchema:[6,3,1,""],ResponseProjectDataDeletedSchema:[6,3,1,""],ResponseProjectDataSchema:[6,3,1,""],ResponseProjectDataSingleSchema:[6,3,1,""],SearchProjectDataSchema:[6,3,1,""],SetProjectDataFromSchema:[6,3,1,""]},"sparsify.schemas.projects_data.CreateUpdateProjectDataSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_data.ProjectDataSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_data.ResponseProjectDataDeletedSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_data.ResponseProjectDataSchema":{opts:[6,5,1,""]
},"sparsify.schemas.projects_data.ResponseProjectDataSingleSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_data.SearchProjectDataSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_data.SetProjectDataFromSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model":{CreateUpdateProjectModelSchema:[6,3,1,""],DeleteProjectModelSchema:[6,3,1,""],ProjectModelAnalysisSchema:[6,3,1,""],ProjectModelSchema:[6,3,1,""],ResponseProjectModelAnalysisSchema:[6,3,1,""],ResponseProjectModelDeletedSchema:[6,3,1,""],ResponseProjectModelSchema:[6,3,1,""],SetProjectModelFromSchema:[6,3,1,""]},"sparsify.schemas.projects_model.CreateUpdateProjectModelSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model.DeleteProjectModelSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model.ProjectModelAnalysisSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model.ProjectModelSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model.ResponseProjectModelAnalysisSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model.ResponseProjectModelDeletedSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model.ResponseProjectModelSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model.SetProjectModelFromSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations":{CreateProjectOptimizationSchema:[6,3,1,""],CreateUpdateProjectOptimizationModifiersLRScheduleSchema:[6,3,1,""],CreateUpdateProjectOptimizationModifiersPruningSchema:[6,3,1,""],CreateUpdateProjectOptimizationModifiersQuantizationSchema:[6,3,1,""],CreateUpdateProjectOptimizationModifiersTrainableSchema:[6,3,1,""],GetProjectOptimizationBestEstimatedResultsSchema:[6,3,1,""],ProjectAvailableModelModificationsSchema:[6,3,1,""],ProjectOptimizationModifierLRExponentialArgsSchema:[6,3,1,""],ProjectOptimizationModifierLRMultiStepArgsSchema:[6,3,1,""],ProjectOptimizationModifierLRScheduleSchema:[6,3,1,""],ProjectOptimizationModifierLRSchema:[6,3,1,""],ProjectOptimizationModifierLRSetArgsSchema:[6,3,1,""],ProjectOptimizationModifierLRStepArgsSchema:[6,3,1,""],ProjectOptimizationModifierPruningNodeSchema:[6,3,1,""],ProjectOptimizationModifierPruningSchema:[6,3,1,""],ProjectOptimizationModifierQuantizationNodeSchema:[6,3,1,""],ProjectOptimizationModifierQuantizationSchema:[6,3,1,""],ProjectOptimizationModifierTrainableNodeSchema:[6,3,1,""],ProjectOptimizationModifierTrainableSchema:[6,3,1,""],ProjectOptimizationSchema:[6,3,1,""],ResponseProjectOptimizationDeletedSchema:[6,3,1,""],ResponseProjectOptimizationFrameworksAvailableSamplesSchema:[6,3,1,""],ResponseProjectOptimizationFrameworksAvailableSchema:[6,3,1,""],ResponseProjectOptimizationModifierDeletedSchema:[6,3,1,""],ResponseProjectOptimizationModifiersAvailable:[6,3,1,""],ResponseProjectOptimizationModifiersBestEstimated:[6,3,1,""],ResponseProjectOptimizationSchema:[6,3,1,""],ResponseProjectOptimizationsSchema:[6,3,1,""],SearchProjectOptimizationsSchema:[6,3,1,""],UpdateProjectOptimizationSchema:[6,3,1,""]},"sparsify.schemas.projects_optimizations.CreateProjectOptimizationSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.CreateUpdateProjectOptimizationModifiersLRScheduleSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.CreateUpdateProjectOptimizationModifiersPruningSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.CreateUpdateProjectOptimizationModifiersQuantizationSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.CreateUpdateProjectOptimizationModifiersTrainableSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.GetProjectOp
timizationBestEstimatedResultsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectAvailableModelModificationsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierLRExponentialArgsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierLRMultiStepArgsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierLRScheduleSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierLRSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierLRSetArgsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierLRStepArgsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierPruningNodeSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierPruningSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierQuantizationNodeSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierQuantizationSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierTrainableNodeSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierTrainableSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationDeletedSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationFrameworksAvailableSamplesSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationFrameworksAvailableSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationModifierDeletedSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationModifiersAvailable":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationModifiersBestEstimated":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.SearchProjectOptimizationsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.UpdateProjectOptimizationSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles":{CreateProjectLossProfileSchema:[6,3,1,""],CreateProjectPerfProfileSchema:[6,3,1,""],ProjectLossProfileSchema:[6,3,1,""],ProjectPerfProfileSchema:[6,3,1,""],ProjectProfileAnalysisSchema:[6,3,1,""],ProjectProfileMeasurementSchema:[6,3,1,""],ProjectProfileMeasurementsSchema:[6,3,1,""],ProjectProfileModelOpsBaselineMeasurementsSchema:[6,3,1,""],ProjectProfileModelOpsMeasurementsSchema:[6,3,1,""],ProjectProfileOpBaselineMeasurementSchema:[6,3,1,""],ProjectProfileOpMeasurementsSchema:[6,3,1,""],ProjectProfileOpSchema:[6,3,1,""],ProjectProfileSchema:[6,3,1,""],ResponseProjectLossProfileSchema:[6,3,1,""],ResponseProjectLossProfilesSchema:[6,3,1,""],ResponseProjectPerfProfileSchema:[6,3,1,""],ResponseProjectPerfProfilesSchema:[6,3,1,""],ResponseProjectProfileDeletedSchema:[6,3,1,""],SearchProjectProfilesSchema:[6,3,1,""]},"sparsify.schemas.projects_profiles.CreateProjectLossProfileSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.CreateProjectPerfProfileSchema":{op
ts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectLossProfileSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectPerfProfileSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileAnalysisSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileMeasurementSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileMeasurementsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileModelOpsBaselineMeasurementsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileModelOpsMeasurementsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileOpBaselineMeasurementSchema":{dump_fields:[6,5,1,""],fields:[6,5,1,""],load_fields:[6,5,1,""],opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileOpMeasurementsSchema":{dump_fields:[6,5,1,""],fields:[6,5,1,""],load_fields:[6,5,1,""],opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileOpSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ResponseProjectLossProfileSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ResponseProjectLossProfilesSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ResponseProjectPerfProfileSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ResponseProjectPerfProfilesSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ResponseProjectProfileDeletedSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.SearchProjectProfilesSchema":{opts:[6,5,1,""]},"sparsify.schemas.system":{ResponseSystemInfo:[6,3,1,""],SystemInfo:[6,3,1,""],VersionInfoSchema:[6,3,1,""]},"sparsify.schemas.system.ResponseSystemInfo":{opts:[6,5,1,""]},"sparsify.schemas.system.SystemInfo":{opts:[6,5,1,""]},"sparsify.schemas.system.VersionInfoSchema":{opts:[6,5,1,""]},"sparsify.utils":{system:[7,0,0,"-"]},"sparsify.utils.system":{available_ml_engines:[7,1,1,""],get_ml_sys_info:[7,1,1,""],ml_engines_errors:[7,1,1,""]},"sparsify.workers":{base:[8,0,0,"-"],base_manager:[8,0,0,"-"],base_wrapper:[8,0,0,"-"],projects_benchmark:[8,0,0,"-"],projects_data:[8,0,0,"-"],projects_model:[8,0,0,"-"],projects_profiles:[8,0,0,"-"]},"sparsify.workers.base":{BaseJobWorker:[8,3,1,""],JobWorkerRegistryHolder:[8,3,1,""]},"sparsify.workers.base.BaseJobWorker":{format_args:[8,4,1,""],get_type:[8,4,1,""],job_id:[8,4,1,""],project_id:[8,4,1,""],run:[8,4,1,""]},"sparsify.workers.base.JobWorkerRegistryHolder":{REGISTRY:[8,5,1,""]},"sparsify.workers.base_manager":{JobCancelationFailureError:[8,2,1,""],JobNotFoundError:[8,2,1,""],JobWorkerManager:[8,3,1,""]},"sparsify.workers.base_manager.JobWorkerManager":{app_startup:[8,4,1,""],cancel_job:[8,4,1,""],refresh:[8,4,1,""]},"sparsify.workers.base_wrapper":{JobCancelError:[8,2,1,""],JobWorkerWrapper:[8,3,1,""]},"sparsify.workers.base_wrapper.JobWorkerWrapper":{cancel:[8,4,1,""],canceled:[8,4,1,""],canceling:[8,4,1,""],completed:[8,4,1,""],error:[8,4,1,""],errored:[8,4,1,""],job_id:[8,4,1,""],progress:[8,4,1,""],start:[8,4,1,""],started:[8,4,1,""],worker:[8,4,1,""]},"sparsify.workers.projects_benchmark":{CreateBenchmarkJobWorker:[8,3,1,""]},"sparsify.workers.projects_benchmark.CreateBenchmarkJobWorker":{batch_sizes:[8,4,1,""],benchmark_id:[8,4,1,""],core_counts:[8,4,1,""],format_args:[8,4,1,""],inference_models:[8,4,1,""],instruction_sets:[8,4,1,""],iterations_per_check:[8,4,1,""],model_id:[8,4,1,""],run:[8,4,1,""],warmup_iterations_per_check:[8,4,1,""]},"
sparsify.workers.projects_data":{DataFromPathJobWorker:[8,3,1,""],DataFromRepoJobWorker:[8,3,1,""]},"sparsify.workers.projects_data.DataFromPathJobWorker":{run:[8,4,1,""]},"sparsify.workers.projects_data.DataFromRepoJobWorker":{run:[8,4,1,""]},"sparsify.workers.projects_model":{ModelFromPathJobWorker:[8,3,1,""],ModelFromRepoJobWorker:[8,3,1,""]},"sparsify.workers.projects_model.ModelFromPathJobWorker":{run:[8,4,1,""]},"sparsify.workers.projects_model.ModelFromRepoJobWorker":{run:[8,4,1,""]},"sparsify.workers.projects_profiles":{CreateLossProfileJobWorker:[8,3,1,""],CreatePerfProfileJobWorker:[8,3,1,""]},"sparsify.workers.projects_profiles.CreateLossProfileJobWorker":{format_args:[8,4,1,""],model_id:[8,4,1,""],profile_id:[8,4,1,""],pruning_estimation_type:[8,4,1,""],pruning_estimations:[8,4,1,""],pruning_structure:[8,4,1,""],quantized_estimations:[8,4,1,""],run:[8,4,1,""]},"sparsify.workers.projects_profiles.CreatePerfProfileJobWorker":{batch_size:[8,4,1,""],core_count:[8,4,1,""],format_args:[8,4,1,""],iterations_per_check:[8,4,1,""],pruning_estimations:[8,4,1,""],quantized_estimations:[8,4,1,""],run:[8,4,1,""],warmup_iterations_per_check:[8,4,1,""]},sparsify:{app:[1,0,0,"-"],blueprints:[2,0,0,"-"],log:[1,0,0,"-"],models:[5,0,0,"-"],schemas:[6,0,0,"-"],utils:[7,0,0,"-"],workers:[8,0,0,"-"]}},objnames:{"0":["py","module","Python module"],"1":["py","function","Python function"],"2":["py","exception","Python exception"],"3":["py","class","Python class"],"4":["py","method","Python method"],"5":["py","attribute","Python attribute"]},objtypes:{"0":"py:module","1":"py:function","2":"py:exception","3":"py:class","4":"py:method","5":"py:attribute"},terms:{"100":4,"404":4,"5543":11,"975":4,"abstract":[5,8],"case":[15,16,19,24],"class":[4,5,6,8],"default":[4,5,11,16,17],"enum":[5,6],"export":[9,14,15,21,22,24,25],"final":[4,12,19],"float":[4,5,16,17,24],"function":[4,5,7],"import":[7,18],"int":[1,3,4,8],"long":[17,24],"new":[4,8,9,14,16,20,25],"null":5,"public":[8,11],"return":[1,4,5,6,7,8,15,16,22],"true":[4,8],"try":[18,22],"while":[6,8,14,17,19,24],Adding:[9,16,25],And:[15,24],For:[11,14,15,16,17,18,19,20,22,24],One:24,Such:7,The:[6,8,11,13,15,16,17,18,19,20,21,22,24],Then:[16,17,18,20,22],There:[18,19],These:[16,19,21],Use:[6,12,14],Used:5,Using:20,Will:4,_dataloaderjobwork:8,_hidden:5,_modelloaderjobwork:8,abl:4,about:[9,13,17,18,25],acceler:12,accept:[11,15,22],access:[8,11,19,23],accordingli:21,accuraci:[9,12,19,24],achiev:[9,12,17,24],across:[17,19,24],activ:[14,19,24],add:[4,17,18,19,20],added:8,addit:[8,11,14,16,19,24,25],addition:[9,11],address:11,adjust:19,advanc:12,affect:[14,15,16,17,18,19,24],after:[4,6,8,11,14,18,19,20,21,22,24],again:[14,20],algorithm:[9,11,14,15],alia:[4,5],all:[1,4,5,6,8,12,16,17,19,20,22,24],allow:[11,23],along:[1,11],alpha:12,alphanumer:15,alreadi:[18,19,20],also:[16,17,18,20,24],alwai:[4,17],amount:24,analysi:[4,5,6,8,11,13,14,15,24],analyz:[9,12,13,19,25],ani:[3,4,6,7,8,9,12,14,15,16,21,23,24],anoth:20,anyth:[6,8],anywher:15,api:[2,9],app:[0,5,6,8,9],app_startup:8,appear:17,appli:[4,14,16,17,19,24],applic:2,apply_node_overrid:4,appropri:[8,20],approxim:[16,18,24],architect:17,architectur:[6,16,18,24],area:[13,17],arg:[4,5,6,8],argument:6,art:9,artifici:15,assign:4,associ:[4,6],attr:[6,8],attribut:[6,24],autofield:5,automat:[4,14,15,19],automl:[9,11],avail:[4,6,7,8,11,13,17,24],available_ml_engin:7,averag:[18,19,20,24],avoid:12,avx2:24,avx512:24,awai:24,awar:11,back:[4,15,16,17],background:8,balanc:[4,19],balance_perf_loss:[4,5],bar:[13,14,15,17,18,23],base:[0,1,4
,6,7,15,17,20,21],base_manag:[0,1],base_wrapp:[0,1],basecreatedmodifiedmodel:5,basecreatedmodifiedmodeldoesnotexist:5,basejobwork:8,baselin:[4,6,11,17,18,20,24],baseline_spars:4,basemodel:5,basemodeldoesnotexist:5,baseprofilejobwork:8,baseprojectmodel:5,baseprojectmodeldoesnotexist:5,baseprojectprofil:5,baseprojectprofile_set:5,baseprojectprofiledoesnotexist:5,basi:[17,19],basic:12,batch:[8,15,16,17,20,24],batch_siz:[3,5,8],batchnorm:24,becaus:[16,18],been:[8,24],befor:[4,8,15,24],begin:[8,11,12,14,15,24],being:[8,16],belong:8,below:[11,19,20],benchmark:[1,2,4,5,6,8,9,12,14,16,19,24,25],benchmark_id:[4,5,8],best:[4,6,9,11,12,19],better:[11,17,19,24],between:[9,12,18,19],bit:24,black:13,block:8,block_4:8,blog:9,blueprint:[0,1],bool:[1,4,6,8],booleanfield:5,both:[11,16,17],bottom:11,brows:[11,15],browser:[11,15],bug:[9,12],build:9,busi:[20,22],button:[11,13,19,20,21,23],calcul:4,call:[4,8],callabl:[3,8],callback:8,can:[1,5,8,9,11,14,15,16,17,18,19,20,21,22,23,24,25],cancel:[5,8,15],cancel_job:8,cannot:11,chanc:[18,19,24],chang:[14,15,16,17,19,20,21,23],charact:15,charfield:5,check:[8,10,11,12,20],choic:[5,19],choos:[11,21],chronolog:20,classif:16,classmethod:8,clear:8,click:[11,15,16,17,18,19,20,21,23],clipboard:22,close:[19,24],code:[1,6,8,9,11,14,19,21,24,25],code_sampl:[1,2],collat:5,color:14,column_nam:5,combin:[9,12],come:17,command:[11,13,15],common:[11,16],compar:[4,9,19,24,25],comparison:[8,20],compat:13,complet:[5,8,11,15,16,18,24],compress:[5,12,14,18],comput:[15,17,24],concept:[9,22,23,25],condit:20,confid:[4,18,19,24],config:[4,9,21,25],config_path:3,configur:[8,11,14,15,19,21,22],confirm:11,consid:16,consider:16,consist:[1,17,18,19],consol:11,constant:17,constraint:5,constructor:8,consult:15,contain:[4,5,6,7,8,9,12],content:[0,9],context:6,continu:[11,15,16,17,18,19,20,24],contribut:[15,24],control:[19,22,24],conv:24,convers:17,convert:11,convolut:[4,17,24],copi:[8,11,22],core:[7,8,15,16,17,20,24],core_count:[5,8],correct:5,correctli:[4,11],correl:18,correspond:[18,19,21],cost:[4,24],could:8,count:[8,9,16,17,20,24,25],cpu:[9,16,17,20,24],creat:[1,4,5,6,9,11,13,14,19,20,25],create_config:4,createbenchmarkjobwork:8,createlossprofilejobwork:8,createperfprofilejobwork:8,createprojectbenchmarkschema:6,createprojectlossprofileschema:6,createprojectoptimizationschema:6,createprojectperfprofileschema:6,createupdateprojectdataschema:6,createupdateprojectmodelschema:6,createupdateprojectoptimizationmodifierslrscheduleschema:6,createupdateprojectoptimizationmodifierspruningschema:6,createupdateprojectoptimizationmodifiersquantizationschema:6,createupdateprojectoptimizationmodifierstrainableschema:6,createupdateprojectschema:6,creation:11,criteria:[20,22],csv:5,csvfield:5,csvfloatfield:5,csvintfield:5,current:[4,6,7,8,11,12,14,15,17,18,19,21,24],custom:[6,19],cut:[15,24],data:[2,3,4,5,6,8,15,24],data_dump_and_valid:6,data_id:[4,5,8],data_path:4,dataabas:8,databas:[5,6,8],database_setup:5,datafrompathjobwork:8,datafromrepojobwork:8,dataset:[3,6],date:5,datetimefield:5,db_column:5,db_valu:5,debian:10,debug:1,deep:[9,12],deeper:[13,17],deepspars:[7,9,13,15,17,24],default_epochs_distribut:4,default_pruning_set:4,default_train:4,defin:[17,18,24],degre:11,delet:[5,6,23],delete_filesystem:5,deleteprojectmodelschema:6,deleteprojectschema:6,deliv:9,dens:17,depend:[16,17,20],deploi:[9,12,16],deploy:[12,16],depth:[10,11,24],desc:6,describ:[16,19,20,22],descript:[5,15,21],deseri:6,desir:11,detail:[4,12,13,14,16,19,24],detect:16,determin:[15,16,17,19,20,24],dev:9,devic:3,dialog:[15,16,17,18,19,23],dict:[
4,6,7,8],dictionari:[4,6,7,8],did:[18,22,24],differ:[11,17,19,20,22],differenti:15,dir_path:5,dir_siz:5,directori:5,disabl:[16,20],discuss:12,disk:22,displai:[11,15,16,17,19,20,23,24,25],distribut:4,document:[9,11,15],doe:[15,24],doesnotexist:5,doing:[8,20],domain:[6,16],done:[18,19,24],done_callback:8,down:[17,20],download:[8,11],drag:15,dramat:24,drill:17,drop:20,dump:6,dump_field:6,dump_onli:6,duplic:12,dure:[13,14,15,18,19,21,22,24],each:[4,8,14,16,17,18,19,24],easi:[11,12],easili:[11,15],ecosystem:15,edit:[11,19,23],editor:19,effect:[4,11,14,15,16,17,18],effici:17,either:[8,11,15,17,20],emgin:15,emploi:24,empti:15,enabl:[9,11,14,15,17,20,24],encod:22,encount:[4,6,7,8],end:[4,9,15,19,24],end_epoch:[4,5],engin:[7,9,12,13,17,24,25],ensur:13,enter:[11,15,16,17,18,19,23],entir:[17,18],entri:11,enum_class:6,enumer:5,enumfield:6,environ:[10,11,12],epoch:[4,19,24],equat:17,error:[0,1,4,5,7,8,12],errorschema:6,est_loss_sensit:5,est_perf_sensit:5,est_recoveri:5,est_tim:5,est_time_baselin:5,est_time_gain:5,establish:19,estim:[4,6,11,14,15,17,18,19,20,24],etc:[4,7],eval_baselin:4,eval_dict:4,eval_prun:4,eval_sensitivity_spars:4,evalu:4,even:17,event:6,eventu:16,everi:19,exact:18,examin:20,exampl:[12,14,15,16,17,18,19,20,22,24],except:[4,7,8],exchang:[15,24],exclud:6,execut:[16,24],exist:[5,9,12,14,20,21,25],exit:8,expect:[4,5,6,19],experi:[10,12],explor:[21,22],exponenti:6,extend:[5,8],extens:17,extern:1,extract:8,factor:17,fall:4,fals:[4,5,6,8],fast:[9,16,17,24],faster:[14,17,19,24],fastest:17,featur:[9,12,15,22,23,25],fed:24,feedback:[9,19,25],few:[9,11],fewer:[17,18,24],field:[4,5,6,8,11,15],field_nam:6,field_typ:5,file:[1,2,5,6,8,9,11,12,14,15,19,21,25],file_path:5,filestorag:5,fill:11,filter:[6,19],filter_min_perf_gain:[4,5],filter_min_recoveri:[4,5],filter_min_spars:[4,5],final_lr:5,find:11,fine:[4,19],fine_tuning_epoch:4,fine_tuning_start_epoch:4,finish:11,first:8,five:14,fix:20,flask:[2,4,5,6],floatfield:5,flop:[4,5,16,17,19,24],flops_baselin:5,flops_gain:5,flow:[8,11,14,21,22,24],focus:[11,24],folder:5,follow:[11,12,13,15,17,18,19,24],foreignkeyfield:5,format:[1,8,11,15,22],format_arg:8,found:[4,8,9],framework:[4,6,11,12,17,21],frequenc:19,from:[1,4,5,6,8,9,10,11,12,15,16,17,18,19,20,23,24,25],further:[11,15,16,18],futur:[11,14,18,19,24],gemm:24,gener:[4,9,14,16,17,19,21,22,23,24],get:[4,6,13,14,15,17,19,20],get_main_logg:1,get_ml_sys_info:7,get_profiles_by_id:4,get_project_benchmark_by_id:4,get_project_by_id:4,get_project_data_by_id:4,get_project_model_by_project_id:4,get_project_optimizer_by_id:4,get_root_logg:1,get_typ:8,getprojectoptimizationbestestimatedresultsschema:6,github:[9,12],give:19,given:[4,5,8,11,24],global_end_epoch:4,global_start_epoch:4,globalaveragepool:24,goal:[12,14,15,19,21],going:[15,17,24],good:5,grai:16,grain:19,graph:[17,18,19,20,24],greater:[4,19,24],group:4,guid:[9,10,11,15],handl:[1,2,5,6,8],happen:[11,19],has:[8,17,18,24],have:[4,5,8,11,18,20,24],held:17,help:[9,17,19,25],help_text:5,helper:[0,1,2],here:[20,24],higher:[19,24],highli:9,home:11,host:[1,9,11],how:[14,15,16,17,18,19,21,24],howev:19,http:11,httpnotfounderror:4,icon:[17,18,19],identifi:[16,17,18],ids:4,imag:16,implement:[8,24],improv:[12,14],includ:[5,6,7,8,9,12,14,17,18,21,22,23,24],inclus:11,increas:17,increasingli:19,independ:24,index:5,index_typ:5,indic:[14,16,17,18,19,20,24],individu:24,industri:[9,12],infer:[7,8,9,11,14,15,16,17,24,25],inference_engin:8,inference_model:[5,8],inference_model_optim:8,info:[1,4,6,7],inform:[8,9,11,12,14,15,16,17,18,19,20,22,23,24,25],init:5,init_lr:5,initi:[4,5,
13,19,21],input:[4,6,16,24],insight:[9,12],instal:[9,11,12,25],instanc:8,instant:19,instead:[4,17,19],instruct:[7,8,15,17,24],instruction_set:[5,8],integ:5,integerfield:5,integr:[9,11,12,13,19,20,22,25],intellig:15,intens:[17,24],interact:11,intern:24,invalid:6,invok:[4,8],involv:14,issu:12,item:[17,24],iter:8,iterations_per_check:[5,8],its:[4,6,11],job:[0,1,8],job_id:[5,8],jobcancelationfailureerror:8,jobcancelerror:8,jobdoesnotexist:5,jobnotfounderror:8,jobprogressschema:6,jobschema:6,jobstatu:5,jobstatusfield:5,jobworkermanag:8,jobworkerregistryhold:8,jobworkerwrapp:8,join:4,json_dump:5,json_load:5,jsonfield:5,just:[15,16],keep:10,kei:[6,9,22,23,25],keyword:6,know:[15,24],kwarg:[5,6,8],lai:19,larg:[5,16],larger:[17,24],last:18,latenc:16,later:[8,19],latest:[12,14,19,24],launch:[8,9,11,25],layer:[9,14,15,16,19,24,25],learn:[5,12,14,22,24,25],least:18,left:[11,13,15,19,23],less:[4,19],level:[1,4,5,19],librari:9,light:12,like:16,limit:[11,17],line:[9,11],linux:10,list:[4,5,7,8,13,14,15,17,18,19,24],listobjfield:5,load:[5,6,9,11,15,17],load_field:6,load_onli:6,loadabl:6,local:[5,8,9,11,15],locat:[5,11],log:[0,9,24],logger:1,logging_level:1,longer:[17,24],look:[15,16,17,24],loss:[2,3,4,5,6,8,9,11,12,14,15,16,17,19,24,25],loss_analysi:4,loss_estim:4,loss_metr:4,loss_rescal:4,losswrapp:3,low:24,lower:[11,22],lr_mod:[4,5],lr_sched:4,lr_schedule_modifi:5,ma_field:6,machin:[11,24],magic:[8,9,12,13,17,24],mai:[5,11,12,16,17,18,20,24],main:[1,11],maintain:12,major:17,make:[2,14,18,19,23,24],manag:[8,24],mani:[6,14,16,18,24],map:6,mark:8,marshmallow:6,mask_typ:[4,5],matter:4,max_node_spars:4,maxim:[4,13],maximum:8,maxpool:24,mean:24,measur:[4,6,8,14,16,17,18,19,20,24],memori:24,mention:[22,23],menu:20,messag:[12,16],metadata:[6,24],metric:[4,6,11,16,20],might:[14,16,17,18,20],millisecond:[17,24],minim:[14,21],minimum:19,minut:9,miss:6,ml_engines_error:7,mod:4,mod_end_epoch:4,mod_start_epoch:4,modal:11,model:[0,1,2,3,4,6,8,9,12,13,14,15,20,21,22,24,25],model_analysi:4,model_id:[5,8],model_path:4,model_repo:[0,1],modelfrompathjobwork:8,modelfromrepojobwork:8,modelrepoarchitectureschema:6,modelrepodatasetschema:6,modelrepodomainschema:6,modelrepomodeldescschema:6,modelrepomodelmetricschema:6,modelrepomodelperfschema:6,modelrepomodelschema:6,modif:19,modifi:[2,4,5,6,14,21,22,24,25],modifier_id:5,modul:[0,9],more:[11,13,17,18,19,24],most:[11,17,18,20],move:[11,19],much:[11,15,16,18,19,24],multi:6,multipl:[5,6,13,15,16,19,20],must:[4,5,8,11,19],name:[4,5,8,15,16,17,18,24],namespac:1,navig:[11,13,14,15,17,18,23],need:[9,11,12,14,15,19,21],nest:5,network:[1,9,11,12,15,18,24],neural:[1,8,9,12,13,15,17,24],next:[8,12,13,14,15,16,17,18,19,20,21,22,23,24],nightli:9,node:[4,5,6,17,24],node_id:4,node_overrid:4,none:[4,5,6,8],note:[5,11,15,16,17,18,19,20],notic:19,npz:8,number:[4,7,8,16,18,19,24],numer:16,object:[4,5,6,8,16],occur:[6,19],offici:9,offlin:16,often:19,oldest:8,onc:[8,11,14,15,19],one:[5,8,11,15,19,24],one_shot:8,onli:[4,5,6,8,11,15,19],onlin:16,onnx:[7,8,11,14,15,17,20,24],onscreen:12,open:[9,11,14,19,24,25],oper:[9,12,16,17,24],ops:[6,24],opt:[6,16],optim:[1,2,3,4,5,6,9,12,13,15,16,17,18,20,21,24,25],optim_const:3,optim_id:[4,5],optim_lr_sched_default_mod:4,optim_lr_sched_updat:4,optim_pruning_updat:4,optim_trainable_default_nod:4,optim_trainable_updat:4,optim_updat:4,optim_validate_and_get_project_by_id:4,optimepoch:4,option:[1,4,5,6,8,11,12,13,14,16,17,19,20],order:[10,20,24],origin:[4,11,15,17,18,19,20,23,24],ort:[17,24],ort_cpu:7,ort_gpu:7,other:[1,17,19,24],otherwis:[4,8,19],out:[8,10,11,15,16,1
9,24],overrid:[4,5,22],overridden:4,overview:[13,25],own:10,packag:[0,9,11],page:[11,13,15],param:[4,5,9,19,24,25],paramet:[1,4,5,6,8,14,16,18,24],params_baselin:5,part:[15,21,24],partial:6,pass:[6,15,24],path:[5,6,8,11,12,15],peewe:5,pend:[5,8],per:[16,17,19,24],percentag:[17,24],perf:[4,5,6,8],perf_analysi:4,perf_rescal:4,perform:[2,4,5,6,8,9,11,12,13,14,15,16,18,19,24,25],performance_metr:4,perhap:22,pip:[10,13],pipelin:[9,11],place:[11,19],plan:12,platform:21,playhous:5,pleas:12,point:[8,11,16,17,24],pool:24,popup:11,port:[1,11],portion:[1,17],possibl:[17,19,24],post:12,potenti:[7,9,12,14,17],practic:14,practition:[9,12],present:24,preset:19,previou:15,primary_kei:5,problem:17,procedur:15,process:[9,12,19,21,22,24],product:12,profil:[2,4,5,6,8,9,11,14,15,19,24,25],profile_id:[5,8],profile_loss:[4,5],profile_loss_id:[4,5],profile_perf:[4,5],profile_perf_id:[4,5],profiles_loss:5,profiles_perf:5,program:24,progress:[5,6,8],project:[0,1,8,9,14,16,17,18,22,23,24,25],project_data:8,project_id:[4,5,8],projectavailablemodelmodificationsschema:6,projectbenchmark:[4,5],projectbenchmark_set:5,projectbenchmarkdoesnotexist:5,projectbenchmarkresultschema:6,projectbenchmarkresultsschema:6,projectbenchmarkschema:6,projectdata:[4,5],projectdata_set:5,projectdatadoesnotexist:5,projectdataschema:6,projectdoesnotexist:5,projectextschema:6,projectlossprofil:[4,5],projectlossprofile_set:5,projectlossprofiledoesnotexist:5,projectlossprofileschema:6,projectmodel:[4,5],projectmodel_set:5,projectmodelanalysisschema:6,projectmodeldoesnotexist:5,projectmodelschema:6,projectoptim:[4,5],projectoptimization_set:5,projectoptimizationdoesnotexist:5,projectoptimizationmodifierestimationsschema:6,projectoptimizationmodifierlrexponentialargsschema:6,projectoptimizationmodifierlrmultistepargsschema:6,projectoptimizationmodifierlrschedul:[4,5],projectoptimizationmodifierlrscheduledoesnotexist:5,projectoptimizationmodifierlrscheduleschema:6,projectoptimizationmodifierlrschema:6,projectoptimizationmodifierlrsetargsschema:6,projectoptimizationmodifierlrstepargsschema:6,projectoptimizationmodifierprun:[4,5],projectoptimizationmodifierpruningdoesnotexist:5,projectoptimizationmodifierpruningnodemetadataschema:6,projectoptimizationmodifierpruningnodeschema:6,projectoptimizationmodifierpruningschema:6,projectoptimizationmodifierquant:5,projectoptimizationmodifierquantizationdoesnotexist:5,projectoptimizationmodifierquantizationnodeschema:6,projectoptimizationmodifierquantizationschema:6,projectoptimizationmodifiertrain:[4,5],projectoptimizationmodifiertrainabledoesnotexist:5,projectoptimizationmodifiertrainablenodeschema:6,projectoptimizationmodifiertrainableschema:6,projectoptimizationschema:6,projectperfprofil:[4,5],projectperfprofile_set:5,projectperfprofiledoesnotexist:5,projectperfprofileschema:6,projectprofileanalysisschema:6,projectprofilemeasurementschema:6,projectprofilemeasurementsschema:6,projectprofilemodelopsbaselinemeasurementsschema:6,projectprofilemodelopsmeasurementsschema:6,projectprofileopbaselinemeasurementschema:6,projectprofileopmeasurementsschema:6,projectprofileopschema:6,projectprofileschema:6,projects_benchmark:[0,1],projects_data:[0,1],projects_model:[0,1,4],projects_optim:[0,1],projects_optimizations_prun:[1,2],projects_profil:[0,1,4],projectschema:6,proper:[8,11],properli:11,properti:[4,5,8],provid:[4,7,11,12,14,16,17,18,19,22],prunabl:[4,18],prunable_param:4,prune:[4,5,6,8,14,16,17,18,22,24,25],pruning_end_epoch:4,pruning_epoch:4,pruning_estim:[5,8],pruning_estimation_typ:[5,8],pruning_modifi:5,pruning
_set:4,pruning_start_epoch:4,pruning_structur:[5,8],pruning_update_frequ:4,pruningmodelevalu:4,pruningnodeevalu:4,pruningset:4,put:14,pypi:9,python:10,python_valu:5,pytorch:[3,15,17,21],pytorch__integr:[1,2],pytorch__train:[1,2],quantiz:[5,6,8,14,18,19,24],quantization_modifi:5,quantized_estim:[5,8],quantized_estimation_typ:5,queri:6,quick:9,quickli:[14,16],rais:[4,6,8],raise_not_found:4,ran:[17,19,20,24],rang:[18,19,24],rapidli:[9,12],rate:[5,22,24,25],rather:[14,15,19,20,24],raw:6,read:11,readi:[16,17,18,19],real:16,recent:20,recip:9,recommend:[4,10],recov:[18,19,22,24],recoveri:[4,11,18,19,24],redistribut:19,reduc:[18,24],reduct:[18,24],redund:18,refer:15,referenc:16,reflect:23,refresh:8,registri:8,rel:[18,19],relat:[2,6,8],releas:[11,12,18],relev:24,relu:24,remot:[11,15],remov:[9,19,24,25],repo:[2,6,8],report:6,repositori:[9,10,11],repres:[17,19],reproduc:12,request:[2,9,12],requir:[6,11,12,16,19,24],rescal:4,research:[9,12],respond:[14,18,24],respons:6,responsejobschema:6,responsejobsschema:6,responsemodelrepomodel:6,responseprojectbenchmarkdeletedschema:6,responseprojectbenchmarkschema:6,responseprojectbenchmarksschema:6,responseprojectdatadeletedschema:6,responseprojectdataschema:6,responseprojectdatasingleschema:6,responseprojectdeletedschema:6,responseprojectextschema:6,responseprojectlossprofileschema:6,responseprojectlossprofilesschema:6,responseprojectmodelanalysisschema:6,responseprojectmodeldeletedschema:6,responseprojectmodelschema:6,responseprojectoptimizationdeletedschema:6,responseprojectoptimizationframeworksavailablesamplesschema:6,responseprojectoptimizationframeworksavailableschema:6,responseprojectoptimizationmodifierdeletedschema:6,responseprojectoptimizationmodifiersavail:6,responseprojectoptimizationmodifiersbestestim:6,responseprojectoptimizationschema:6,responseprojectoptimizationsschema:6,responseprojectperfprofileschema:6,responseprojectperfprofilesschema:6,responseprojectprofiledeletedschema:6,responseprojectschema:6,responseprojectsschema:6,responsesysteminfo:6,restructur:24,result:[4,5,6,9,11,15,18,19,20,22,25],retain:16,retrain:[11,14,17,18,19,24],retriev:[4,8],review:[9,14,16,22,23,25],rewrit:21,right:[11,13,19,20],root:[1,5],root_path:5,rough:18,rout:[2,6],rule:11,run:[1,6,8,9,11,13,14,15,16,17,18,22,24,25],runtim:[17,20,24],same:[15,20],sampl:[5,6,11,24],satisfi:[14,19],save:[5,8,19,22,23],scale:[9,12,20],scenario:20,schedul:[4,5,6,19,24],schema:[0,1],schemaopt:6,scheme:16,screen:[9,11,15,17,19,21,25],screenshot:12,script:[1,11],scroll:19,search:6,searchjobsschema:6,searchmodelrepomodel:6,searchprojectbenchmarksschema:6,searchprojectdataschema:6,searchprojectoptimizationsschema:6,searchprojectprofilesschema:6,searchprojectsschema:6,second:[16,17,24],section:[14,17,18,19,23],see:[11,16,17,18,19,20,24],select:[11,15,16,17,19,20,24],sens:19,sensit:[4,9,11,14,24,25],separ:[8,11,24],sequenc:[5,6,24],sequenti:24,seri:4,serial:6,serv:2,server:[1,2,5,6,8,11,15],set:[1,4,5,6,7,8,9,11,14,15,17,18,19,20,22,24,25],set_logging_level:1,setprojectdatafromschema:6,setprojectmodelfromschema:6,setup:[1,2,3,4,5,16,17,18,24],setup_filesystem:5,sever:15,share:12,shot:11,should:[4,8,16,19,22,24],show:[9,11,14,16,17,18,19,20,24],shown:[11,17],shuffl:[17,24],side:15,signific:[17,18],significantli:[18,24],simpl:[9,14],simpli:[15,24],simplifi:[9,12],singl:[6,9,13,24,25],size:[5,8,9,12,15,16,17,18,20,24],slide:[9,12],slider:19,smaller:[14,17,24],smallest:24,softmax:24,softwar:12,some:[6,11],sort:20,sourc:[1,3,4,5,6,7,8,15],space:15,spars:[9,14,17,19],sparse_cost:4,sparse_training_
avail:4,sparseml:[3,9,11,15,21],sparsezoo:9,sparsif:24,sparsifi:[10,11,15,16,17,18,21,22,23,24,25],sparsiti:[4,5,19,22,24],special:15,specif:[4,6,17,20,24],specifi:[11,18,19,20,24],speed:[9,11,12,17],speedup:[17,19,24],spent:17,sqlite_ext:5,stabil:19,stabilization_epoch:4,stabl:9,stage:19,standard:1,start:[4,5,8,9,11,15,19,20,24,25],start_epoch:[4,5],start_fine_tuning_epoch:4,startup:8,state:[5,8,9],statu:5,step:[4,6,8,12,13,14,16,17,18,19,20,21,22,23,24],storag:5,store:[5,6,8],str:[1,3,4,5,6,7,8],string:[5,6],structur:4,structurally_prun:4,sub:8,subclass:8,subgraph:17,submit:12,submodul:[0,9],subpackag:[0,9],subsequ:8,substitut:11,suggest:12,suit:12,summari:[9,12,24,25],support:[9,12],sure:12,system:[0,1,5,8,10,11,16,19,22,24],systeminfo:6,tabl:19,take:[16,17,19,24],taken:19,tar:8,target:[11,16],techniqu:[12,14,19,24],tell:24,tensor:3,tensorflow:[15,17,21],tensorflow__integr:[1,2],term:[9,22,23,25],termin:11,test:10,textfield:5,than:[4,14,15,19,20,24],thei:[14,16,20,24],them:8,theoret:[16,17,24],therefor:[11,16,17],thi:[4,8,9,10,11,14,15,16,17,18,19,20,21,22,23,24,25],those:[11,17,18,19,24],thread:[8,24],three:[14,17,18,19,20,23],through:[8,11,17,19,24],throughout:[15,16,19,22,23],throughput:[16,17],tied:[17,19],time:[4,8,14,15,16,17,19,24],timestamp:5,to_dict_valu:4,took:[17,24],tool:[9,12,24],tooltip:19,top:19,torch:3,total:[18,24],tour:[9,10],track:5,train:[3,4,9,11,14,21,22,23,24,25],train_dataset:3,train_setup:3,trainabl:[4,5,6],trainable_modifi:5,training_epoch:[4,5],training_final_lr:4,training_init_lr:4,training_lr_fin:5,training_lr_init:5,training_optim:5,transfer:[14,19],tune:[4,19],tupl:4,twice:19,two:[17,18,19,20],type:[4,5,6,8,11,13,17,18,19,24],type_:5,typic:24,ui_path:1,ultim:9,uncomplet:8,under:[8,11],understand:15,unindex:5,union:[3,4,6,8],uniqu:[5,15,24],unknown:6,unpreced:9,unspecifi:15,unstructur:8,unsur:16,until:8,updat:[4,5,6,8,19],update_frequ:[4,5],updateprojectoptimizationschema:6,upload:[6,11,14,15],upper:[11,13],uri:[6,8],url:[6,8,11,15],use:[4,6,8,11,12,15,16,19,20,22,24],used:[1,5,6,11,12,15,16,17,18,19,20,22,24],user:[4,9,10,11,12,24],uses:24,using:[10,11,12,14,15,19,20,24],util:[0,1,2,3,17,24],val_dataset:3,vale:4,valid:[4,5,6,7],validate_filesystem:5,validate_model_data:4,validate_pruning_nod:4,validationerror:6,valu:[4,5,6,7,14,17,18,19,20,22,24],valuabl:[15,24],valuerescal:4,varchar:5,vari:17,variou:[16,17,24],verbose_nam:5,veri:18,version:[12,14,15,20],versioninfoschema:6,via:9,view:[16,24],virtual:10,visit:11,visual:[9,12,17,19],vnni:24,wai:[12,15,16,17,19,24],want:[14,15,16,17,18,19,20,24],warmup:8,warmup_iterations_per_check:[5,8],web:11,websit:9,week:9,weight:[4,17,18,24],weight_magnitud:8,welcom:[9,25],well:[16,17,18,24],went:17,were:[17,18,24],what:[4,17,19,24],when:[4,7,14,15,16,17,18,19,24],where:[5,9,11,17,24],which:[13,15,16,17,18,19,20,24],who:[9,12],width:24,window:11,within:[8,24],without:[8,14,16,17,24],won:8,work:[5,6,8,14,21],worker:[0,1,6],worker_arg:5,workflow:[9,11,14],working_dir:[1,3,5],would:[5,17,18,22],wrapper:8,yaml:4,yet:20,yml:[14,21,22],you:[9,11,12,14,15,16,17,18,19,20,21,22,23,24,25],your:[9,10,11,12,13,14,15,17,18,19,20,21,22,23,24],zero:18},titles:["sparsify","sparsify package","sparsify.blueprints package","sparsify.blueprints.code_samples package","sparsify.blueprints.utils package","sparsify.models package","sparsify.schemas package","sparsify.utils package","sparsify.workers package","Sparsify 0.1","Installation","Quick Tour","Welcome to Sparsify","Installing and Launching Sparsify","Sparsify 
Overview","Analyze","Profiling Your Model","Reviewing Performance Profiles","Reviewing Loss Profiles","Optimize","Benchmarking","Integrate","Optimization Config File and Code for Optimization","Settings","Key Concepts/Features/Terms","User Guide"],titleterms:{"export":[11,19],"new":[11,13,15,17,18],Adding:[17,18],about:12,addit:13,analyz:[11,14,15],app:1,base:[5,8],base_manag:8,base_wrapp:8,benchmark:20,blueprint:[2,3,4],can:13,code:22,code_sampl:3,compar:20,concept:24,config:22,content:[1,2,3,4,5,6,7,8],count:18,creat:15,displai:13,engin:20,error:[2,6],exist:[13,15],featur:24,feedback:12,file:22,from:13,guid:[12,25],help:12,helper:[4,6],histori:9,infer:20,inform:13,instal:[10,13],integr:[14,21],job:[2,5,6],kei:24,launch:13,layer:[17,18],learn:[9,19],log:1,loss:18,model:[5,11,16,17,18,19],model_repo:[2,6],modifi:19,modul:[1,2,3,4,5,6,7,8],more:9,open:[13,15],optim:[11,14,19,22],overview:[9,14],packag:[1,2,3,4,5,6,7,8],param:18,perform:17,product:9,profil:[16,17,18],project:[2,4,5,6,11,13,15],projects_benchmark:[2,4,5,6,8],projects_data:[2,4,5,6,8],projects_model:[2,5,6,8],projects_optim:[2,4,5,6],projects_optimizations_prun:4,projects_profil:[2,5,6,8],prune:19,pytorch__integr:3,pytorch__train:3,quick:11,rate:19,recip:11,relat:9,releas:9,remov:20,resourc:9,result:17,review:[17,18],run:[19,20],schema:6,screen:13,sensit:18,set:23,singl:20,sparsifi:[0,1,2,3,4,5,6,7,8,9,12,13,14,19],start:13,submodul:[1,2,3,4,5,6,7,8],subpackag:[1,2],summari:[17,18,19],system:[2,6,7],tensorflow__integr:3,term:24,thi:12,tour:11,train:19,user:25,util:[4,5,7],welcom:12,worker:8,you:13,your:16}}) \ No newline at end of file +Search.setIndex({docnames:["api/modules","api/sparsify","api/sparsify.blueprints","api/sparsify.blueprints.code_samples","api/sparsify.blueprints.utils","api/sparsify.models","api/sparsify.schemas","api/sparsify.utils","api/sparsify.workers","index","installation","quicktour","userguide/01-intro","userguide/02-install-sparsify","userguide/03-sparsify-overview","userguide/04-analyze","userguide/04a-profiling-your-model","userguide/04b-reviewing-performance-profiles","userguide/04c-reviewing-loss-profiles","userguide/05-optimize","userguide/05a-benchmark","userguide/06-integrate","userguide/06a-optimize-config","userguide/07-settings","userguide/08-key-terms","userguide/index"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":3,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.viewcode":1,sphinx:56},filenames:["api/modules.rst","api/sparsify.rst","api/sparsify.blueprints.rst","api/sparsify.blueprints.code_samples.rst","api/sparsify.blueprints.utils.rst","api/sparsify.models.rst","api/sparsify.schemas.rst","api/sparsify.utils.rst","api/sparsify.workers.rst","index.rst","installation.md","quicktour.md","userguide/01-intro.md","userguide/02-install-sparsify.md","userguide/03-sparsify-overview.md","userguide/04-analyze.md","userguide/04a-profiling-your-model.md","userguide/04b-reviewing-performance-profiles.md","userguide/04c-reviewing-loss-profiles.md","userguide/05-optimize.md","userguide/05a-benchmark.md","userguide/06-integrate.md","userguide/06a-optimize-config.md","userguide/07-settings.md","userguide/08-key-terms.md","userguide/index.rst"],objects:{"":{sparsify:[1,0,0,"-"]},"sparsify.app":{main:[1,1,1,""],run:[1,1,1,""]},"sparsify.blueprints":{code_samples:[3,0,0,"-"],errors:[2,0,0,"-"],jobs:[2,0
,0,"-"],model_repo:[2,0,0,"-"],projects:[2,0,0,"-"],projects_benchmarks:[2,0,0,"-"],projects_data:[2,0,0,"-"],projects_model:[2,0,0,"-"],projects_optimizations:[2,0,0,"-"],projects_profiles:[2,0,0,"-"],system:[2,0,0,"-"],ui:[2,0,0,"-"],utils:[4,0,0,"-"]},"sparsify.blueprints.code_samples":{pytorch__training:[3,0,0,"-"]},"sparsify.blueprints.code_samples.pytorch__training":{train:[3,1,1,""],train_setup:[3,1,1,""]},"sparsify.blueprints.utils":{helpers:[4,0,0,"-"],projects:[4,0,0,"-"],projects_benchmark:[4,0,0,"-"],projects_data:[4,0,0,"-"],projects_optimizations:[4,0,0,"-"],projects_optimizations_pruning:[4,0,0,"-"]},"sparsify.blueprints.utils.helpers":{HTTPNotFoundError:[4,2,1,""]},"sparsify.blueprints.utils.projects":{get_project_by_id:[4,1,1,""],get_project_model_by_project_id:[4,1,1,""]},"sparsify.blueprints.utils.projects_benchmark":{get_project_benchmark_by_ids:[4,1,1,""]},"sparsify.blueprints.utils.projects_data":{get_project_data_by_ids:[4,1,1,""],validate_model_data:[4,1,1,""]},"sparsify.blueprints.utils.projects_optimizations":{OptimEpochs:[4,3,1,""],create_config:[4,1,1,""],default_epochs_distribution:[4,1,1,""],default_pruning_settings:[4,1,1,""],get_profiles_by_id:[4,1,1,""],get_project_optimizer_by_ids:[4,1,1,""],optim_lr_sched_default_mods:[4,1,1,""],optim_lr_sched_updater:[4,1,1,""],optim_pruning_updater:[4,1,1,""],optim_trainable_default_nodes:[4,1,1,""],optim_trainable_updater:[4,1,1,""],optim_updater:[4,1,1,""],optim_validate_and_get_project_by_id:[4,1,1,""],sparse_training_available:[4,1,1,""],validate_pruning_nodes:[4,1,1,""]},"sparsify.blueprints.utils.projects_optimizations.OptimEpochs":{end_epoch:[4,4,1,""],fine_tuning_epochs:[4,4,1,""],fine_tuning_start_epoch:[4,4,1,""],pruning_end_epoch:[4,4,1,""],pruning_epochs:[4,4,1,""],pruning_start_epoch:[4,4,1,""],pruning_update_frequency:[4,4,1,""],stabilization_epochs:[4,4,1,""],start_epoch:[4,4,1,""],training_epochs:[4,4,1,""]},"sparsify.blueprints.utils.projects_optimizations_pruning":{PruningModelEvaluator:[4,3,1,""],PruningSettings:[4,3,1,""]},"sparsify.blueprints.utils.projects_optimizations_pruning.PruningModelEvaluator":{EVAL_SENSITIVITY_SPARSITY:[4,5,1,""],MAX_NODE_SPARSITY:[4,5,1,""],apply_node_overrides:[4,4,1,""],eval_baseline:[4,4,1,""],eval_pruning:[4,4,1,""],to_dict_values:[4,4,1,""]},"sparsify.blueprints.utils.projects_optimizations_pruning.PruningSettings":{balance_perf_loss:[4,4,1,""],filter_min_perf_gain:[4,4,1,""],filter_min_recovery:[4,4,1,""],filter_min_sparsity:[4,4,1,""],mask_type:[4,4,1,""],sparsity:[4,4,1,""]},"sparsify.log":{get_main_logger:[1,1,1,""],get_root_logger:[1,1,1,""],set_logging_level:[1,1,1,""]},"sparsify.models":{base:[5,0,0,"-"],jobs:[5,0,0,"-"],projects:[5,0,0,"-"],projects_benchmark:[5,0,0,"-"],projects_data:[5,0,0,"-"],projects_model:[5,0,0,"-"],projects_optimizations:[5,0,0,"-"],projects_profiles:[5,0,0,"-"],utils:[5,0,0,"-"]},"sparsify.models.base":{BaseCreatedModifiedModel:[5,3,1,""],BaseModel:[5,3,1,""],CSVField:[5,3,1,""],CSVFloatField:[5,3,1,""],CSVIntField:[5,3,1,""],FileStorage:[5,3,1,""],ListObjField:[5,3,1,""]},"sparsify.models.base.BaseCreatedModifiedModel":{DoesNotExist:[5,5,1,""],created:[5,5,1,""],id:[5,5,1,""],modified:[5,5,1,""],save:[5,4,1,""]},"sparsify.models.base.BaseModel":{DoesNotExist:[5,5,1,""],id:[5,5,1,""],refresh:[5,4,1,""]},"sparsify.models.base.CSVField":{db_value:[5,4,1,""],python_value:[5,4,1,""]},"sparsify.models.base.CSVFloatField":{python_value:[5,4,1,""]},"sparsify.models.base.CSVIntField":{python_value:[5,4,1,""]},"sparsify.models.base.FileStorage"
:{init:[5,4,1,""],root_path:[5,4,1,""]},"sparsify.models.base.ListObjField":{db_value:[5,4,1,""],python_value:[5,4,1,""]},"sparsify.models.jobs":{Job:[5,3,1,""],JobStatus:[5,3,1,""],JobStatusField:[5,3,1,""]},"sparsify.models.jobs.Job":{DoesNotExist:[5,5,1,""],baseprojectprofile_set:[5,5,1,""],created:[5,5,1,""],error:[5,5,1,""],job_id:[5,5,1,""],modified:[5,5,1,""],progress:[5,5,1,""],project_id:[5,5,1,""],projectbenchmark_set:[5,5,1,""],projectdata_set:[5,5,1,""],projectlossprofile_set:[5,5,1,""],projectmodel_set:[5,5,1,""],projectperfprofile_set:[5,5,1,""],save:[5,4,1,""],status:[5,5,1,""],type_:[5,5,1,""],worker_ack:[5,5,1,""],worker_args:[5,5,1,""]},"sparsify.models.jobs.JobStatus":{canceled:[5,5,1,""],canceling:[5,5,1,""],completed:[5,5,1,""],error:[5,5,1,""],pending:[5,5,1,""],started:[5,5,1,""]},"sparsify.models.jobs.JobStatusField":{db_value:[5,4,1,""],field_type:[5,5,1,""],python_value:[5,4,1,""]},"sparsify.models.projects":{BaseProjectModel:[5,3,1,""],Project:[5,3,1,""]},"sparsify.models.projects.BaseProjectModel":{DoesNotExist:[5,5,1,""],delete_filesystem:[5,4,1,""],id:[5,5,1,""],setup_filesystem:[5,4,1,""],validate_filesystem:[5,4,1,""]},"sparsify.models.projects.Project":{DoesNotExist:[5,5,1,""],benchmarks:[5,5,1,""],created:[5,5,1,""],data:[5,5,1,""],delete_filesystem:[5,4,1,""],description:[5,5,1,""],dir_path:[5,4,1,""],dir_size:[5,4,1,""],models:[5,5,1,""],modified:[5,5,1,""],name:[5,5,1,""],optims:[5,5,1,""],profiles_loss:[5,5,1,""],profiles_perf:[5,5,1,""],project_id:[5,5,1,""],save:[5,4,1,""],setup_filesystem:[5,4,1,""],training_epochs:[5,5,1,""],training_lr_final:[5,5,1,""],training_lr_init:[5,5,1,""],training_optimizer:[5,5,1,""],validate_filesystem:[5,4,1,""]},"sparsify.models.projects_benchmark":{ProjectBenchmark:[5,3,1,""]},"sparsify.models.projects_benchmark.ProjectBenchmark":{DoesNotExist:[5,5,1,""],batch_sizes:[5,5,1,""],benchmark_id:[5,5,1,""],core_counts:[5,5,1,""],created:[5,5,1,""],inference_models:[5,5,1,""],instruction_sets:[5,5,1,""],iterations_per_check:[5,5,1,""],job:[5,5,1,""],job_id:[5,5,1,""],modified:[5,5,1,""],name:[5,5,1,""],project:[5,5,1,""],project_id:[5,5,1,""],result:[5,5,1,""],source:[5,5,1,""],warmup_iterations_per_check:[5,5,1,""]},"sparsify.models.projects_data":{ProjectData:[5,3,1,""]},"sparsify.models.projects_data.ProjectData":{DoesNotExist:[5,5,1,""],created:[5,5,1,""],data_id:[5,5,1,""],delete_filesystem:[5,4,1,""],dir_path:[5,4,1,""],file:[5,5,1,""],file_path:[5,4,1,""],job:[5,5,1,""],job_id:[5,5,1,""],project:[5,5,1,""],project_id:[5,5,1,""],setup_filesystem:[5,4,1,""],source:[5,5,1,""],validate_filesystem:[5,4,1,""]},"sparsify.models.projects_model":{ProjectModel:[5,3,1,""]},"sparsify.models.projects_model.ProjectModel":{DoesNotExist:[5,5,1,""],analysis:[5,5,1,""],created:[5,5,1,""],delete_filesystem:[5,4,1,""],dir_path:[5,4,1,""],file:[5,5,1,""],file_path:[5,4,1,""],job:[5,5,1,""],job_id:[5,5,1,""],model_id:[5,5,1,""],project:[5,5,1,""],project_id:[5,5,1,""],setup_filesystem:[5,4,1,""],source:[5,5,1,""],validate_filesystem:[5,4,1,""]},"sparsify.models.projects_optimizations":{ProjectOptimization:[5,3,1,""],ProjectOptimizationModifierLRSchedule:[5,3,1,""],ProjectOptimizationModifierPruning:[5,3,1,""],ProjectOptimizationModifierQuantization:[5,3,1,""],ProjectOptimizationModifierTrainable:[5,3,1,""]},"sparsify.models.projects_optimizations.ProjectOptimization":{DoesNotExist:[5,5,1,""],created:[5,5,1,""],end_epoch:[5,5,1,""],lr_schedule_modifiers:[5,5,1,""],modified:[5,5,1,""],name:[5,5,1,""],notes:[5,5,1,""],optim_id:[5,5,1,""],profi
le_loss:[5,5,1,""],profile_loss_id:[5,5,1,""],profile_perf:[5,5,1,""],profile_perf_id:[5,5,1,""],project:[5,5,1,""],project_id:[5,5,1,""],pruning_modifiers:[5,5,1,""],quantization_modifiers:[5,5,1,""],start_epoch:[5,5,1,""],trainable_modifiers:[5,5,1,""]},"sparsify.models.projects_optimizations.ProjectOptimizationModifierLRSchedule":{DoesNotExist:[5,5,1,""],created:[5,5,1,""],end_epoch:[5,5,1,""],final_lr:[5,5,1,""],init_lr:[5,5,1,""],lr_mods:[5,5,1,""],modified:[5,5,1,""],modifier_id:[5,5,1,""],optim:[5,5,1,""],optim_id:[5,5,1,""],start_epoch:[5,5,1,""]},"sparsify.models.projects_optimizations.ProjectOptimizationModifierPruning":{DoesNotExist:[5,5,1,""],balance_perf_loss:[5,5,1,""],compression:[5,5,1,""],created:[5,5,1,""],end_epoch:[5,5,1,""],est_loss_sensitivity:[5,5,1,""],est_perf_sensitivity:[5,5,1,""],est_recovery:[5,5,1,""],est_time:[5,5,1,""],est_time_baseline:[5,5,1,""],est_time_gain:[5,5,1,""],filter_min_perf_gain:[5,5,1,""],filter_min_recovery:[5,5,1,""],filter_min_sparsity:[5,5,1,""],flops:[5,5,1,""],flops_baseline:[5,5,1,""],flops_gain:[5,5,1,""],mask_type:[5,5,1,""],modified:[5,5,1,""],modifier_id:[5,5,1,""],nodes:[5,5,1,""],optim:[5,5,1,""],optim_id:[5,5,1,""],params:[5,5,1,""],params_baseline:[5,5,1,""],sparsity:[5,5,1,""],start_epoch:[5,5,1,""],update_frequency:[5,5,1,""]},"sparsify.models.projects_optimizations.ProjectOptimizationModifierQuantization":{DoesNotExist:[5,5,1,""],balance_perf_loss:[5,5,1,""],compression:[5,5,1,""],created:[5,5,1,""],end_epoch:[5,5,1,""],est_loss_sensitivity:[5,5,1,""],est_perf_sensitivity:[5,5,1,""],est_recovery:[5,5,1,""],est_time:[5,5,1,""],est_time_baseline:[5,5,1,""],est_time_gain:[5,5,1,""],filter_min_perf_gain:[5,5,1,""],filter_min_recovery:[5,5,1,""],flops:[5,5,1,""],flops_baseline:[5,5,1,""],flops_gain:[5,5,1,""],level:[5,5,1,""],modified:[5,5,1,""],modifier_id:[5,5,1,""],nodes:[5,5,1,""],optim:[5,5,1,""],optim_id:[5,5,1,""],params:[5,5,1,""],params_baseline:[5,5,1,""],start_epoch:[5,5,1,""]},"sparsify.models.projects_optimizations.ProjectOptimizationModifierTrainable":{DoesNotExist:[5,5,1,""],created:[5,5,1,""],end_epoch:[5,5,1,""],modified:[5,5,1,""],modifier_id:[5,5,1,""],nodes:[5,5,1,""],optim:[5,5,1,""],optim_id:[5,5,1,""],start_epoch:[5,5,1,""]},"sparsify.models.projects_profiles":{BaseProjectProfile:[5,3,1,""],ProjectLossProfile:[5,3,1,""],ProjectPerfProfile:[5,3,1,""]},"sparsify.models.projects_profiles.BaseProjectProfile":{DoesNotExist:[5,5,1,""],analysis:[5,5,1,""],created:[5,5,1,""],job:[5,5,1,""],job_id:[5,5,1,""],name:[5,5,1,""],profile_id:[5,5,1,""],source:[5,5,1,""]},"sparsify.models.projects_profiles.ProjectLossProfile":{DoesNotExist:[5,5,1,""],analysis:[5,5,1,""],created:[5,5,1,""],job:[5,5,1,""],job_id:[5,5,1,""],name:[5,5,1,""],profile_id:[5,5,1,""],project:[5,5,1,""],project_id:[5,5,1,""],projectoptimization_set:[5,5,1,""],pruning_estimation_type:[5,5,1,""],pruning_estimations:[5,5,1,""],pruning_structure:[5,5,1,""],quantized_estimation_type:[5,5,1,""],quantized_estimations:[5,5,1,""],source:[5,5,1,""]},"sparsify.models.projects_profiles.ProjectPerfProfile":{DoesNotExist:[5,5,1,""],analysis:[5,5,1,""],batch_size:[5,5,1,""],core_count:[5,5,1,""],created:[5,5,1,""],instruction_sets:[5,5,1,""],iterations_per_check:[5,5,1,""],job:[5,5,1,""],job_id:[5,5,1,""],name:[5,5,1,""],profile_id:[5,5,1,""],project:[5,5,1,""],project_id:[5,5,1,""],projectoptimization_set:[5,5,1,""],pruning_estimations:[5,5,1,""],quantized_estimations:[5,5,1,""],source:[5,5,1,""],warmup_iterations_per_check:[5,5,1,""]},"sparsify.models.utils":{datab
ase_setup:[5,1,1,""]},"sparsify.schemas":{errors:[6,0,0,"-"],helpers:[6,0,0,"-"],jobs:[6,0,0,"-"],model_repo:[6,0,0,"-"],projects:[6,0,0,"-"],projects_benchmarks:[6,0,0,"-"],projects_data:[6,0,0,"-"],projects_model:[6,0,0,"-"],projects_optimizations:[6,0,0,"-"],projects_profiles:[6,0,0,"-"],system:[6,0,0,"-"]},"sparsify.schemas.errors":{ErrorSchema:[6,3,1,""]},"sparsify.schemas.errors.ErrorSchema":{opts:[6,5,1,""]},"sparsify.schemas.helpers":{EnumField:[6,3,1,""],data_dump_and_validation:[6,1,1,""]},"sparsify.schemas.helpers.EnumField":{deserialize:[6,4,1,""]},"sparsify.schemas.jobs":{JobProgressSchema:[6,3,1,""],JobSchema:[6,3,1,""],ResponseJobSchema:[6,3,1,""],ResponseJobsSchema:[6,3,1,""],SearchJobsSchema:[6,3,1,""]},"sparsify.schemas.jobs.JobProgressSchema":{opts:[6,5,1,""]},"sparsify.schemas.jobs.JobSchema":{opts:[6,5,1,""]},"sparsify.schemas.jobs.ResponseJobSchema":{opts:[6,5,1,""]},"sparsify.schemas.jobs.ResponseJobsSchema":{opts:[6,5,1,""]},"sparsify.schemas.jobs.SearchJobsSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo":{ModelRepoArchitectureSchema:[6,3,1,""],ModelRepoDatasetSchema:[6,3,1,""],ModelRepoDomainSchema:[6,3,1,""],ModelRepoModelDescSchema:[6,3,1,""],ModelRepoModelMetricSchema:[6,3,1,""],ModelRepoModelPerfSchema:[6,3,1,""],ModelRepoModelSchema:[6,3,1,""],ResponseModelRepoModels:[6,3,1,""],SearchModelRepoModels:[6,3,1,""]},"sparsify.schemas.model_repo.ModelRepoArchitectureSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.ModelRepoDatasetSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.ModelRepoDomainSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.ModelRepoModelDescSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.ModelRepoModelMetricSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.ModelRepoModelPerfSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.ModelRepoModelSchema":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.ResponseModelRepoModels":{opts:[6,5,1,""]},"sparsify.schemas.model_repo.SearchModelRepoModels":{opts:[6,5,1,""]},"sparsify.schemas.projects":{CreateUpdateProjectSchema:[6,3,1,""],DeleteProjectSchema:[6,3,1,""],ProjectExtSchema:[6,3,1,""],ProjectSchema:[6,3,1,""],ResponseProjectDeletedSchema:[6,3,1,""],ResponseProjectExtSchema:[6,3,1,""],ResponseProjectSchema:[6,3,1,""],ResponseProjectsSchema:[6,3,1,""],SearchProjectsSchema:[6,3,1,""]},"sparsify.schemas.projects.CreateUpdateProjectSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.DeleteProjectSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.ProjectExtSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.ProjectSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.ResponseProjectDeletedSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.ResponseProjectExtSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.ResponseProjectSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.ResponseProjectsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects.SearchProjectsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks":{CreateProjectBenchmarkSchema:[6,3,1,""],ProjectBenchmarkResultSchema:[6,3,1,""],ProjectBenchmarkResultsSchema:[6,3,1,""],ProjectBenchmarkSchema:[6,3,1,""],ResponseProjectBenchmarkDeletedSchema:[6,3,1,""],ResponseProjectBenchmarkSchema:[6,3,1,""],ResponseProjectBenchmarksSchema:[6,3,1,""],SearchProjectBenchmarksSchema:[6,3,1,""]},"sparsify.schemas.projects_benchmarks.CreateProjectBenchmarkSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks.ProjectBenchmarkResultSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks.ProjectBenchmarkRe
sultsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks.ProjectBenchmarkSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks.ResponseProjectBenchmarkDeletedSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks.ResponseProjectBenchmarkSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks.ResponseProjectBenchmarksSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_benchmarks.SearchProjectBenchmarksSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_data":{CreateUpdateProjectDataSchema:[6,3,1,""],ProjectDataSchema:[6,3,1,""],ResponseProjectDataDeletedSchema:[6,3,1,""],ResponseProjectDataSchema:[6,3,1,""],ResponseProjectDataSingleSchema:[6,3,1,""],SearchProjectDataSchema:[6,3,1,""],SetProjectDataFromSchema:[6,3,1,""]},"sparsify.schemas.projects_data.CreateUpdateProjectDataSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_data.ProjectDataSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_data.ResponseProjectDataDeletedSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_data.ResponseProjectDataSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_data.ResponseProjectDataSingleSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_data.SearchProjectDataSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_data.SetProjectDataFromSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model":{CreateUpdateProjectModelSchema:[6,3,1,""],DeleteProjectModelSchema:[6,3,1,""],ProjectModelAnalysisSchema:[6,3,1,""],ProjectModelSchema:[6,3,1,""],ResponseProjectModelAnalysisSchema:[6,3,1,""],ResponseProjectModelDeletedSchema:[6,3,1,""],ResponseProjectModelSchema:[6,3,1,""],SetProjectModelFromSchema:[6,3,1,""]},"sparsify.schemas.projects_model.CreateUpdateProjectModelSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model.DeleteProjectModelSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model.ProjectModelAnalysisSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model.ProjectModelSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model.ResponseProjectModelAnalysisSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model.ResponseProjectModelDeletedSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model.ResponseProjectModelSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_model.SetProjectModelFromSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations":{CreateProjectOptimizationSchema:[6,3,1,""],CreateUpdateProjectOptimizationModifiersLRScheduleSchema:[6,3,1,""],CreateUpdateProjectOptimizationModifiersPruningSchema:[6,3,1,""],CreateUpdateProjectOptimizationModifiersQuantizationSchema:[6,3,1,""],CreateUpdateProjectOptimizationModifiersTrainableSchema:[6,3,1,""],GetProjectOptimizationBestEstimatedResultsSchema:[6,3,1,""],ProjectAvailableModelModificationsSchema:[6,3,1,""],ProjectOptimizationModifierLRExponentialArgsSchema:[6,3,1,""],ProjectOptimizationModifierLRMultiStepArgsSchema:[6,3,1,""],ProjectOptimizationModifierLRScheduleSchema:[6,3,1,""],ProjectOptimizationModifierLRSchema:[6,3,1,""],ProjectOptimizationModifierLRSetArgsSchema:[6,3,1,""],ProjectOptimizationModifierLRStepArgsSchema:[6,3,1,""],ProjectOptimizationModifierPruningNodeSchema:[6,3,1,""],ProjectOptimizationModifierPruningSchema:[6,3,1,""],ProjectOptimizationModifierQuantizationNodeSchema:[6,3,1,""],ProjectOptimizationModifierQuantizationSchema:[6,3,1,""],ProjectOptimizationModifierTrainableNodeSchema:[6,3,1,""],ProjectOptimizationModifierTrainableSchema:[6,3,1,""],ProjectOptimizationSchema:[6,3,1,""],ResponseProjectOptimizationDeletedSchema:[6,3,1,""],ResponseProjectOptimizat
ionFrameworksAvailableSamplesSchema:[6,3,1,""],ResponseProjectOptimizationFrameworksAvailableSchema:[6,3,1,""],ResponseProjectOptimizationModifierDeletedSchema:[6,3,1,""],ResponseProjectOptimizationModifiersAvailable:[6,3,1,""],ResponseProjectOptimizationModifiersBestEstimated:[6,3,1,""],ResponseProjectOptimizationSchema:[6,3,1,""],ResponseProjectOptimizationsSchema:[6,3,1,""],SearchProjectOptimizationsSchema:[6,3,1,""],UpdateProjectOptimizationSchema:[6,3,1,""]},"sparsify.schemas.projects_optimizations.CreateProjectOptimizationSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.CreateUpdateProjectOptimizationModifiersLRScheduleSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.CreateUpdateProjectOptimizationModifiersPruningSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.CreateUpdateProjectOptimizationModifiersQuantizationSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.CreateUpdateProjectOptimizationModifiersTrainableSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.GetProjectOptimizationBestEstimatedResultsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectAvailableModelModificationsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierLRExponentialArgsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierLRMultiStepArgsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierLRScheduleSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierLRSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierLRSetArgsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierLRStepArgsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierPruningNodeSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierPruningSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierQuantizationNodeSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierQuantizationSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierTrainableNodeSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationModifierTrainableSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ProjectOptimizationSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationDeletedSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationFrameworksAvailableSamplesSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationFrameworksAvailableSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationModifierDeletedSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationModifiersAvailable":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationModifiersBestEstimated":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.ResponseProjectOptimizationsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimizations.SearchProjectOptimizationsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_optimi
zations.UpdateProjectOptimizationSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles":{CreateProjectLossProfileSchema:[6,3,1,""],CreateProjectPerfProfileSchema:[6,3,1,""],ProjectLossProfileSchema:[6,3,1,""],ProjectPerfProfileSchema:[6,3,1,""],ProjectProfileAnalysisSchema:[6,3,1,""],ProjectProfileMeasurementSchema:[6,3,1,""],ProjectProfileMeasurementsSchema:[6,3,1,""],ProjectProfileModelOpsBaselineMeasurementsSchema:[6,3,1,""],ProjectProfileModelOpsMeasurementsSchema:[6,3,1,""],ProjectProfileOpBaselineMeasurementSchema:[6,3,1,""],ProjectProfileOpMeasurementsSchema:[6,3,1,""],ProjectProfileOpSchema:[6,3,1,""],ProjectProfileSchema:[6,3,1,""],ResponseProjectLossProfileSchema:[6,3,1,""],ResponseProjectLossProfilesSchema:[6,3,1,""],ResponseProjectPerfProfileSchema:[6,3,1,""],ResponseProjectPerfProfilesSchema:[6,3,1,""],ResponseProjectProfileDeletedSchema:[6,3,1,""],SearchProjectProfilesSchema:[6,3,1,""]},"sparsify.schemas.projects_profiles.CreateProjectLossProfileSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.CreateProjectPerfProfileSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectLossProfileSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectPerfProfileSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileAnalysisSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileMeasurementSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileMeasurementsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileModelOpsBaselineMeasurementsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileModelOpsMeasurementsSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileOpBaselineMeasurementSchema":{dump_fields:[6,5,1,""],fields:[6,5,1,""],load_fields:[6,5,1,""],opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileOpMeasurementsSchema":{dump_fields:[6,5,1,""],fields:[6,5,1,""],load_fields:[6,5,1,""],opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileOpSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ProjectProfileSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ResponseProjectLossProfileSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ResponseProjectLossProfilesSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ResponseProjectPerfProfileSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ResponseProjectPerfProfilesSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.ResponseProjectProfileDeletedSchema":{opts:[6,5,1,""]},"sparsify.schemas.projects_profiles.SearchProjectProfilesSchema":{opts:[6,5,1,""]},"sparsify.schemas.system":{ResponseSystemInfo:[6,3,1,""],SystemInfo:[6,3,1,""],VersionInfoSchema:[6,3,1,""]},"sparsify.schemas.system.ResponseSystemInfo":{opts:[6,5,1,""]},"sparsify.schemas.system.SystemInfo":{opts:[6,5,1,""]},"sparsify.schemas.system.VersionInfoSchema":{opts:[6,5,1,""]},"sparsify.utils":{system:[7,0,0,"-"]},"sparsify.utils.system":{available_ml_engines:[7,1,1,""],get_ml_sys_info:[7,1,1,""],ml_engines_errors:[7,1,1,""]},"sparsify.workers":{base:[8,0,0,"-"],manager:[8,0,0,"-"],projects_benchmark:[8,0,0,"-"],projects_data:[8,0,0,"-"],projects_model:[8,0,0,"-"],projects_profiles:[8,0,0,"-"]},"sparsify.workers.base":{JobWorker:[8,3,1,""],JobWorkerRegistry:[8,3,1,""]},"sparsify.workers.base.JobWorker":{format_args:[8,4,1,""],get_type:[8,4,1,""],job_id:[8,4,1,""],project_id:[8,4,1,""],run:[8,4,1,""]},"sparsify.workers.base
.JobWorkerRegistry":{REGISTRY:[8,5,1,""],create_worker:[8,4,1,""]},"sparsify.workers.manager":{JobCancelationFailureError:[8,2,1,""],JobNotFoundError:[8,2,1,""],JobWorkerManager:[8,3,1,""]},"sparsify.workers.manager.JobWorkerManager":{cancel_job:[8,4,1,""],refresh:[8,4,1,""],shutdown:[8,4,1,""],start:[8,4,1,""]},"sparsify.workers.projects_benchmark":{CreateBenchmarkJobWorker:[8,3,1,""]},"sparsify.workers.projects_benchmark.CreateBenchmarkJobWorker":{batch_sizes:[8,4,1,""],benchmark_id:[8,4,1,""],core_counts:[8,4,1,""],format_args:[8,4,1,""],inference_models:[8,4,1,""],instruction_sets:[8,4,1,""],iterations_per_check:[8,4,1,""],model_id:[8,4,1,""],run:[8,4,1,""],warmup_iterations_per_check:[8,4,1,""]},"sparsify.workers.projects_data":{DataFromPathJobWorker:[8,3,1,""],DataFromRepoJobWorker:[8,3,1,""]},"sparsify.workers.projects_data.DataFromPathJobWorker":{run:[8,4,1,""]},"sparsify.workers.projects_data.DataFromRepoJobWorker":{run:[8,4,1,""]},"sparsify.workers.projects_model":{ModelFromPathJobWorker:[8,3,1,""],ModelFromRepoJobWorker:[8,3,1,""]},"sparsify.workers.projects_model.ModelFromPathJobWorker":{run:[8,4,1,""]},"sparsify.workers.projects_model.ModelFromRepoJobWorker":{run:[8,4,1,""]},"sparsify.workers.projects_profiles":{CreateLossProfileJobWorker:[8,3,1,""],CreatePerfProfileJobWorker:[8,3,1,""]},"sparsify.workers.projects_profiles.CreateLossProfileJobWorker":{format_args:[8,4,1,""],model_id:[8,4,1,""],profile_id:[8,4,1,""],pruning_estimation_type:[8,4,1,""],pruning_estimations:[8,4,1,""],pruning_structure:[8,4,1,""],quantized_estimations:[8,4,1,""],run:[8,4,1,""]},"sparsify.workers.projects_profiles.CreatePerfProfileJobWorker":{batch_size:[8,4,1,""],core_count:[8,4,1,""],format_args:[8,4,1,""],iterations_per_check:[8,4,1,""],pruning_estimations:[8,4,1,""],quantized_estimations:[8,4,1,""],run:[8,4,1,""],warmup_iterations_per_check:[8,4,1,""]},sparsify:{app:[1,0,0,"-"],blueprints:[2,0,0,"-"],log:[1,0,0,"-"],models:[5,0,0,"-"],schemas:[6,0,0,"-"],utils:[7,0,0,"-"],workers:[8,0,0,"-"]}},objnames:{"0":["py","module","Python module"],"1":["py","function","Python function"],"2":["py","exception","Python exception"],"3":["py","class","Python class"],"4":["py","method","Python method"],"5":["py","attribute","Python 
attribute"]},objtypes:{"0":"py:module","1":"py:function","2":"py:exception","3":"py:class","4":"py:method","5":"py:attribute"},terms:{"100":4,"404":4,"5543":11,"960px":9,"abstract":[5,8],"case":[15,16,19,24],"class":[4,5,6,8],"default":[4,5,11,16,17],"enum":[5,6],"export":[9,14,15,21,22,24,25],"final":[4,9,12,19],"float":[4,5,16,17,24],"function":[4,5,7],"import":[7,18],"int":[1,3,4,8],"long":[17,24],"new":[4,9,14,16,20,25],"null":5,"public":[8,11],"return":[1,4,5,6,7,8,15,16,22],"static":8,"true":[4,8],"try":[18,22],"while":[6,9,14,17,19,24],Adding:[9,16,25],And:[15,24],For:[9,11,14,15,16,17,18,19,20,22,24],One:24,Such:7,The:[6,8,9,11,13,15,16,17,18,19,20,21,22,24],Then:[16,17,18,20,22],There:[18,19],These:[16,19,21],Use:[6,12,14],Used:5,Using:20,Will:4,_dataloaderjobwork:8,_hidden:5,_modelloaderjobwork:8,abl:4,about:[9,13,17,18,25],acceler:12,accept:[11,15,22],access:[8,11,19,23],accordingli:21,accuraci:[9,12,19,24],achiev:[9,12,17,24],across:[17,19,24],activ:[9,14,19,24],add:[4,17,18,19,20],added:8,addit:[8,11,14,16,19,24,25],addition:[9,11],address:11,adjust:19,advanc:12,affect:[14,15,16,17,18,19,24],after:[4,6,8,11,14,18,19,20,21,22,24],again:[14,20],algorithm:[11,14,15],alia:[4,5],all:[1,4,5,6,8,9,12,16,17,19,20,22,24],allow:[8,11,23],along:[1,11],alpha:12,alphanumer:15,alreadi:[18,19,20],also:[16,17,18,20,24],altern:9,alwai:[4,17],amount:24,analysi:[4,5,6,8,11,13,14,15,24],analyz:[9,12,13,19,25],ani:[3,4,6,7,8,12,14,15,16,21,23,24],anoth:20,anyth:[6,8],anywher:15,api:[2,9],app:[0,5,6,9],appear:17,appli:[4,9,14,16,17,19,24],applic:2,apply_node_overrid:4,approach:9,appropri:[8,20],approxim:[16,18,24],architect:17,architectur:[6,16,18,24],area:[13,17],arg:[4,5,6,8],argument:6,artifici:15,assign:4,associ:6,attr:[6,8],attribut:[6,24],autofield:5,automat:[4,9,14,15,19],automl:11,avail:[6,7,8,11,13,17,24],available_ml_engin:7,averag:[18,19,20,24],avoid:12,avx2:24,avx512:24,awai:24,awar:11,back:[15,16,17],background:8,balanc:19,balance_perf_loss:[4,5],bar:[13,14,15,17,18,23],base:[0,1,4,6,7,15,17,20,21],basecreatedmodifiedmodel:5,basecreatedmodifiedmodeldoesnotexist:5,basejobwork:8,baselin:[4,6,9,11,17,18,20,24],baseline_spars:4,basemodel:5,basemodeldoesnotexist:5,baseprofilejobwork:8,baseprojectmodel:5,baseprojectmodeldoesnotexist:5,baseprojectprofil:5,baseprojectprofile_set:5,baseprojectprofiledoesnotexist:5,basi:[17,19],basic:12,batch:[8,15,16,17,20,24],batch_siz:[3,5,8],batchnorm:24,becaus:[16,18],been:24,befor:[4,8,15,24],begin:[8,11,12,14,15,24],being:[8,16],belong:8,below:[11,19,20],benchmark:[1,2,4,5,6,8,9,12,14,16,19,24,25],benchmark_id:[4,5,8],best:[4,6,9,11,12,19],better:[9,11,17,19,24],between:[9,12,18,19],bit:24,black:13,block_4:8,blog:9,blueprint:[0,1],bool:[1,4,6,8],booleanfield:5,both:[11,16,17],bottom:11,brows:[11,15],browser:[11,15],bug:[9,12],build:9,busi:[20,22],button:[11,13,19,20,21,23],call:4,callabl:3,can:[1,5,8,9,11,14,15,16,17,18,19,20,21,22,23,24,25],cancel:[5,8,15],cancel_job:8,cannot:11,chanc:[18,19,24],chang:[14,15,16,17,19,20,21,23],charact:15,charfield:5,check:[8,10,11,12,20],choic:[5,19],choos:[11,21],chronolog:20,classif:16,classmethod:8,click:[11,15,16,17,18,19,20,21,23],clipboard:22,close:[19,24],code:[1,6,8,9,11,14,19,21,24,25],code_sampl:[1,2],collat:5,color:14,column_nam:5,com:9,combin:[9,12],come:17,command:[11,13,15],common:[11,16],compar:[9,19,24,25],comparison:[8,20],compat:13,complet:[5,8,11,15,16,18,24],compress:[5,12,14,18],comput:[15,17,24],concept:[9,22,23,25],condit:20,confid:[18,19,24],config:[4,9,21,25],config_path:3,configur:[8,11,14,15,19,
21,22],confirm:11,consid:16,consider:16,consist:[1,17,18,19],consol:11,constant:17,constraint:5,constructor:8,consult:15,contain:[4,5,6,7,8,9,12],content:[0,9],context:6,continu:[11,15,16,17,18,19,20,24],contribut:[15,24],control:[19,22,24],conv:24,convers:17,convert:11,convolut:[17,24],copi:[8,11,22],core:[7,8,15,16,17,20,24],core_count:[5,8],correct:5,correctli:[4,9,11],correl:18,correspond:[18,19,21],cost:24,could:8,count:[8,9,16,17,20,24,25],cpu:[9,16,17,20,24],creat:[1,4,5,6,9,11,13,14,19,20,25],create_config:4,create_work:8,createbenchmarkjobwork:8,createlossprofilejobwork:8,createperfprofilejobwork:8,createprojectbenchmarkschema:6,createprojectlossprofileschema:6,createprojectoptimizationschema:6,createprojectperfprofileschema:6,createupdateprojectdataschema:6,createupdateprojectmodelschema:6,createupdateprojectoptimizationmodifierslrscheduleschema:6,createupdateprojectoptimizationmodifierspruningschema:6,createupdateprojectoptimizationmodifiersquantizationschema:6,createupdateprojectoptimizationmodifierstrainableschema:6,createupdateprojectschema:6,creation:11,criteria:[20,22],csv:5,csvfield:5,csvfloatfield:5,csvintfield:5,current:[4,6,7,8,11,12,14,15,17,18,19,21,24],custom:[6,19],cut:[15,24],data:[2,3,4,5,6,8,15,24],data_dump_and_valid:6,data_id:[4,5,8],data_path:4,databas:[5,6,8],database_setup:5,datafrompathjobwork:8,datafromrepojobwork:8,dataset:[3,6,9],date:5,datetimefield:5,db_column:5,db_valu:5,debian:10,debug:1,deep:[9,12],deeper:[13,17],deepspars:[7,9,13,15,17,24],default_epochs_distribut:4,default_pruning_set:4,default_train:4,defin:[17,18,24],degre:11,delet:[5,6,23],delete_filesystem:5,deleteprojectmodelschema:6,deleteprojectschema:6,dens:17,depend:[16,17,20],deploi:[9,12,16],deploy:[12,16],depth:[10,11,24],desc:6,describ:[16,19,20,22],descript:[5,15,21],deseri:6,desir:11,detail:[4,12,13,14,16,19,24],detect:16,determin:[15,16,17,19,20,24],dev:9,devic:3,dialog:[15,16,17,18,19,23],dict:[4,6,7,8],dictionari:[4,6,7,8],did:[18,22,24],differ:[11,17,19,20,22],differenti:15,dir_path:5,dir_siz:5,direct:9,directori:5,disabl:[16,20],discuss:12,disk:22,displai:[11,15,16,17,19,20,23,24,25],distribut:4,doc:9,document:[9,11,15],doe:[15,24],doesnotexist:5,doing:[8,20],domain:[6,16],done:[18,19,24],down:[17,20],download:[8,9,11],drag:15,dramat:24,drill:17,driven:9,drop:20,dump:6,dump_field:6,dump_onli:6,duplic:12,dure:[13,14,15,18,19,21,22,24],each:[4,8,14,16,17,18,19,24],easi:[9,11,12],easili:[9,11,15],ecosystem:15,edit:[9,11,19,23],editor:19,effect:[4,9,11,14,15,16,17,18],effici:17,either:[8,11,15,17,20],emgin:15,emploi:24,empti:15,enabl:[9,11,14,15,17,20,24],encod:[9,22],encompass:9,encount:[4,6,7],end:[4,9,15,19,24],end_epoch:[4,5],engin:[7,9,12,13,17,24,25],ensur:13,enter:[11,15,16,17,18,19,23],entir:[17,18],entri:11,enum_class:6,enumer:5,enumfield:6,environ:[10,11,12],epoch:[4,19,24],equat:17,error:[0,1,4,5,7,8,12],errorschema:6,est_loss_sensit:5,est_perf_sensit:5,est_recoveri:5,est_tim:5,est_time_baselin:5,est_time_gain:5,establish:19,estim:[4,6,11,14,15,17,18,19,20,24],etc:[4,7],eval_baselin:4,eval_prun:4,eval_sensitivity_spars:4,evalu:4,even:17,event:6,eventu:16,everi:19,everyth:9,exact:18,examin:20,exampl:[9,12,14,15,16,17,18,19,20,22,24],except:[4,7,8],exchang:[15,24],exclud:6,execut:[16,24],exist:[5,9,12,14,20,21,25],exit:8,expect:[4,5,6,19],experi:[10,12],explor:[21,22],exponenti:6,extend:[5,8],extens:17,extern:1,extract:8,factor:17,fals:[4,5,6,8],fast:[9,16,17,24],faster:[9,14,17,19,24],fastest:17,featur:[9,12,15,22,23,25],fed:24,feedback:[9,19,25],few:[9,11],fewer:[17,18,24
],fft:9,field:[4,5,6,8,11,15],field_nam:6,field_typ:5,file:[1,2,5,6,8,9,11,12,14,15,19,21,25],file_path:5,filestorag:5,fill:11,filter:[6,19],filter_min_perf_gain:[4,5],filter_min_recoveri:[4,5],filter_min_spars:[4,5],final_lr:5,find:11,fine:[4,19],fine_tuning_epoch:4,fine_tuning_start_epoch:4,finish:11,five:14,fix:20,flask:[2,4,5,6],floatfield:5,flop:[5,16,17,19,24],flops_baselin:5,flops_gain:5,flow:[9,11,14,21,22,24],focus:[11,24],folder:5,follow:[11,12,13,15,17,18,19,24],footprint:9,foreignkeyfield:5,format:[1,8,9,11,15,22],format_arg:8,found:[4,8,9],framework:[4,6,11,12,17,21],frequenc:19,from:[1,4,5,6,8,9,10,11,12,15,16,17,18,19,20,23,24,25],full:9,further:[11,15,16,18],futur:[11,14,18,19,24],gemm:24,gener:[4,9,14,16,17,19,21,22,23,24],get:[4,6,13,14,15,17,19,20],get_main_logg:1,get_ml_sys_info:7,get_profiles_by_id:4,get_project_benchmark_by_id:4,get_project_by_id:4,get_project_data_by_id:4,get_project_model_by_project_id:4,get_project_optimizer_by_id:4,get_root_logg:1,get_typ:8,getprojectoptimizationbestestimatedresultsschema:6,github:[9,12],give:[9,19],given:[4,5,8,11,24],global_end_epoch:4,global_start_epoch:4,globalaveragepool:24,goal:[12,14,15,19,21],going:[15,17,24],good:5,gpu:9,grai:16,grain:19,graph:[17,18,19,20,24],greater:[4,19,24],guid:[9,10,15],handl:[1,2,5,6,8],happen:[11,19],has:[17,18,24],have:[4,5,11,18,20,24],held:17,help:[9,17,19,25],help_text:5,helper:[0,1,2],here:[20,24],higher:[19,24],home:11,host:[1,9,11],how:[9,14,15,16,17,18,19,21,24],howev:19,http:[9,11],httpnotfounderror:4,icon:[17,18,19],identifi:[16,17,18],ids:4,imag:16,img:9,implement:[8,9,24],improv:[9,12,14],includ:[5,6,7,8,9,12,14,17,18,21,22,23,24],inclus:11,increas:17,increasingli:19,independ:24,index:5,index_typ:5,indic:[14,16,17,18,19,20,24],individu:24,induc:9,industri:[9,12],infer:[7,8,9,11,14,15,16,17,24,25],inference_engin:8,inference_model:[5,8],inference_model_optim:8,info:[1,4,6,7],inform:[8,9,11,12,14,15,16,17,18,19,20,22,23,24,25],init:5,init_lr:5,initi:[4,5,13,19,21],input:[4,6,16,24],insight:[9,12],instal:[9,11,12,25],instanc:[5,8],instant:19,instead:[4,17,19],instruct:[7,8,15,17,24],instruction_set:[5,8],integ:5,integerfield:5,integr:[9,11,12,13,19,20,22,25],intellig:15,intens:[17,24],interact:11,intern:24,invalid:6,invok:4,involv:14,issu:12,item:[17,24],iter:8,iterations_per_check:[5,8],its:[4,6,11],job:[0,1,8],job_id:[5,8],jobcancelationfailureerror:8,jobdoesnotexist:5,jobnotfounderror:8,jobprogressschema:6,jobschema:6,jobstatu:5,jobstatusfield:5,jobwork:8,jobworkermanag:8,jobworkerregistri:8,join:4,json_dump:5,json_load:5,jsonfield:5,just:[15,16],keep:10,kei:[6,9,22,23,25],keyword:6,know:[15,24],kwarg:[5,6,8],lai:19,larg:[5,16],larger:[17,24],last:[8,18],latenc:16,later:[8,19],latest:[12,14,19,24],launch:[8,9,11,25],layer:[9,14,15,16,19,24,25],learn:[5,12,14,22,24,25],least:18,left:[11,13,15,19,23],less:[4,19],level:[1,4,5,9,19],light:12,like:16,limit:[9,11,17],line:[9,11],linux:10,list:[4,5,7,8,13,14,15,17,18,19,24],listobjfield:5,load:[5,6,9,11,15,17],load_field:6,load_onli:6,loadabl:6,local:[5,8,9,11,15],locat:[5,11],log:[0,9,24],logger:1,logging_level:1,longer:[17,24],look:[15,16,17,24],loss:[2,3,4,5,6,8,9,11,12,14,15,16,17,19,24,25],loss_analysi:4,losswrapp:3,low:24,lower:[11,22],lr_mod:[4,5],lr_sched:4,lr_schedule_modifi:5,ma_field:6,machin:[11,24],magic:[8,9,12,13,17,24],mai:[5,11,12,16,17,18,20,24],main:[1,11],maintain:12,major:17,make:[2,14,18,19,23,24],manag:[0,1,24],mani:[6,14,16,18,24],map:6,mark:8,marshmallow:6,mask_typ:[4,5],matter:4,max_node_spars:4,max_work:8,maxim:[4,13
],maximum:8,maxpool:24,mean:24,measur:[6,8,14,16,17,18,19,20,24],memori:24,mention:[22,23],menu:20,messag:[12,16],metadata:[6,24],method:11,metric:[4,6,9,11,16,20],might:[14,16,17,18,20],millisecond:[17,24],minim:[14,21],minimum:19,minut:9,miss:6,ml_engines_error:7,mod:4,mod_end_epoch:4,mod_start_epoch:4,modal:11,model:[0,1,2,3,4,6,8,9,12,13,14,15,20,21,22,24,25],model_analysi:4,model_id:[5,8],model_path:4,model_repo:[0,1],modelfrompathjobwork:8,modelfromrepojobwork:8,modelrepoarchitectureschema:6,modelrepodatasetschema:6,modelrepodomainschema:6,modelrepomodeldescschema:6,modelrepomodelmetricschema:6,modelrepomodelperfschema:6,modelrepomodelschema:6,modif:19,modifi:[2,4,5,6,14,21,22,24,25],modifier_id:5,modul:[0,9],more:[11,13,17,18,19,24],most:[11,17,18,20],move:[11,19],much:[11,15,16,18,19,24],multi:6,multipl:[5,6,13,15,16,19,20],must:[4,5,8,11,19],name:[4,5,8,15,16,17,18,24],namespac:1,natur:9,navig:[11,13,14,15,17,18,23],nearli:9,need:[9,11,12,14,15,19,21],nest:5,network:[1,9,11,12,15,18,24],neural:[1,8,9,12,13,15,17,24],neuralmag:9,next:[8,12,13,14,15,16,17,18,19,20,21,22,23,24],nightli:9,node:[4,5,6,17,24],node_overrid:4,none:[4,5,6,8],note:[5,11,15,16,17,18,19,20],notic:19,npz:8,number:[4,7,8,16,18,19,24],numer:16,object:[4,5,6,8,16],occur:[6,9,19],offici:9,offlin:16,often:19,oldest:8,onc:[8,11,14,15,19],one:[5,8,11,15,19,24],one_shot:8,ones:8,onli:[4,5,6,8,9,11,15,19],onlin:16,onnx:[7,8,11,14,15,17,20,24],onscreen:12,onto:8,open:[9,11,14,19,24,25],oper:[9,12,16,17,24],ops:[6,24],opt:[6,16],optim:[1,2,3,4,5,6,9,12,13,15,16,17,18,20,21,24,25],optim_const:3,optim_id:[4,5],optim_lr_sched_default_mod:4,optim_lr_sched_updat:4,optim_pruning_updat:4,optim_trainable_default_nod:4,optim_trainable_updat:4,optim_updat:4,optim_validate_and_get_project_by_id:4,optimepoch:4,option:[1,4,5,6,8,11,12,13,14,16,17,19,20],order:[10,20,24],origin:[4,11,15,17,18,19,20,23,24],ort:[17,24],ort_cpu:7,ort_gpu:7,other:[1,17,19,24],otherwis:[4,8,19],out:[8,10,11,15,16,19,24],over:9,overprecis:9,overrid:[4,5,22],overview:[13,25],own:10,packag:[0,9,11],page:[11,13,15],parallel:8,param:[5,9,19,24,25],paramet:[1,4,5,6,8,14,16,18,24],parameter:9,params_baselin:5,part:[15,21,24],partial:6,pass:[6,15,24],path:[5,6,8,11,12,15],peewe:5,pend:[5,8],per:[16,17,19,24],percentag:[17,24],perf:[4,5,6,8],perf_analysi:4,perform:[2,4,5,6,8,9,11,12,13,14,15,16,18,19,24,25],perhap:22,pip:[10,13],pipelin:11,place:[11,19],plan:12,platform:21,playhous:5,pleas:12,plu:9,point:[8,11,16,17,24],pool:24,popup:11,port:[1,11],portion:[1,17],possibl:[17,19,24],post:12,potenti:[7,9,12,14,17],practic:14,practition:[9,12],present:[8,24],preset:19,previou:15,primary_kei:5,problem:17,procedur:15,process:[8,9,12,19,21,22,24],product:[9,12],profil:[2,4,5,6,8,9,11,14,15,19,24,25],profile_id:[5,8],profile_loss:[4,5],profile_loss_id:[4,5],profile_perf:[4,5],profile_perf_id:[4,5],profiles_loss:5,profiles_perf:5,program:24,progress:[5,6,8],project:[0,1,8,9,14,16,17,18,22,23,24,25],project_data:8,project_id:[4,5,8],projectavailablemodelmodificationsschema:6,projectbenchmark:[4,5],projectbenchmark_set:5,projectbenchmarkdoesnotexist:5,projectbenchmarkresultschema:6,projectbenchmarkresultsschema:6,projectbenchmarkschema:6,projectdata:[4,5],projectdata_set:5,projectdatadoesnotexist:5,projectdataschema:6,projectdoesnotexist:5,projectextschema:6,projectlossprofil:[4,5],projectlossprofile_set:5,projectlossprofiledoesnotexist:5,projectlossprofileschema:6,projectmodel:[4,5],projectmodel_set:5,projectmodelanalysisschema:6,projectmodeldoesnotexist:5,projectmodelschema
:6,projectoptim:[4,5],projectoptimization_set:5,projectoptimizationdoesnotexist:5,projectoptimizationmodifierestimationsschema:6,projectoptimizationmodifierlrexponentialargsschema:6,projectoptimizationmodifierlrmultistepargsschema:6,projectoptimizationmodifierlrschedul:[4,5],projectoptimizationmodifierlrscheduledoesnotexist:5,projectoptimizationmodifierlrscheduleschema:6,projectoptimizationmodifierlrschema:6,projectoptimizationmodifierlrsetargsschema:6,projectoptimizationmodifierlrstepargsschema:6,projectoptimizationmodifierprun:[4,5],projectoptimizationmodifierpruningdoesnotexist:5,projectoptimizationmodifierpruningnodemetadataschema:6,projectoptimizationmodifierpruningnodeschema:6,projectoptimizationmodifierpruningschema:6,projectoptimizationmodifierquant:5,projectoptimizationmodifierquantizationdoesnotexist:5,projectoptimizationmodifierquantizationnodeschema:6,projectoptimizationmodifierquantizationschema:6,projectoptimizationmodifiertrain:[4,5],projectoptimizationmodifiertrainabledoesnotexist:5,projectoptimizationmodifiertrainablenodeschema:6,projectoptimizationmodifiertrainableschema:6,projectoptimizationschema:6,projectperfprofil:[4,5],projectperfprofile_set:5,projectperfprofiledoesnotexist:5,projectperfprofileschema:6,projectprofileanalysisschema:6,projectprofilemeasurementschema:6,projectprofilemeasurementsschema:6,projectprofilemodelopsbaselinemeasurementsschema:6,projectprofilemodelopsmeasurementsschema:6,projectprofileopbaselinemeasurementschema:6,projectprofileopmeasurementsschema:6,projectprofileopschema:6,projectprofileschema:6,projects_benchmark:[0,1],projects_data:[0,1],projects_model:[0,1,4],projects_optim:[0,1],projects_optimizations_prun:[1,2],projects_profil:[0,1,4],projectschema:6,proper:[8,11],properli:11,properti:[4,5,8],provid:[4,7,11,12,14,16,17,18,19,22],prunabl:[4,18],prune:[4,5,6,8,9,14,16,17,18,22,24,25],pruning_end_epoch:4,pruning_epoch:4,pruning_estim:[5,8],pruning_estimation_typ:[5,8],pruning_modifi:5,pruning_set:4,pruning_start_epoch:4,pruning_structur:[5,8],pruning_update_frequ:4,pruningmodelevalu:4,pruningset:4,put:[8,14],pypi:9,python:10,python_valu:5,pytorch:[3,15,17,21],pytorch__integr:[1,2],pytorch__train:[1,2],quantiz:[5,6,8,9,14,18,19,24],quantization_modifi:5,quantized_estim:[5,8],quantized_estimation_typ:5,queri:6,quick:9,quickli:[14,16],rais:[4,6,8],raise_not_found:4,ran:[17,19,20,24],rang:[18,19,24],rapidli:[9,12],rate:[5,22,24,25],rather:[14,15,19,20,24],raw:6,read:11,readi:[16,17,18,19],real:16,recent:20,recip:9,recommend:[4,10],recov:[9,18,19,22,24],recoveri:[4,9,11,18,19,24],redistribut:19,reduc:[18,24],reduct:[18,24],redund:[9,18],refer:15,referenc:16,reflect:23,refresh:[5,8],registri:8,rel:[18,19],relat:[2,6,8],releas:[11,12,18],relev:24,relu:24,remot:[11,15],remov:[9,19,24,25],repo:[2,6,8],report:6,repositori:[9,10,11],repres:[17,19],reproduc:12,request:[2,9,12],requir:[6,11,12,16,19,24],research:[9,12],respond:[14,18,24],respons:6,responsejobschema:6,responsejobsschema:6,responsemodelrepomodel:6,responseprojectbenchmarkdeletedschema:6,responseprojectbenchmarkschema:6,responseprojectbenchmarksschema:6,responseprojectdatadeletedschema:6,responseprojectdataschema:6,responseprojectdatasingleschema:6,responseprojectdeletedschema:6,responseprojectextschema:6,responseprojectlossprofileschema:6,responseprojectlossprofilesschema:6,responseprojectmodelanalysisschema:6,responseprojectmodeldeletedschema:6,responseprojectmodelschema:6,responseprojectoptimizationdeletedschema:6,responseprojectoptimizationframeworksavailablesamplesschema:6,responseproje
ctoptimizationframeworksavailableschema:6,responseprojectoptimizationmodifierdeletedschema:6,responseprojectoptimizationmodifiersavail:6,responseprojectoptimizationmodifiersbestestim:6,responseprojectoptimizationschema:6,responseprojectoptimizationsschema:6,responseprojectperfprofileschema:6,responseprojectperfprofilesschema:6,responseprojectprofiledeletedschema:6,responseprojectschema:6,responseprojectsschema:6,responsesysteminfo:6,restructur:24,result:[4,5,6,9,11,15,18,19,20,22,25],retain:16,retrain:[11,14,17,18,19,24],retriev:[4,8],review:[9,14,16,22,23,25],rewrit:21,right:[11,13,19,20],root:[1,5],root_path:5,rough:18,rout:[2,6],rule:11,run:[1,6,8,9,11,13,14,15,16,17,18,22,24,25],runtim:[17,20,24],same:[9,15,20],sampl:[5,6,11,24],satisfi:[14,19],save:[5,8,19,22,23],scale:[9,12,20],scenario:20,schedul:[4,5,6,19,24],schema:[0,1],schemaopt:6,scheme:16,screen:[9,11,15,17,19,21,25],screenshot:12,script:[1,11],scroll:19,search:6,searchjobsschema:6,searchmodelrepomodel:6,searchprojectbenchmarksschema:6,searchprojectdataschema:6,searchprojectoptimizationsschema:6,searchprojectprofilesschema:6,searchprojectsschema:6,second:[16,17,24],section:[14,17,18,19,23],see:[11,16,17,18,19,20,24],select:[11,15,16,17,19,20,24],sens:19,sensit:[9,11,14,24,25],separ:[11,24],sequenc:[5,6,24],sequenti:24,serial:6,serv:2,server:[1,2,5,6,8,11,15],set:[1,4,5,6,7,8,9,11,14,15,17,18,19,20,22,24,25],set_logging_level:1,setprojectdatafromschema:6,setprojectmodelfromschema:6,setup:[1,2,3,4,5,16,17,18,24],setup_filesystem:5,sever:15,share:12,shot:11,should:[4,8,16,19,22,24],show:[9,11,14,16,17,18,19,20,24],shown:[11,17],shuffl:[17,24],shutdown:8,side:15,signific:[17,18],significantli:[9,18,24],simpl:[9,14],simpli:[15,24],simplifi:[9,12],sinc:8,singl:[6,9,13,24,25],size:[5,8,9,12,15,16,17,18,20,24],slide:[9,12],slider:19,smaller:[9,14,17,24],smallest:24,softmax:24,softwar:12,some:[6,11],sort:20,sourc:[1,3,4,5,6,7,8,9,15],space:15,spars:[9,14,17,19],sparse_training_avail:4,sparseml:[3,9,11,15,21],sparsezoo:9,sparsif:[11,24],sparsifi:[10,11,15,16,17,18,21,22,23,24,25],sparsiti:[4,5,9,19,22,24],special:15,specif:[4,6,17,20,24],specifi:[11,18,19,20,24],speed:[9,11,12,17],speedup:[17,19,24],spent:17,sqlite_ext:5,src:9,stabil:19,stabilization_epoch:4,stabl:9,stage:19,standard:1,start:[4,5,8,9,11,15,19,20,24,25],start_epoch:[4,5],start_fine_tuning_epoch:4,state:5,statu:5,step:[4,6,8,12,13,14,16,17,18,19,20,21,22,23,24],stop:8,storag:5,store:[5,6,8],str:[1,3,4,5,6,7,8],string:[5,6],sub:8,subclass:8,subgraph:17,submit:12,submodul:[0,9],subpackag:[0,9],subsequ:8,substitut:11,suggest:12,suit:[9,12],summari:[9,12,24,25],support:[9,12],sure:12,svg:9,system:[0,1,5,8,10,11,16,19,22,24],systeminfo:6,tabl:19,take:[9,16,17,19,24],taken:19,tar:8,target:[11,16],techniqu:[9,12,14,19,24],tell:24,tensor:3,tensorflow:[15,17,21],tensorflow__integr:[1,2],term:[9,22,23,25],termin:11,test:10,textfield:5,than:[4,14,15,19,20,24],thei:[14,16,20,24],theoret:[16,17,24],therefor:[11,16,17],thi:[4,8,9,10,11,14,15,16,17,18,19,20,21,22,23,24,25],those:[11,17,18,19,24],thread:24,threadpoolexecutor:8,three:[14,17,18,19,20,23],through:[8,11,17,19,24],throughout:[15,16,19,22,23],throughput:[16,17],tied:[17,19],time:[4,8,14,15,16,17,19,24],timestamp:5,to_dict_valu:4,took:[17,24],tool:[9,12,24],tooltip:19,top:[9,19],torch:3,total:[18,24],tour:[9,10],track:5,train:[3,4,9,11,14,21,22,23,24,25],train_dataset:3,train_setup:3,trainabl:[4,5,6],trainable_modifi:5,training_epoch:[4,5],training_final_lr:4,training_init_lr:4,training_lr_fin:5,training_lr_init:5,training_optim
:5,transfer:[14,19],tune:[4,19],tupl:4,twice:19,two:[17,18,19,20],type:[5,6,8,11,13,17,18,19,24],type_:5,typic:24,ui_path:1,ultim:9,under:[8,11],understand:15,unindex:5,union:[3,4,6,8],uniqu:[5,15,24],unknown:6,unspecifi:15,unstructur:8,unsur:16,updat:[4,5,6,8,19],update_frequ:[4,5],updateprojectoptimizationschema:6,upload:[6,11,14,15],upper:[11,13],uri:[6,8],url:[6,8,11,15],use:[4,6,8,9,11,12,15,16,19,20,22,24],used:[1,5,6,11,12,15,16,17,18,19,20,22,24],user:[4,9,10,12,24],uses:24,using:[9,10,11,12,14,15,19,20,24],util:[0,1,2,3,17,24],val_dataset:3,valid:[4,5,6,7],validate_filesystem:5,validate_model_data:4,validate_pruning_nod:4,validationerror:6,valu:[4,5,6,7,14,17,18,19,20,22,24],valuabl:[15,24],varchar:5,vari:17,variou:[16,17,24],verbose_nam:5,veri:18,version:[12,14,15,20],versioninfoschema:6,via:9,view:[16,24],virtual:10,visit:11,visual:[9,12,17,19],vnni:24,wai:[12,15,16,17,19,24],want:[14,15,16,17,18,19,20,24],warmup:8,warmup_iterations_per_check:[5,8],web:11,websit:9,week:9,weight:[17,18,24],weight_magnitud:8,welcom:[9,25],well:[16,17,18,24],went:17,were:[17,18,24],what:[4,17,19,24],when:[4,7,9,14,15,16,17,18,19,24],where:[5,9,11,17,24],which:[13,15,16,17,18,19,20,24],who:[9,12],width:[9,24],window:11,winograd:9,within:[8,24],without:[8,14,16,17,24],won:8,work:[5,6,8,14,21],worker:[0,1,6],worker_ack:5,worker_arg:5,workflow:[9,11,14],working_dir:[1,3,5],would:[5,17,18,22],yaml:4,yet:20,yml:[14,21,22],you:[9,11,12,14,15,16,17,18,19,20,21,22,23,24,25],your:[9,10,11,12,13,14,15,17,18,19,20,21,22,23,24],zero:18},titles:["sparsify","sparsify package","sparsify.blueprints package","sparsify.blueprints.code_samples package","sparsify.blueprints.utils package","sparsify.models package","sparsify.schemas package","sparsify.utils package","sparsify.workers package","Sparsify 0.1","Installation","Quick Tour","Welcome to Sparsify","Installing and Launching Sparsify","Sparsify Overview","Analyze","Profiling Your Model","Reviewing Performance Profiles","Reviewing Loss Profiles","Optimize","Benchmarking","Integrate","Optimization Config File and Code for Optimization","Settings","Key Concepts/Features/Terms","User Guide"],titleterms:{"export":[11,19],"new":[11,13,15,17,18],Adding:[17,18],about:12,addit:13,analyz:[11,14,15],app:1,base:[5,8],benchmark:20,blueprint:[2,3,4],can:13,code:22,code_sampl:3,compar:20,concept:24,config:22,content:[1,2,3,4,5,6,7,8],count:18,creat:15,displai:13,engin:20,error:[2,6],exist:[13,15],featur:24,feedback:12,file:22,from:13,guid:[12,25],help:12,helper:[4,6],histori:9,infer:20,inform:13,instal:[10,13],integr:[14,21],job:[2,5,6],kei:24,launch:13,layer:[17,18],learn:[9,19],log:1,loss:18,manag:8,model:[5,11,16,17,18,19],model_repo:[2,6],modifi:19,modul:[1,2,3,4,5,6,7,8],more:9,open:[13,15],optim:[11,14,19,22],overview:[9,14],packag:[1,2,3,4,5,6,7,8],param:18,perform:17,profil:[16,17,18],project:[2,4,5,6,11,13,15],projects_benchmark:[2,4,5,6,8],projects_data:[2,4,5,6,8],projects_model:[2,5,6,8],projects_optim:[2,4,5,6],projects_optimizations_prun:4,projects_profil:[2,5,6,8],prune:19,pytorch__integr:3,pytorch__train:3,quick:11,rate:19,recip:11,releas:9,remov:20,resourc:9,result:17,review:[17,18],run:[19,20],schema:6,screen:13,sensit:18,set:23,singl:20,sparsif:9,sparsifi:[0,1,2,3,4,5,6,7,8,9,12,13,14,19],start:13,submodul:[1,2,3,4,5,6,7,8],subpackag:[1,2],summari:[17,18,19],system:[2,6,7],tensorflow__integr:3,term:24,thi:12,tour:11,train:19,user:25,util:[4,5,7],welcom:12,worker:8,you:13,your:16}}) \ No newline at end of file diff --git a/sparsify/userguide/01-intro.html 
b/sparsify/userguide/01-intro.html index 180aa6eec90..58b71d3c719 100644 --- a/sparsify/userguide/01-intro.html +++ b/sparsify/userguide/01-intro.html @@ -124,10 +124,11 @@ -

      Help and Support

      +

      Help

      diff --git a/sparsify/userguide/02-install-sparsify.html b/sparsify/userguide/02-install-sparsify.html index 60210611fb1..6bed7c9d425 100644 --- a/sparsify/userguide/02-install-sparsify.html +++ b/sparsify/userguide/02-install-sparsify.html @@ -128,10 +128,11 @@ -

      Help and Support

      +

      Help

      diff --git a/sparsify/userguide/03-sparsify-overview.html b/sparsify/userguide/03-sparsify-overview.html index 97ba218a770..df372a099f2 100644 --- a/sparsify/userguide/03-sparsify-overview.html +++ b/sparsify/userguide/03-sparsify-overview.html @@ -125,10 +125,11 @@ -

      Help and Support

      +

      Help

      diff --git a/sparsify/userguide/04-analyze.html b/sparsify/userguide/04-analyze.html index 8af3cd40853..e6f3f096b48 100644 --- a/sparsify/userguide/04-analyze.html +++ b/sparsify/userguide/04-analyze.html @@ -124,10 +124,11 @@ -

      Help and Support

      +

      Help

      diff --git a/sparsify/userguide/04a-profiling-your-model.html b/sparsify/userguide/04a-profiling-your-model.html index 93a408c222c..42951dfb669 100644 --- a/sparsify/userguide/04a-profiling-your-model.html +++ b/sparsify/userguide/04a-profiling-your-model.html @@ -104,10 +104,11 @@ -

      Help and Support

      +

      Help

      diff --git a/sparsify/userguide/04b-reviewing-performance-profiles.html b/sparsify/userguide/04b-reviewing-performance-profiles.html index 1dfa973338e..cc551a82802 100644 --- a/sparsify/userguide/04b-reviewing-performance-profiles.html +++ b/sparsify/userguide/04b-reviewing-performance-profiles.html @@ -126,10 +126,11 @@ -

      Help and Support

      +

      Help

      diff --git a/sparsify/userguide/04c-reviewing-loss-profiles.html b/sparsify/userguide/04c-reviewing-loss-profiles.html index c829450020c..6392d9a1dfb 100644 --- a/sparsify/userguide/04c-reviewing-loss-profiles.html +++ b/sparsify/userguide/04c-reviewing-loss-profiles.html @@ -128,10 +128,11 @@ -

      Help and Support

      +

      Help

      diff --git a/sparsify/userguide/05-optimize.html b/sparsify/userguide/05-optimize.html index 86a00dbc401..2ae8e8e4484 100644 --- a/sparsify/userguide/05-optimize.html +++ b/sparsify/userguide/05-optimize.html @@ -129,10 +129,11 @@ -

      Help and Support

      +

      Help

      diff --git a/sparsify/userguide/05a-benchmark.html b/sparsify/userguide/05a-benchmark.html index 42b57cedf42..bd6b6de3025 100644 --- a/sparsify/userguide/05a-benchmark.html +++ b/sparsify/userguide/05a-benchmark.html @@ -126,10 +126,11 @@ -

      Help and Support

      +

      Help

      diff --git a/sparsify/userguide/06-integrate.html b/sparsify/userguide/06-integrate.html index 0bfc0eee805..f875a440be8 100644 --- a/sparsify/userguide/06-integrate.html +++ b/sparsify/userguide/06-integrate.html @@ -120,10 +120,11 @@ -

      Help and Support

      +

      Help

      diff --git a/sparsify/userguide/06a-optimize-config.html b/sparsify/userguide/06a-optimize-config.html index db07c422fff..bd6cb3dd3c1 100644 --- a/sparsify/userguide/06a-optimize-config.html +++ b/sparsify/userguide/06a-optimize-config.html @@ -120,10 +120,11 @@ -

      Help and Support

      +

      Help

      diff --git a/sparsify/userguide/07-settings.html b/sparsify/userguide/07-settings.html index 5c9d5948be5..b7c18e2b296 100644 --- a/sparsify/userguide/07-settings.html +++ b/sparsify/userguide/07-settings.html @@ -120,10 +120,11 @@ -

      Help and Support

      +

      Help

      diff --git a/sparsify/userguide/08-key-terms.html b/sparsify/userguide/08-key-terms.html index b3b4bcbe0b9..7f8958adcc0 100644 --- a/sparsify/userguide/08-key-terms.html +++ b/sparsify/userguide/08-key-terms.html @@ -120,10 +120,11 @@ -

      Help and Support

      +

      Help

      diff --git a/sparsify/userguide/images/image_1.jpg b/sparsify/userguide/images/image_1.jpg deleted file mode 100644 index e42bc9bcbab..00000000000 Binary files a/sparsify/userguide/images/image_1.jpg and /dev/null differ diff --git a/sparsify/userguide/images/image_14.jpg b/sparsify/userguide/images/image_14.jpg deleted file mode 100644 index c326763208e..00000000000 Binary files a/sparsify/userguide/images/image_14.jpg and /dev/null differ diff --git a/sparsify/userguide/images/image_20.jpg b/sparsify/userguide/images/image_20.jpg deleted file mode 100644 index a36271df839..00000000000 Binary files a/sparsify/userguide/images/image_20.jpg and /dev/null differ diff --git a/sparsify/userguide/images/image_26.jpg b/sparsify/userguide/images/image_26.jpg deleted file mode 100644 index c45363b5b9b..00000000000 Binary files a/sparsify/userguide/images/image_26.jpg and /dev/null differ diff --git a/sparsify/userguide/images/image_28.jpg b/sparsify/userguide/images/image_28.jpg deleted file mode 100644 index 0c649672b60..00000000000 Binary files a/sparsify/userguide/images/image_28.jpg and /dev/null differ diff --git a/sparsify/userguide/images/image_34.jpg b/sparsify/userguide/images/image_34.jpg deleted file mode 100644 index 4880a6cf717..00000000000 Binary files a/sparsify/userguide/images/image_34.jpg and /dev/null differ diff --git a/sparsify/userguide/images/image_60.jpg b/sparsify/userguide/images/image_60.jpg deleted file mode 100644 index 368c65a1dde..00000000000 Binary files a/sparsify/userguide/images/image_60.jpg and /dev/null differ diff --git a/sparsify/userguide/images/image_7.jpg b/sparsify/userguide/images/image_7.jpg deleted file mode 100644 index d9f0506950c..00000000000 Binary files a/sparsify/userguide/images/image_7.jpg and /dev/null differ diff --git a/sparsify/userguide/index.html b/sparsify/userguide/index.html index 481f8c32edb..342aeca7ca3 100644 --- a/sparsify/userguide/index.html +++ b/sparsify/userguide/index.html @@ -120,10 +120,11 @@ -

      Help and Support

      +

      Help

      +
      • sparsify.models.base @@ -1889,8 +1848,6 @@

        S

      • module
      -
      • sparsify.models.jobs @@ -2060,17 +2017,10 @@

        S

    • - sparsify.workers.base_manager - -
    • -
    • - sparsify.workers.base_wrapper + sparsify.workers.manager
    • @@ -2107,7 +2057,7 @@

      S

    • stabilization_epochs() (sparsify.blueprints.utils.projects_optimizations.OptimEpochs property)
    • -
    • start() (sparsify.workers.base_wrapper.JobWorkerWrapper method) +
    • start() (sparsify.workers.manager.JobWorkerManager method)
    • start_epoch (sparsify.models.projects_optimizations.ProjectOptimization attribute) @@ -2124,12 +2074,8 @@

      S

    • start_epoch() (sparsify.blueprints.utils.projects_optimizations.OptimEpochs property)
    • started (sparsify.models.jobs.JobStatus attribute) -
    • -
    • started() (sparsify.workers.base_wrapper.JobWorkerWrapper property)
    • status (sparsify.models.jobs.Job attribute) -
    • -
    • structurally_pruned() (sparsify.blueprints.utils.projects_optimizations_pruning.PruningNodeEvaluator property)
    • SystemInfo (class in sparsify.schemas.system)
    • @@ -2217,7 +2163,7 @@

      W

        -
      • worker() (sparsify.workers.base_wrapper.JobWorkerWrapper property) +
      • worker_ack (sparsify.models.jobs.Job attribute)
      • worker_args (sparsify.models.jobs.Job attribute)
      • diff --git a/sparsify/index.html b/sparsify/index.html index d5c03549c68..7942c310faa 100644 --- a/sparsify/index.html +++ b/sparsify/index.html @@ -105,10 +105,11 @@ -

        Help and Support

        +

        Help

        @@ -178,7 +179,7 @@

        Sparsify 0.1

        -

        Neural network model repository for highly sparse models and optimization recipes

        +

        Easy-to-use UI for automatically sparsifying neural networks and creating sparsification recipes for better inference performance and a smaller footprint

        GitHub @@ -203,39 +204,45 @@

        Sparsify 0.1

        Overview

        -

        Sparsify is a deep learning autoML tool that simplifies the model optimization process to rapidly achieve the best combination of size, speed, and accuracy on any deep learning model. Sparsify optimizes and benchmarks models informed by industry research insights for ML practitioners, including ML engineers and operators, who need to deploy performant deep learning models fast and at scale. Sparsify shows visual performance potential for your model, including a sliding scale between performance and loss sensitivity, ultimately speeding up the model optimization process from weeks to minutes.

        -

        This repository contains the package to locally launch Sparsify where you can create projects to load and optimize your deep learning models. At the end, you can export optimization recipes to integrate with your training workflow.

        +

        Sparsify is an easy-to-use UI tool that simplifies the deep learning model optimization process to rapidly achieve the best combination of size, speed, and accuracy.
+Sparsify sparsifies and benchmarks models informed by industry research insights for ML practitioners, including ML engineers and operators, who need to deploy performant deep learning models fast and at scale.
+Sparsify shows visual performance potential for your model, including a sliding scale between performance and recovery, ultimately speeding up the model sparsification process from weeks to minutes.

        +

        This repository contains the package to locally launch Sparsify where you can create projects to load and sparsify your deep learning models.
+At the end, you can export sparsification recipes to integrate with your training workflow.
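
        As a point of reference for the "integrate with your training workflow" step above, the following is a minimal sketch of applying an exported recipe to an existing PyTorch training loop through SparseML. It assumes the SparseML PyTorch integration (ScheduledModifierManager.from_yaml and ScheduledOptimizer); the recipe path, model, data, and epoch count are placeholders, and the exact integration API may differ between SparseML releases, so treat this as illustrative rather than the project's prescribed workflow.

        import torch
        from torch.utils.data import DataLoader, TensorDataset
        from sparseml.pytorch.optim import ScheduledModifierManager, ScheduledOptimizer

        # Tiny stand-in model and data so the sketch is self-contained.
        model = torch.nn.Sequential(torch.nn.Linear(16, 32), torch.nn.ReLU(), torch.nn.Linear(32, 2))
        dataset = TensorDataset(torch.randn(64, 16), torch.randint(0, 2, (64,)))
        train_loader = DataLoader(dataset, batch_size=8)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

        # Load the recipe exported from a Sparsify project (path is hypothetical).
        manager = ScheduledModifierManager.from_yaml("recipe.yaml")

        # Wrap the optimizer so the recipe's pruning/LR modifiers run on schedule.
        optimizer = ScheduledOptimizer(optimizer, model, manager, steps_per_epoch=len(train_loader))

        num_epochs = 10  # pick a value that covers the recipe's schedule
        for _ in range(num_epochs):
            for inputs, targets in train_loader:
                optimizer.zero_grad()
                loss = torch.nn.functional.cross_entropy(model(inputs), targets)
                loss.backward()
                optimizer.step()  # modifiers are applied as part of each optimizer step

        After training completes, the sparsified model can be exported to ONNX for deployment, as described elsewhere in these docs.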

        -
          - sparsify.workers.base_manager -
          - sparsify.workers.base_wrapper + sparsify.workers.manager