diff --git a/setup.py b/setup.py
index 95aa8da66..e5133b71a 100644
--- a/setup.py
+++ b/setup.py
@@ -51,7 +51,7 @@
     "pyyaml>=5.1.0",
     "requests>=2.0.0",
     "tqdm>=4.0.0",
-    "pydantic>=1.8.2,<2.0.0",
+    "pydantic>=2.0.0,<2.8",
     "click>=7.1.2,!=8.0.0",  # latest version < 8.0 + blocked version with reported bug
     "protobuf>=3.12.2",
     "pandas>1.3",
diff --git a/src/sparsezoo/analyze_v1/analysis.py b/src/sparsezoo/analyze_v1/analysis.py
index 96f4e264a..1578cbd2e 100644
--- a/src/sparsezoo/analyze_v1/analysis.py
+++ b/src/sparsezoo/analyze_v1/analysis.py
@@ -27,7 +27,7 @@
 import numpy
 import yaml
 from onnx import ModelProto, NodeProto
-from pydantic import BaseModel, Field, PositiveFloat, PositiveInt
+from pydantic import BaseModel, ConfigDict, Field, PositiveFloat, PositiveInt
 
 from sparsezoo import Model
 from sparsezoo.analyze_v1.utils.helpers import numpy_array_representer
@@ -200,6 +200,7 @@ class BenchmarkScenario(YAMLSerializableBaseModel):
     )
 
     num_cores: Optional[int] = Field(
+        None,
         description="The number of cores to use for benchmarking, can also take "
         "in a `None` value, which represents all cores",
     )
@@ -311,9 +312,7 @@ class NodeAnalysis(YAMLSerializableBaseModel):
     zero_point: Union[int, numpy.ndarray] = Field(
         description="Node zero point for quantization, default zero"
     )
-
-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(arbitrary_types_allowed=True)
 
     @classmethod
     def from_node(
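
The analyze_v1 changes above follow two standard pydantic v2 migration patterns: the nested `class Config` becomes a `model_config = ConfigDict(...)` attribute, and `Optional` fields get an explicit `None` default because v2 no longer infers one. A minimal sketch of both patterns, with illustrative names rather than this repo's classes:

```python
from typing import Optional

from pydantic import BaseModel, ConfigDict, Field


class ExampleSchema(BaseModel):
    # pydantic v2: model-wide settings move from `class Config` to `model_config`
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # pydantic v2: Optional[...] no longer implies a default; pass None explicitly
    num_cores: Optional[int] = Field(
        None, description="Number of cores; None means all cores"
    )
```
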
diff --git a/src/sparsezoo/analyze_v1/utils/models.py b/src/sparsezoo/analyze_v1/utils/models.py
index 9ff1979fa..68c4d7f59 100644
--- a/src/sparsezoo/analyze_v1/utils/models.py
+++ b/src/sparsezoo/analyze_v1/utils/models.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 import logging
 import textwrap
-from typing import Dict, List, Optional, Tuple, Union
+from typing import ClassVar, Dict, List, Optional, Tuple, Union
 
 from pydantic import BaseModel, Field
 
@@ -30,6 +30,7 @@
 ]
 
 _LOGGER = logging.getLogger(__name__)
+PrintOrderType = ClassVar[List[str]]
 
 
 class PropertyBaseModel(BaseModel):
@@ -104,11 +105,12 @@ class NodeIO(BaseModel):
 
     name: str = Field(description="Name of the input/output in onnx model graph")
     shape: Optional[List[Union[None, int]]] = Field(
+        None,
         description="Shape of the input/output in onnx model graph (assuming a "
-        "batch size of 1)"
+        "batch size of 1)",
     )
     dtype: Optional[str] = Field(
-        description="Data type of the values from the input/output"
+        None, description="Data type of the values from the input/output"
     )
 
 
@@ -220,9 +222,9 @@ class ParameterComponent(BaseModel):
     """
 
     alias: str = Field(description="The type of parameter (weight, bias)")
-    name: Optional[str] = Field(description="The name of the parameter")
+    name: Optional[str] = Field(None, description="The name of the parameter")
     shape: Optional[List[Union[None, int]]] = Field(
-        description="The shape of the parameter"
+        None, description="The shape of the parameter"
     )
     parameter_summary: ParameterSummary = Field(
         description="A summary of the parameter"
@@ -235,7 +237,7 @@ class Entry(BaseModel):
     A BaseModel with subtraction and pretty_print support
     """
 
-    _print_order: List[str] = []
+    _print_order: PrintOrderType = []
 
     def __sub__(self, other):
         """
@@ -306,7 +308,7 @@ class BaseEntry(Entry):
     sparsity: float
     quantized: float
 
-    _print_order = ["sparsity", "quantized"]
+    _print_order: PrintOrderType = ["sparsity", "quantized"]
 
 
 class NamedEntry(BaseEntry):
@@ -318,7 +320,7 @@ class NamedEntry(BaseEntry):
     total: float
     size: int
 
-    _print_order = ["name", "total", "size"] + BaseEntry._print_order
+    _print_order: PrintOrderType = ["name", "total", "size"] + BaseEntry._print_order
 
 
 class TypedEntry(BaseEntry):
@@ -329,7 +331,7 @@ class TypedEntry(BaseEntry):
     type: str
     size: int
 
-    _print_order = ["type", "size"] + BaseEntry._print_order
+    _print_order: PrintOrderType = ["type", "size"] + BaseEntry._print_order
 
 
 class ModelEntry(BaseEntry):
@@ -338,7 +340,7 @@ class ModelEntry(BaseEntry):
     """
 
     model: str
-    _print_order = ["model"] + BaseEntry._print_order
+    _print_order: PrintOrderType = ["model"] + BaseEntry._print_order
 
 
 class SizedModelEntry(ModelEntry):
@@ -347,8 +349,8 @@ class SizedModelEntry(ModelEntry):
     """
 
     count: int
-    size: int
-    _print_order = ModelEntry._print_order + ["count", "size"]
+    size: Union[int, float]
+    _print_order: PrintOrderType = ModelEntry._print_order + ["count", "size"]
 
 
 class PerformanceEntry(BaseEntry):
@@ -361,7 +363,7 @@ class PerformanceEntry(BaseEntry):
     throughput: float
     supported_graph: float
 
-    _print_order = [
+    _print_order: PrintOrderType = [
         "model",
         "latency",
         "throughput",
@@ -377,7 +379,7 @@ class NodeTimingEntry(Entry):
     node_name: str
     avg_runtime: float
 
-    _print_order = [
+    _print_order: PrintOrderType = [
         "node_name",
         "avg_runtime",
     ] + Entry._print_order
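
The `PrintOrderType = ClassVar[List[str]]` alias above exists because pydantic v2 treats leading-underscore attributes as private instance attributes unless they are annotated as `ClassVar`; the annotation keeps `_print_order` a plain class-level constant that subclasses can read and extend. A small sketch of the pattern, with illustrative class names:

```python
from typing import ClassVar, List

from pydantic import BaseModel

# Same idea as the alias added above: one readable name for the repeated annotation.
PrintOrderType = ClassVar[List[str]]


class EntrySketch(BaseModel):
    # ClassVar keeps this a plain class attribute; without it, pydantic v2 would turn
    # the underscore-prefixed name into a private instance attribute.
    _print_order: PrintOrderType = []


class BaseEntrySketch(EntrySketch):
    sparsity: float
    _print_order: PrintOrderType = ["sparsity"] + EntrySketch._print_order
```
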
diff --git a/src/sparsezoo/analyze_v2/schemas/distribution_analysis.py b/src/sparsezoo/analyze_v2/schemas/distribution_analysis.py
index 0e44a8ce7..9c271beec 100644
--- a/src/sparsezoo/analyze_v2/schemas/distribution_analysis.py
+++ b/src/sparsezoo/analyze_v2/schemas/distribution_analysis.py
@@ -14,26 +14,26 @@
 
 from typing import Dict, List, Optional
 
-from pydantic import BaseModel, Field, validator
+from pydantic import BaseModel, Field, field_validator
 
 from sparsezoo.analyze_v2.schemas.utils import type_validator
 
 
 class DistributionAnalysisSchema(BaseModel):
     counts: Optional[int] = Field(..., description="Total number of parameters")
-    mean: Optional[float]
-    median: Optional[float]
-    modes: Optional[List]
-    sum_val: Optional[float]
-    min_val: Optional[float]
-    max_val: Optional[float]
-    percentiles: Optional[Dict[float, float]]
-    std_dev: Optional[float]
-    skewness: Optional[float]
-    kurtosis: Optional[float]
-    entropy: Optional[float]
-    bin_width: Optional[float]
-    num_bins: Optional[int]
+    mean: Optional[float] = None
+    median: Optional[float] = None
+    modes: Optional[List] = None
+    sum_val: Optional[float] = None
+    min_val: Optional[float] = None
+    max_val: Optional[float] = None
+    percentiles: Optional[Dict[float, float]] = None
+    std_dev: Optional[float] = None
+    skewness: Optional[float] = None
+    kurtosis: Optional[float] = None
+    entropy: Optional[float] = None
+    bin_width: Optional[float] = None
+    num_bins: Optional[int] = None
     hist: Optional[List[float]] = Field(
         ..., description="Frequency of the parameters, with respect to the bin edges"
     )
@@ -41,6 +41,7 @@ class DistributionAnalysisSchema(BaseModel):
         ..., description="Lower bound edges of each bin"
     )
 
-    @validator("*", pre=True)
+    @field_validator("*", mode="before")
+    @classmethod
     def validate_types(cls, value):
         return type_validator(value)
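
The decorator change above is the recurring validator pattern in the remaining schema files: `@validator("*", pre=True)` becomes `@field_validator("*", mode="before")`, stacked on `@classmethod` as pydantic v2 expects. A minimal sketch, with a placeholder coercion standing in for this repo's `type_validator` helper:

```python
from typing import Optional

from pydantic import BaseModel, field_validator


class DistributionSketch(BaseModel):
    mean: Optional[float] = None  # v2: optional fields need an explicit default

    # v2 equivalent of `@validator("*", pre=True)`; runs before standard validation
    @field_validator("*", mode="before")
    @classmethod
    def validate_types(cls, value):
        # stand-in for type_validator(): unwrap numpy scalars to plain Python values
        return value.item() if hasattr(value, "item") else value
```
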
diff --git a/src/sparsezoo/analyze_v2/schemas/node_analysis.py b/src/sparsezoo/analyze_v2/schemas/node_analysis.py
index 62cf366df..505fc31e8 100644
--- a/src/sparsezoo/analyze_v2/schemas/node_analysis.py
+++ b/src/sparsezoo/analyze_v2/schemas/node_analysis.py
@@ -14,7 +14,7 @@
 
 from typing import List, Optional
 
-from pydantic import BaseModel, Field, validator
+from pydantic import BaseModel, Field, field_validator
 
 from sparsezoo.analyze_v2.schemas.memory_access_analysis import (
     MemoryAccessAnalysisSchema,
@@ -33,6 +33,7 @@ class NodeAnalysisSchema(BaseModel):
     params: ParameterAnalysisSchema
     mem_access: MemoryAccessAnalysisSchema
 
-    @validator("input", "output", pre=True)
+    @field_validator("input", "output", mode="before")
+    @classmethod
     def validate_types(cls, value):
         return [val for val in value]
diff --git a/src/sparsezoo/analyze_v2/schemas/quantization_analysis.py b/src/sparsezoo/analyze_v2/schemas/quantization_analysis.py
index b0a43120e..75dba714a 100644
--- a/src/sparsezoo/analyze_v2/schemas/quantization_analysis.py
+++ b/src/sparsezoo/analyze_v2/schemas/quantization_analysis.py
@@ -14,7 +14,7 @@
 
 from typing import Optional
 
-from pydantic import BaseModel, Field, validator
+from pydantic import BaseModel, Field, field_validator, validator
 
 from sparsezoo.analyze_v2.schemas.utils import type_validator
 
@@ -40,7 +40,8 @@ class QuantizationSummaryAnalysisSchema(BaseModel):
         None, description="Percentage of counts_sparse over counts"
     )
 
-    @validator("*", pre=True)
+    @field_validator("*", mode="before")
+    @classmethod
     def validate_types(cls, value):
         return type_validator(value)
 
diff --git a/src/sparsezoo/analyze_v2/schemas/sparsity_analysis.py b/src/sparsezoo/analyze_v2/schemas/sparsity_analysis.py
index af6d8a20c..12a982459 100644
--- a/src/sparsezoo/analyze_v2/schemas/sparsity_analysis.py
+++ b/src/sparsezoo/analyze_v2/schemas/sparsity_analysis.py
@@ -14,7 +14,7 @@
 
 from typing import Optional
 
-from pydantic import BaseModel, Field, validator
+from pydantic import BaseModel, Field, field_validator, validator
 
 from sparsezoo.analyze_v2.schemas.utils import type_validator
 
@@ -28,7 +28,8 @@ class SparsitySummaryAnalysisSchema(BaseModel):
         None, description="Percentage of counts_sparse over counts"
     )
 
-    @validator("*", pre=True)
+    @field_validator("*", mode="before")
+    @classmethod
     def validate_types(cls, value):
         return type_validator(value)
 
diff --git a/src/sparsezoo/evaluation/results.py b/src/sparsezoo/evaluation/results.py
index 30e3bf509..e9f892b4c 100644
--- a/src/sparsezoo/evaluation/results.py
+++ b/src/sparsezoo/evaluation/results.py
@@ -71,15 +71,15 @@ class Metric(BaseModel):
 
 
 class Dataset(BaseModel):
-    type: Optional[str] = Field(description="Type of dataset")
+    type: Optional[str] = Field(None, description="Type of dataset")
     name: str = Field(description="Name of the dataset")
-    config: Any = Field(description="Configuration for the dataset")
-    split: Optional[str] = Field(description="Split of the dataset")
+    config: Any = Field(None, description="Configuration for the dataset")
+    split: Optional[str] = Field(None, description="Split of the dataset")
 
 
 class EvalSample(BaseModel):
-    input: Any = Field(description="Sample input to the model")
-    output: Any = Field(description="Sample output from the model")
+    input: Any = Field(None, description="Sample input to the model")
+    output: Any = Field(None, description="Sample output from the model")
 
 
 class Evaluation(BaseModel):
@@ -90,7 +90,7 @@ class Evaluation(BaseModel):
     dataset: Dataset = Field(description="Dataset that the evaluation was performed on")
     metrics: List[Metric] = Field(description="List of metrics for the evaluation")
     samples: Optional[List[EvalSample]] = Field(
-        description="List of samples for the evaluation"
+        None, description="List of samples for the evaluation"
     )
 
 
@@ -99,8 +99,9 @@ class Result(BaseModel):
         description="Evaluation result represented in the unified, structured format"
     )
     raw: Any = Field(
+        None,
         description="Evaluation result represented in the raw format "
-        "(characteristic for the specific evaluation integration)"
+        "(characteristic for the specific evaluation integration)",
     )
 
 
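
The evaluation results changes above apply the same default fix to `Any`-typed fields: in pydantic v1 both `Any` and `Optional` fields silently defaulted to `None`, while v2 requires the default to be spelled out. A tiny sketch reusing the field names from the diff:

```python
from typing import Any

from pydantic import BaseModel, Field


class EvalSampleSketch(BaseModel):
    # v1 gave Any-typed fields an implicit None default; v2 requires it explicitly
    input: Any = Field(None, description="Sample input to the model")
    output: Any = Field(None, description="Sample output from the model")
```
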
diff --git a/src/sparsezoo/utils/standardization/feature_status_page.py b/src/sparsezoo/utils/standardization/feature_status_page.py
index d4158dd0b..2af035b62 100644
--- a/src/sparsezoo/utils/standardization/feature_status_page.py
+++ b/src/sparsezoo/utils/standardization/feature_status_page.py
@@ -22,7 +22,7 @@
 from typing import List
 
 import yaml
-from pydantic import BaseModel, Field
+from pydantic.v1 import BaseModel, Field
 
 from sparsezoo.utils.standardization.feature_status import FeatureStatus
 from sparsezoo.utils.standardization.feature_status_table import FeatureStatusTable
diff --git a/src/sparsezoo/utils/standardization/feature_status_table.py b/src/sparsezoo/utils/standardization/feature_status_table.py
index 621d58d8f..f19fb0703 100644
--- a/src/sparsezoo/utils/standardization/feature_status_table.py
+++ b/src/sparsezoo/utils/standardization/feature_status_table.py
@@ -19,7 +19,7 @@
 from abc import ABC, abstractmethod
 from typing import List, Tuple
 
-from pydantic import BaseModel, Field
+from pydantic.v1 import BaseModel, Field
 
 from sparsezoo.utils.standardization.feature_status import FeatureStatus
 from sparsezoo.utils.standardization.markdown_utils import create_markdown_table
diff --git a/tests/sparsezoo/utils/standardization/test_feature_status_page.py b/tests/sparsezoo/utils/standardization/test_feature_status_page.py
index 62a3071ad..94750c382 100644
--- a/tests/sparsezoo/utils/standardization/test_feature_status_page.py
+++ b/tests/sparsezoo/utils/standardization/test_feature_status_page.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from pydantic import Field
+from pydantic.v1 import Field
 
 from sparsezoo.utils.standardization import (
     FeatureStatus,
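
The standardization modules and their test switch to `pydantic.v1`, the compatibility namespace bundled with pydantic 2.x, keeping their v1-style models unchanged while the rest of the package moves to the v2 API. A minimal sketch of that escape hatch, with an illustrative class name:

```python
# pydantic 2.x ships the old API under pydantic.v1, so v1-style models keep working
# without pinning pydantic below 2.0.
from pydantic.v1 import BaseModel, Field


class StatusSketch(BaseModel):
    name: str = Field(description="v1-style Field usage, unchanged")
```
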