Commit
minor: small linting
mrava87 committed Feb 13, 2024
1 parent 0e880aa commit 42a303a
Showing 5 changed files with 29 additions and 31 deletions.
examples/plot_distributed_array.py (2 changes: 1 addition & 1 deletion)
@@ -84,7 +84,7 @@
pylops_mpi.plot_local_arrays(sum_arr, "Addition", vmin=0, vmax=1)

###############################################################################
-# **Element-wise In-place Addition** - Similar to the previous one but the
+# **Element-wise In-place Addition** - Similar to the previous one but the
# addition is performed directly on one of the addends without creating a new
# distributed array.
sum_arr += arr2
examples/plot_stacked_array.py (14 changes: 7 additions & 7 deletions)
@@ -22,10 +22,10 @@

###############################################################################
# Let's start by defining two distributed array
-subarr1 = pylops_mpi.DistributedArray(global_shape=size*10,
+subarr1 = pylops_mpi.DistributedArray(global_shape=size * 10,
partition=pylops_mpi.Partition.SCATTER,
axis=0)
-subarr2 = pylops_mpi.DistributedArray(global_shape=size*4,
+subarr2 = pylops_mpi.DistributedArray(global_shape=size * 4,
partition=pylops_mpi.Partition.SCATTER,
axis=0)
# Filling the local arrays
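(For orientation, a minimal sketch of where this example goes from here, not part of this diff: the two scattered arrays are filled rank by rank and combined into a StackedDistributedArray, using the same constructor exercised in tests/test_stackedarray.py further down. Fill values and the name arr1 are illustrative assumptions.)

# Illustrative sketch only; assumes mpi4py is available and the script runs under mpiexec
import numpy as np
from mpi4py import MPI
import pylops_mpi

rank = MPI.COMM_WORLD.Get_rank()
# Fill the slice of each distributed array owned by this rank
subarr1[:] = np.ones(subarr1.local_shape) * rank
subarr2[:] = np.ones(subarr2.local_shape) * rank
# Combine the two distributed arrays into a single stacked object
arr1 = pylops_mpi.StackedDistributedArray([subarr1, subarr2])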
@@ -53,10 +53,10 @@
###############################################################################
# Let's now create a second :py:class:`pylops_mpi.StackedDistributedArray` object
# and perform different mathematical operations on those two objects.
-subarr1_ = pylops_mpi.DistributedArray(global_shape=size*10,
+subarr1_ = pylops_mpi.DistributedArray(global_shape=size * 10,
partition=pylops_mpi.Partition.SCATTER,
axis=0)
-subarr2_ = pylops_mpi.DistributedArray(global_shape=size*4,
+subarr2_ = pylops_mpi.DistributedArray(global_shape=size * 4,
partition=pylops_mpi.Partition.SCATTER,
axis=0)
# Filling the local arrays
@@ -107,7 +107,7 @@

###############################################################################
# **VStack of operators**
-x = pylops_mpi.DistributedArray(global_shape=size*10,
+x = pylops_mpi.DistributedArray(global_shape=size * 10,
partition=pylops_mpi.Partition.SCATTER,
axis=0)
# Filling the local arrays
@@ -130,7 +130,7 @@
pylops.BlockDiag([pylops.MatrixMult(2 * Mop1),] * size)])
print('mop.shape', mop.shape, mop_single.shape)

-x_single = np.ones(size*10)
+x_single = np.ones(size * 10)
y_single = mop_single @ x_single
xadj_single = mop_single.H @ y_single

@@ -162,4 +162,4 @@
xinv_array = xinv.asarray()

if rank == 0:
-print('xinv_array', xinv_array)
+print('xinv_array', xinv_array)
pylops_mpi/DistributedArray.py (27 changes: 14 additions & 13 deletions)
@@ -346,13 +346,13 @@ def __iadd__(self, x):

def __sub__(self, x):
return self.__add__(-x)

def __isub__(self, x):
return self.__iadd__(-x)

def __mul__(self, x):
return self.multiply(x)

def __rmul__(self, x):
return self.multiply(x)
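(A usage sketch of what these operators dispatch to, assuming two DistributedArrays x and y with matching partition and global shape; the names are hypothetical:)

z = x + y    # __add__  -> add: returns a new DistributedArray
x += y       # __iadd__ -> iadd: updates x.local_array in place
z = x - y    # __sub__  -> add with the negated operand
w = x * y    # __mul__  -> multiply: element-wise product of the local arrays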

@@ -376,13 +376,12 @@ def iadd(self, dist_array):
self[:] = self.local_array + dist_array.local_array
return self


def multiply(self, dist_array):
"""Distributed Element-wise multiplication
"""
if isinstance(dist_array, DistributedArray):
self._check_partition_shape(dist_array)

ProductArray = DistributedArray(global_shape=self.global_shape,
base_comm=self.base_comm,
dtype=self.dtype,
@@ -616,12 +615,13 @@ def asarray(self):
-------
final_array : :obj:`numpy.ndarray`
Global Array gathered at all ranks
"""
return np.hstack([distarr.asarray().ravel() for distarr in self.distarrays])

def _check_stacked_size(self, stacked_array):
"""Check that arrays have consistent size
"""
if self.narrays != stacked_array.narrays:
raise ValueError("Stacked arrays must be composed the same number of of distributed arrays")
@@ -632,17 +632,17 @@ def _check_stacked_size(self, stacked_array):
f"{stacked_array[iarr].global_shape}")

def __neg__(self):
-arr = self.copy() #StackedDistributedArray([distarray.copy() for distarray in self.distarrays])
+arr = self.copy()
for iarr in range(self.narrays):
arr[iarr][:] = -arr[iarr][:]
return arr

def __add__(self, x):
return self.add(x)

def __iadd__(self, x):
return self.iadd(x)

def __sub__(self, x):
return self.__add__(-x)

@@ -651,15 +651,16 @@ def __isub__(self, x):

def __mul__(self, x):
return self.multiply(x)

def __rmul__(self, x):
return self.multiply(x)

def add(self, stacked_array):
"""Stacked Distributed Addition of arrays
"""
self._check_stacked_size(stacked_array)
-SumArray = self.copy() #StackedDistributedArray([distarray.copy() for distarray in self.distarrays])
+SumArray = self.copy()
for iarr in range(self.narrays):
SumArray[iarr][:] = (self[iarr] + stacked_array[iarr])[:]
return SumArray
@@ -671,11 +672,11 @@ def iadd(self, stacked_array):
for iarr in range(self.narrays):
self[iarr][:] = (self[iarr] + stacked_array[iarr])[:]
return self

def multiply(self, stacked_array):
if isinstance(stacked_array, StackedDistributedArray):
self._check_stacked_size(stacked_array)
-ProductArray = self.copy() #StackedDistributedArray([distarray.copy() for distarray in self.distarrays])
+ProductArray = self.copy()

if isinstance(stacked_array, StackedDistributedArray):
# multiply two DistributedArray
@@ -686,7 +687,7 @@
for iarr in range(self.narrays):
ProductArray[iarr][:] = (self[iarr] * stacked_array)[:]
return ProductArray

def dot(self, stacked_array):
self._check_stacked_size(stacked_array)
dotprod = 0.
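(The StackedDistributedArray methods above repeat the same pattern one level up: each operation is applied array by array across the stack. A minimal sketch, reusing the two stacks built in tests/test_stackedarray.py below:)

s1 = StackedDistributedArray([distributed_array0, distributed_array1])
s2 = StackedDistributedArray([distributed_array1, distributed_array0])
s_sum = s1 + s2      # add: per-array sums collected in a new stack
s1 += s2             # iadd: per-array in-place addition
s_prod = s1 * s2     # multiply: per-array element-wise products
dp = s1.dot(s2)      # dot: accumulated over the stacked arrays (starting from dotprod = 0.)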
pylops_mpi/basicoperators/VStack.py (6 changes: 3 additions & 3 deletions)
@@ -139,7 +139,7 @@ class StackedVStack():
----------
ops : :obj:`list`
One or more :class:`pylops_mpi.MPILinearOperator` to be vertically stacked.
Attributes
----------
shape : :obj:`tuple`
@@ -152,8 +152,8 @@ class StackedVStack():
Notes
-----
-An StackedVStack is composed of N :class:`pylops_mpi.MPILinearOperator` stacked
-vertically. These MPI operators will be applied sequentially, however distributed
+An StackedVStack is composed of N :class:`pylops_mpi.MPILinearOperator` stacked
+vertically. These MPI operators will be applied sequentially, however distributed
computations will be performed within each operator.
"""
tests/test_stackedarray.py (11 changes: 4 additions & 7 deletions)
@@ -3,12 +3,10 @@
$ mpiexec -n 10 pytest test_stackedarray.py --with-mpi
"""
import numpy as np
-from mpi4py import MPI
import pytest
from numpy.testing import assert_allclose

from pylops_mpi import DistributedArray, Partition, StackedDistributedArray
-from pylops_mpi.DistributedArray import local_split

np.random.seed(42)

@@ -57,13 +55,13 @@ def test_creation(par):
assert_allclose(stacked_arrays[1].local_array,
np.ones(shape=distributed_array1.local_shape,
dtype=par['dtype']), rtol=1e-14)

# Modify array in place
distributed_array0[:] = 2
assert_allclose(stacked_arrays[0].local_array,
2 * np.ones(shape=distributed_array0.local_shape,
dtype=par['dtype']), rtol=1e-14)


@pytest.mark.mpi(min_size=2)
@pytest.mark.parametrize("par", [(par1), (par1j), (par2),
@@ -81,7 +79,7 @@ def test_stacked_math(par):

stacked_array1 = StackedDistributedArray([distributed_array0, distributed_array1])
stacked_array2 = StackedDistributedArray([distributed_array1, distributed_array0])

# Addition
sum_array = stacked_array1 + stacked_array2
assert isinstance(sum_array, StackedDistributedArray)
@@ -115,7 +113,6 @@ def test_stacked_math(par):
assert_allclose(l1norm, np.linalg.norm(stacked_array1.asarray().flatten(), 1),
rtol=1e-14)
assert_allclose(l2norm, np.linalg.norm(stacked_array1.asarray(), 2),
-rtol=1e-10) # needed to raise it due to how partial norms are combined (with power applied)
+rtol=1e-10)  # needed to raise it due to how partial norms are combined (with power applied)
assert_allclose(linfnorm, np.linalg.norm(stacked_array1.asarray().flatten(), np.inf),
rtol=1e-14)
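(For reference, a tiny sketch of why the 2-norm tolerance above is loosened; the assumption, following the inline comment, is that the global norm is rebuilt from per-rank partial norms:)

import numpy as np
partial_norms = np.array([3.0, 4.0])               # ||x_r||_2 contributed by each rank
global_norm = np.sqrt(np.sum(partial_norms ** 2))  # -> 5.0; squaring and re-rooting loses
                                                   # a little precision vs. one flat norm call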
