Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

compiler: Fix issue #2235 #2237

Open
wants to merge 13 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions devito/deprecations.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,14 @@ class DevitoDeprecation():

@cached_property
def coeff_warn(self):
warn("The Coefficient API is deprecated and will be removed, coefficients should"
warn("The Coefficient API is deprecated and will be removed, coefficients should "
"be passed directly to the derivative object `u.dx(weights=...)",
DeprecationWarning, stacklevel=2)
return

@cached_property
def symbolic_warn(self):
warn("coefficients='symbolic' is deprecated, coefficients should"
warn("coefficients='symbolic' is deprecated, coefficients should "
"be passed directly to the derivative object `u.dx(weights=...)",
DeprecationWarning, stacklevel=2)
return
Expand Down
13 changes: 13 additions & 0 deletions devito/ir/clusters/cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -389,6 +389,19 @@ def dspace(self):
# Dimension-centric view of the data space
intervals = IntervalGroup.generate('union', *parts.values())

# 'union' may consume intervals (values) from keys that have dimensions
# not mapped to intervals (e.g. issue #2235), resulting in a reduced
# iteration size. Here, we relax the mapped upper interval by
# intersecting only the intervals whose dimensions match
for f, v in parts.items():
for i in v:
if i.dim in self.ispace and i.dim in f.dimensions:
# oobs check is not required but helps reduce
# interval reconstruction
ii = intervals[i.dim].intersection(v[i.dim])
if not ii.is_Null:
intervals = intervals.set_upper(i.dim, ii.upper)

# E.g., `db0 -> time`, but `xi NOT-> x`
intervals = intervals.promote(lambda d: not d.is_Sub)
intervals = intervals.zero(set(intervals.dimensions) - oobs)
Expand Down
8 changes: 8 additions & 0 deletions devito/ir/support/space.py
Original file line number Diff line number Diff line change
Expand Up @@ -259,6 +259,9 @@ def negate(self):
def zero(self):
return Interval(self.dim, 0, 0, self.stamp)

def set_upper(self, v=0):
return Interval(self.dim, self.lower, v, self.stamp)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this implementation doesn't really reflect the method name....

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

better?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

it looks like a jump in the class API, given that all other methos are more generic, see zero, flip, lift, etc

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

maybe use sth like "ceil" ?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

To add, should all of these properties be cached? They all build Interval objects, rather than simply returning a private variable

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I do not know, @FabioLuporini , @mloubout ?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this one in particular isn't a property, it's a method

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So this is resolved I guess


def flip(self):
return Interval(self.dim, self.upper, self.lower, self.stamp)

Expand Down Expand Up @@ -496,6 +499,11 @@ def zero(self, d=None):

return IntervalGroup(intervals, relations=self.relations, mode=self.mode)

def set_upper(self, d, v=0):
dims = as_tuple(d)
return IntervalGroup([i.set_upper(v) if i.dim in dims else i for i in self],
relations=self.relations, mode=self.mode)

def lift(self, d=None, v=None):
d = set(self.dimensions if d is None else as_tuple(d))
intervals = [i.lift(v) if i.dim._defines & d else i for i in self]
Expand Down
79 changes: 39 additions & 40 deletions devito/types/dense.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,8 +111,8 @@ def __init_finalize__(self, *args, function=None, **kwargs):
# case `self._data is None`
self.data
else:
raise ValueError("`initializer` must be callable or buffer, not %s"
% type(initializer))
raise ValueError(f"`initializer` must be callable or buffer, "
f"not {type(initializer)}")

_subs = Differentiable._subs

Expand All @@ -125,8 +125,8 @@ def wrapper(self):
# Aliasing Functions must not allocate data
return

debug("Allocating host memory for %s%s [%s]"
% (self.name, self.shape_allocated, humanbytes(self.nbytes)))
debug(f"Allocating host memory for {self.name}{self.shape_allocated} "
f"[{humanbytes(self.nbytes)}]")

# Clear up both SymPy and Devito caches to drop unreachable data
CacheManager.clear(force=False)
Expand Down Expand Up @@ -176,8 +176,8 @@ def __coefficients_setup__(self, **kwargs):
if coeffs == 'symbolic':
deprecations.symbolic_warn
else:
raise ValueError("coefficients must be one of %s"
" not %s" % (str(fd_weights_registry), coeffs))
raise ValueError(f"coefficients must be one of {fd_weights_registry} "
f"not {coeffs}")
return coeffs

def __staggered_setup__(self, **kwargs):
Expand Down Expand Up @@ -352,12 +352,12 @@ def _size_outhalo(self):

if self._distributor.is_parallel and (any(left) > 0 or any(right)) > 0:
try:
warning_msg = """A space order of {0} and a halo size of {1} has been
set but the current rank ({2}) has a domain size of
only {3}""".format(self._space_order,
max(self._size_inhalo),
self._distributor.myrank,
min(self.grid.shape_local))
warning_msg = (
f"A space order of {self._space_order} and a halo size of "
f"{max(self._size_inhalo)} has been set but the current rank "
f"({self._distributor.myrank}) has a domain size of only "
f"{min(self.grid.shape_local)}"
)
if not self._distributor.is_boundary_rank:
warning(warning_msg)
else:
Expand Down Expand Up @@ -768,7 +768,7 @@ def _C_get_field(self, region, dim, side=None):
offset = 0
size = ffp(self._C_field_size, self._C_make_index(dim))
else:
raise ValueError("Unknown region `%s`" % str(region))
raise ValueError(f"Unknown region `{region}`")

return RegionMeta(offset, size)

Expand All @@ -781,8 +781,8 @@ def _halo_exchange(self):
# Nothing to do
return
if MPI.COMM_WORLD.size > 1 and self._distributor is None:
raise RuntimeError("`%s` cannot perform a halo exchange as it has "
"no Grid attached" % self.name)
raise RuntimeError(f"`{self.name}` cannot perform a halo exchange "
f"as it has no Grid attached")

neighborhood = self._distributor.neighborhood
comm = self._distributor.comm
Expand Down Expand Up @@ -874,17 +874,16 @@ def _arg_check(self, args, intervals, **kwargs):
If an incompatibility is detected.
"""
if self.name not in args:
raise InvalidArgument("No runtime value for `%s`" % self.name)
raise InvalidArgument(f"No runtime value for `{self.name}`")

data = args[self.name]

if len(data.shape) != self.ndim:
raise InvalidArgument("Shape %s of runtime value `%s` does not match "
"dimensions %s" %
(data.shape, self.name, self.dimensions))
raise InvalidArgument(f"Shape {data.shape} of runtime value `{self.name}` "
f"does not match dimensions {self.dimensions}")
if data.dtype != self.dtype:
warning("Data type %s of runtime value `%s` does not match the "
"Function data type %s" % (data.dtype, self.name, self.dtype))
warning(f"Data type {data.dtype} of runtime value `{self.name}` "
f"does not match the Function data type {self.dtype}")

# Check each Dimension for potential OOB accesses
for i, s in zip(self.dimensions, data.shape):
Expand All @@ -894,11 +893,11 @@ def _arg_check(self, args, intervals, **kwargs):
args.options['linearize'] and \
self.is_regular and \
data.size - 1 >= np.iinfo(np.int32).max:
raise InvalidArgument("`%s`, with its %d elements, is too big for "
"int32 pointer arithmetic. Consider using the "
"'index-mode=int64' option, the save=Buffer(..) "
"API (TimeFunction only), or domain "
"decomposition via MPI" % (self.name, data.size))
raise InvalidArgument(f"`{self.name}`, with its {data.size} elements, "
"is too big for int32 pointer arithmetic."
"Consider using the 'index-mode=int64' option, "
"the save=Buffer(..) (TimeFunction only), or domain "
"decomposition via MPI")

def _arg_finalize(self, args, alias=None):
key = alias or self
Expand Down Expand Up @@ -1134,9 +1133,8 @@ def __shape_setup__(cls, **kwargs):
if d in grid.dimensions:
size = grid.dimension_map[d]
if size.glb != s and s is not None:
raise ValueError("Dimension `%s` is given size `%d`, "
"while `grid` says `%s` has size `%d` "
% (d, s, d, size.glb))
raise ValueError(f"Dimension `{d}` is given size `{s}`, "
f"while `grid` says `{d}` has size `{size.glb}`")
else:
loc_shape.append(size.loc)
else:
Expand Down Expand Up @@ -1192,7 +1190,7 @@ def __padding_setup__(self, **kwargs):
padding = tuple((0, i) if isinstance(i, int) else i for i in padding)

else:
raise TypeError("`padding` must be int or %d-tuple of ints" % self.ndim)
raise TypeError(f"`padding` must be int or {self.ndim}-tuple of ints")
return DimensionTuple(*padding, getters=self.dimensions)

@property
Expand Down Expand Up @@ -1438,7 +1436,7 @@ def __shape_setup__(cls, **kwargs):
elif isinstance(save, int):
shape.insert(cls._time_position, save)
else:
raise TypeError("`save` can be None, int or Buffer, not %s" % type(save))
raise TypeError(f"`save` can be None, int or Buffer, not {type(save)}")
elif dimensions is None:
raise TypeError("`dimensions` required if both `grid` and "
"`shape` are provided")
Expand Down Expand Up @@ -1502,9 +1500,10 @@ def _arg_check(self, args, intervals, **kwargs):

key_time_size = args[self.name].shape[self._time_position]
if self._time_buffering and self._time_size != key_time_size:
raise InvalidArgument("Expected `time_size=%d` for runtime "
"value `%s`, found `%d` instead"
% (self._time_size, self.name, key_time_size))
raise InvalidArgument(
f"Expected `time_size={self._time_size}` for runtime value "
f"`{self.name}`, found `{key_time_size}` instead"
)


class SubFunction(Function):
Expand Down Expand Up @@ -1533,8 +1532,8 @@ def _arg_values(self, **kwargs):
if self._parent is not None and self.parent.name not in kwargs:
return self._parent._arg_defaults(alias=self._parent).reduce_all()
elif self.name in kwargs:
raise RuntimeError("`%s` is a SubFunction, so it can't be assigned "
"a value dynamically" % self.name)
raise RuntimeError(f"`{self.name}` is a SubFunction, so it can't "
"be assigned a value dynamically")
else:
return self._arg_defaults(alias=self)

Expand Down Expand Up @@ -1678,8 +1677,8 @@ def make(self, shape=None, initializer=None, allocator=None, **kwargs):
for n, i in enumerate(self.shape):
v = i.subs(args)
if not v.is_Integer:
raise ValueError("Couldn't resolve `shape[%d]=%s` with the given "
"kwargs (obtained: `%s`)" % (n, i, v))
raise ValueError(f"Couldn't resolve `shape[{n}]={i}` with the given "
f"kwargs (obtained: `{v}`)")
shape.append(int(v))
shape = tuple(shape)
elif len(shape) != self.ndim:
Expand All @@ -1706,6 +1705,6 @@ def _arg_values(self, **kwargs):
# Set new values and re-derive defaults
return new._arg_defaults().reduce_all()
else:
raise InvalidArgument("Illegal runtime value for `%s`" % self.name)
raise InvalidArgument(f"Illegal runtime value for `{self.name}`")
else:
raise InvalidArgument("TempFunction `%s` lacks override" % self.name)
raise InvalidArgument(f"TempFunction `{self.name}` lacks override")
18 changes: 11 additions & 7 deletions examples/userapi/02_apply.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -142,7 +142,11 @@
" 'y_m': 0,\n",
" 'y_size': 4,\n",
" 'y_M': 3,\n",
" 'timers': <cparam 'P' (0x7fb0d0550918)>}"
" 'h_x': 0.33333334,\n",
" 'h_y': 0.33333334,\n",
" 'o_x': 0.0,\n",
" 'o_y': 0.0,\n",
" 'timers': <cparam 'P' (0x7f3ce4f15110)>}"
]
},
"execution_count": 5,
Expand Down Expand Up @@ -419,8 +423,8 @@
{
"data": {
"text/plain": [
"PerformanceSummary([('section0',\n",
" PerfEntry(time=3e-06, gflopss=0.0, gpointss=0.0, oi=0.0, ops=0, itershapes=[]))])"
"PerformanceSummary([(PerfKey(name='section0', rank=None),\n",
" PerfEntry(time=1e-06, gflopss=0.0, gpointss=0.0, oi=0.0, ops=0, itershapes=[]))])"
]
},
"execution_count": 14,
Expand Down Expand Up @@ -449,14 +453,14 @@
"name": "stderr",
"output_type": "stream",
"text": [
"Operator `Kernel` run in 0.00 s\n"
"Operator `Kernel` ran in 0.01 s\n"
]
},
{
"data": {
"text/plain": [
"PerformanceSummary([('section0',\n",
" PerfEntry(time=3e-06, gflopss=0.021333333333333333, gpointss=0.010666666666666666, oi=0.16666666666666666, ops=2, itershapes=[(2, 4, 4)]))])"
"PerformanceSummary([(PerfKey(name='section0', rank=None),\n",
" PerfEntry(time=1e-06, gflopss=0.064, gpointss=0.032, oi=0.16666666666666666, ops=2, itershapes=((2, 4, 4),)))])"
]
},
"execution_count": 15,
Expand Down Expand Up @@ -525,7 +529,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
"version": "3.10.12"
}
},
"nbformat": 4,
Expand Down
2 changes: 1 addition & 1 deletion tests/test_checkpointing.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@


@switchconfig(log_level='WARNING')
def test_segmented_incremment():
def test_segmented_increment():
"""
Test for segmented operator execution of a one-sided first order
function (increment). The corresponding set of stencil offsets in
Expand Down
19 changes: 19 additions & 0 deletions tests/test_dimension.py
Original file line number Diff line number Diff line change
Expand Up @@ -231,6 +231,25 @@ def test_degenerate_to_zero(self):

assert np.all(u.data == 10)

def test_default_timeM(self):
"""
MFE for issue #2235
"""
grid = Grid(shape=(4, 4))

u = TimeFunction(name='u', grid=grid)
usave = TimeFunction(name='usave', grid=grid, save=5)

eqns = [Eq(u.forward, u + 1),
Eq(usave, u)]

op = Operator(eqns)

assert op.arguments()['time_M'] == 4
op.apply()

assert all(np.all(usave.data[i] == i) for i in range(4))


class TestSubDimension:

Expand Down
21 changes: 12 additions & 9 deletions tests/test_operator.py
Original file line number Diff line number Diff line change
Expand Up @@ -1990,16 +1990,19 @@ def test_2194_v2(self, eqns, expected, exp_trees, exp_iters):

class TestInternals:

def test_indirection(self):
nt = 10
@pytest.mark.parametrize('nt, offset, epass',
([1, 1, True], [1, 2, False],
[5, 3, True], [3, 5, False],
[4, 1, True], [5, 10, False]))
def test_indirection(self, nt, offset, epass):
grid = Grid(shape=(4, 4))
time = grid.time_dim
x, y = grid.dimensions

f = TimeFunction(name='f', grid=grid, save=nt)
g = TimeFunction(name='g', grid=grid)

idx = time + 1
idx = time + offset
s = Indirection(name='ofs0', mapped=idx)

eqns = [
Expand All @@ -2010,10 +2013,10 @@ def test_indirection(self):
op = Operator(eqns)

assert op._dspace[time].lower == 0
assert op._dspace[time].upper == 1
assert op.arguments()['time_M'] == nt - 2
assert op._dspace[time].upper == offset

op()

assert np.all(f.data[0] == 0.)
assert np.all(f.data[i] == 3. for i in range(1, 10))
if epass:
assert op.arguments()['time_M'] == nt - offset - 1
op()
assert np.all(f.data[0] == 0.)
assert np.all(f.data[i] == 3. for i in range(1, nt))
Loading