diff --git a/test/test_lazybuffer.py b/test/test_lazybuffer.py
index 7bdf351fb431b..5e36f5650f9db 100644
--- a/test/test_lazybuffer.py
+++ b/test/test_lazybuffer.py
@@ -2,7 +2,7 @@
 import numpy as np
 import unittest
 from tinygrad import Tensor, Device, dtypes
-from tinygrad.lazy import LazyBuffer, ReduceOps
+from tinygrad.lazy import LazyBuffer, ReduceOps, LoadOps
 from tinygrad.engine.schedule import create_schedule

 class TestLazyBuffer(unittest.TestCase):
@@ -92,5 +92,26 @@ def test_split_reduce_kernel_dim1(self):
     for s in sched:
       assert s.ast[0].src[0].op is ReduceOps.SUM

+class TestView(unittest.TestCase):
+  def test_all_masked_out(self):
+    # start with non CONST LoadOps
+    a = Tensor.rand(10, 10)
+    assert a.lazydata.base.op is not LoadOps.CONST
+
+    # all masked out, degrades to const 0
+    b = a.pad(((0, 10), None))[10:]
+    assert b.shape == (10, 10)
+    assert b.lazydata.base.op is LoadOps.CONST and b.lazydata.base.arg == 0
+
+    # mask out dim = 1 works too
+    b = a.pad((None, (0, 10)))[:, 10:]
+    assert b.shape == (10, 10)
+    assert b.lazydata.base.op is LoadOps.CONST and b.lazydata.base.arg == 0
+
+    # partial masked out does not degrade into CONST
+    b = a.pad(((0, 5), None))[5:]
+    assert b.shape == (10, 10)
+    assert b.lazydata.base.op is not LoadOps.CONST
+
 if __name__ == "__main__":
   unittest.main()
diff --git a/tinygrad/lazy.py b/tinygrad/lazy.py
index 80c72ad971a83..ce45adf453127 100644
--- a/tinygrad/lazy.py
+++ b/tinygrad/lazy.py
@@ -200,7 +200,7 @@ def r(self, op:ReduceOps, axis:Tuple[int, ...]) -> LazyBuffer:
   # *** movement ops ***

   def _view(self, new_st:ShapeTracker) -> LazyBuffer:
-    if self.st.size == 0 or (new_st.views[-1].mask is not None and all((x[1]-x[0]) == 0 for x in new_st.views[-1].mask)):
+    if self.st.size == 0 or (new_st.views[-1].mask is not None and any((x[1]-x[0]) == 0 for x in new_st.views[-1].mask)):
       return self.const(0, new_st.shape)
     if new_st.contiguous and self.base.shape == new_st.shape: return self.base
     return create_lazybuffer(self.device, new_st, self.dtype, base=self.base)
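
For context on the one-line change in `_view`: each entry of a view's `mask` is a `(start, end)` range of valid indices for one dimension, so the number of unmasked elements is the product of the per-dimension extents, and a single zero-extent dimension already masks out the whole view. That is why `any` is the right predicate rather than `all`. Below is a minimal standalone sketch of that reasoning in plain Python; it does not use the tinygrad API, and `fully_masked_out` is a hypothetical helper written only for illustration.

# Sketch: a mask entry (start, end) per dimension marks the valid index range.
# The count of unmasked elements is the product of the extents (end - start),
# so one zero-extent dimension is enough to mask out every element.
from math import prod

def fully_masked_out(mask):
  return any((end - start) == 0 for start, end in mask)

mask = ((10, 10), (0, 10))          # dim 0 keeps nothing, dim 1 keeps 10 indices
assert prod(end - start for start, end in mask) == 0
assert fully_masked_out(mask)       # the whole view can degrade to const 0

mask = ((5, 10), (0, 10))           # partial mask: 5 * 10 elements survive
assert not fully_masked_out(mask)   # so it must not degrade to a const

This mirrors the new test cases: fully padding out a dimension and slicing past the original data leaves a view with a zero-extent mask, which `_view` now folds to a const-0 buffer, while a partially masked view keeps its non-CONST base.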