Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[bug] [refactor] Fix error when ti.init() not called by deprecating Expr.layout_materialized #1347

Merged
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
*.swp
*.swo
/.vs
/tags
/.*_localrc
/tags
/Debug
*.sdf
/x64
Expand Down
6 changes: 3 additions & 3 deletions python/taichi/lang/expr.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ def __init__(self, *args, tb=None):

@python_scope
def __setitem__(self, key, value):
impl.get_runtime().try_materialize()
impl.get_runtime().materialize()
self.initialize_accessor()
if key is None:
key = ()
Expand All @@ -49,7 +49,7 @@ def __setitem__(self, key, value):

@python_scope
def __getitem__(self, key):
impl.get_runtime().try_materialize()
impl.get_runtime().materialize()
self.initialize_accessor()
if key is None:
key = ()
Expand Down Expand Up @@ -114,7 +114,7 @@ def fill(self, val):
from .meta import fill_tensor
fill_tensor(self, val)

@deprecated('tensor.parent()', 'tensor.snode().parent()')
#@deprecated('tensor.parent()', 'tensor.snode().parent()')
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So we no longer deprecate this?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes. Since tensor.parent() returns a tensor while tensor.snode().parent() returns a snode, the former is irreplaceable, and many tests are using it.

def parent(self, n=1):
import taichi as ti
p = self.snode().parent(n)
Expand Down
18 changes: 8 additions & 10 deletions python/taichi/lang/impl.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,6 @@ def __init__(self, kernels=None):
self.target_tape = None
self.inside_complex_kernel = False
self.kernels = kernels or []
Expr.materialize_layout_callback = self.materialize

def get_num_compiled_functions(self):
return len(self.compiled_functions) + len(self.compiled_grad_functions)
Expand All @@ -162,15 +161,10 @@ def create_program(self):
if self.prog is None:
self.prog = taichi_lang_core.Program()

def try_materialize(self):
if not Expr.layout_materialized:
Expr.materialize_layout_callback()

def materialize(self):
if self.materialized:
return
self.create_program()
Expr.layout_materialized = True

def layout():
for func in self.layout_functions:
Expand All @@ -188,8 +182,7 @@ def clear(self):
if self.prog:
self.prog.finalize()
self.prog = None
Expr.materialize_layout_callback = None
Expr.layout_materialized = False
self.materialized = False

def get_tape(self, loss=None):
from .tape import Tape
Expand Down Expand Up @@ -279,8 +272,13 @@ def var(dt, shape=None, offset=None, needs_grad=False):
assert (offset is not None and shape is None
) == False, f'The shape cannot be None when offset is being set'

assert not get_runtime(
).materialized, 'No new variables can be declared after kernel invocations or Python-scope tensor accesses.'
if get_runtime().materialized:
raise RuntimeError(
"No new variables can be declared after materialization, i.e. kernel invocations "
"or Python-scope tensor accesses. I.e., data layouts must be specified before "
"any computation. Try appending ti.init() or ti.reset() "
"right after 'import taichi as ti' if you are using Jupyter notebook."
)

# primal
x = Expr(taichi_lang_core.make_id_expr(""))
Expand Down
4 changes: 2 additions & 2 deletions python/taichi/lang/snode.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ def lazy_grad(self):
self.ptr.lazy_grad()

def parent(self, n=1):
impl.get_runtime().try_materialize()
impl.get_runtime().materialize()
p = self.ptr
while p and n > 0:
p = p.parent
Expand All @@ -73,7 +73,7 @@ def dim(self):

@property
def shape(self):
impl.get_runtime().try_materialize()
impl.get_runtime().materialize()
dim = self.ptr.num_active_indices()
ret = [self.ptr.get_num_elements_along_axis(i) for i in range(dim)]

Expand Down
8 changes: 8 additions & 0 deletions python/taichi/misc/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,3 +23,11 @@ def __ne__(self, other):

def allclose(x, y, **kwargs):
return x == approx(y, **kwargs)


def make_temp_file(*args, **kwargs):
    """Create a named temporary file on disk and return its path.

    Thin wrapper around ``tempfile.mkstemp``: the open file descriptor
    is closed right away so callers only ever handle the file name.
    All arguments are forwarded to ``mkstemp`` (e.g. ``suffix='.png'``).
    """
    import os
    from tempfile import mkstemp
    handle, path = mkstemp(*args, **kwargs)
    os.close(handle)
    return path
14 changes: 4 additions & 10 deletions tests/python/test_image_io.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,8 @@
import taichi as ti
import numpy as np
from taichi import make_temp_file
import pytest
import os
from tempfile import mkstemp


def make_temp(*args, **kwargs):
fd, name = mkstemp(*args, **kwargs)
os.close(fd)
return name


# jpg is also supported but hard to test here since it's lossy:
Expand All @@ -28,7 +22,7 @@ def test_image_io(resx, resy, comp, ext, is_tensor, dt):
pixel = np.random.randint(256, size=shape, dtype=ti.to_numpy_type(dt))
if is_tensor:
pixel_t.from_numpy(pixel)
fn = make_temp(suffix='.' + ext)
fn = make_temp_file(suffix='.' + ext)
if is_tensor:
ti.imwrite(pixel_t, fn)
else:
Expand All @@ -50,7 +44,7 @@ def test_image_io_vector(resx, resy, comp, ext, dt):
pixel = np.random.rand(*shape, comp).astype(ti.to_numpy_type(dt))
pixel_t = ti.Vector(comp, dt, shape)
pixel_t.from_numpy(pixel)
fn = make_temp(suffix='.' + ext)
fn = make_temp_file(suffix='.' + ext)
ti.imwrite(pixel_t, fn)
pixel_r = (ti.imread(fn).astype(ti.to_numpy_type(dt)) + 0.5) / 256.0
assert np.allclose(pixel_r, pixel, atol=2e-2)
Expand All @@ -70,7 +64,7 @@ def test_image_io_uint(resx, resy, comp, ext, dt):
pixel = np.random.randint(256, size=(*shape, comp), dtype=np_type) * np_max
pixel_t = ti.Vector(comp, dt, shape)
pixel_t.from_numpy(pixel)
fn = make_temp(suffix='.' + ext)
fn = make_temp_file(suffix='.' + ext)
ti.imwrite(pixel_t, fn)
pixel_r = ti.imread(fn).astype(np_type) * np_max
assert (pixel_r == pixel).all()
Expand Down
61 changes: 61 additions & 0 deletions tests/python/test_runtime.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
import taichi as ti
from taichi import make_temp_file
import sys, os


def test_without_init():
    # We want to check if Taichi works well without ``ti.init()``.
    # But in test ``ti.init()`` will always be called in last ``@ti.all_archs``.
    # So we have to create a new Taichi instance, i.e. test in a sandbox
    # subprocess whose runtime state cannot leak from this process.
    content = '''
import taichi as ti
assert ti.cfg.arch == ti.cpu

x = ti.var(ti.i32, (2, 3))
assert x.shape == (2, 3)

x[1, 2] = 4
assert x[1, 2] == 4
'''
    filename = make_temp_file()
    with open(filename, 'w') as f:
        f.write(content)
    # BUG FIX: the command must run the temp script just written; the
    # script path was missing from the command line, so the sandboxed
    # check was never actually executed.
    assert os.system(f'{sys.executable} {filename}') == 0


@ti.all_archs
@ti.must_throw(RuntimeError)
def test_materialization_after_kernel():
    # BUG FIX: ``@ti.all_archs`` was stacked twice, double-wrapping the
    # test and running it redundantly per arch; one application suffices.
    x = ti.var(ti.f32, (3, 4))

    @ti.kernel
    def func():
        print(x[2, 3])

    # First kernel invocation materializes the data layout.
    func()

    y = ti.var(ti.f32, (2, 3))
    # ERROR: No new variable should be declared after kernel invocation!


@ti.all_archs
@ti.must_throw(RuntimeError)
def test_materialization_after_access():
    # Reading a tensor element from Python scope materializes the layout.
    tensor = ti.var(ti.f32, (3, 4))

    print(tensor[2, 3])

    # ERROR: No new variable should be declared after Python-scope tensor access!
    another = ti.var(ti.f32, (2, 3))


@ti.all_archs
@ti.must_throw(RuntimeError)
def test_materialization_after_get_shape():
    # Querying ``.shape`` from Python scope materializes the layout.
    tensor = ti.var(ti.f32, (3, 4))

    print(tensor.shape)

    # ERROR: No new variable should be declared after Python-scope tensor access!
    another = ti.var(ti.f32, (2, 3))