
Dgl ops 2 #16416

Merged: 12 commits, Oct 23, 2019

tests/nightly/test_large_array.py: 162 changes (130 additions, 32 deletions)
@@ -798,106 +798,96 @@ def test_batchnorm():
def test_add():
    a = nd.ones(shape=(LARGE_X, SMALL_Y))
    b = nd.ones(shape=(LARGE_X, SMALL_Y))
-    c = b
-    c = c.__add__(a)
+    c = b.__add__(a)
    assert c[0][-1] == 2
    assert c.shape == a.shape

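The change pattern here repeats through this hunk: the old two-step form (`c = b`, then `c = c.__add__(a)`) is collapsed into one call. NDArray's arithmetic dunders return a new array rather than mutating the receiver, so the alias added nothing. A minimal sketch of the equivalence, with small illustrative shapes in place of LARGE_X and SMALL_Y:

from mxnet import nd

a = nd.ones(shape=(2, 3))
b = nd.ones(shape=(2, 3))
c = b.__add__(a)       # what `b + a` dispatches to; returns a new NDArray
assert c[0][-1] == 2
assert b[0][-1] == 1   # b is untouched, so the old `c = b` alias was dead weight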

def test_sub():
    a = 3*nd.ones(shape=(LARGE_X, SMALL_Y))
    b = nd.ones(shape=(LARGE_X, SMALL_Y))
-    c = b
-    c = c.__sub__(a)
+    c = b.__sub__(a)
    assert c[0][-1] == -2
    assert c.shape == a.shape


def test_rsub():
    a = 3*nd.ones(shape=(LARGE_X, SMALL_Y))
    b = nd.ones(shape=(LARGE_X, SMALL_Y))
-    c = b
-    c = c.__rsub__(a)
+    c = b.__rsub__(a)
    assert c[0][-1] == 2
    assert c.shape == a.shape


def test_neg():
    a = nd.ones(shape=(LARGE_X, SMALL_Y))
-    c = a
-    c = c.__neg__()
+    c = a.__neg__()
    assert c[0][-1] == -1
    assert c.shape == a.shape


def test_mul():
    a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))
    b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))
-    c = b
-    c = c.__mul__(a)
+    c = b.__mul__(a)
    assert c[0][-1] == 6
    assert c.shape == a.shape


def test_div():
    a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))
    b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))
-    c = b
-    c = c.__div__(a)
+    c = b.__div__(a)
    mx_divide = nd.divide(b, a)
    assert c[0][-1] == 3/2
    assert mx_divide[0][-1] == c[0][-1]
    assert c.shape == a.shape

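test_rdiv below exercises the reflected variant: `b.__rdiv__(a)` evaluates a / b, the operation Python would delegate to b when a cannot handle the division itself. With a filled with 2s and b with 3s, the expected value is 2/3. A minimal sketch of the correspondence, with small illustrative shapes:

from mxnet import nd

a = 2 * nd.ones(shape=(2, 3))
b = 3 * nd.ones(shape=(2, 3))
c = b.__rdiv__(a)                    # reflected: computes a / b element-wise
assert c[0][-1] == (a / b)[0][-1]    # same result as the operator form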

def test_rdiv():
    a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))
    b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))
-    c = b
-    c = c.__rdiv__(a)
+    c = b.__rdiv__(a)
    assert c[0][-1] == 2/3
    assert c.shape == a.shape


def test_mod():
    a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))
    b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))
-    c = b
-    c = c.__mod__(a)
+    c = b.__mod__(a)
    assert c[0][-1] == 1
    assert c.shape == a.shape


def test_rmod():
    a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))
    b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))
-    c = b
-    c = c.__rmod__(a)
+    c = b.__rmod__(a)
    assert c[0][-1] == 2
    assert c.shape == a.shape


def test_imod():
    a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))
    b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))
-    c = b
-    c = c.__imod__(a)
+    c = b.__imod__(a)
    assert c[0][-1] == 1
    assert c.shape == a.shape


def test_pow():
    a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))
    b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))
-    c = b
-    c = c.__pow__(a)
+    c = b.__pow__(a)
    assert c[0][-1] == 9
    assert c.shape == a.shape


def test_rpow():
    a = 2*nd.ones(shape=(LARGE_X, SMALL_Y))
    b = 3*nd.ones(shape=(LARGE_X, SMALL_Y))
-    c = b
-    c = c.__rpow__(a)
+    c = b.__rpow__(a)
    assert c[0][-1] == 8
    assert c.shape == a.shape

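A quick NumPy cross-check of the two power expectations above, with illustrative shapes: `b.__pow__(a)` is b**a = 3**2 = 9, while the reflected `b.__rpow__(a)` is a**b = 2**3 = 8.

import numpy as np

a = np.full((2, 3), 2.0)
b = np.full((2, 3), 3.0)
assert (b ** a == 9).all()   # __pow__: base 3, exponent 2
assert (a ** b == 8).all()   # __rpow__ reflects the operands: base 2, exponent 3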
@@ -1079,35 +1069,31 @@ def test_log_softmax():
def test_iadd():
    a = nd.array(np.ones((SMALL_Y, LARGE_X)))
    b = nd.array(np.ones((SMALL_Y, LARGE_X)))
-    c = b
-    c += a
+    c = b+a
    assert c.shape == a.shape
    assert c[0][-1] == 2

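The in-place tests in this hunk are rewritten for a different reason: after `c = b`, both names point at the same buffer, and NDArray's `+=` writes in place, so the old form silently overwrote `b`. The new form computes into a fresh array. A minimal sketch of the hazard, with small illustrative shapes:

from mxnet import nd

a = nd.ones((2, 3))
b = nd.ones((2, 3))
c = b                  # alias, not a copy
c += a                 # in-place add writes through the shared buffer
assert b[0][-1] == 2   # b was modified too; `c = b + a` avoids this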

def test_isub():
    a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3)))
    b = nd.array(np.ones((SMALL_Y, LARGE_X)))
-    c = a
-    c -= b
+    c = a-b
    assert c.shape == a.shape
    assert c[0][-1] == 2


def test_imul():
    a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 3)))
    b = nd.array(np.ones((SMALL_Y, LARGE_X)))
-    c = b
-    c *= a
+    c = b*a
    assert c.shape == a.shape
    assert c[0][-1] == 3


def test_idiv():
    a = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 4)))
    b = nd.array(np.array(np.full((SMALL_Y, LARGE_X), 2)))
-    c = a
-    c /= b
+    c = a/b
    assert c.shape == a.shape
    assert c[0][-1] == 2

@@ -1199,6 +1185,118 @@ def test_full():
    assert a[-1][-1] == 3


def test_hyperbolic():
    def test_arccosh(a):
        mx_res = mx.nd.arccosh(a)
        assert_almost_equal(mx_res[-1][-1].asnumpy(), np.arccosh(a[-1][-1].asnumpy()))

    def test_arcsinh(a):
        mx_res = mx.nd.arcsinh(a)
        assert_almost_equal(mx_res[-1][-1].asnumpy(), np.arcsinh(a[-1][-1].asnumpy()))

    def test_arctanh(a):
        a[-1][-1] = 0  # arctanh(1) is inf; comparing two infs makes assert_almost_equal fail with "divide by 0", so zero the probed element
        mx_res = mx.nd.arctanh(a)
        assert_almost_equal(mx_res[-1][-1].asnumpy(), np.arctanh(a[-1][-1].asnumpy()))

    def test_cosh(a):
        mx_res = mx.nd.cosh(a)
        assert_almost_equal(mx_res[-1][-1].asnumpy(), np.cosh(a[-1][-1].asnumpy()))

    def test_sinh(a):
        mx_res = mx.nd.sinh(a)
        assert_almost_equal(mx_res[-1][-1].asnumpy(), np.sinh(a[-1][-1].asnumpy()))

    def test_tanh(a):
        mx_res = mx.nd.tanh(a)
        assert_almost_equal(mx_res[-1][-1].asnumpy(), np.tanh(a[-1][-1].asnumpy()))

    a = mx.nd.ones((LARGE_X, SMALL_Y))
    test_arccosh(a)
    test_arcsinh(a)
    test_arctanh(a)
    test_cosh(a)
    test_sinh(a)
    test_tanh(a)

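Context for the guard inside test_arctanh above: the input array is all ones and arctanh(1) is infinite; NumPy returns inf (with a divide-by-zero warning), and comparing two infs breaks assert_almost_equal, hence the probed element is zeroed first. A small NumPy illustration:

import numpy as np

with np.errstate(divide='ignore'):
    assert np.isinf(np.arctanh(1.0))   # boundary of arctanh's domain (-1, 1)
assert np.arctanh(0.0) == 0.0          # the safe value the test substitutes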

def test_sign():
    a = mx.nd.random.normal(-1, 1, shape=(LARGE_X, SMALL_Y))
    mx_res = mx.nd.sign(a)
    assert_almost_equal(mx_res[-1][-1].asnumpy(), np.sign(a[-1][-1].asnumpy()))


def test_logical():
    def test_logical_and(a, b):
        mx_res = mx.nd.logical_and(a, b)
        assert_almost_equal(mx_res[-1][-1].asnumpy(), np.logical_and(a[-1][-1].asnumpy(), b[-1][-1].asnumpy()))

    def test_logical_or(a, b):
        mx_res = mx.nd.logical_or(a, b)
        assert_almost_equal(mx_res[-1][-1].asnumpy(), np.logical_or(a[-1][-1].asnumpy(), b[-1][-1].asnumpy()))

    def test_logical_not(a):
        mx_res = mx.nd.logical_not(a)
        assert_almost_equal(mx_res[-1][-1].asnumpy(), np.logical_not(a[-1][-1].asnumpy()))

    def test_logical_xor(a, b):
        mx_res = mx.nd.logical_xor(a, b)
        assert_almost_equal(mx_res[-1][-1].asnumpy(), np.logical_xor(a[-1][-1].asnumpy(), b[-1][-1].asnumpy()))

    a = mx.nd.ones((LARGE_X, SMALL_Y))
    b = mx.nd.zeros((LARGE_X, SMALL_Y))
    test_logical_and(a, b)
    test_logical_or(a, b)
    test_logical_not(a)
    test_logical_xor(a, b)

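With a all ones and b all zeros, the probed element gives one representative row of each operator's truth table. A quick NumPy cross-check of the values the asserts compare against (illustrative):

import numpy as np

a, b = np.float32(1), np.float32(0)
assert np.logical_and(a, b) == False   # 1 AND 0 -> False
assert np.logical_or(a, b) == True     # 1 OR 0  -> True
assert np.logical_xor(a, b) == True    # 1 XOR 0 -> True
assert np.logical_not(a) == False      # NOT 1   -> False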

def test_batch_dot():
    a = mx.nd.ones((LARGE_X, 5, 10))
    b = 2*mx.nd.ones((LARGE_X, 10, 6))
    res = mx.nd.batch_dot(a, b)
    assert res[0][0][0] == 20
    assert res.shape == (LARGE_X, 5, 6)

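batch_dot contracts the trailing axis of the first operand with the middle axis of the second, independently per batch: (LARGE_X, 5, 10) x (LARGE_X, 10, 6) -> (LARGE_X, 5, 6), and every output entry is the dot product of ten 1s with ten 2s, hence 20. A small NumPy equivalent of the same contraction (illustrative batch size):

import numpy as np

a = np.ones((4, 5, 10))
b = 2 * np.ones((4, 10, 6))
res = np.matmul(a, b)         # batched matrix product, like mx.nd.batch_dot
assert res.shape == (4, 5, 6)
assert res[0][0][0] == 20     # 10 * (1 * 2)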

def test_regression():
    shape = (LARGE_X, SMALL_Y)

    def check_regression(symbol, forward, backward, shape):
        # init executor
        data_s = mx.symbol.Variable('data')
        label_s = mx.symbol.Variable('label')
        out_s = symbol(data=data_s, label=label_s)
        grad_req = {'data': 'write', 'label': 'null'}
        exe = out_s.simple_bind(ctx=default_context(), data=shape, label=shape, grad_req=grad_req)

        arg_map = dict(zip(out_s.list_arguments(), exe.arg_arrays))
        grad_map = dict(zip(out_s.list_arguments(), exe.grad_arrays))

        # init data
        data = mx.random.uniform(-1, 1, shape)
        arg_map["data"][:] = data
        atol = 1e-5
        density = 0.5
        stype = 'default'
        label = arg_map["label"]
        label[:] = rand_ndarray(shape, stype, density=density)
        exe.forward(is_train=True)
        exe.backward()
        np_out = forward(data.asnumpy())
        out_grad = backward(np_out, label.asnumpy().reshape(np_out.shape)) / shape[1]
        assert_almost_equal(exe.outputs[0].asnumpy(), np_out, atol=atol)
        assert_almost_equal(grad_map["data"].asnumpy(), out_grad, atol=atol)

    check_regression(mx.symbol.LogisticRegressionOutput,
                     lambda x: 1.0 / (1.0 + np.exp(-x)),
                     lambda x, y: x - y,
                     shape)
    check_regression(mx.symbol.LinearRegressionOutput,
                     lambda x: x,
                     lambda x, y: x - y,
                     shape)

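The two `backward` lambdas encode the same identity: for LogisticRegressionOutput the forward is the sigmoid and the data gradient is prediction minus label (the derivative of binary cross-entropy through the sigmoid), and for LinearRegressionOutput the forward is the identity with the same x - y gradient. A quick finite-difference check of the logistic case (illustrative, plain NumPy):

import numpy as np

sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
bce = lambda x, y: -(y * np.log(sigmoid(x)) + (1 - y) * np.log(1 - sigmoid(x)))

x, y, eps = 0.3, 1.0, 1e-6
numeric = (bce(x + eps, y) - bce(x - eps, y)) / (2 * eps)
assert abs(numeric - (sigmoid(x) - y)) < 1e-6   # gradient = prediction - label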
if __name__ == '__main__':
    import nose
    nose.runmodule()