Skip to content

Commit

Permalink
Implemented A_optimize for relative-only measurements.
Browse files Browse the repository at this point in the history
  • Loading branch information
forcefield committed Sep 30, 2020
1 parent dec0a38 commit 43504c4
Show file tree
Hide file tree
Showing 2 changed files with 53 additions and 17 deletions.
35 changes: 25 additions & 10 deletions A_opt.py
Original file line number Diff line number Diff line change
Expand Up @@ -659,20 +659,25 @@ def G( x, y, alpha=1., beta=0., trans='N'):
# h vector.
h = matrix( 0., (K*(K+1)/2 + K*(K+1)*(K+1), 1))
# F := Fisher matrix
if nsofar is not None or di2 is not None:
if nsofar is None:
F = matrix( 0., (K, K))
F[::K+1] = di2
else:
F = Fisher_matrix( si2, nsofar, di2)
if nsofar is None:
F = matrix( 0., (K, K))
else:
F = None
F = Fisher_matrix( si2, nsofar, di2)
if di2 is not None: F[::K+1] = di2
elif np.all( np.diag( si2) == 0):
# If the diagonal elements of 1/s[i,i]^2 are all zero, the Fisher
# information matrix is singular, and the quantities are determined
# up to a constant. In this case, we constrain the
# mean of the quantities. This corresponds to adding a constant
# omega to the Fisher matrix. The optimal allocation does not
# depend on the value of omega.
omega = 1.
F += omega

row = K*(K+1)/2
for i in xrange(K):
if F is not None:
for j in xrange(K):
h[row + j*(K+1) : row + (j+1)*(K+1)-1] = F[:,j]
for j in xrange(K):
h[row + j*(K+1) : row + (j+1)*(K+1)-1] = F[:,j]
h[row + (i+1)*(K+1)-1] = 1. # e_i^t
h[row + K*(K+1) + i] = 1. # e_i
row += (K+1)*(K+1)
Expand Down Expand Up @@ -1151,12 +1156,22 @@ def test_sumdR2( ntrials=10, tol=1e-9):
print 'Timing for aligned sum dR: %f seconds per call.' % (tfast/ntrials)
return success

def test_relative_only():
K = 5
np.random.seed( 1)
sij = matrix( np.random.rand( K*K), (K,K))
sij = 0.5*(sij + sij.T)
for i in range(K): sij[i,i] = np.inf
nij = A_optimize_fast( sij)
print nij

def unit_test():
    '''Run the module's self-contained unit tests.

    NOTE(review): test_relative_only() was defined in this file but never
    added to this aggregate runner (it was only called from __main__); it
    is included here so the relative-only measurement case is exercised
    together with the rest of the suite.
    '''
    test_Gfunc( ntrials=100)
    test_kkt_solver( ntrials=100)
    test_sumdR2()
    test_relative_only()

if __name__ == '__main__':
test_relative_only()
K = 200
sij = matrix( np.random.rand( K*K), (K, K))
nsofar = matrix( 0.2*np.random.rand( K*K), (K, K))
Expand Down
35 changes: 28 additions & 7 deletions test_diffnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,16 @@ def check_optimality( sij, nij, optimality='A', delta=1E-1, ntimes=10):
'''
K = sij.size[0]
C = covariance( cvxopt.div( nij, sij**2))
fC = dict(
A = np.trace( C),
D = np.log( linalg.det( C)),
E = np.max( linalg.eig( C)[0]).real,
Etree = np.max( linalg.eig( C)[0]).real
)
fC = dict()
if optimality=='A':
fC['A'] = np.trace( C)
if optimality=='D':
fC['D'] = np.log( linalg.det( C))
if optimality=='E':
fC['E'] = np.max( linalg.eig( C)[0]).real
if optimality=='Etree':
fC['Etree'] = np.max( linalg.eig( C)[0]).real

df = np.zeros( ntimes)
for t in xrange( ntimes):
zeta = matrix( 1. + 2*delta*(np.random.rand( K, K) - 0.5))
Expand Down Expand Up @@ -149,6 +153,20 @@ def check_sparse_A_optimal( sij, ntimes=10, delta=1e-1, tol=1e-5):

return success and success2

def check_relative_only_A_optimal( sij):
    '''Check A_optimize on a network where only relative measurements
    are allowed.

    The diagonal entries of sij are set to infinity, which forbids any
    direct (individual) measurement; A_optimize must still produce an
    allocation nij that check_optimality accepts (default A-optimality).

    sij: KxK matrix of measurement std. deviations; a copy is modified
    here, so the caller's matrix is presumably left untouched — relies on
    matrix(sij) making a copy (cvxopt constructor semantics).

    Returns True on success, False otherwise; prints a PASS/FAIL line
    either way.
    '''
    sij = matrix(sij)
    K = sij.size[0]
    # Infinite s[i,i] means quantity i cannot be measured directly.
    for i in range(K): sij[i,i] = np.inf
    nij = A_optimize( sij)
    success = check_optimality( sij, nij)
    if (not success):
        print 'FAIL: A_optimize for relative-only measurements did not generate optimal.'
    else:
        print 'SUCCESS: A_optimize for relative-only measurements.'
    return success

def check_hessian( dF, d2F, x0):
'''
Check the Hessian for correctness.
Expand Down Expand Up @@ -268,7 +286,6 @@ def unitTest( tol=1.e-4):
[ 0.1, 1., 0.1 ],
[ 0.1, 0.1, 1.2 ]])


from scipy.optimize import check_grad

def F( x):
Expand Down Expand Up @@ -336,6 +353,10 @@ def d2F( x):
if (check_sparse_A_optimal( sij)):
print 'Sparse A-optimal passed!'

# Check A-optimal when only relative measurements are included.
if (check_relative_only_A_optimal( sij)):
print 'Relative-only A-optimal passed!'

# Test covariance computation
if (test_covariance(5, T=4000)):
print 'Covariance computation passed!'
Expand Down

0 comments on commit 43504c4

Please sign in to comment.