diff --git a/A_opt.py b/A_opt.py
index a96b3ab2..f9f191c2 100644
--- a/A_opt.py
+++ b/A_opt.py
@@ -659,20 +659,25 @@ def G( x, y, alpha=1., beta=0., trans='N'):
     # h vector.
     h = matrix( 0., (K*(K+1)/2 + K*(K+1)*(K+1), 1))
     # F := Fisher matrix
-    if nsofar is not None or di2 is not None:
-        if nsofar is None:
-            F = matrix( 0., (K, K))
-            F[::K+1] = di2
-        else:
-            F = Fisher_matrix( si2, nsofar, di2)
+    if nsofar is None:
+        F = matrix( 0., (K, K))
     else:
-        F = None
+        F = Fisher_matrix( si2, nsofar, di2)
+    if di2 is not None: F[::K+1] = di2
+    elif np.all( np.diag( si2) == 0):
+        # If the diagonal elements of 1/s[i,i]^2 are all zero, the Fisher
+        # information matrix is singular, and the quantities are determined
+        # only up to an additive constant.  In this case, we constrain the
+        # mean of the quantities, which corresponds to adding a constant
+        # omega to every element of the Fisher matrix.  The optimal
+        # allocation does not depend on the value of omega.
+        omega = 1.
+        F += omega
 
     row = K*(K+1)/2
     for i in xrange(K):
-        if F is not None:
-            for j in xrange(K):
-                h[row + j*(K+1) : row + (j+1)*(K+1)-1] = F[:,j]
+        for j in xrange(K):
+            h[row + j*(K+1) : row + (j+1)*(K+1)-1] = F[:,j]
         h[row + (i+1)*(K+1)-1] = 1. # e_i^t
         h[row + K*(K+1) + i] = 1. # e_i
         row += (K+1)*(K+1)
@@ -1151,12 +1156,22 @@ def test_sumdR2( ntrials=10, tol=1e-9):
     print 'Timing for aligned sum dR: %f seconds per call.' % (tfast/ntrials)
     return success
 
+def test_relative_only():
+    K = 5
+    np.random.seed( 1)
+    sij = matrix( np.random.rand( K*K), (K,K))
+    sij = 0.5*(sij + sij.T)
+    for i in range(K): sij[i,i] = np.inf
+    nij = A_optimize_fast( sij)
+    print nij
+
 def unit_test():
     test_Gfunc( ntrials=100)
     test_kkt_solver( ntrials=100)
     test_sumdR2()
 
 if __name__ == '__main__':
+    test_relative_only()
     K = 200
     sij = matrix( np.random.rand( K*K), (K, K))
     nsofar = matrix( 0.2*np.random.rand( K*K), (K, K))
diff --git a/test_diffnet.py b/test_diffnet.py
index a4ef488a..afdd82db 100644
--- a/test_diffnet.py
+++ b/test_diffnet.py
@@ -11,12 +11,16 @@ def check_optimality( sij, nij, optimality='A', delta=1E-1, ntimes=10):
     '''
     K = sij.size[0]
     C = covariance( cvxopt.div( nij, sij**2))
-    fC = dict(
-        A = np.trace( C),
-        D = np.log( linalg.det( C)),
-        E = np.max( linalg.eig( C)[0]).real,
-        Etree = np.max( linalg.eig( C)[0]).real
-    )
+    fC = dict()
+    if optimality=='A':
+        fC['A'] = np.trace( C)
+    if optimality=='D':
+        fC['D'] = np.log( linalg.det( C))
+    if optimality=='E':
+        fC['E'] = np.max( linalg.eig( C)[0]).real
+    if optimality=='Etree':
+        fC['Etree'] = np.max( linalg.eig( C)[0]).real
+
     df = np.zeros( ntimes)
     for t in xrange( ntimes):
         zeta = matrix( 1. + 2*delta*(np.random.rand( K, K) - 0.5))
@@ -149,6 +153,21 @@ def check_sparse_A_optimal( sij, ntimes=10, delta=1e-1, tol=1e-5):
 
     return success and success2
 
+def check_relative_only_A_optimal( sij):
+    '''
+    Check A_optimize with only relative measurements (s[i][i] = infinity).
+    '''
+    sij = matrix( sij)
+    K = sij.size[0]
+    for i in range(K): sij[i,i] = np.inf
+    nij = A_optimize( sij)
+    success = check_optimality( sij, nij)
+    if (not success):
+        print 'FAIL: A_optimize for relative-only measurements did not generate optimal allocation.'
+    else:
+        print 'SUCCESS: A_optimize for relative-only measurements.'
+    return success
+
 def check_hessian( dF, d2F, x0):
     '''
     Check the Hessian for correctness.
@@ -268,7 +287,6 @@ def unitTest( tol=1.e-4):
           [ 0.1, 1., 0.1 ],
           [ 0.1, 0.1, 1.2 ]])
 
-
     from scipy.optimize import check_grad
 
     def F( x):
@@ -336,6 +354,10 @@ def d2F( x):
     if (check_sparse_A_optimal( sij)):
         print 'Sparse A-optimal passed!'
 
+    # Check A-optimal when only relative measurements are included.
+    if (check_relative_only_A_optimal( sij)):
+        print 'Relative-only A-optimal passed!'
+
     # Test covariance computation
     if (test_covariance(5, T=4000)):
         print 'Covariance computation passed!'
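Note on the omega shift in G() (a minimal sketch, not part of the patch): with relative-only measurements the Fisher matrix F is a weighted graph Laplacian, so F*1 = 0 and the quantities are determined only up to an additive constant.  F += omega adds omega to every entry, i.e. omega * 1*1^t, which constrains the mean; the variance of any difference x_i - x_j is then independent of omega.  The standalone numpy sketch below illustrates this; all names in it are illustrative and do not come from A_opt.py.

    import numpy as np

    K = 4
    rng = np.random.RandomState( 1)

    # Effective precisions n[i,j]/s[i,j]^2 for relative-only measurements:
    # the Fisher matrix is a weighted graph Laplacian, with F.dot( ones) == 0.
    w = rng.rand( K, K)
    w = 0.5*(w + w.T)
    np.fill_diagonal( w, 0.)
    F = np.diag( w.sum( axis=1)) - w
    assert np.linalg.matrix_rank( F) == K - 1  # singular: the mean is free

    # F + omega*ones((K,K)) constrains the mean; the variance of a contrast
    # d with sum(d) == 0 equals d^t F^+ d, independent of omega.
    d = np.zeros( K)
    d[0], d[1] = 1., -1.  # contrast x_0 - x_1
    for omega in (0.5, 1., 10.):
        C = np.linalg.inv( F + omega*np.ones( (K, K)))
        print omega, d.dot( C.dot( d))  # same value (up to roundoff) for every omega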