10 files changed, 476 insertions, 207 deletions
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
index 064cb33..8ea2b6c 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
@@ -106,9 +106,9 @@ class FISTA(Algorithm):
         else:
-            u = self.y - self.invL*self.f.grad(self.y)
+            u = self.y - self.invL*self.f.gradient(self.y)

-        self.x = self.g.prox(u,self.invL)
+        self.x = self.g.proximal(u,self.invL)

         self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2)))
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
index 5e92767..d1b5351 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -158,14 +158,24 @@ def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
             x_old -= tau*operator.adjoint(y)
             x = g.proximal(x_old, tau)

-            xbar.fill(x)
-            xbar -= x_old
+            x.subtract(x_old, out=xbar)
             xbar *= theta
             xbar += x
-
+
             x_old.fill(x)
-            y_old.fill(y)
+            y_old.fill(y)
+
+
+#            if i%100==0:
+#
+#                p1 = f(operator.direct(x)) + g(x)
+#                d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) )
+#                primal.append(p1)
+#                dual.append(d1)
+#                pdgap.append(p1-d1)
+#                print(p1, d1, p1-d1)
+
         else:

             operator.direct(xbar, out = y_tmp)
@@ -186,6 +196,15 @@
             x_old.fill(x)
             y_old.fill(y)

+#            if i%100==0:
+#
+#                p1 = f(operator.direct(x)) + g(x)
+#                d1 = - ( f.convex_conjugate(y) + g(-1*operator.adjoint(y)) )
+#                primal.append(p1)
+#                dual.append(d1)
+#                pdgap.append(p1-d1)
+#                print(p1, d1, p1-d1)
+
     t_end = time.time()
diff --git a/Wrappers/Python/ccpi/optimisation/algs.py b/Wrappers/Python/ccpi/optimisation/algs.py
index c142eda..2f819d3 100755
--- a/Wrappers/Python/ccpi/optimisation/algs.py
+++ b/Wrappers/Python/ccpi/optimisation/algs.py
@@ -72,7 +72,7 @@ def FISTA(x_init, f=None, g=None, opt=None):
     t_old = 1

-    c = f(x_init) + g(x_init)
+#    c = f(x_init) + g(x_init)

     # algorithm loop
     for it in range(0, max_iter):
@@ -99,9 +99,9 @@ def FISTA(x_init, f=None, g=None, opt=None):
         else:
-            u = y - invL*f.grad(y)
+            u = y - invL*f.gradient(y)

-        x = g.prox(u,invL)
+        x = g.proximal(u,invL)

         t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2)))
@@ -111,8 +111,8 @@ def FISTA(x_init, f=None, g=None, opt=None):
         t_old = t

         # time and criterion
-        timing[it] = time.time() - time0
-        criter[it] = f(x) + g(x);
+#        timing[it] = time.time() - time0
+#        criter[it] = f(x) + g(x);

         # stopping rule
         #if np.linalg.norm(x - x_old) < tol * np.linalg.norm(x_old) and it > 10:
@@ -121,9 +121,9 @@ def FISTA(x_init, f=None, g=None, opt=None):
     #print(it, 'out of', 10, 'iterations', end='\r');

     #criter = criter[0:it+1];
-    timing = numpy.cumsum(timing[0:it+1]);
+#    timing = numpy.cumsum(timing[0:it+1]);

-    return x, it, timing, criter
+    return x #, it, timing, criter

 def FBPD(x_init, operator=None, constraint=None, data_fidelity=None,\
          regulariser=None, opt=None):
diff --git a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py
index 38bc458..70511bb 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/FunctionOperatorComposition.py
@@ -54,15 +54,20 @@ class FunctionOperatorComposition(Function):
     def proximal(self, x, tau, out=None):

-        '''proximal does not take into account the Operator'''
-
-        return self.function.proximal(x, tau, out=out)
+        '''proximal does not take into account the Operator'''
+        if out is None:
+            return self.function.proximal(x, tau)
+        else:
+            self.function.proximal(x, tau, out=out)
+
     def proximal_conjugate(self, x, tau, out=None):

         ''' proximal conjugate does not take into account the Operator'''
-
-        return self.function.proximal_conjugate(x, tau, out=out)
+        if out is None:
+            return self.function.proximal_conjugate(x, tau)
+        else:
+            self.function.proximal_conjugate(x, tau, out=out)

     def gradient(self, x, out=None):
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
index 2d0a00a..1946d67 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L2NormSquared.py
@@ -34,6 +34,7 @@ class L2NormSquared(Function):
         super(L2NormSquared, self).__init__()
         self.b = kwargs.get('b',None)
+        self.L = 2

     def __call__(self, x):
@@ -88,10 +89,25 @@ class L2NormSquared(Function):
         '''
         if out is None:
-            if self.b is not None:
-                return (x - self.b)/(1+2*tau) + self.b
-            else:
+
+            if self.b is None:
                 return x/(1+2*tau)
+            else:
+                tmp = x
+                tmp -= self.b
+                tmp /= (1+2*tau)
+                tmp += self.b
+                return tmp
+#                return (x-self.b)/(1+2*tau) + self.b
+
+#            if self.b is not None:
+#                out=x
+#            if self.b is not None:
+#                out -= self.b
+#            out /= (1+2*tau)
+#            if self.b is not None:
+#                out += self.b
+#            return out
         else:
             out.fill(x)
             if self.b is not None:
@@ -247,7 +263,38 @@ if __name__ == '__main__':
     print(res_out.as_array())

     numpy.testing.assert_array_almost_equal(res_no_out.as_array(), \
-                                            res_out.as_array(), decimal=4)
+                                            res_out.as_array(), decimal=4)
+
+
+
+    ig1 = ImageGeometry(2,3)
+
+    tau = 0.1
+
+    u = ig1.allocate('random_int')
+    b = ig1.allocate('random_int')
+
+    scalar = 0.5
+    f_scaled = scalar * L2NormSquared(b=b)
+    f_noscaled = L2NormSquared(b=b)
+
+
+    res1 = f_scaled.proximal(u, tau)
+    res2 = f_noscaled.proximal(u, tau*scalar)
+
+#    res2 = (u + tau*b)/(1+tau)
+
+    numpy.testing.assert_array_almost_equal(res1.as_array(), \
+                                            res2.as_array(), decimal=4)
+
+
+
+
+
+
+
+
+
diff --git a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
index 3541bc2..7e6b6e7 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/MixedL21Norm.py
@@ -54,9 +54,8 @@ class MixedL21Norm(Function):
         else:

-            #tmp = [ el**2 for el in x.containers ]
-            #res = sum(tmp).sqrt().sum()
-            res = x.pnorm()
+            tmp = [ el**2 for el in x.containers ]
+            res = sum(tmp).sqrt().sum()

         return res
@@ -109,7 +108,7 @@ class MixedL21Norm(Function):

#                x.divide(sum([el*el for el in x.containers]).sqrt().maximum(1.0), out=out)
                 #TODO this is slow, why ???
-#                x.divide(x.norm().maximum(1.0), out=out)
+#                x.divide(x.pnorm().maximum(1.0), out=out)

     def __rmul__(self, scalar):
diff --git a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
index cb85249..7caeab2 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/ScaledFunction.py
@@ -37,11 +37,14 @@ class ScaledFunction(object):
     '''
     def __init__(self, function, scalar):
         super(ScaledFunction, self).__init__()
-        self.L = None
+
         if not isinstance (scalar, Number):
             raise TypeError('expected scalar: got {}'.format(type(scalar)))
         self.scalar = scalar
         self.function = function
+
+        if self.function.L is not None:
+            self.L = self.scalar * self.function.L

     def __call__(self,x, out=None):
         '''Evaluates the function at x '''
@@ -64,7 +67,7 @@ class ScaledFunction(object):
         if out is None:
             return self.function.proximal(x, tau*self.scalar)
         else:
-            out.fill( self.function.proximal(x, tau*self.scalar) )
+            self.function.proximal(x, tau*self.scalar, out = out)

     def proximal_conjugate(self, x, tau, out = None):
         '''This returns the proximal operator for the function at x, tau
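The ScaledFunction hunk above now folds the scalar into the step size, `self.function.proximal(x, tau*self.scalar, out=out)`, instead of filling `out` with a freshly allocated result, and the test appended to L2NormSquared.py above checks the same identity, `f_scaled.proximal(u, tau) == f_noscaled.proximal(u, tau*scalar)`. Below is a minimal plain-numpy sketch of why that delegation is valid for f(x) = ||x - b||^2 (hedged: illustrative code only, `prox_l2sq` is a hypothetical helper, not the ccpi DataContainer API; note it copies `x` first, whereas the `tmp = x` line in the L2NormSquared hunk aliases the input if `-=` is in-place):

```python
import numpy as np

def prox_l2sq(x, tau, b):
    # Closed form for argmin_u ||u - b||^2 + ||u - x||^2/(2*tau):
    # optimality 2*(u - b) + (u - x)/tau = 0  =>  u = (x + 2*tau*b)/(1 + 2*tau),
    # computed with the same in-place sequence as the L2NormSquared hunk.
    tmp = x.copy()              # copy so the caller's x is not mutated
    tmp -= b
    tmp /= 1.0 + 2.0 * tau
    tmp += b                    # equals (x - b)/(1 + 2*tau) + b
    return tmp

rng = np.random.default_rng(10)
x, b = rng.standard_normal(6), rng.standard_normal(6)
tau, c = 0.1, 0.5               # step size and scalar, as in the new test

# Prox of the scaled function c*f from its own optimality condition:
# 2*c*(u - b) + (u - x)/tau = 0  =>  u = (x + 2*tau*c*b)/(1 + 2*tau*c)
direct = (x + 2.0 * tau * c * b) / (1.0 + 2.0 * tau * c)

# ScaledFunction's rule: delegate to f with the step scaled by c.
np.testing.assert_allclose(direct, prox_l2sq(x, c * tau, b))
```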
diff --git a/Wrappers/Python/wip/fista_TV_denoising.py b/Wrappers/Python/wip/fista_TV_denoising.py
new file mode 100644
index 0000000..a9712fc
--- /dev/null
+++ b/Wrappers/Python/wip/fista_TV_denoising.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Feb 22 14:53:03 2019
+
+@author: evangelos
+"""
+
+from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+from ccpi.optimisation.algorithms import PDHG, PDHG_old, FISTA
+
+from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
+from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
+                      MixedL21Norm, FunctionOperatorComposition, BlockFunction, ScaledFunction
+
+from ccpi.optimisation.algs import FISTA
+
+from skimage.util import random_noise
+
+from timeit import default_timer as timer
+def dt(steps):
+    return steps[-1] - steps[-2]
+
+# Create phantom for TV denoising
+
+N = 100
+
+data = np.zeros((N,N))
+data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
+data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
+
+ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
+ag = ig
+
+# Create noisy data. Add Gaussian noise
+n1 = random_noise(data, mode = 'gaussian', mean=0, var = 0.05, seed=10)
+noisy_data = ImageData(n1)
+
+
+plt.imshow(noisy_data.as_array())
+plt.title('Noisy data')
+plt.show()
+
+# Regularisation Parameter
+alpha = 2
+
+operator = Gradient(ig)
+g = alpha * MixedL21Norm()
+f = 0.5 * L2NormSquared(b = noisy_data)
+
+x_init = ig.allocate()
+opt = {'niter':2000}
+
+
+x = FISTA(x_init, f, g, opt)
+
+#fista = FISTA()
+#fista.set_up(x_init, f, g, opt )
+#fista.max_iteration = 10
+#
+#fista.run(2000)
+#plt.figure(figsize=(15,15))
+#plt.subplot(3,1,1)
+#plt.imshow(fista.get_output().as_array())
+#plt.title('no memopt class')
+
+
+
diff --git a/Wrappers/Python/wip/pdhg_TV_denoising.py b/Wrappers/Python/wip/pdhg_TV_denoising.py
index e142d94..a5cd1bf 100755
--- a/Wrappers/Python/wip/pdhg_TV_denoising.py
+++ b/Wrappers/Python/wip/pdhg_TV_denoising.py
@@ -23,11 +23,9 @@ from timeit import default_timer as timer
 def dt(steps):
     return steps[-1] - steps[-2]

-#%%
-
 # Create phantom for TV denoising

-N = 500
+N = 100

 data = np.zeros((N,N))
 data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
@@ -40,16 +38,16 @@ ag = ig
 n1 = random_noise(data, mode = 'gaussian', mean=0, var = 0.05, seed=10)
 noisy_data = ImageData(n1)

+
 plt.imshow(noisy_data.as_array())
+plt.title('Noisy data')
 plt.show()

-#%%
-
 # Regularisation Parameter
 alpha = 2

 #method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ")
-method = '0'
+method = '1'

 if method == '0':
@@ -74,12 +72,9 @@ else:
     #         No Composite                                                   #
     ###########################################################################
     operator = Gradient(ig)
-    f = alpha * FunctionOperatorComposition(operator, MixedL21Norm())
-    g = L2NormSquared(b = noisy_data)
-
-    ###########################################################################
-#%%
-
+    f = alpha * MixedL21Norm()
+    g = 0.5 * L2NormSquared(b = noisy_data)
+
 # Compute operator Norm
 normK = operator.norm()
@@ -94,180 +89,36 @@ t1 = timer()
 res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
 t2 = timer()

+print(" Run memopt")

 t3 = timer()
 res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1)
 t4 = timer()
-#
-print ("No memopt in {}s, memopt in {}s ".format(t2-t1, t4 -t3))
-
-#
-#%%
-#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
-#pdhg.max_iteration = 2000
-#pdhg.update_objective_interval = 100
-##
-#pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
-#pdhgo.max_iteration = 2000
-#pdhgo.update_objective_interval = 100
-##
-#steps = [timer()]
-#pdhgo.run(2000)
-#steps.append(timer())
-#t1 = dt(steps)
-##
-#pdhg.run(2000)
-#steps.append(timer())
-#t2 = dt(steps)
-#
-#print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1))
-#res = pdhg.get_output()
-#res1 = pdhgo.get_output()

 #%%
-#plt.figure(figsize=(15,15))
-#plt.subplot(3,1,1)
-#plt.imshow(res.as_array())
-#plt.title('no memopt')
-#plt.colorbar()
-#plt.subplot(3,1,2)
-#plt.imshow(res1.as_array())
-#plt.title('memopt')
-#plt.colorbar()
-#plt.subplot(3,1,3)
-#plt.imshow((res1 - res).abs().as_array())
-#plt.title('diff')
-#plt.colorbar()
-#plt.show()
-
-
-#plt.figure(figsize=(15,15))
-#plt.subplot(3,1,1)
-#plt.imshow(pdhg.get_output().as_array())
-#plt.title('no memopt class')
-#plt.colorbar()
-#plt.subplot(3,1,2)
-#plt.imshow(res.as_array())
-#plt.title('no memopt')
-#plt.colorbar()
-#plt.subplot(3,1,3)
-#plt.imshow((pdhg.get_output() - res).abs().as_array())
-#plt.title('diff')
-#plt.colorbar()
-#plt.show()
-#
-#
-#
-#plt.figure(figsize=(15,15))
-#plt.subplot(3,1,1)
-#plt.imshow(pdhgo.get_output().as_array())
-#plt.title('memopt class')
-#plt.colorbar()
-#plt.subplot(3,1,2)
-#plt.imshow(res1.as_array())
-#plt.title('no memopt')
-#plt.colorbar()
-#plt.subplot(3,1,3)
-#plt.imshow((pdhgo.get_output() - res1).abs().as_array())
-#plt.title('diff')
-#plt.colorbar()
-#plt.show()
-
-
-
+plt.figure(figsize=(15,15))
+plt.subplot(3,1,1)
+plt.imshow(res.as_array())
+plt.title('no memopt')
+plt.colorbar()
+plt.subplot(3,1,2)
+plt.imshow(res1.as_array())
+plt.title('memopt')
+plt.colorbar()
+plt.subplot(3,1,3)
+plt.imshow((res1 - res).abs().as_array())
+plt.title('diff')
+plt.colorbar()
+plt.show()
+#
+plt.plot(np.linspace(0,N,N), res1.as_array()[int(N/2),:], label = 'memopt')
+plt.plot(np.linspace(0,N,N), res.as_array()[int(N/2),:], label = 'no memopt')
+plt.legend()
+plt.show()

-
-# print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1))
-# res = pdhg.get_output()
-# res1 = pdhgo.get_output()
-#
-# diff = (res-res1)
-# print ("diff norm {} max {}".format(diff.norm(), diff.abs().as_array().max()))
-# print ("Sum ( abs(diff) ) {}".format(diff.abs().sum()))
-#
-#
-# plt.figure(figsize=(5,5))
-# plt.subplot(1,3,1)
-# plt.imshow(res.as_array())
-# plt.colorbar()
-# #plt.show()
-#
-# #plt.figure(figsize=(5,5))
-# plt.subplot(1,3,2)
-# plt.imshow(res1.as_array())
-# plt.colorbar()
-
-#plt.show()
+print ("Time: No memopt in {}s, \n Time: Memopt in {}s ".format(t2-t1, t4 -t3))
+diff = (res1 - res).abs().as_array().max()
+print(" Max of abs difference is {}".format(diff))

-
-
-#=======
-## opt = {'niter':2000, 'memopt': True}
-#
-## res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
-#
-#>>>>>>> origin/pdhg_fix
-#
-#
-## opt = {'niter':2000, 'memopt': False}
-## res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1)
-#
-## plt.figure(figsize=(5,5))
-## plt.subplot(1,3,1)
-## plt.imshow(res.as_array())
-## plt.title('memopt')
-## plt.colorbar()
-## plt.subplot(1,3,2)
-## plt.imshow(res1.as_array())
-## plt.title('no memopt')
-## plt.colorbar()
-## plt.subplot(1,3,3)
-## plt.imshow((res1 - res).abs().as_array())
-## plt.title('diff')
-## plt.colorbar()
-## plt.show()
-#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
-#pdhg.max_iteration = 2000
-#pdhg.update_objective_interval = 100
-#
-#
-#pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
-#pdhgo.max_iteration = 2000
-#pdhgo.update_objective_interval = 100
-#
-#steps = [timer()]
-#pdhgo.run(200)
-#steps.append(timer())
-#t1 = dt(steps)
-#
-#pdhg.run(200)
-#steps.append(timer())
-#t2 = dt(steps)
-#
-#print ("Time difference {} {} {}".format(t1,t2,t2-t1))
-#sol = pdhg.get_output().as_array()
-##sol = result.as_array()
-##
-#fig = plt.figure()
-#plt.subplot(1,3,1)
-#plt.imshow(noisy_data.as_array())
-#plt.colorbar()
-#plt.subplot(1,3,2)
-#plt.imshow(sol)
-#plt.colorbar()
-#plt.subplot(1,3,3)
-#plt.imshow(pdhgo.get_output().as_array())
-#plt.colorbar()
-#
-#plt.show()
-###
-##
-####
-##plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth')
-##plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon')
-##plt.legend()
-##plt.show()
-#
-#
-##%%
-##
diff --git a/Wrappers/Python/wip/test_pdhg_profile/profile_pdhg_TV_denoising.py b/Wrappers/Python/wip/test_pdhg_profile/profile_pdhg_TV_denoising.py
new file mode 100644
index 0000000..e142d94
--- /dev/null
+++ b/Wrappers/Python/wip/test_pdhg_profile/profile_pdhg_TV_denoising.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Feb 22 14:53:03 2019
+
+@author: evangelos
+"""
+
+from ccpi.framework import ImageData, ImageGeometry, BlockDataContainer
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+from ccpi.optimisation.algorithms import PDHG, PDHG_old
+
+from ccpi.optimisation.operators import BlockOperator, Identity, Gradient
+from ccpi.optimisation.functions import ZeroFunction, L2NormSquared, \
+                      MixedL21Norm, FunctionOperatorComposition, BlockFunction, ScaledFunction
+
+from skimage.util import random_noise
+
+from timeit import default_timer as timer
+def dt(steps):
+    return steps[-1] - steps[-2]
+
+#%%
+
+# Create phantom for TV denoising
+
+N = 500
+
+data = np.zeros((N,N))
+data[round(N/4):round(3*N/4),round(N/4):round(3*N/4)] = 0.5
+data[round(N/8):round(7*N/8),round(3*N/8):round(5*N/8)] = 1
+
+ig = ImageGeometry(voxel_num_x = N, voxel_num_y = N)
+ag = ig
+
+# Create noisy data. Add Gaussian noise
+n1 = random_noise(data, mode = 'gaussian', mean=0, var = 0.05, seed=10)
+noisy_data = ImageData(n1)
+
+plt.imshow(noisy_data.as_array())
+plt.show()
+
+#%%
+
+# Regularisation Parameter
+alpha = 2
+
+#method = input("Enter structure of PDHG (0=Composite or 1=NotComposite): ")
+method = '0'
+
+if method == '0':
+
+    # Create operators
+    op1 = Gradient(ig)
+    op2 = Identity(ig, ag)
+
+    # Form Composite Operator
+    operator = BlockOperator(op1, op2, shape=(2,1) )
+
+    #### Create functions
+
+    f1 = alpha * MixedL21Norm()
+    f2 = 0.5 * L2NormSquared(b = noisy_data)
+    f = BlockFunction(f1, f2)
+
+    g = ZeroFunction()
+
+else:
+
+    ###########################################################################
+    #         No Composite                                                   #
+    ###########################################################################
+    operator = Gradient(ig)
+    f = alpha * FunctionOperatorComposition(operator, MixedL21Norm())
+    g = L2NormSquared(b = noisy_data)
+
+    ###########################################################################
+#%%
+
+# Compute operator Norm
+normK = operator.norm()
+
+# Primal & dual stepsizes
+sigma = 1
+tau = 1/(sigma*normK**2)
+
+opt = {'niter':2000}
+opt1 = {'niter':2000, 'memopt': True}
+
+t1 = timer()
+res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
+t2 = timer()
+
+
+t3 = timer()
+res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1)
+t4 = timer()
+#
+print ("No memopt in {}s, memopt in {}s ".format(t2-t1, t4 -t3))
+
+#
+#%%
+#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
+#pdhg.max_iteration = 2000
+#pdhg.update_objective_interval = 100
+##
+#pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
+#pdhgo.max_iteration = 2000
+#pdhgo.update_objective_interval = 100
+##
+#steps = [timer()]
+#pdhgo.run(2000)
+#steps.append(timer())
+#t1 = dt(steps)
+##
+#pdhg.run(2000)
+#steps.append(timer())
+#t2 = dt(steps)
+#
+#print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1))
+#res = pdhg.get_output()
+#res1 = pdhgo.get_output()
+
+#%%
+#plt.figure(figsize=(15,15))
+#plt.subplot(3,1,1)
+#plt.imshow(res.as_array())
+#plt.title('no memopt')
+#plt.colorbar()
+#plt.subplot(3,1,2)
+#plt.imshow(res1.as_array())
+#plt.title('memopt')
+#plt.colorbar()
+#plt.subplot(3,1,3)
+#plt.imshow((res1 - res).abs().as_array())
+#plt.title('diff')
+#plt.colorbar()
+#plt.show()
+
+
+#plt.figure(figsize=(15,15))
+#plt.subplot(3,1,1)
+#plt.imshow(pdhg.get_output().as_array())
+#plt.title('no memopt class')
+#plt.colorbar()
+#plt.subplot(3,1,2)
+#plt.imshow(res.as_array())
+#plt.title('no memopt')
+#plt.colorbar()
+#plt.subplot(3,1,3)
+#plt.imshow((pdhg.get_output() - res).abs().as_array())
+#plt.title('diff')
+#plt.colorbar()
+#plt.show()
+#
+#
+#
+#plt.figure(figsize=(15,15))
+#plt.subplot(3,1,1)
+#plt.imshow(pdhgo.get_output().as_array())
+#plt.title('memopt class')
+#plt.colorbar()
+#plt.subplot(3,1,2)
+#plt.imshow(res1.as_array())
+#plt.title('no memopt')
+#plt.colorbar()
+#plt.subplot(3,1,3)
+#plt.imshow((pdhgo.get_output() - res1).abs().as_array())
+#plt.title('diff')
+#plt.colorbar()
+#plt.show()
+
+
+
+
+
+# print ("Time difference {}s {}s {}s Speedup {:.2f}".format(t1,t2,t2-t1, t2/t1))
+# res = pdhg.get_output()
+# res1 = pdhgo.get_output()
+#
+# diff = (res-res1)
+# print ("diff norm {} max {}".format(diff.norm(), diff.abs().as_array().max()))
+# print ("Sum ( abs(diff) ) {}".format(diff.abs().sum()))
+#
+#
+# plt.figure(figsize=(5,5))
+# plt.subplot(1,3,1)
+# plt.imshow(res.as_array())
+# plt.colorbar()
+# #plt.show()
+#
+# #plt.figure(figsize=(5,5))
+# plt.subplot(1,3,2)
+# plt.imshow(res1.as_array())
+# plt.colorbar()
+
+#plt.show()
+
+
+
+#=======
+## opt = {'niter':2000, 'memopt': True}
+#
+## res, time, primal, dual, pdgap = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt)
+#
+#>>>>>>> origin/pdhg_fix
+#
+#
+## opt = {'niter':2000, 'memopt': False}
+## res1, time1, primal1, dual1, pdgap1 = PDHG_old(f, g, operator, tau = tau, sigma = sigma, opt = opt1)
+#
+## plt.figure(figsize=(5,5))
+## plt.subplot(1,3,1)
+## plt.imshow(res.as_array())
+## plt.title('memopt')
+## plt.colorbar()
+## plt.subplot(1,3,2)
+## plt.imshow(res1.as_array())
+## plt.title('no memopt')
+## plt.colorbar()
+## plt.subplot(1,3,3)
+## plt.imshow((res1 - res).abs().as_array())
+## plt.title('diff')
+## plt.colorbar()
+## plt.show()
+#pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
+#pdhg.max_iteration = 2000
+#pdhg.update_objective_interval = 100
+#
+#
+#pdhgo = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma, memopt=True)
+#pdhgo.max_iteration = 2000
+#pdhgo.update_objective_interval = 100
+#
+#steps = [timer()]
+#pdhgo.run(200)
+#steps.append(timer())
+#t1 = dt(steps)
+#
+#pdhg.run(200)
+#steps.append(timer())
+#t2 = dt(steps)
+#
+#print ("Time difference {} {} {}".format(t1,t2,t2-t1))
+#sol = pdhg.get_output().as_array()
+##sol = result.as_array()
+##
+#fig = plt.figure()
+#plt.subplot(1,3,1)
+#plt.imshow(noisy_data.as_array())
+#plt.colorbar()
+#plt.subplot(1,3,2)
+#plt.imshow(sol)
+#plt.colorbar()
+#plt.subplot(1,3,3)
+#plt.imshow(pdhgo.get_output().as_array())
+#plt.colorbar()
+#
+#plt.show()
+###
+##
+####
+##plt.plot(np.linspace(0,N,N), data[int(N/2),:], label = 'GTruth')
+##plt.plot(np.linspace(0,N,N), sol[int(N/2),:], label = 'Recon')
+##plt.legend()
+##plt.show()
+#
+#
+##%%
+##
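For reference, the `PDHG_old` hunk at the top of this diff replaces the two-step `xbar.fill(x); xbar -= x_old` with a single `x.subtract(x_old, out=xbar)` before the scale-and-add; both variants compute the over-relaxation step xbar = x + theta*(x - x_old) into a preallocated buffer. A short numpy sketch of the same in-place sequence (hedged: numpy arrays stand in for the ccpi DataContainers, whose `subtract` likewise accepts an `out` argument):

```python
import numpy as np

theta = 1.0                              # standard PDHG extrapolation parameter
x_old = np.array([1.0, 2.0, 3.0])        # previous primal iterate
x     = np.array([1.5, 1.0, 4.0])        # current primal iterate
xbar  = np.empty_like(x)                 # preallocated buffer, as in the memopt path

np.subtract(x, x_old, out=xbar)          # xbar <- x - x_old
xbar *= theta                            # xbar <- theta*(x - x_old)
xbar += x                                # xbar <- x + theta*(x - x_old)

np.testing.assert_allclose(xbar, x + theta * (x - x_old))
```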