From 6876bda04cde114642ebce3d6bd577da40fa34e9 Mon Sep 17 00:00:00 2001
From: gfardell <47746591+gfardell@users.noreply.github.com>
Date: Thu, 4 Jul 2019 21:43:08 +0100
Subject: Gf algorithm bug fix (#348)

* Algorithms updated to initialise all member variables in set_up()

* Added missing data files

* Removed memopt=False path
---
 .../Python/ccpi/optimisation/algorithms/CGLS.py    |  33 +++--
 .../Python/ccpi/optimisation/algorithms/FISTA.py   |  39 +++---
 .../optimisation/algorithms/GradientDescent.py     |  24 ++--
 .../Python/ccpi/optimisation/algorithms/PDHG.py    | 145 +++++----------------
 .../Python/ccpi/optimisation/algorithms/SIRT.py    |  25 ++--
 .../Python/ccpi/optimisation/functions/Norm2Sq.py  |  27 +---
 Wrappers/Python/setup.py                           |   9 +-
 7 files changed, 95 insertions(+), 207 deletions(-)

diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py b/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
index de904fb..82fbb96 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/CGLS.py
@@ -34,27 +34,25 @@ class CGLS(Algorithm):
         https://web.stanford.edu/group/SOL/software/cgls/
       
     '''
-    
-    
-    
+
     def __init__(self, **kwargs):
         
         super(CGLS, self).__init__()
-        self.x        = kwargs.get('x_init', None)
-        self.operator = kwargs.get('operator', None)
-        self.data     = kwargs.get('data', None)
-        self.tolerance     = kwargs.get('tolerance', 1e-6)
-        if self.x is not None and self.operator is not None and \
-           self.data is not None:
-            print (self.__class__.__name__ , "set_up called from creator")
-            self.set_up(x_init  =kwargs['x_init'],
-                               operator=kwargs['operator'],
-                               data    =kwargs['data'])
-            
-                                    
-    def set_up(self, x_init, operator , data ):
+        x_init    = kwargs.get('x_init', None)
+        operator  = kwargs.get('operator', None)
+        data      = kwargs.get('data', None)
+        tolerance = kwargs.get('tolerance', 1e-6)
+
+        if x_init is not None and operator is not None and data is not None:
+            print(self.__class__.__name__, "set_up called from creator")
+            self.set_up(x_init=x_init, operator=operator, data=data, tolerance=tolerance)
+
+    def set_up(self, x_init, operator, data, tolerance=1e-6):
 
         self.x = x_init * 0.
+        self.operator = operator
+        self.tolerance = tolerance
+
         self.r = data - self.operator.direct(self.x)
         self.s = self.operator.adjoint(self.r)
         
@@ -64,8 +62,7 @@ class CGLS(Algorithm):
         ##
         self.norms = self.s.norm()
         ##
-        
-        
+
         self.gamma = self.norms0**2
         self.normx = self.x.norm()
         self.xmax = self.normx   
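
Usage note: with this change CGLS no longer half-initialises itself from whatever kwargs happen to be present; either pass everything at construction (set_up runs immediately) or configure later, and the tolerance kwarg is now forwarded to set_up instead of being dropped. A minimal sketch of both routes, assuming the framework's ImageGeometry, Identity operator and base-class run() loop behave as elsewhere in this repo:

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.algorithms import CGLS
    from ccpi.optimisation.operators import Identity

    ig = ImageGeometry(voxel_num_x=10, voxel_num_y=10)
    A  = Identity(ig)              # stand-in for a real forward operator
    b  = ig.allocate(value=1.0)    # measured data
    x0 = ig.allocate(value=0.0)    # starting point

    # Route 1: fully-specified creator triggers set_up() at once.
    cgls = CGLS(x_init=x0, operator=A, data=b, tolerance=1e-6)

    # Route 2: deferred configuration.
    cgls = CGLS()
    cgls.set_up(x_init=x0, operator=A, data=b, tolerance=1e-6)
    cgls.max_iteration = 20
    cgls.run(20)
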
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
index 9e40c95..fa1d8d8 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
@@ -39,40 +39,31 @@ class FISTA(Algorithm):
         '''initialisation can be done at creation time if all 
         proper variables are passed or later with set_up'''
         super(FISTA, self).__init__()
-        self.f = kwargs.get('f', None)
-        self.g = kwargs.get('g', ZeroFunction())
-        self.x_init = kwargs.get('x_init',None)
-        self.invL = None
-        self.t_old = 1
-        if self.x_init is not None and \
-           self.f is not None and self.g is not None:
-            print ("FISTA set_up called from creator")
-            self.set_up(self.x_init, self.f, self.g)        
+        f = kwargs.get('f', None)
+        g = kwargs.get('g', ZeroFunction())
+        x_init = kwargs.get('x_init', None)
+
+        if x_init is not None and f is not None:
+            print(self.__class__.__name__, "set_up called from creator")
+            self.set_up(x_init=x_init, f=f, g=g)
+
+    def set_up(self, x_init, f, g=ZeroFunction()):
 
-    
-    def set_up(self, x_init, f, g, opt=None, **kwargs):
-        
-        self.f = f
-        self.g = g
-        
-        # algorithmic parameters
-        if opt is None: 
-            opt = {'tol': 1e-4}
-        
         self.y = x_init.copy()
         self.x_old = x_init.copy()
         self.x = x_init.copy()
-        self.u = x_init.copy()            
+        self.u = x_init.copy()
 
+        self.f = f
+        self.g = g
 
         self.invL = 1/f.L
-        
-        self.t_old = 1
+        self.t = 1
         self.update_objective()
         self.configured = True
             
     def update(self):
-
+        self.t_old = self.t
         self.f.gradient(self.y, out=self.u)
         self.u.__imul__( -self.invL )
         self.u.__iadd__( self.y )
@@ -87,7 +78,7 @@ class FISTA(Algorithm):
         self.y.__iadd__( self.x )
         
         self.x_old.fill(self.x)
-        self.t_old = self.t            
+
         
     def update_objective(self):
         self.loss.append( self.f(self.x) + self.g(self.x) )    
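
Usage note: g now defaults to ZeroFunction() in both __init__ and set_up, so FISTA only needs x_init and a differentiable f exposing a Lipschitz constant f.L (set_up takes 1/f.L as the step size). A sketch under the same ImageGeometry/Identity assumptions as the CGLS note above:

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.algorithms import FISTA
    from ccpi.optimisation.functions import Norm2Sq
    from ccpi.optimisation.operators import Identity

    ig = ImageGeometry(voxel_num_x=10, voxel_num_y=10)
    A, b, x0 = Identity(ig), ig.allocate(value=1.0), ig.allocate(value=0.0)

    f = Norm2Sq(A, b, c=0.5)       # f.L derived from the operator norm in __init__
    fista = FISTA(x_init=x0, f=f)  # g falls back to ZeroFunction()
    fista.max_iteration = 100
    fista.run(100)
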
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
index cbccd73..8c2b693 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
@@ -25,18 +25,14 @@ class GradientDescent(Algorithm):
         '''initialisation can be done at creation time if all 
         proper variables are passed or later with set_up'''
         super(GradientDescent, self).__init__()
-        self.x = None
-        self.rate = 0
-        self.objective_function = None
-        self.regulariser = None
-        args = ['x_init', 'objective_function', 'rate']
-        for k,v in kwargs.items():
-            if k in args:
-                args.pop(args.index(k))
-        if len(args) == 0:
-            self.set_up(x_init=kwargs['x_init'],
-                               objective_function=kwargs['objective_function'],
-                               rate=kwargs['rate'])
+
+        x_init               = kwargs.get('x_init', None)
+        objective_function   = kwargs.get('objective_function', None)
+        rate                 = kwargs.get('rate', None)
+
+        if x_init is not None and objective_function is not None and rate is not None:
+            print(self.__class__.__name__, "set_up called from creator")
+            self.set_up(x_init=x_init, objective_function=objective_function, rate=rate)
     
     def should_stop(self):
         '''stopping criterion, currently only based on number of iterations'''
@@ -47,14 +43,17 @@ class GradientDescent(Algorithm):
         self.x = x_init.copy()
         self.objective_function = objective_function
         self.rate = rate
+
         self.loss.append(objective_function(x_init))
         self.iteration = 0
+
         try:
             self.memopt = self.objective_function.memopt
         except AttributeError as ae:
             self.memopt = False
         if self.memopt:
             self.x_update = x_init.copy()
+
         self.configured = True
 
     def update(self):
@@ -68,4 +67,3 @@ class GradientDescent(Algorithm):
 
     def update_objective(self):
         self.loss.append(self.objective_function(self.x))
-        
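
Usage note: the old creator walked kwargs against a mutable args list and could silently leave the object unconfigured; the new guard simply requires x_init, objective_function and rate together. A sketch, same assumptions as above:

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.algorithms import GradientDescent
    from ccpi.optimisation.functions import Norm2Sq
    from ccpi.optimisation.operators import Identity

    ig = ImageGeometry(voxel_num_x=10, voxel_num_y=10)
    A, b, x0 = Identity(ig), ig.allocate(value=1.0), ig.allocate(value=0.0)

    # All three pieces present, so set_up() runs from the creator;
    # Norm2Sq no longer carries memopt, which the AttributeError guard absorbs.
    gd = GradientDescent(x_init=x0, objective_function=Norm2Sq(A, b), rate=0.3)
    gd.max_iteration = 50
    gd.run(50)
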
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
index 2503fe6..8f37765 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/PDHG.py
@@ -24,42 +24,42 @@ from ccpi.optimisation.operators import BlockOperator
 from ccpi.framework import BlockDataContainer
 from ccpi.optimisation.functions import FunctionOperatorComposition
 
+
 class PDHG(Algorithm):
     '''Primal Dual Hybrid Gradient'''
 
     def __init__(self, **kwargs):
         super(PDHG, self).__init__(max_iteration=kwargs.get('max_iteration',0))
-        self.f        = kwargs.get('f', None)
-        self.operator = kwargs.get('operator', None)
-        self.g        = kwargs.get('g', None)
-        self.tau      = kwargs.get('tau', None)
-        self.sigma    = kwargs.get('sigma', 1.)
-        
-        
-        if self.f is not None and self.operator is not None and \
-           self.g is not None:
-            if self.tau is None:
-                # Compute operator Norm
-                normK = self.operator.norm()
-                # Primal & dual stepsizes
-                self.tau = 1/(self.sigma*normK**2)
-            print ("Calling from creator")
-            self.set_up(self.f,
-                        self.g,
-                        self.operator,
-                        self.tau, 
-                        self.sigma)
-
-    def set_up(self, f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
+        f        = kwargs.get('f', None)
+        operator = kwargs.get('operator', None)
+        g        = kwargs.get('g', None)
+        tau      = kwargs.get('tau', None)
+        sigma    = kwargs.get('sigma', 1.)
+
+        if f is not None and operator is not None and g is not None:
+            print(self.__class__.__name__, "set_up called from creator")
+            self.set_up(f=f, g=g, operator=operator, tau=tau, sigma=sigma)
+
+    def set_up(self, f, g, operator, tau=None, sigma=1.):
+
+        # unreachable with the default sigma=1.; guards an explicit sigma=None
+        if sigma is None and tau is None:
+            raise ValueError('Need sigma*tau||K||^2<1')
+
         # algorithmic parameters
-        self.operator = operator
         self.f = f
         self.g = g
+        self.operator = operator
+
         self.tau = tau
         self.sigma = sigma
-        self.opt = opt
-        if sigma is None and tau is None:
-            raise ValueError('Need sigma*tau||K||^2<1') 
+
+        if self.tau is None:
+            # Compute operator Norm
+            normK = self.operator.norm()
+            # Primal & dual stepsizes
+            self.tau = 1 / (self.sigma * normK ** 2)
+
 
         self.x_old = self.operator.domain_geometry().allocate()
         self.x_tmp = self.x_old.copy()
@@ -83,7 +83,7 @@ class PDHG(Algorithm):
         self.y_tmp *= self.sigma
         self.y_tmp += self.y_old
 
-        #self.y = self.f.proximal_conjugate(self.y_old, self.sigma)
+        # self.y = self.f.proximal_conjugate(self.y_old, self.sigma)
         self.f.proximal_conjugate(self.y_tmp, self.sigma, out=self.y)
         
         # Gradient ascent, Primal problem solution
@@ -91,10 +91,9 @@ class PDHG(Algorithm):
         self.x_tmp *= -1*self.tau
         self.x_tmp += self.x_old
 
-            
         self.g.proximal(self.x_tmp, self.tau, out=self.x)
 
-        #Update
+        # Update
         self.x.subtract(self.x_old, out=self.xbar)
         self.xbar *= self.theta
         self.xbar += self.x
@@ -107,90 +106,4 @@ class PDHG(Algorithm):
         p1 = self.f(self.operator.direct(self.x)) + self.g(self.x)
         d1 = -(self.f.convex_conjugate(self.y) + self.g.convex_conjugate(-1*self.operator.adjoint(self.y)))
 
-        self.loss.append([p1,d1,p1-d1])
-
-
-
-def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs):
-        
-    # algorithmic parameters
-    if opt is None: 
-        opt = {'tol': 1e-6, 'niter': 500, 'show_iter': 100, \
-               'memopt': False} 
-        
-    if sigma is None and tau is None:
-        raise ValueError('Need sigma*tau||K||^2<1') 
-                
-    niter = opt['niter'] if 'niter' in opt.keys() else 1000
-    tol = opt['tol'] if 'tol' in opt.keys() else 1e-4
-    memopt = opt['memopt'] if 'memopt' in opt.keys() else False  
-    show_iter = opt['show_iter'] if 'show_iter' in opt.keys() else False 
-    stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False 
-
-    x_old = operator.domain_geometry().allocate()
-    y_old = operator.range_geometry().allocate()       
-            
-    xbar = x_old.copy()
-    x_tmp = x_old.copy()
-    x = x_old.copy()
-    
-    y_tmp = y_old.copy()
-    y = y_tmp.copy()
-
-        
-    # relaxation parameter
-    theta = 1
-    
-    t = time.time()
-    
-    primal = []
-    dual = []
-    pdgap = []
-    
-    
-    for i in range(niter):
-        
-
-        
-        if memopt:
-            operator.direct(xbar, out = y_tmp)             
-            y_tmp *= sigma
-            y_tmp += y_old 
-        else:
-            y_tmp = y_old +  sigma * operator.direct(xbar)                        
-        
-        f.proximal_conjugate(y_tmp, sigma, out=y)
-        
-        if memopt:
-            operator.adjoint(y, out = x_tmp)   
-            x_tmp *= -1*tau
-            x_tmp += x_old
-        else:
-            x_tmp = x_old - tau*operator.adjoint(y)
-            
-        g.proximal(x_tmp, tau, out=x)
-             
-        x.subtract(x_old, out=xbar)
-        xbar *= theta
-        xbar += x
-                                              
-        x_old.fill(x)
-        y_old.fill(y)
-                    
-        if i%10==0:
-            
-            p1 = f(operator.direct(x)) + g(x)
-            d1 = - ( f.convex_conjugate(y) + g.convex_conjugate(-1*operator.adjoint(y)) )            
-            primal.append(p1)
-            dual.append(d1)
-            pdgap.append(p1-d1) 
-            print(p1, d1, p1-d1)
-            
-        
-                         
-    t_end = time.time()        
-        
-    return x, t_end - t, primal, dual, pdgap
-
-
-
+        self.loss.append([p1, d1, p1-d1])
\ No newline at end of file
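
Usage note: the tau/sigma logic has moved from __init__ into set_up, so both construction routes now share it: when tau is omitted, set_up derives tau = 1/(sigma*||K||^2) with sigma defaulting to 1, and the unmaintained PDHG_old helper is deleted outright. A sketch, assuming L2NormSquared (which provides the proximal/proximal_conjugate calls PDHG uses) and Identity standing in for a real operator K:

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.algorithms import PDHG
    from ccpi.optimisation.functions import L2NormSquared
    from ccpi.optimisation.operators import Identity

    ig = ImageGeometry(voxel_num_x=10, voxel_num_y=10)
    K  = Identity(ig)
    b  = ig.allocate(value=1.0)
    f  = L2NormSquared(b=b)
    g  = L2NormSquared()

    # tau omitted: set_up computes tau = 1/(sigma*K.norm()**2), sigma = 1.
    pdhg = PDHG(f=f, g=g, operator=K, max_iteration=100)
    pdhg.run(100)

    # Explicit equivalent:
    sigma = 1.0
    tau = 1.0 / (sigma * K.norm() ** 2)
    pdhg = PDHG(f=f, g=g, operator=K, tau=tau, sigma=sigma, max_iteration=100)
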
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py b/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py
index 02ca937..ca5b084 100644
--- a/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/SIRT.py
@@ -31,19 +31,17 @@ class SIRT(Algorithm):
     '''
     def __init__(self, **kwargs):
         super(SIRT, self).__init__()
-        self.x          = kwargs.get('x_init', None)
-        self.operator   = kwargs.get('operator', None)
-        self.data       = kwargs.get('data', None)
-        self.constraint = kwargs.get('constraint', None)
-        if self.x is not None and self.operator is not None and \
-           self.data is not None:
-            print ("Calling from creator")
-            self.set_up(x_init=kwargs['x_init'],
-                        operator=kwargs['operator'],
-                        data=kwargs['data'],
-                        constraint=kwargs['constraint'])
 
-    def set_up(self, x_init, operator , data, constraint=None ):
+        x_init     = kwargs.get('x_init', None)
+        operator   = kwargs.get('operator', None)
+        data       = kwargs.get('data', None)
+        constraint = kwargs.get('constraint', None)
+
+        if x_init is not None and operator is not None and data is not None:
+            print(self.__class__.__name__, "set_up called from creator")
+            self.set_up(x_init=x_init, operator=operator, data=data, constraint=constraint)
+
+    def set_up(self, x_init, operator, data, constraint=None):
         
         self.x = x_init.copy()
         self.operator = operator
@@ -57,6 +55,7 @@ class SIRT(Algorithm):
         # Set up scaling matrices D and M.
         self.M = 1/self.operator.direct(self.operator.domain_geometry().allocate(value=1.0))        
         self.D = 1/self.operator.adjoint(self.operator.range_geometry().allocate(value=1.0))
+        self.update_objective()
         self.configured = True
 
 
@@ -67,7 +66,7 @@ class SIRT(Algorithm):
         self.x += self.relax_par * (self.D*self.operator.adjoint(self.M*self.r))
         
         if self.constraint is not None:
-            self.x = self.constraint.proximal(self.x,None)
+            self.x = self.constraint.proximal(self.x, None)
             # self.constraint.proximal(self.x,None, out=self.x)
 
     def update_objective(self):
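
Usage note: set_up now records the objective at x_init via update_objective() before marking the algorithm configured, so the loss history starts at iteration 0. A sketch, assuming the framework's IndicatorBox for the optional constraint and the same Identity/geometry stand-ins as above:

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.algorithms import SIRT
    from ccpi.optimisation.functions import IndicatorBox
    from ccpi.optimisation.operators import Identity

    ig = ImageGeometry(voxel_num_x=10, voxel_num_y=10)
    A, b, x0 = Identity(ig), ig.allocate(value=1.0), ig.allocate(value=0.0)

    sirt = SIRT(x_init=x0, operator=A, data=b, constraint=IndicatorBox(lower=0))
    print(sirt.loss[0])       # objective at x_init, recorded by the new set_up
    sirt.max_iteration = 10
    sirt.run(10)
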
diff --git a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
index 204fdc4..eea300d 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
@@ -36,27 +36,14 @@ class Norm2Sq(Function):
     
     '''
     
-    def __init__(self,A,b,c=1.0,memopt=False):
+    def __init__(self, A, b, c=1.0):
         super(Norm2Sq, self).__init__()
     
         self.A = A  # Should be an operator, default identity
         self.b = b  # Default zero DataSet?
         self.c = c  # Default 1.
-        if memopt:
-            try:
-                self.range_tmp = A.range_geometry().allocate()
-                self.domain_tmp = A.domain_geometry().allocate()
-                self.memopt = True
-            except NameError as ne:
-                warnings.warn(str(ne))
-                self.memopt = False
-            except NotImplementedError as nie:
-                print (nie)
-                warnings.warn(str(nie))
-                self.memopt = False
-        else:
-            self.memopt = False
-        
+        self.range_tmp = A.range_geometry().allocate()
+
         # Compute the Lipschitz parameter from the operator if possible
         # Leave it initialised to None otherwise
         try:
@@ -69,7 +56,7 @@ class Norm2Sq(Function):
     #def grad(self,x):
     #    return self.gradient(x, out=None)
 
-    def __call__(self,x):
+    def __call__(self, x):
         #return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel()))
         #if out is None:
         #    return self.c*( ( (self.A.direct(x)-self.b)**2).sum() )
@@ -84,8 +71,8 @@ class Norm2Sq(Function):
             # added for compatibility with SIRF 
             return (y.norm()**2) * self.c
     
-    def gradient(self, x, out = None):
-        if self.memopt:
+    def gradient(self, x, out=None):
+        if out is not None:
             #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b )
             self.A.direct(x, out=self.range_tmp)
             self.range_tmp -= self.b 
@@ -93,4 +80,4 @@ class Norm2Sq(Function):
             #self.direct_placehold.multiply(2.0*self.c, out=out)
             out *= (self.c * 2.0)
         else:
-            return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
+            return (2.0*self.c)*self.A.adjoint(self.A.direct(x) - self.b)
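
Usage note: the memopt flag and its try/except scaffolding are gone; range_tmp is always allocated up front and gradient() picks the in-place branch purely on whether out is supplied, which is exactly the call pattern FISTA's update() relies on. A sketch of the two call styles, with A, b, x0 set up as in the notes above:

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.functions import Norm2Sq
    from ccpi.optimisation.operators import Identity

    ig = ImageGeometry(voxel_num_x=10, voxel_num_y=10)
    A, b, x0 = Identity(ig), ig.allocate(value=1.0), ig.allocate(value=0.0)

    f = Norm2Sq(A, b, c=1.0)
    grad = f.gradient(x0)         # allocating path: 2c * A^T (A x0 - b)
    f.gradient(x0, out=grad)      # in-place path, reusing f.range_tmp
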
diff --git a/Wrappers/Python/setup.py b/Wrappers/Python/setup.py
index e680df3..ea6181e 100644
--- a/Wrappers/Python/setup.py
+++ b/Wrappers/Python/setup.py
@@ -37,9 +37,12 @@ setup(
               'ccpi.processors',
               'ccpi.contrib','ccpi.contrib.optimisation',
               'ccpi.contrib.optimisation.algorithms'],
-    data_files = [('share/ccpi', ['data/boat.tiff', 'data/peppers.tiff',
-                                 'data/camera.png', 
-                                 'data/resolution_chart.tiff', 'data/shapes.png'])],
+    data_files = [('share/ccpi', ['data/boat.tiff',
+                                  'data/peppers.tiff',
+                                  'data/camera.png',
+                                  'data/resolution_chart.tiff',
+                                  'data/shapes.png',
+                                  'data/24737_fd_normalised.nxs'])],
 
     # Project uses reStructuredText, so ensure that the docutils get
     # installed or upgraded on the target machine
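
Note: the newly listed NeXus dataset ships with the other sample data under share/ccpi. A sketch for locating it after an install, assuming the conventional sys.prefix data root (the actual prefix depends on the environment):

    import os, sys

    nxs = os.path.join(sys.prefix, 'share', 'ccpi', '24737_fd_normalised.nxs')
    assert os.path.exists(nxs)
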
-- 
cgit v1.2.3