From 0f542eafe04c4fe7568f83e935859665ffc77e3b Mon Sep 17 00:00:00 2001
From: Edoardo Pasca <edo.paskino@gmail.com>
Date: Fri, 5 Jul 2019 14:25:10 +0100
Subject: Norm2Sq and FISTA to give hints of why they fail (#351)

* Norm2Sq and FISTA to give hints of why they fail

* added denoising tests from demos

* L1Norm stores an instance of ShrinkageOperator

* relax limit for python2

* added source of tests
---
 .../Python/ccpi/optimisation/algorithms/FISTA.py   |   3 +-
 .../Python/ccpi/optimisation/functions/L1Norm.py   |  11 +-
 .../Python/ccpi/optimisation/functions/Norm2Sq.py  |   6 +-
 Wrappers/Python/test/test_algorithms.py            | 229 ++++++++++++++++++++-
 4 files changed, 239 insertions(+), 10 deletions(-)

(limited to 'Wrappers/Python')

diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
index 347dacd..e23116b 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/FISTA.py
@@ -73,7 +73,8 @@ class FISTA(Algorithm):
 
         self.f = f
         self.g = g
-
+        if f.L is None:
+            raise ValueError('Error: Fidelity Function\'s Lipschitz constant is set to None')
         self.invL = 1/f.L
         self.t = 1
         self.update_objective()
diff --git a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
index 97d03b9..cc4bef8 100644
--- a/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/L1Norm.py
@@ -35,7 +35,8 @@ class L1Norm(Function):
     def __init__(self, **kwargs):
         
         super(L1Norm, self).__init__()
-        self.b = kwargs.get('b',None) 
+        self.b = kwargs.get('b',None)
+        self.shrinkage_operator = ShrinkageOperator()
         
     def __call__(self, x):
         
@@ -69,14 +70,14 @@
         
         if out is None:
             if self.b is not None:
-                return self.b + ShrinkageOperator.__call__(self, x - self.b, tau)
+                return self.b + self.shrinkage_operator(x - self.b, tau)
             else:
-                return ShrinkageOperator.__call__(self, x, tau)             
+                return self.shrinkage_operator(x, tau)             
         else:
             if self.b is not None:
-                out.fill(self.b + ShrinkageOperator.__call__(self, x - self.b, tau))
+                out.fill(self.b + self.shrinkage_operator(x - self.b, tau))
             else:
-                out.fill(ShrinkageOperator.__call__(self, x, tau))
+                out.fill(self.shrinkage_operator(x, tau))
                                     
     def proximal_conjugate(self, x, tau, out=None):
         
diff --git a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
index c8f5e46..0da6e50 100755
--- a/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
+++ b/Wrappers/Python/ccpi/optimisation/functions/Norm2Sq.py
@@ -50,9 +50,11 @@ class Norm2Sq(Function):
         try:
             self.L = 2.0*self.c*(self.A.norm()**2)
         except AttributeError as ae:
-            pass
+            warnings.warn('{} could not calculate Lipschitz Constant. {}'.format(
+                self.__class__.__name__, ae))
         except NotImplementedError as noe:
-            pass
+            warnings.warn('{} could not calculate Lipschitz Constant. {}'.format(
+                self.__class__.__name__, noe))
         
     #def grad(self,x):
     #    return self.gradient(x, out=None)
diff --git a/Wrappers/Python/test/test_algorithms.py b/Wrappers/Python/test/test_algorithms.py
index f00467c..1577fa6 100755
--- a/Wrappers/Python/test/test_algorithms.py
+++ b/Wrappers/Python/test/test_algorithms.py
@@ -30,8 +30,12 @@ from ccpi.optimisation.algorithms import GradientDescent
 from ccpi.optimisation.algorithms import CGLS
 from ccpi.optimisation.algorithms import FISTA
 
+from ccpi.optimisation.algorithms import PDHG
 
-
+from ccpi.optimisation.operators import Gradient, BlockOperator, FiniteDiff
+from ccpi.optimisation.functions import MixedL21Norm, BlockFunction, L1Norm, KullbackLeibler                     
+from ccpi.framework import TestData
+import os ,sys
 
 class TestAlgorithms(unittest.TestCase):
     def setUp(self):
@@ -108,8 +112,229 @@ class TestAlgorithms(unittest.TestCase):
         alg.run(20, verbose=True)
         self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())
                
+    def test_FISTA_Norm2Sq(self):
+        print ("Test FISTA Norm2Sq")
+        ig = ImageGeometry(127,139,149)
+        x_init = ImageData(geometry=ig)
+        b = x_init.copy()
+        # fill with random numbers
+        b.fill(numpy.random.random(x_init.shape))
+        x_init = ig.allocate(ImageGeometry.RANDOM)
+        identity = Identity(ig)
+        
+	    #### it seems FISTA does not work with Norm2Sq
+        norm2sq = Norm2Sq(identity, b)
+        #norm2sq.L = 2 * norm2sq.c * identity.norm()**2
+        #norm2sq = FunctionOperatorComposition(L2NormSquared(b=b), identity)
+        opt = {'tol': 1e-4, 'memopt':False}
+        print ("initial objective", norm2sq(x_init))
+        alg = FISTA(x_init=x_init, f=norm2sq, g=ZeroFunction())
+        alg.max_iteration = 2
+        alg.run(20, verbose=True)
+        self.assertNumpyArrayAlmostEqual(alg.x.as_array(), b.as_array())
+    def test_FISTA_catch_Lipschitz(self):
+        print ("Test FISTA catch Lipschitz")
+        ig = ImageGeometry(127,139,149)
+        x_init = ImageData(geometry=ig)
+        b = x_init.copy()
+        # fill with random numbers
+        b.fill(numpy.random.random(x_init.shape))
+        x_init = ig.allocate(ImageGeometry.RANDOM)
+        identity = Identity(ig)
+        
+	    #### it seems FISTA does not work with Norm2Sq
+        norm2sq = Norm2Sq(identity, b)
+        print ('Lipschitz', norm2sq.L)
+        norm2sq.L = None
+        #norm2sq.L = 2 * norm2sq.c * identity.norm()**2
+        #norm2sq = FunctionOperatorComposition(L2NormSquared(b=b), identity)
+        opt = {'tol': 1e-4, 'memopt':False}
+        print ("initial objective", norm2sq(x_init))
+        try:
+            alg = FISTA(x_init=x_init, f=norm2sq, g=ZeroFunction())
+            self.assertTrue(False)
+        except ValueError as ve:
+            print (ve)
+            self.assertTrue(True)
+    def test_PDHG_Denoising(self):
+        print ("PDHG Denoising with 3 noises")
+        # adapted from demo PDHG_TV_Color_Denoising.py in CIL-Demos repository
+        
+        loader = TestData(data_dir=os.path.join(sys.prefix, 'share','ccpi'))
+        data = loader.load(TestData.PEPPERS, size=(256,256))
+        ig = data.geometry
+        ag = ig
+
+        which_noise = 0
+        # Create noisy data. 
+        noises = ['gaussian', 'poisson', 's&p']
+        noise = noises[which_noise]
+        
+        def setup(data, noise):
+            if noise == 's&p':
+                n1 = TestData.random_noise(data.as_array(), mode = noise, salt_vs_pepper = 0.9, amount=0.2, seed=10)
+            elif noise == 'poisson':
+                scale = 5
+                n1 = TestData.random_noise( data.as_array()/scale, mode = noise, seed = 10)*scale
+            elif noise == 'gaussian':
+                n1 = TestData.random_noise(data.as_array(), mode = noise, seed = 10)
+            else:
+                raise ValueError('Unsupported Noise ', noise)
+            noisy_data = ig.allocate()
+            noisy_data.fill(n1)
+        
+            # Regularisation Parameter depending on the noise distribution
+            if noise == 's&p':
+                alpha = 0.8
+            elif noise == 'poisson':
+                alpha = 1
+            elif noise == 'gaussian':
+                alpha = .3
+                # fidelity
+            if noise == 's&p':
+                g = L1Norm(b=noisy_data)
+            elif noise == 'poisson':
+                g = KullbackLeibler(noisy_data)
+            elif noise == 'gaussian':
+                g = 0.5 * L2NormSquared(b=noisy_data)
+            return noisy_data, alpha, g
+
+        noisy_data, alpha, g = setup(data, noise)
+        operator = Gradient(ig, correlation=Gradient.CORRELATION_SPACE)
+
+        f1 =  alpha * MixedL21Norm()
+
+        
+                    
+        # Compute operator Norm
+        normK = operator.norm()
+
+        # Primal & dual stepsizes
+        sigma = 1
+        tau = 1/(sigma*normK**2)
+
+        # Setup and run the PDHG algorithm
+        pdhg1 = PDHG(f=f1,g=g,operator=operator, tau=tau, sigma=sigma)
+        pdhg1.max_iteration = 2000
+        pdhg1.update_objective_interval = 200
+        pdhg1.run(1000)
+
+        rmse = (pdhg1.get_output() - data).norm() / data.as_array().size
+        print ("RMSE", rmse)
+        self.assertLess(rmse, 2e-4)
+
+        which_noise = 1
+        noise = noises[which_noise]
+        noisy_data, alpha, g = setup(data, noise)
+        operator = Gradient(ig, correlation=Gradient.CORRELATION_SPACE)
+
+        f1 =  alpha * MixedL21Norm()
+
+        
+                    
+        # Compute operator Norm
+        normK = operator.norm()
+
+        # Primal & dual stepsizes
+        sigma = 1
+        tau = 1/(sigma*normK**2)
+
+        # Setup and run the PDHG algorithm
+        pdhg1 = PDHG(f=f1,g=g,operator=operator, tau=tau, sigma=sigma)
+        pdhg1.max_iteration = 2000
+        pdhg1.update_objective_interval = 200
+        pdhg1.run(1000)
+
+        rmse = (pdhg1.get_output() - data).norm() / data.as_array().size
+        print ("RMSE", rmse)
+        self.assertLess(rmse, 2e-4)
+        
+        
+        which_noise = 2
+        noise = noises[which_noise]
+        noisy_data, alpha, g = setup(data, noise)
+        operator = Gradient(ig, correlation=Gradient.CORRELATION_SPACE)
+
+        f1 =  alpha * MixedL21Norm()
+
+        
+                    
+        # Compute operator Norm
+        normK = operator.norm()
+
+        # Primal & dual stepsizes
+        sigma = 1
+        tau = 1/(sigma*normK**2)
+
+        # Setup and run the PDHG algorithm
+        pdhg1 = PDHG(f=f1,g=g,operator=operator, tau=tau, sigma=sigma)
+        pdhg1.max_iteration = 2000
+        pdhg1.update_objective_interval = 200
+        pdhg1.run(1000)
+
+        rmse = (pdhg1.get_output() - data).norm() / data.as_array().size
+        print ("RMSE", rmse)
+        self.assertLess(rmse, 2e-4)
+
+    def test_FISTA_Denoising(self):
+        print ("FISTA Denoising Poisson Noise Tikhonov")
+        # adapted from demo FISTA_Tikhonov_Poisson_Denoising.py in CIL-Demos repository
+        loader = TestData(data_dir=os.path.join(sys.prefix, 'share','ccpi'))
+        data = loader.load(TestData.SHAPES)
+        ig = data.geometry
+        ag = ig
+        N=300
+        # Create Noisy data with Poisson noise
+        scale = 5
+        n1 = TestData.random_noise( data.as_array()/scale, mode = 'poisson', seed = 10)*scale
+        noisy_data = ImageData(n1)
+
+        # Regularisation Parameter
+        alpha = 10
+
+        # Setup and run the FISTA algorithm
+        operator = Gradient(ig)
+        fid = KullbackLeibler(noisy_data)
+
+        def KL_Prox_PosCone(x, tau, out=None):
+                
+            if out is None: 
+                tmp = 0.5 *( (x - fid.bnoise - tau) + ( (x + fid.bnoise - tau)**2 + 4*tau*fid.b   ) .sqrt() )
+                return tmp.maximum(0)
+            else:            
+                tmp =  0.5 *( (x - fid.bnoise - tau) + 
+                            ( (x + fid.bnoise - tau)**2 + 4*tau*fid.b   ) .sqrt()
+                            )
+                x.add(fid.bnoise, out=out)
+                out -= tau
+                out *= out
+                tmp = fid.b * (4 * tau)
+                out.add(tmp, out=out)
+                out.sqrt(out=out)
+                
+                x.subtract(fid.bnoise, out=tmp)
+                tmp -= tau
+                
+                out += tmp
+                
+                out *= 0.5
+                
+                # ADD the constraint here
+                out.maximum(0, out=out)
+                
+        fid.proximal = KL_Prox_PosCone
+
+        reg = FunctionOperatorComposition(alpha * L2NormSquared(), operator)
+
+        x_init = ig.allocate()
+        fista = FISTA(x_init=x_init , f=reg, g=fid)
+        fista.max_iteration = 3000
+        fista.update_objective_interval = 500
+        fista.run(3000, verbose=True)
+        rmse = (fista.get_output() - data).norm() / data.as_array().size
+        print ("RMSE", rmse)
+        self.assertLess(rmse, 4.2e-4)
 
-    
     def assertNumpyArrayEqual(self, first, second):
         res = True
         try:
-- 
cgit v1.2.3