Skip to content
Commits on Source (9)
......@@ -44,7 +44,7 @@ EJML is in Maven central repository and can easily be added to Gradle, Maven, an
```
<groupId>org.ejml</groupId>
<artifactId>ejml-all</artifactId>
<version>0.34</version>
<version>0.37.1</version>
```
This will add the entire library. Alternatively, you can include the required modules individually:
......
plugins {
id "com.peterabeles.gversion" version "1.1"
id "com.peterabeles.gversion" version "1.3.0"
}
gversion {
......@@ -13,7 +13,7 @@ allprojects {
apply plugin: 'eclipse'
group = 'org.ejml'
version = '0.34'
version = '0.37.1'
}
subprojects {
......@@ -84,7 +84,13 @@ subprojects {
}
dependencies {
compile 'com.google.code.findbugs:jsr305:3.0.2' // @Nullable
testCompile group: 'junit', name: 'junit', version: '4.12'
['core','generator-annprocess'].each { String a->
benchmarkCompile('org.openjdk.jmh:jmh-'+a+':1.19')
}
}
jar {
......@@ -228,7 +234,8 @@ task oneJarBin(type: Jar, dependsOn: javadocProjects.collect { it + ":compileJav
}
task wrapper(type: Wrapper) {
gradleVersion = '4.3'
distributionType = Wrapper.DistributionType.BIN
gradleVersion = '4.10.2'
}
project(':main:ejml-core').compileJava.dependsOn(createVersionFile)
......
<!--
~ Copyright (c) 2009-2017, Peter Abeles. All Rights Reserved.
~
~ This file is part of Efficient Java Matrix Library (EJML).
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project name="EJML" basedir="." default="main">
<property name="src.dir" value="src"/>
<property name="experimental.src.dir" value="experimental/src"/>
<property name="test.dir" value="test"/>
<property name="build.dir" value="build"/>
<property name="classes.dir" value="${build.dir}/classes"/>
<property name="jar.dir" value="${build.dir}/jar"/>
<property name="generate.dir" value="generate/src"/>
<property name="testbuild.dir" value="build/test"/>
<property name="testclasses.dir" value="${testbuild.dir}/classes"/>
<property name="testreport.dir" value="${testbuild.dir}/report"/>
<property name="junit.dir" value="lib/"/>
<path id="project.classpath">
<!--<fileset dir="${lib.dir}" includes="**/*.jar"/>-->
</path>
<path id="test.classpath">
<path refid="project.classpath"/>
<fileset dir="${junit.dir}" includes="junit*.jar"/>
<fileset dir="${jar.dir}" includes="**/${ant.project.name}.jar"/>
</path>
<target name="clean">
<delete dir="${build.dir}"/>
<delete dir="docs/api"/>
</target>
<target name="compile">
<!-- Capture the path as a delimited property using the refid attribute -->
<!--<property name="myclasspath" refid="project.classpath"/>-->
<!-- Emit the property to the ant console -->
<!--<echo message="Classpath = ${myclasspath}"/>-->
<mkdir dir="${classes.dir}"/>
<javac srcdir="${src.dir}" destdir="${classes.dir}" includeantruntime="false">
<classpath refid="project.classpath"/>
</javac>
</target>
<target name="compile experimental">
<mkdir dir="${classes.dir}"/>
<javac srcdir="${experimental.src.dir}" destdir="${classes.dir}" includeantruntime="false">
<classpath refid="project.classpath"/>
</javac>
</target>
<target name="jar" depends="compile">
<mkdir dir="${jar.dir}"/>
<jar destfile="${jar.dir}/${ant.project.name}.jar" basedir="${classes.dir}"/>
</target>
<target name="jar experimental" depends="compile,compile experimental">
<mkdir dir="${jar.dir}"/>
<jar destfile="${jar.dir}/${ant.project.name}.jar" basedir="${classes.dir}"/>
</target>
<target name="test" depends="jar">
<mkdir dir="${testbuild.dir}"/>
<mkdir dir="${testreport.dir}"/>
<mkdir dir="${testclasses.dir}"/>
<javac destdir="${testclasses.dir}" includeantruntime="false">
<src path="${generate.dir}"/>
<src path="${test.dir}"/>
<classpath>
<path refid="test.classpath"/>
</classpath>
</javac>
<junit printsummary="yes" showoutput="yes">
<classpath>
<path refid="test.classpath"/>
<pathelement location="${testclasses.dir}"/>
</classpath>
<formatter type="xml"/>
<batchtest fork="yes" todir="${testreport.dir}">
<fileset dir="${test.dir}" includes="**/Test*.java"/>
</batchtest>
</junit>
</target>
<target name="testreport">
<junitreport todir="${testreport.dir}">
<fileset dir="${testreport.dir}" includes="TEST-*.xml"/>
<report todir="${testreport.dir}"/>
</junitreport>
</target>
<!-- Creates a jar file with the project's source code -->
<target name="srcjar">
<mkdir dir="${jar.dir}"/>
<jar destfile="${jar.dir}/${ant.project.name}-src.jar">
<fileset dir="${src.dir}" includes="**/*.java"/>
</jar>
</target>
<target name="javadoc">
<javadoc
destdir="docs/api"
author="true"
version="true"
use="true"
windowtitle="Efficient Java Matrix Library"
overview="src/overview.html">
<link offline="false" href="http://docs.oracle.com/javase/7/docs/api/" packagelistloc="package-list" />
<packageset dir="main/core/src" defaultexcludes="yes">
<include name="org/ejml/**"/>
</packageset>
<packageset dir="main/dense64/src" defaultexcludes="yes">
<include name="org/ejml/**"/>
</packageset>
<packageset dir="main/denseC64/src" defaultexcludes="yes">
<include name="org/ejml/**"/>
</packageset>
<packageset dir="main/equation/src" defaultexcludes="yes">
<include name="org/ejml/**"/>
</packageset>
<packageset dir="main/simple/src" defaultexcludes="yes">
<include name="org/ejml/**"/>
</packageset>
<doctitle><![CDATA[<h1>Efficient Java Matrix Library API Specification</h1>]]></doctitle>
<bottom><![CDATA[<i>Copyright &#169; 2009-2017 Peter Abeles All Rights Reserved.</i>]]></bottom>
<!--<group title="Group 1 Packages" packages="com.dummy.test.a*"/>-->
<!--<group title="Group 2 Packages" packages="com.dummy.test.b*:com.dummy.test.c*"/>-->
<!--<link offline="true" href="http://java.sun.com/j2se/1.5.0/docs/api/" packagelistLoc="C:\tmp"/>-->
<!--<link href="http://developer.java.sun.com/developer/products/xml/docs/api/"/>-->
</javadoc>
</target>
<!-- Generates JavaDOC but with tracking information for google analytics -->
<target name="javadocWeb">
<javadoc
destdir="docs/api"
author="true"
version="true"
use="true"
windowtitle="Efficient Java Matrix Library">
<link offline="false" href="http://docs.oracle.com/javase/7/docs/api/" packagelistloc="package-list" />
<packageset dir="main/ejml-cdense/src" defaultexcludes="yes">
<include name="org/ejml/**"/>
</packageset>
<packageset dir="main/ejml-core/src" defaultexcludes="yes">
<include name="org/ejml/**"/>
</packageset>
<packageset dir="main/ejml-ddense/src" defaultexcludes="yes">
<include name="org/ejml/**"/>
</packageset>
<packageset dir="main/ejml-dsparse/src" defaultexcludes="yes">
<include name="org/ejml/**"/>
</packageset>
<packageset dir="main/ejml-fdense/src" defaultexcludes="yes">
<include name="org/ejml/**"/>
</packageset>
<packageset dir="main/ejml-simple/src" defaultexcludes="yes">
<include name="org/ejml/**"/>
</packageset>
<packageset dir="main/ejml-zdense/src" defaultexcludes="yes">
<include name="org/ejml/**"/>
</packageset>
<doctitle><![CDATA[<h1>Efficient Java Matrix Library API Specification:</h1>]]></doctitle>
<bottom><![CDATA[<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-10413214-7', 'ejml.org');
ga('send', 'pageview');
</script>
<br>
<b>Copyright &copy; 2009-2017 Peter Abeles</b>
]]>
</bottom>
</javadoc>
</target>
<target name="clean-build" depends="clean,jar"/>
<target name="main" depends="clean,jar"/>
</project>
......@@ -2,6 +2,101 @@ Change log for EJML
Date format: year/month/day
----- Version 0.37.1
2018/12/25
- Equations
* Fixed bug with "a = b' - c"
- CommonOps_DDRM
* scaleRow() and scaleCol()
- Added FancyPrint class
----- Version 0.37
2018/11/11
- CommonOps_DSCC
* elementMult() resizes internal data. In a test it went from 5600ms to 8ms to compute
Thanks Gabor Szarnyas for pointing out this issue
* Added fully sparse solve function
* fill(), sumCol(), minCol(), maxCol(), sumRow(), minRow(), maxRow()
- CovarianceOps_DDRM
* Bug fix with 1x1 matrices. Thanks Ciamac
- SimpleMatrix
* Added more support for sparse and sparse-dense functions
- CommonOps_DDRM
* invertSPD(), solveSPD()
* removeColumns()
- UnrolledCholesky
* Fixed sized matrices
* Dense row major
----- Version 0.36
2018/09/29
- Fixed Size Matrix
* Vector.set() for entire vector
* Outer vector product
* Assign a matrix and vector from an array
- SingularOps_DDRM
* Simpler methods and no need to create a decomposition for basic operations
- Equations
* You can now chain some operations
- CommonOps_DDRM
* New extract() where upper extents are implied
----- Version 0.35
2018/08/24
- UtilEjml
* solver = safe(solver) ensures solvers of any type don't modify their inputs
* printFancy() best formatter of floating point numbers to string
- Matrix.print()
* New default format and added the ability to change the format
* Printing using printFancy()
* If format is specified as "matlab" it will print out in a format which can be pasted into Matlab
* Fixed formatting of complex matrices
- Reshape output instead of exception
* Changed dense matrix multiply to have this behavior
- ConvertDArrays
* Methods for converting 2D arrays to and from EJML data types
- MatrixVectorMult_DSCC
* Added mult(Matrix,Vector,Vector)
* Added multAdd(Matrix,Vector,Vector)
* Added mult(Vector,Matrix,Vector)
* Added innerProduct(Vector,Matrix,Vector)
- CommonOps_DSCC
* Added multAdd(sparse,dense,dense)
* Added multTransA(sparse,dense,dense) multAddTransA(sparse,dense,dense)
* Added multTransB(sparse,dense,dense) multAddTransB(sparse,dense,dense)
* Added multTransAB(sparse,dense,dense) multAddTransAB(sparse,dense,dense)
* Added extractDiag(sparse,dense)
* Added innerProductLower(sparse,sparse) B = A'*A. Only lower triangle is saved in B
* Added symmLowerToFull(sparse,sparse) for converting a lower triangular matrix into a symmetric matrix
* Added column operations
* Added multRows() divideRows() multCols() divideCols() multRowsCols() divideRowsCols()
- Added LinearSolverSparseSafe
- LinearSolverSparse
* Changed lockStructure() to setStructureLocked( boolean )
- MatrixSparseTriplet
* No longer uses individual classes for each element and instead uses an array
* Java bloats classes and this should result in a large reduction in memory usage
- Sparse Solver
* Added functions for solving sparse matrices
* Improved efficiency of Triangle solve
* Triangle sparse solve supports lower and upper tall matrices. Plus the transpose.
- SparseMatrix
* added reshape(rows,cols) so that you don't need to specify nz_length when it doesn't matter.
- Improved exception messages for a few functions
- Changing it so that the output of various functions is reshaped so that it fits the input to make
development less tedious
- SimpleMatrix
* Fixed serialization
- Thanks fa-dev1 for reporting the issue
* Added elementMinAbs() because elementMaxAbs() was already included
- Examples
* Improved Levenberg-Marquardt
- Made it more like a "real" implementation and cleaned up the code
----- Version 0.34
2018/04/13
......
libejml-java (0.34-1) UNRELEASED; urgency=medium
libejml-java (0.37.1-1) UNRELEASED; urgency=medium
[ Andreas Tille ]
* New upstream version
* Standards-Version: 4.1.4
* debhelper 12
* Standards-Version: 4.3.0
* Point Vcs-fields to Salsa
* debhelper 11
* Secure URI in copyright format
* Respect DEB_BUILD_OPTIONS in override_dh_auto_test target
* Remove trailing whitespace in debian/copyright
[ Vincent Privat ]
* d/rules: s+gradle+./gradlew+
-- Andreas Tille <tille@debian.org> Wed, 02 May 2018 07:52:24 +0200
-- Andreas Tille <tille@debian.org> Sat, 12 Jan 2019 08:51:56 +0100
libejml-java (0.28-1) unstable; urgency=low
......
......@@ -3,12 +3,12 @@ Maintainer: Debian Java Maintainers <pkg-java-maintainers@lists.alioth.debian.or
Uploaders: Andreas Tille <tille@debian.org>
Section: java
Priority: optional
Build-Depends: debhelper (>= 11~),
Build-Depends: debhelper (>= 12~),
javahelper,
default-jdk,
ant,
gradle
Standards-Version: 4.1.4
Standards-Version: 4.3.0
Vcs-Browser: https://salsa.debian.org/java-team/libejml-java
Vcs-Git: https://salsa.debian.org/java-team/libejml-java.git
Homepage: http://ejml.org/wiki
......
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Contact: Peter Abeles <peter.abeles@gmail.com>
Source: https://github.com/lessthanoptimal/ejml
......
......@@ -18,9 +18,11 @@ override_dh_auto_build:
# javadoc
override_dh_auto_test:
ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS)))
./gradlew --offline --no-daemon --stacktrace \
--gradle-user-home debian/gradle-user-home \
check -x test
endif
override_dh_clean:
./gradlew --offline --no-daemon --stacktrace \
......
......@@ -6,7 +6,6 @@
-------------------
- Add to CommonOps invertSPD()
- Unroll symmetric inverse by minor
- Unroll multiplication for square matrices
- Make QrUpdate more friendly and accessible
......@@ -25,6 +24,9 @@ For some future Release
----------------------------------------------------------
- Sparse
- Look into supernodal methods. Apparently they are faster since they exploit dense matrix kernels
- LU
- block
......
TODO Sparse LDL
TODO constructor size check
TODO New implementation of symmetric eigen
Sparse Matrix
- TODO Add fill reduce algorithms
* Update CommonOps.solve() to specify a reasonable general purpose algorithm
......
- Update change.txt
- Update version in build.gradle
- Update version in readme.md (maven central)
- Update example code on website
git diff --name-status TAG_NAME examples
- clean and generate code:
git clean -fd main/ejml-core/src main/ejml-cdense/src main/ejml-fdense/src
./gradlew autogenerate
- Rebuild and run unit tests
- Commit and tag release
- Create zip of source and library directory
* From a fresh checkout
cd ejml;git checkout SNAPSHOT;rm -rf .git;./gradlew autogenerate;cd ..;zip -r ejml.zip ejml
cd ejml;./gradlew createLibraryDirectory;mv libraries ..;cd ..;mv libraries ejml-v0.XX-libs;zip -r ejml-v0.XX-libs.zip ejml-v0.XX-libs
- Create zip of source code
- Create jar file "./gradlew createLibraryDirectory"
- Update documentation on website
VERSION=v0.37
cd ejml;git checkout SNAPSHOT;./gradlew createVersionFile;./gradlew autogenerate;rm -rf .git;cd ..;zip -r ejml-$VERSION-src.zip ejml
cd ejml;./gradlew createLibraryDirectory;mv libraries ..;cd ..;mv libraries ejml-$VERSION-libs;zip -r ejml-$VERSION-libs.zip ejml-$VERSION-libs
- Update javadoc on website
rm -rf build/docs/javadoc;./gradlew alljavadoc;cd build/docs/;zip -r javadoc.zip javadoc
cd ejml;rm -rf build/docs/javadoc;./gradlew alljavadoc;cd build/docs/;zip -r javadoc.zip javadoc
-----
Push to central:
......@@ -23,7 +30,7 @@ Push to central:
* double check the files
* click release button
Not working? Is ~/.gradle/gradle.properties been setup?
Not working? Has ~/.gradle/gradle.properties been setup?
---------
Once a year:
......
dependencies {
compile project(':main:ejml-all')
['core','generator-annprocess'].each { String a->
compile('org.openjdk.jmh:jmh-'+a+':1.19')
}
}
idea {
......
/*
* Copyright (c) 2009-2017, Peter Abeles. All Rights Reserved.
* Copyright (c) 2009-2018, Peter Abeles. All Rights Reserved.
*
* This file is part of Efficient Java Matrix Library (EJML).
*
......@@ -20,15 +20,30 @@ package org.ejml.example;
import org.ejml.data.DMatrixRMaj;
import org.ejml.dense.row.CommonOps_DDRM;
import org.openjdk.jmh.annotations.*;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
* Compares how fast the filters all run relative to each other.
* Uses JMH to compare the speed of different Kalman filter implementations.
*
* <pre>
* Benchmark Mode Cnt Score Error Units
* BenchmarkKalmanPerformance.equations avgt 5 1282.423 ± 30.398 ms/op
* BenchmarkKalmanPerformance.operations avgt 5 1022.874 ± 29.654 ms/op
* BenchmarkKalmanPerformance.simple_matrix avgt 5 1391.853 ± 23.626 ms/op
* </pre>
*
* @author Peter Abeles
*/
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@Warmup(iterations = 5)
@Measurement(iterations = 5)
@State(Scope.Benchmark)
@Fork(value=1)
public class BenchmarkKalmanPerformance {
private static final int NUM_TRIALS = 200;
......@@ -37,9 +52,6 @@ public class BenchmarkKalmanPerformance {
private static int measDOF = 8;
List<KalmanFilter> filters = new ArrayList<KalmanFilter>();
public void run() {
DMatrixRMaj priorX = new DMatrixRMaj(9,1, true, 0.5, -0.2, 0, 0, 0.2, -0.9, 0, 0.2, -0.5);
DMatrixRMaj priorP = CommonOps_DDRM.identity(9);
......@@ -51,24 +63,28 @@ public class BenchmarkKalmanPerformance {
DMatrixRMaj Q = createQ(T,0.1);
DMatrixRMaj H = createH();
for(KalmanFilter f : filters ) {
@Benchmark
public void operations() {
evaluate(new KalmanFilterOperations());
}
@Benchmark
public void equations() {
evaluate(new KalmanFilterEquation());
}
long timeBefore = System.currentTimeMillis();
@Benchmark
public void simple_matrix() {
evaluate(new KalmanFilterSimple());
}
private void evaluate(KalmanFilter f) {
f.configure(F,Q,H);
for( int trial = 0; trial < NUM_TRIALS; trial++ ) {
f.setState(priorX,priorP);
processMeas(f,meas);
}
long timeAfter = System.currentTimeMillis();
System.out.println("Filter = "+f.getClass().getSimpleName());
System.out.println("Elapsed time: "+(timeAfter-timeBefore));
System.gc();
}
}
private List<DMatrixRMaj> createSimulatedMeas(DMatrixRMaj x ) {
......@@ -78,9 +94,6 @@ public class BenchmarkKalmanPerformance {
DMatrixRMaj F = createF(T);
DMatrixRMaj H = createH();
// UtilEjml.print(F);
// UtilEjml.print(H);
DMatrixRMaj x_next = new DMatrixRMaj(x);
DMatrixRMaj z = new DMatrixRMaj(H.numRows,1);
......@@ -157,15 +170,4 @@ public class BenchmarkKalmanPerformance {
return H;
}
public static void main( String args[] ) {
BenchmarkKalmanPerformance benchmark = new BenchmarkKalmanPerformance();
benchmark.filters.add( new KalmanFilterOperations());
benchmark.filters.add( new KalmanFilterSimple());
benchmark.filters.add( new KalmanFilterEquation());
benchmark.run();
}
}
......@@ -77,7 +77,6 @@ public class EquationCustomFunction {
public void process() {
DMatrixRMaj mA = ((VariableMatrix)varA).matrix;
DMatrixRMaj mB = ((VariableMatrix)varB).matrix;
output.matrix.reshape(mA.numCols,mB.numCols);
CommonOps_DDRM.multTransA(mA,mB,output.matrix);
}
......
/*
* Copyright (c) 2009-2017, Peter Abeles. All Rights Reserved.
* Copyright (c) 2009-2018, Peter Abeles. All Rights Reserved.
*
* This file is part of Efficient Java Matrix Library (EJML).
*
......
/*
* Copyright (c) 2009-2017, Peter Abeles. All Rights Reserved.
* Copyright (c) 2009-2018, Peter Abeles. All Rights Reserved.
*
* This file is part of Efficient Java Matrix Library (EJML).
*
......@@ -69,8 +69,8 @@ public class ExampleDecompositionSolve {
DMatrixSparseCSC Ainv = A.createLike();
// Solve for the inverse: P*I = L*U*inv(A)
TriangularSolver_DSCC.solve(L,true,P,tmp,null,null,null);
TriangularSolver_DSCC.solve(U,false,tmp,Ainv,null,null,null);
TriangularSolver_DSCC.solve(L,true,P,tmp,null,null,null,null);
TriangularSolver_DSCC.solve(U,false,tmp,Ainv,null,null,null,null);
// Make sure the inverse has been found. A*inv(A) = identity should be an identity matrix
DMatrixSparseCSC found = A.createLike();
......
/*
* Copyright (c) 2009-2017, Peter Abeles. All Rights Reserved.
* Copyright (c) 2009-2018, Peter Abeles. All Rights Reserved.
*
* This file is part of Efficient Java Matrix Library (EJML).
*
......@@ -19,9 +19,8 @@
package org.ejml.example;
import org.ejml.data.DMatrixRMaj;
import static org.ejml.dense.row.CommonOps_DDRM.*;
import static org.ejml.dense.row.SpecializedOps_DDRM.diffNormF;
import org.ejml.dense.row.CommonOps_DDRM;
import org.ejml.dense.row.NormOps_DDRM;
/**
* <p>
......@@ -47,64 +46,40 @@ import static org.ejml.dense.row.SpecializedOps_DDRM.diffNormF;
* @author Peter Abeles
*/
public class LevenbergMarquardt {
// Convergence criteria
private int maxIterations = 100;
private double ftol = 1e-12;
private double gtol = 1e-12;
// how much the numerical jacobian calculation perturbs the parameters by.
// In better implementation there are better ways to compute this delta. See Numerical Recipes.
private final static double DELTA = 1e-8;
// Dampening. Larger values means it's more like gradient descent
private double initialLambda;
// the function that is optimized
private Function func;
private ResidualFunction function;
// the optimized parameters and associated costs
private DMatrixRMaj param;
private DMatrixRMaj candidateParameters = new DMatrixRMaj(1,1);
private double initialCost;
private double finalCost;
// used by matrix operations
private DMatrixRMaj d;
private DMatrixRMaj H;
private DMatrixRMaj negDelta;
private DMatrixRMaj tempParam;
private DMatrixRMaj A;
private DMatrixRMaj g = new DMatrixRMaj(1,1); // gradient
private DMatrixRMaj H = new DMatrixRMaj(1,1); // Hessian approximation
private DMatrixRMaj Hdiag = new DMatrixRMaj(1,1);
private DMatrixRMaj negativeStep = new DMatrixRMaj(1,1);
// variables used by the numerical jacobian algorithm
private DMatrixRMaj temp0;
private DMatrixRMaj temp1;
private DMatrixRMaj temp0 = new DMatrixRMaj(1,1);
private DMatrixRMaj temp1 = new DMatrixRMaj(1,1);
// used when computing d and H variables
private DMatrixRMaj tempDH;
private DMatrixRMaj residuals = new DMatrixRMaj(1,1);
// Where the numerical Jacobian is stored.
private DMatrixRMaj jacobian;
/**
* Creates a new instance that uses the provided cost function.
*
* @param funcCost Cost function that is being optimized.
*/
public LevenbergMarquardt( Function funcCost )
{
this.initialLambda = 1;
// declare data to some initial small size. It will grow later on as needed.
int maxElements = 1;
int numParam = 1;
this.temp0 = new DMatrixRMaj(maxElements,1);
this.temp1 = new DMatrixRMaj(maxElements,1);
this.tempDH = new DMatrixRMaj(maxElements,1);
this.jacobian = new DMatrixRMaj(numParam,maxElements);
this.func = funcCost;
this.param = new DMatrixRMaj(numParam,1);
this.d = new DMatrixRMaj(numParam,1);
this.H = new DMatrixRMaj(numParam,numParam);
this.negDelta = new DMatrixRMaj(numParam,1);
this.tempParam = new DMatrixRMaj(numParam,1);
this.A = new DMatrixRMaj(numParam,numParam);
}
private DMatrixRMaj jacobian = new DMatrixRMaj(1,1);
public double getInitialCost() {
return initialCost;
......@@ -114,82 +89,100 @@ public class LevenbergMarquardt {
return finalCost;
}
public DMatrixRMaj getParameters() {
return param;
/**
*
* @param initialLambda Initial value of dampening parameter. Try 1 to start
*/
public LevenbergMarquardt(double initialLambda) {
this.initialLambda = initialLambda;
}
/**
* Specifies convergence criteria
*
* @param maxIterations Maximum number of iterations
* @param ftol convergence based on change in function value. try 1e-12
* @param gtol convergence based on residual magnitude. Try 1e-12
*/
public void setConvergence( int maxIterations , double ftol , double gtol ) {
this.maxIterations = maxIterations;
this.ftol = ftol;
this.gtol = gtol;
}
/**
* Finds the best fit parameters.
*
* @param initParam The initial set of parameters for the function.
* @param X The inputs to the function.
* @param Y The "observed" output of the function
* @param function The function being optimized
* @param parameters (Input/Output) initial parameter estimate and storage for optimized parameters
* @return true if it succeeded and false if it did not.
*/
public boolean optimize( DMatrixRMaj initParam ,
DMatrixRMaj X ,
DMatrixRMaj Y )
public boolean optimize(ResidualFunction function, DMatrixRMaj parameters )
{
configure(initParam,X,Y);
configure(function,parameters.getNumElements());
// save the cost of the initial parameters so that it knows if it improves or not
initialCost = cost(param,X,Y);
double previousCost = initialCost = cost(parameters);
// iterate until the difference between the costs is insignificant
// or it iterates too many times
if( !adjustParam(X, Y, initialCost) ) {
finalCost = Double.NaN;
return false;
}
return true;
}
/**
* Iterate until the difference between the costs is insignificant
* or it iterates too many times
*/
private boolean adjustParam(DMatrixRMaj X, DMatrixRMaj Y,
double prevCost) {
// lambda adjusts how big of a step it takes
double lambda = initialLambda;
// the difference between the current and previous cost
double difference = 1000;
for( int iter = 0; iter < 20 || difference < 1e-6 ; iter++ ) {
// if it should recompute the Jacobian in this iteration or not
boolean computeHessian = true;
for( int iter = 0; iter < maxIterations; iter++ ) {
if( computeHessian ) {
// compute some variables based on the gradient
computeDandH(param,X,Y);
computeGradientAndHessian(parameters);
computeHessian = false;
// check for convergence using gradient test
boolean converged = true;
for (int i = 0; i < g.getNumElements(); i++) {
if( Math.abs(g.data[i]) > gtol ) {
converged = false;
break;
}
}
if( converged )
return true;
}
// try various step sizes and see if any of them improve the
// results over what has already been done
boolean foundBetter = false;
for( int i = 0; i < 5; i++ ) {
computeA(A,H,lambda);
// H = H + lambda*I
for (int i = 0; i < H.numRows; i++) {
H.set(i,i, Hdiag.get(i) + lambda);
}
if( !solve(A,d,negDelta) ) {
// In robust implementations failure to solve is handled much better
if( !CommonOps_DDRM.solve(H, g, negativeStep) ) {
return false;
}
// compute the candidate parameters
subtract(param, negDelta, tempParam);
CommonOps_DDRM.subtract(parameters, negativeStep, candidateParameters);
double cost = cost(tempParam,X,Y);
if( cost < prevCost ) {
double cost = cost(candidateParameters);
if( cost <= previousCost ) {
// the candidate parameters produced better results so use it
foundBetter = true;
param.set(tempParam);
difference = prevCost - cost;
prevCost = cost;
computeHessian = true;
parameters.set(candidateParameters);
// check for convergence
// ftol <= (cost(k) - cost(k+1))/cost(k)
boolean converged = ftol*previousCost >= previousCost-cost;
previousCost = cost;
lambda /= 10.0;
if( converged ) {
return true;
}
} else {
lambda *= 10.0;
}
}
// it reached a point where it can't improve so exit
if( !foundBetter )
break;
}
finalCost = prevCost;
finalCost = previousCost;
return true;
}
......@@ -197,135 +190,105 @@ public class LevenbergMarquardt {
* Performs sanity checks on the input data and reshapes internal matrices. By reshaping
* a matrix it will only declare new memory when needed.
*/
protected void configure(DMatrixRMaj initParam , DMatrixRMaj X , DMatrixRMaj Y )
protected void configure(ResidualFunction function , int numParam )
{
if( Y.getNumRows() != X.getNumRows() ) {
throw new IllegalArgumentException("Different vector lengths");
} else if( Y.getNumCols() != 1 || X.getNumCols() != 1 ) {
throw new IllegalArgumentException("Inputs must be a column vector");
}
this.function = function;
int numFunctions = function.numFunctions();
int numParam = initParam.getNumElements();
int numPoints = Y.getNumRows();
if( param.getNumElements() != initParam.getNumElements() ) {
// reshaping a matrix means that new memory is only declared when needed
this.param.reshape(numParam,1, false);
this.d.reshape(numParam,1, false);
this.H.reshape(numParam,numParam, false);
this.negDelta.reshape(numParam,1, false);
this.tempParam.reshape(numParam,1, false);
this.A.reshape(numParam,numParam, false);
}
param.set(initParam);
// reshaping a matrix means that new memory is only declared when needed
temp0.reshape(numPoints,1, false);
temp1.reshape(numPoints,1, false);
tempDH.reshape(numPoints,1, false);
jacobian.reshape(numParam,numPoints, false);
candidateParameters.reshape(numParam,1);
g.reshape(numParam,1);
H.reshape(numParam,numParam);
negativeStep.reshape(numParam,1);
// Normally these variables are thought of as row vectors, but it works out easier if they are column
temp0.reshape(numFunctions,1);
temp1.reshape(numFunctions,1);
residuals.reshape(numFunctions,1);
jacobian.reshape(numFunctions,numParam);
}
/**
* Computes the d and H parameters. Where d is the average error gradient and
* H is an approximation of the hessian.
* Computes the d and H parameters.
*
* d = J'*(f(x)-y) <--- that's also the gradient
* H = J'*J
*/
private void computeDandH(DMatrixRMaj param , DMatrixRMaj x , DMatrixRMaj y )
private void computeGradientAndHessian(DMatrixRMaj param )
{
func.compute(param,x, tempDH);
subtractEquals(tempDH, y);
// residuals = f(x) - y
function.compute(param, residuals);
computeNumericalJacobian(param,x,jacobian);
computeNumericalJacobian(param,jacobian);
int numParam = param.getNumElements();
int length = x.getNumElements();
// d = average{ (f(x_i;p) - y_i) * jacobian(:,i) }
for( int i = 0; i < numParam; i++ ) {
double total = 0;
for( int j = 0; j < length; j++ ) {
total += tempDH.get(j,0)*jacobian.get(i,j);
}
d.set(i,0,total/length);
}
CommonOps_DDRM.multTransA(jacobian, residuals, g);
CommonOps_DDRM.multTransA(jacobian, jacobian, H);
// compute the approximation of the hessian
multTransB(jacobian,jacobian,H);
scale(1.0/length,H);
CommonOps_DDRM.extractDiag(H,Hdiag);
}
/**
* A = H + lambda*I <br>
* <br>
* where I is an identity matrix.
*/
private void computeA(DMatrixRMaj A , DMatrixRMaj H , double lambda )
{
final int numParam = param.getNumElements();
A.set(H);
for( int i = 0; i < numParam; i++ ) {
A.set(i,i, A.get(i,i) + lambda);
}
}
/**
* Computes the "cost" for the parameters given.
*
* cost = (1/N) Sum (f(x;p) - y)^2
* cost = (1/N) Sum (f(x) - y)^2
*/
private double cost(DMatrixRMaj param , DMatrixRMaj X , DMatrixRMaj Y)
private double cost(DMatrixRMaj param )
{
func.compute(param,X, temp0);
function.compute(param, residuals);
double error = diffNormF(temp0,Y);
double error = NormOps_DDRM.normF(residuals);
return error*error / (double)X.numRows;
return error*error / (double)residuals.numRows;
}
/**
* Computes a simple numerical Jacobian.
*
* @param param The set of parameters that the Jacobian is to be computed at.
* @param pt The point around which the Jacobian is to be computed.
* @param deriv Where the jacobian will be stored
* @param param (input) The set of parameters that the Jacobian is to be computed at.
* @param jacobian (output) Where the jacobian will be stored
*/
protected void computeNumericalJacobian( DMatrixRMaj param ,
DMatrixRMaj pt ,
DMatrixRMaj deriv )
DMatrixRMaj jacobian )
{
double invDelta = 1.0/DELTA;
func.compute(param,pt, temp0);
function.compute(param, temp0);
// compute the jacobian by perturbing the parameters slightly
// then seeing how it affects the results.
for( int i = 0; i < param.numRows; i++ ) {
for( int i = 0; i < param.getNumElements(); i++ ) {
param.data[i] += DELTA;
func.compute(param,pt, temp1);
function.compute(param, temp1);
// compute the difference between the two parameters and divide by the delta
add(invDelta,temp1,-invDelta,temp0,temp1);
// temp1 = (temp1 - temp0)/delta
CommonOps_DDRM.add(invDelta,temp1,-invDelta,temp0,temp1);
// copy the results into the jacobian matrix
System.arraycopy(temp1.data,0,deriv.data,i*pt.numRows,pt.numRows);
// J(i,:) = temp1
CommonOps_DDRM.insert(temp1,jacobian,0,i);
param.data[i] -= DELTA;
}
}
/**
* The function that is being optimized.
* The function that is being optimized. Returns the residual. f(x) - y
*/
public interface Function {
public interface ResidualFunction {
/**
* Computes the output for each value in matrix x given the set of parameters.
* Computes the residual vector given the set of input parameters
* Function which goes from N inputs to M outputs
*
* @param param The parameter for the function.
* @param x the input points.
* @param y the resulting output.
* @param param (Input) N by 1 parameter vector
* @param residual (Output) M by 1 output vector to store the residual = f(x)-y
*/
void compute(DMatrixRMaj param , DMatrixRMaj residual );
/**
* Number of functions in output
* @return function count
*/
public void compute(DMatrixRMaj param , DMatrixRMaj x , DMatrixRMaj y );
int numFunctions();
}
}
/*
* Copyright (c) 2009-2017, Peter Abeles. All Rights Reserved.
* Copyright (c) 2009-2018, Peter Abeles. All Rights Reserved.
*
* This file is part of Efficient Java Matrix Library (EJML).
*
......@@ -60,10 +60,9 @@ public class QRExampleOperations {
for( int i = 0; i < N; i++ ) {
// reshape temporary variables
A_small.reshape(QR.numRows-i,QR.numCols-i,false);
A_mod.reshape(A_small.numRows,A_small.numCols,false);
v.reshape(A_small.numRows,1,false);
Q_k.reshape(v.getNumElements(),v.getNumElements(),false);
A_small.reshape(QR.numRows-i,QR.numCols-i);
v.reshape(A_small.numRows,1);
Q_k.reshape(v.getNumElements(),v.getNumElements());
// use extract matrix to get the column that is to be zeroed
CommonOps_DDRM.extract(QR,i,QR.numRows,i,i+1,v,0,0);
......@@ -113,7 +112,7 @@ public class QRExampleOperations {
DMatrixRMaj Q_k = new DMatrixRMaj(QR.numRows,QR.numRows);
DMatrixRMaj u = new DMatrixRMaj(QR.numRows,1);
DMatrixRMaj temp = new DMatrixRMaj(QR.numRows,QR.numRows);
DMatrixRMaj temp = new DMatrixRMaj(1,1);
int N = Math.min(QR.numCols,QR.numRows);
......
/*
* Copyright (c) 2009-2017, Peter Abeles. All Rights Reserved.
* Copyright (c) 2009-2018, Peter Abeles. All Rights Reserved.
*
* This file is part of Efficient Java Matrix Library (EJML).
*
......@@ -41,22 +41,22 @@ public class TestLevenbergMarquardt {
*/
@Test
public void testNumericalJacobian() {
JacobianTestFunction func = new JacobianTestFunction();
DMatrixRMaj param = new DMatrixRMaj(3,1, true, 2, -1, 4);
LevenbergMarquardt alg = new LevenbergMarquardt(func);
LevenbergMarquardt alg = new LevenbergMarquardt(1);
DMatrixRMaj X = RandomMatrices_DDRM.rectangle(NUM_PTS,1,rand);
DMatrixRMaj numJacobian = new DMatrixRMaj(3,NUM_PTS);
DMatrixRMaj analyticalJacobian = new DMatrixRMaj(3,NUM_PTS);
JacobianTestFunction func = new JacobianTestFunction(X,new DMatrixRMaj(NUM_PTS,1));
DMatrixRMaj numericalJacobian = new DMatrixRMaj(NUM_PTS,3);
DMatrixRMaj analyticalJacobian = new DMatrixRMaj(NUM_PTS,3);
alg.configure(param,X,new DMatrixRMaj(NUM_PTS,1));
alg.computeNumericalJacobian(param,X,numJacobian);
alg.configure(func,param.getNumElements());
alg.computeNumericalJacobian(param,numericalJacobian);
func.deriv(X,analyticalJacobian);
EjmlUnitTests.assertEquals(analyticalJacobian,numJacobian,1e-6);
EjmlUnitTests.assertEquals(analyticalJacobian,numericalJacobian,1e-6);
}
/**
......@@ -76,31 +76,39 @@ public class TestLevenbergMarquardt {
* @param numPoints How many sample points there are.
*/
public void runTrivial( int numPoints ) {
JacobianTestFunction func = new JacobianTestFunction();
DMatrixRMaj paramInit = new DMatrixRMaj(3,1);
DMatrixRMaj param = new DMatrixRMaj(3,1, true, 2, -1, 4);
DMatrixRMaj found = new DMatrixRMaj(3,1);
DMatrixRMaj expected = new DMatrixRMaj(3,1, true, 10, -4, 105.2);
LevenbergMarquardt alg = new LevenbergMarquardt(func);
LevenbergMarquardt alg = new LevenbergMarquardt(1e-4);
DMatrixRMaj X = RandomMatrices_DDRM.rectangle(numPoints,1,rand);
DMatrixRMaj Y = new DMatrixRMaj(numPoints,1);
func.compute(param,X,Y);
// compute the observed output given the true praameters
new JacobianTestFunction(X,Y).function(expected,Y);
JacobianTestFunction func = new JacobianTestFunction(X,Y);
alg.optimize(paramInit,X,Y);
DMatrixRMaj foundParam = alg.getParameters();
alg.optimize(func,found);
assertEquals(0,alg.getFinalCost(), UtilEjml.TEST_F64);
EjmlUnitTests.assertEquals(param,foundParam,1e-6);
for (int i = 0; i < expected.getNumElements(); i++) {
assertEquals(expected.get(i),found.get(i), Math.abs(expected.get(i))*1e-4);
}
}
/**
* A very simple function to test how well the numerical jacobian is computed.
*/
private static class JacobianTestFunction implements LevenbergMarquardt.Function
private static class JacobianTestFunction implements LevenbergMarquardt.ResidualFunction
{
DMatrixRMaj x;
DMatrixRMaj y;
public JacobianTestFunction(DMatrixRMaj x, DMatrixRMaj y) {
this.x = x;
this.y = y;
}
public void deriv(DMatrixRMaj x, DMatrixRMaj deriv) {
double dataX[] = x.data;
......@@ -113,29 +121,45 @@ public class TestLevenbergMarquardt {
double dB = v;
double dC = v*v;
deriv.set(0,j,dA);
deriv.set(1,j,dB);
deriv.set(2,j,dC);
deriv.set(j,0,dA);
deriv.set(j,1,dB);
deriv.set(j,2,dC);
}
}
@Override
public void compute(DMatrixRMaj param, DMatrixRMaj x, DMatrixRMaj y) {
public void function( DMatrixRMaj param , DMatrixRMaj y ) {
double a = param.data[0];
double b = param.data[1];
double c = param.data[2];
double dataX[] = x.data;
double dataY[] = y.data;
int length = x.numRows;
for( int i = 0; i < length; i++ ) {
double v = x.data[i];
y.data[i] = a + b*v + c*v*v;
}
}
@Override
public void compute(DMatrixRMaj param , DMatrixRMaj residual ) {
double a = param.data[0];
double b = param.data[1];
double c = param.data[2];
int length = x.numRows;
for( int i = 0; i < length; i++ ) {
double v = dataX[i];
double v = x.data[i];
dataY[i] = a + b*v + c*v*v;
residual.data[i] = a + b*v + c*v*v - y.data[i];
}
}
@Override
public int numFunctions() {
return y.getNumElements();
}
}
}