### 39.14 Examples

The following example program fits a weighted exponential model with background to experimental data, Y = A \exp(-\lambda t) + b. The first part of the program sets up the functions expb_f and expb_df to calculate the model and its Jacobian. The appropriate fitting function is given by,

f_i = (A \exp(-\lambda t_i) + b) - y_i


where we have chosen t_i = i. The Jacobian matrix J is the derivative of these functions with respect to the three parameters (A, \lambda, b). It is given by,

J_{ij} = \partial f_i / \partial x_j


where x_0 = A, x_1 = \lambda and x_2 = b. The weights are given by w_i = 1/\sigma_i^2.

/* expfit.c -- model functions for exponential + background */

/* Observed data handed to the fit callbacks through the void *params
   pointer of gsl_multifit_function_fdf. */
struct data {
size_t n;       /* number of data points */
double * y;     /* observed values y[0..n-1], measured at t_i = i */
};

/* Residual vector for the exponential-plus-background model.
   Stores f_i = (A * exp(-lambda * t_i) + b) - y_i with t_i = i.
   Weights are applied by the solver (wset), not here. */
int
expb_f (const gsl_vector * x, void *data,
        gsl_vector * f)
{
  const struct data *d = (const struct data *) data;
  const double A      = gsl_vector_get (x, 0);
  const double lambda = gsl_vector_get (x, 1);
  const double b      = gsl_vector_get (x, 2);
  size_t i;

  for (i = 0; i < d->n; i++)
    {
      /* Model value Yi = A * exp(-lambda * i) + b */
      const double t  = (double) i;
      const double Yi = A * exp (-lambda * t) + b;

      gsl_vector_set (f, i, Yi - d->y[i]);
    }

  return GSL_SUCCESS;
}

/* Analytic Jacobian J(i,j) = df_i / dx_j for the model
   Yi = A * exp(-lambda * t) + b, with parameters (x_0, x_1, x_2)
   = (A, lambda, b) and t_i = i. */
int
expb_df (const gsl_vector * x, void *data,
         gsl_matrix * J)
{
  const struct data *d = (const struct data *) data;
  const double A      = gsl_vector_get (x, 0);
  const double lambda = gsl_vector_get (x, 1);
  size_t i;

  for (i = 0; i < d->n; i++)
    {
      const double t = (double) i;
      const double e = exp (-lambda * t);

      gsl_matrix_set (J, i, 0, e);           /* df_i / dA      */
      gsl_matrix_set (J, i, 1, -t * A * e);  /* df_i / dlambda */
      gsl_matrix_set (J, i, 2, 1.0);         /* df_i / db      */
    }

  return GSL_SUCCESS;
}


The main part of the program sets up a Levenberg-Marquardt solver and some simulated random data. The data uses the known parameters (5.0,0.1,1.0) combined with Gaussian noise (standard deviation = 0.1) over a range of 40 timesteps. The initial guess for the parameters is chosen as (1.0, 0.0, 0.0).

#include <stdlib.h>
#include <stdio.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_multifit_nlin.h>

#include "expfit.c"

/* number of data points to fit */
#define N 40

/* Driver: generate noisy data from known parameters (A,lambda,b) =
   (5.0, 0.1, 1.0), fit the model with the Levenberg-Marquardt (lmsder)
   solver, and report the fitted parameters with scaled errors. */
int
main (void)
{
const gsl_multifit_fdfsolver_type *T = gsl_multifit_fdfsolver_lmsder;
gsl_multifit_fdfsolver *s;
int status, info;
size_t i;
const size_t n = N;   /* number of observations */
const size_t p = 3;   /* number of fit parameters (A, lambda, b) */

gsl_matrix *J = gsl_matrix_alloc(n, p);        /* Jacobian at the solution */
gsl_matrix *covar = gsl_matrix_alloc (p, p);   /* parameter covariance */
double y[N], weights[N];
struct data d = { n, y };
gsl_multifit_function_fdf f;
/* Starting point for the search: (A, lambda, b) = (1.0, 0.0, 0.0). */
double x_init[3] = { 1.0, 0.0, 0.0 };
gsl_vector_view x = gsl_vector_view_array (x_init, p);
gsl_vector_view w = gsl_vector_view_array(weights, n);
const gsl_rng_type * type;
gsl_rng * r;
gsl_vector *res_f;
double chi, chi0;

/* Convergence tolerances for the driver: step-size, gradient,
   and residual-reduction tests.  ftol = 0 disables the last. */
const double xtol = 1e-8;
const double gtol = 1e-8;
const double ftol = 0.0;

gsl_rng_env_setup();

type = gsl_rng_default;
r = gsl_rng_alloc (type);

f.f = &expb_f;
f.df = &expb_df;   /* set to NULL for finite-difference Jacobian */
f.n = n;
f.p = p;
f.params = &d;

/* This is the data to be fitted */

for (i = 0; i < n; i++)
{
double t = i;
/* Exact model with the known parameters, plus Gaussian noise whose
   standard deviation is 10% of the signal. */
double yi = 1.0 + 5 * exp (-0.1 * t);
double si = 0.1 * yi;
double dy = gsl_ran_gaussian(r, si);

weights[i] = 1.0 / (si * si);   /* w_i = 1 / sigma_i^2 */
y[i] = yi + dy;
printf ("data: %zu %g %g\n", i, y[i], si);
};

s = gsl_multifit_fdfsolver_alloc (T, n, p);

/* initialize solver with starting point and weights */
gsl_multifit_fdfsolver_wset (s, &f, &x.vector, &w.vector);

/* compute initial residual norm */
res_f = gsl_multifit_fdfsolver_residual(s);
chi0 = gsl_blas_dnrm2(res_f);

/* solve the system with a maximum of 20 iterations */
status = gsl_multifit_fdfsolver_driver(s, 20, xtol, gtol, ftol, &info);

/* Jacobian at the solution, used to form the covariance of the fit. */
gsl_multifit_fdfsolver_jac(s, J);
gsl_multifit_covar (J, 0.0, covar);

/* compute final residual norm (res_f aliases the solver's residual
   vector, which the driver has updated in place) */
chi = gsl_blas_dnrm2(res_f);

/* FIT(i): i-th fitted parameter; ERR(i): its standard error from the
   diagonal of the covariance matrix. */
#define FIT(i) gsl_vector_get(s->x, i)
#define ERR(i) sqrt(gsl_matrix_get(covar,i,i))

fprintf(stderr, "summary from method '%s'\n",
gsl_multifit_fdfsolver_name(s));
fprintf(stderr, "number of iterations: %zu\n",
gsl_multifit_fdfsolver_niter(s));
fprintf(stderr, "function evaluations: %zu\n", f.nevalf);
fprintf(stderr, "Jacobian evaluations: %zu\n", f.nevaldf);
/* NOTE(review): assumes the driver reports only info == 1 (small step)
   or info == 2 (small gradient); with ftol = 0 the residual test cannot
   fire, so the two-way message below is adequate here. */
fprintf(stderr, "reason for stopping: %s\n",
(info == 1) ? "small step size" : "small gradient");
fprintf(stderr, "initial |f(x)| = %g\n", chi0);
fprintf(stderr, "final   |f(x)| = %g\n", chi);

{
double dof = n - p;
/* Scale factor sqrt(chi^2/dof), floored at 1: inflates the errors
   when the fit is poor, leaves them unchanged when it is good. */
double c = GSL_MAX_DBL(1, chi / sqrt(dof));

fprintf(stderr, "chisq/dof = %g\n",  pow(chi, 2.0) / dof);

fprintf (stderr, "A      = %.5f +/- %.5f\n", FIT(0), c*ERR(0));
fprintf (stderr, "lambda = %.5f +/- %.5f\n", FIT(1), c*ERR(1));
fprintf (stderr, "b      = %.5f +/- %.5f\n", FIT(2), c*ERR(2));
}

fprintf (stderr, "status = %s\n", gsl_strerror (status));

gsl_multifit_fdfsolver_free (s);
gsl_matrix_free (covar);
gsl_matrix_free (J);
gsl_rng_free (r);
return 0;
}


The iteration terminates when the relative change in x is smaller than 10^{-8}, or when the magnitude of the gradient falls below 10^{-8}. Here are the results of running the program:

summary from method 'lmsder'
number of iterations: 8
function evaluations: 11
Jacobian evaluations: 9
reason for stopping: small step size
initial |f(x)| = 31.1919
final   |f(x)| = 5.45418
chisq/dof = 0.804002
A      = 5.17379 +/- 0.27938
lambda = 0.11104 +/- 0.00817
b      = 1.05283 +/- 0.05365
status = success


The approximate values of the parameters are found correctly, and the chi-squared value indicates a good fit (the chi-squared per degree of freedom is approximately 1). In this case the errors on the parameters can be estimated from the square roots of the diagonal elements of the covariance matrix.

If the chi-squared value shows a poor fit (i.e. chi^2/dof >> 1) then the error estimates obtained from the covariance matrix will be too small. In the example program the error estimates are multiplied by \sqrt{\chi^2/dof} in this case, a common way of increasing the errors for a poor fit. Note that a poor fit will result from the use of an inappropriate model, and the scaled error estimates may then be outside the range of validity for Gaussian errors.