Wednesday, 1 July 2015

Interior Point Method: Primal Affine scaling algorithm in Python

On this post we implement The Primal Affine scaling algorithm, one of the Interior Point Methods. Let us start by the Problem Formulation:

\begin{equation*} \begin{aligned} & \underset{x}{\text{min}} & & c^T x \\ & \text{subject to} & & Ax = b \quad\quad\quad (P) \\ &&& x \ge 0 \\ \end{aligned} \end{equation*}

where $x\in \mathbb{R}^n$ is the vector of variables, $c \in \mathbb{R}^n$ is the vector of linear costs, $A \in \mathbb{R}^{m\times n}$ is the constraint matrix and $b \in \mathbb{R}^m$ is the right-hand-side vector. In general, two main hypotheses are considered: matrix $A$ has full row rank and $(P)$ is nondegenerate. However, as shown later, some techniques are available to deal with problems where $A$ is not full rank.

This algorithm includes a Big-M formulation and therefore the Big-M value must be chosen in advance, several approaches are possible:
A first approach is to start with a large value of $M$, for instance $10^{30}$. However, if $M$ is too large it can cause numerical problems, since it is known that the presence of coefficients larger than the rest of the variables by many orders of magnitude has potentially undesirable numerical effects. Therefore, ideally we should search for a $\textit{"sufficiently large"}$ $M$. A standard decision is to choose a value of $M$ that is at least $n$ times larger than the largest value of any of the variables in the specific linear programming problem.
For this implementation, the second approach with $n=100$ was chosen. Nevertheless, the first approach was also tested, and it produced bad results on several problems, with numerical instability when approaching the optimal value. Hence,

$$ M = n \cdot \max_{1 \le i \le n} |c_i| $$

where $c$ is the cost vector and $n$ the number of variables. In any case, the presented implementation provides the option of supplying a custom $M$ in case the user previously knows an upper or lower bound on some quantity (e.g. a capacity limit).

We focus now on the algorithm implementation. The primal affine scaling algorithm has been implemented in a class called PrimalAffine() with several arguments. Furthermore, a function called _pre_check checks the input of the class: whether the matrix and vectors are instances of ndarray. A relevant function is _check: this function validates that the vectors $b$ and $c$ are column vectors and checks that the matrix $A$ has full rank; otherwise an exception is thrown and the problem is not solved. This check can be removed if the Cholesky decomposition for semidefinite matrices is implemented (we implement it later).

The following code is the main routine in the algorithm, where two functions $({\tt \_dual\_gap}$ and ${\tt \_compute\_y})$ are included. Basically, these two separated functions aim to improve readability.
import numpy as np
import scipy.io
from scipy.sparse import csr_matrix
import scipy
import pandas as pd
import time
import matplotlib.pyplot as plt
import math
import numba

class PrimalAffine(object):
    """Primal affine-scaling interior-point method for linear programs.

    Solves  min c'x  s.t.  Ax = b, x >= 0  using a big-M artificial-variable
    augmentation that makes x = e a strictly interior starting point.
    Iteration history is kept in _iter, _alphas, _f_values and _gaps.
    """

    def __init__(self, c, a, b, opt_gap=1.0e-6, eps=1.0e-12, rho=0.95, m=None):
        """
        :param c: cost vector, (n,) or (n, 1) ndarray
        :param a: constraint matrix, (m, n) ndarray
        :param b: right-hand side, (m,) or (m, 1) ndarray
        :param opt_gap: relative duality-gap stopping tolerance
        :param eps: threshold used in the ratio test on the step direction
        :param rho: step damping factor in (0, 1)
        :param m: big-M value; if None, 100 * max|c_i| is used
        """
        self.c = c
        self.a = a
        self.b = b
        self.opt_gap = opt_gap
        self.eps = eps
        self.rho = rho
        self._alphas = []
        self._f_values = []
        self._gaps = []
        self._iter = []
        self._pre_check()

        if m is None:
            # Heuristic: M at least 100 times the largest cost coefficient.
            self.m = 100 * np.max(abs(self.c))
        else:
            self.m = m
        print("Max value = {}".format(np.max(abs(self.c))))
        print("Big-M = {}".format(self.m))

        self._optimize_primal()

    def _pre_check(self):
        """Reject inputs that are not numpy arrays.

        BUG FIX: the original combined the isinstance() tests with `or`,
        so the exception fired only when *all* inputs were non-arrays.
        """
        if not (isinstance(self.a, np.ndarray) and isinstance(self.b, np.ndarray)
                and isinstance(self.c, np.ndarray)):
            raise Exception('Inputs must be a numpy arrays')

    def _check(self, ra, ca, rb, rc):
        """Validate rank of A and the dimensions of b and c.

        Transposes b / c when they arrive as row vectors of the right size;
        raises otherwise.
        """
        if np.linalg.matrix_rank(self.a) != ra:
            raise Exception('Matrix A is not full rank')

        if rb != ra:
            if np.size(self.b) == ra:
                self.b = np.transpose(self.b)
            else:
                raise AttributeError('dimension of vector b is not correct')

        if rc != ca:
            if np.size(self.c) == ca:
                self.c = np.transpose(self.c)
            else:
                # BUG FIX: the original message wrongly referred to vector b.
                raise AttributeError('dimension of vector c is not correct')

    def _optimize_primal(self):
        """Run affine-scaling iterations on the big-M augmented problem."""
        r_a, c_a = np.shape(self.a)
        r_b = np.shape(self.b)[0]
        r_c = np.shape(self.c)[0]

        self._check(r_a, c_a, r_b, r_c)

        # Augment A with one artificial column so that x = e is feasible:
        # [A | b - A e] e = b.  The artificial variable gets cost M.
        x = np.ones((c_a, 1))
        a = np.c_[self.a, self.b - np.dot(self.a, x)]
        x = np.ones((c_a + 1, 1))
        c = np.vstack([self.c, self.m])
        d = np.eye(c_a + 1)
        y = self._compute_y(a, d, c)

        i = 0
        # Compute the gap once per pass (the original re-evaluated it three
        # times per iteration: loop test, history append, next loop test).
        gap = self._dual_gap(c, x, self.b, y)
        while gap > self.opt_gap:
            z = c - np.dot(np.transpose(a), y)   # reduced costs
            dx = np.dot(-d, z)                   # scaled descent direction
            if np.all(dx >= 0):
                print('Unbounded problem')
                break
            # Ratio test: largest step keeping x > 0, damped by rho.
            # NOTE(review): `dx < eps` also admits tiny *positive* components;
            # the conventional test is `dx < -eps` — confirm before changing.
            neg = dx < self.eps
            alpha = self.rho * np.min(-x[neg] / dx[neg].ravel())
            x += alpha * dx
            d = np.diag(x.ravel()) ** 2          # rescale to current iterate
            y = self._compute_y(a, d, c)
            i += 1

            gap = self._dual_gap(c, x, self.b, y)
            f_obj = np.dot(np.transpose(c), x)
            self._iter.append(i)                 # BUG FIX: was never filled
            self._f_values.append(float(f_obj.ravel()[0]))
            self._alphas.append(float(alpha))
            self._gaps.append(float(gap.ravel()[0]))

        if x[c_a] > 1e-5:
            # The artificial variable did not vanish: since its cost is M >> 0,
            # a feasible problem would have driven it to ~0.
            print('Infeasible problem. x[n+1] = {}'.format(x[c_a]))
        else:
            print('\n', x[0:c_a])

    def _compute_y(self, aa, dd, cc):
        """Solve the normal equations (A D A') y = A D c for the dual estimate."""
        lhs = np.dot(np.dot(aa, dd), np.transpose(aa))
        rhs = np.dot(np.dot(aa, dd), cc)
        L = self._chol_semidefinite(lhs)
        return self._chol_solve(L, rhs)

    @staticmethod
    def _dual_gap(cc, xx, bb, yy):
        """Relative duality gap |c'x - b'y| / (1 + |c'x|)."""
        term1 = np.dot(np.transpose(cc), xx)
        term2 = term1 - np.dot(np.transpose(bb), yy)
        term3 = 1.0 + abs(term1)
        return abs(term2) / term3

    @staticmethod
    def _chol_semidefinite(A):
        """Cholesky-Banachiewicz factorization tolerant of semidefinite input.

        A non-positive pivot is replaced by a huge value (1e10) acting as a
        surrogate for infinity, so the factorization can proceed.

        BUG FIX: the pivot is now tested *before* math.sqrt; the original
        took the square root first, so a negative pivot raised ValueError
        instead of triggering the fix-up.

        NOTE(review): the original decorated this with @numba.autojit, an
        API removed from numba; wrap with numba.njit externally if JIT
        speed is needed.
        """
        n = len(A)
        L = np.zeros((n, n))

        for i in range(n):
            for k in range(i + 1):
                tmp_sum = 0
                for j in range(k):
                    tmp_sum += L[i, j] * L[k, j]

                if i == k:  # diagonal
                    pivot = A[i, i] - tmp_sum
                    L[i, k] = math.sqrt(pivot) if pivot > 0 else 10 ** 10
                else:
                    L[i, k] = (1.0 / L[k, k] * (A[i, k] - tmp_sum))
        return L

    @staticmethod
    def _chol_solve(L, b):
        """Solve (L L') y = b with two triangular solves.

        BUG FIX (stability/perf): the original formed the explicit inverse
        of L; solving the two systems is cheaper and better conditioned.
        """
        z = np.linalg.solve(L, b)
        return np.linalg.solve(np.transpose(L), z)

    def results(self):
        """Print the per-iteration history as a DataFrame."""
        d = {'gap': self._gaps, 'fobj': self._f_values, 'alpha': self._alphas}
        df = pd.DataFrame(d)
        print(df)

A few comments about the above code:
• An important point is to check whether the problem is infeasible or not: if $x_{n+1}^* > \epsilon_{inf}$ then problem $(P)$ is declared infeasible, because $M \gg 0$ forces $x_{n+1}^* \approx 0$ in any optimal solution of a feasible problem. We set $\epsilon_{inf}$ to $1\mathrm{e}{-5}$.
• We included the function to compute the Cholesky decomposition of a positive semidefinite matrix. Unfortunately, this implementation is not very efficient, even using good libraries like ${\tt Numpy}$, so in order to speed up the computation we use the library ${\tt Numba}$ so that the code can be just-in-time compiled by the LLVM compiler. The ${\tt @numba.autojit}$ decorator tells ${\tt Numba}$ to compile the function. On average, by using a JIT compiler, it is possible to achieve a $\times 3$ speed-up. This function is based on the Cholesky-Banachiewicz algorithm with a modification, so that if a value of the diagonal is 0 (zero pivot), it is replaced by a large value $L_{ik} = 10^{10}$ that in a sense surrogates for $\infty$.
Furthermore, we can test our algorithm solving problems of the Netlib collection. The Netlib problems in format ${\tt .mat}$ have been downloaded from NETLIB problems. As the algorithm was implemented in Python a function has been included to read .mat files.
def mat_parse(file):
    """Load a NETLIB LP stored as a MATLAB ``Problem`` struct.

    Returns the dense constraint matrix A, the right-hand side b and the
    cost vector c extracted from the ``aux`` field.
    """
    problem = scipy.io.loadmat(file)['Problem']
    record = problem[0, 0]
    matrix_a = csr_matrix(record['A']).todense()
    rhs = record['b']
    cost = record['aux'][0][0][0]
    return matrix_a, rhs, cost

if __name__ == '__main__':
    # Problem source: http://www.cise.ufl.edu/research/sparse/mat/LPnetlib/
    A, b, c = mat_parse('lp_sctap3.mat')

    # Show the dimensions of the parsed problem before solving.
    for arr in (A, b, c):
        print(np.shape(arr))

    start = time.perf_counter()
    primal = PrimalAffine(c, A, b, opt_gap=1e-6)
    elapsed = time.perf_counter() - start
    print(elapsed)
    primal.results()

Finally, this is a small example that shows the behaviour of this algorithm:
$$ A = \begin{pmatrix} 2 & 1 & -1 & 0 \\ 3 & 4 & 0 & 1 \\ \end{pmatrix} \quad b = \begin{pmatrix} 2 \\ 12 \end{pmatrix} \quad c = \begin{pmatrix} 3 \\ 1 \\ 0 \\ 0 \end{pmatrix} $$
And the following table and plot summarize the steps towards the optimal solution of the problem:

Iter alpha fobj gap x1 x2 x3 x4
0 0.0086 19.52871 0.76302 0.85813 1.95431 1.67057 1.40837
1 0.0636 5.15115 0.35592 0.78414 2.04874 1.61701 1.44264
2 0.7896 2.96509 0.08685 0.03921 2.54061 0.61903 1.71583
3 3.1027 2.27926 0.06842 0.03034 2.17289 0.23358 3.2172
4 4.4464 2.05176 0.01697 0.02578 1.96012 0.01168 4.082
5 36.9018 2.01468 0.00486 0.00129 2.00406 0.00664 3.97979
6 140.6546 2.00183 0.00061 0.00106 1.99833 0.00044 4.00352
7 900.1966 2.00055 0.00018 0.00005 2.00016 0.00027 3.9992
8 3579.9455 2.00009 0.00003 0.00004 1.99993 0.00001 4.00016
9 22196.8995 2.00002 0.00001 0 2.00001 0.00001 3.99997
10 101489.1179 2 0 0 2 0 4.00001
11 425757.3225 2 0 0 2 0 4

 Figure 1: Steps towards the optimal solution. Optimal solution = 2. x1 = 0 and x2 = 2. Total number of iterations = 12.

Friday, 24 April 2015

AMPL reporting solver statistics

This a simple script that I have been working on, which provides some Solver statistics to AMPL. At this moment, the script can handle the MINOS solver output and generate cool charts to summarize the process till the solver reaches a solution.
As usual, I have written a python script that is called by the shell command in AMPL.

option solver minos;
option minos_options "Superbasics_limit=100 summary_file=1 1=summary.txt summary_level = 1 summary_frequency=1";

printf "MINOS_report: generating AMPL results...\n";
shell 'C:\Python27\python.exe "MINOS_report.py" summary.txt';

As said, this script was written to work with MINOS, so after choosing MINOS as the solver, we must tell MINOS the output file which will contain the solver's statistics. The Python script just parses the output file, cleans it and generates the chart shown below. This code is just an example and does not aim to be very fast or optimal in terms of computational efficiency, since it is just a first test.
import pandas as pd
import matplotlib.pyplot as plt
import sys
import os

class Generate(object):
    """Parse a MINOS summary file and plot per-iteration solver statistics."""

    def __init__(self, i_file):
        """
        :param i_file: path of the MINOS summary file to parse
        """
        self.f_input = i_file
        self.f_output = "results_clean.csv"
        self.data = []    # DataFrame of iteration columns after get_data()
        self.output = []  # filtered iteration lines kept by clean_file()

    def clean_file(self):
        """Extract the iteration log lines and write them to ``f_output``.

        A line is kept when its first 7 columns hold an iteration number
        greater than the last one seen, it contains no 'T' (header rows do)
        and it is long enough to carry data.  Parsing stops at 'EXIT'.

        BUG FIX: the original used a Python 2 ``print`` statement (a syntax
        error under the Python 3 this blog otherwise targets), called
        ``f.close()`` inside the ``with`` block, and wrote the output file
        without a context manager.
        """
        it = 0
        with open(self.f_input, "r") as f:
            for line in f:
                if "EXIT" in line:
                    break
                head = line[:7].strip()
                if head.isdigit() and int(head) > it and "T" not in line:
                    if len(line) > 10:
                        it += 1
                        self.output.append(line)
        with open(self.f_output, "w") as w:
            w.writelines(self.output)
        print("MINOS_report: new file > {0}".format(self.f_output))

    def get_data(self):
        """Read the cleaned fixed-width file into ``self.data``."""
        # Fixed column spans of the MINOS summary layout.
        cols = [(1, 7), (8, 16), (17, 22), (23, 32), (33, 48), (49, 54), (55, 60), (61, 65)]
        self.data = pd.read_fwf(self.f_output, colspecs=cols, header=None)

    def plots(self):
        """Draw the three summary charts (objective/gradient, superbasics, infeasibilities)."""
        itn = len(self.data)
        rgradient = self.data[1]
        ninf = self.data[2]
        sinf = self.data[3]
        objective = self.data[4]
        nobj = self.data[5]
        ncon = self.data[6]
        nsb = self.data[7]

        it = list(range(1, itn + 1))

        # Values at the last iteration, used in the subplot titles.
        _min_obj = objective[itn - 1]
        _max_nsb = nsb[itn - 1]
        _max_nobj = nobj[itn - 1]
        _sinf = sinf[itn - 1]
        _ninf = ninf[itn - 1]
        _ncon = ncon[itn - 1]

        plt.figure("AMPL results")

        # plot 1: objective vs. reduced gradient (twin y axes)
        ax1 = plt.subplot(211)
        ax1.set_title('Iterations={0}. Objective (optimal solution)={1}'
                      .format(itn, _min_obj), size=10)
        ax1.set_xlabel('iterations')
        ax1.set_ylabel('objective function')
        l_obj = ax1.plot(it, objective, '.-', color="#0E3EEB", label="obj")
        ax2 = ax1.twinx()
        ax2.set_ylabel('reduced gradient')
        l_rg = ax2.plot(it, rgradient, '.-', color="#EB710E", label="rg")

        # Merge the two line sets into one legend.
        lns = l_obj + l_rg
        labs = [l.get_label() for l in lns]
        ax1.grid()
        ax1.legend(lns, labs, loc=0)

        # plot 2: superbasics vs. objective/gradient evaluations (twin y axes)
        ax3 = plt.subplot(223)
        ax3.set_title('number of superbasics={0}\n # objective and gradient calculations={1}'
                      .format(_max_nsb, _max_nobj), size=10)
        l_sb = ax3.plot(it, nsb, '.-', color="#0E3EEB", label="nsb")
        ax3.set_xlabel('iterations')
        ax3.set_ylabel('no.superbasics')
        ax4 = ax3.twinx()
        l_nobj = ax4.plot(it, nobj, '.-', color="#EB710E", label="nobj")
        ax4.set_ylabel('calculations obj. & gradient')

        lns = l_sb + l_nobj
        labs = [l.get_label() for l in lns]
        ax3.grid()
        ax3.legend(lns, labs, loc=4)

        # plot 3: infeasibility counts and nonlinear constraint evaluations
        plt.subplot(224)
        plt.title('no. of infeasibilities={0}\n sum of infeasibilities={1}\n '
                  '# nonlinear constraints evaluations={2}'.format(_ninf, _sinf, _ncon), size=10)
        plt.plot(it, ncon, 'g-', label="ncon")
        plt.plot(it, ninf, 'y-', label="ninf")
        plt.plot(it, sinf, 'm-', label="sinf")
        plt.xlabel('iterations')
        plt.grid()
        plt.legend()

        plt.tight_layout()
        plt.show()

if __name__ == "__main__":
summary_file = sys.argv[1]
#summary_file = "C:/Users/Guillermo Navas/Desktop/MSc Statistics and Operations research/Continuous optimization/" \
#                   "Modelling and solving optimization problems/Lab assignment 2 _ Network flow problems/solver_summary.txt"
report = Generate(summary_file)
report.clean_file()
report.get_data()
report.plots()