\documentclass[a4paper]{article}
\usepackage{html}
\usepackage[dvips]{graphics,color,epsfig}
\usepackage{path}
\usepackage{amssymb}
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{psfrag}
\usepackage{algorithm}
\usepackage{algpseudocode}
\newcommand{\N}{\ensuremath{\mathbb{N}}}
\newcommand{\F}{\ensuremath{\mathbb{F}}}
\newcommand{\Z}{\ensuremath{\mathbb{Z}}}
\newcommand{\R}{\ensuremath{\mathbb{R}}}
\newcommand{\Q}{\ensuremath{\mathbb{Q}}}
\newcommand{\C}{\ensuremath{\mathbb{C}}}
\newcommand{\MBI}[1]{\ensuremath{M_{#1}^{-1}}}
\newcommand{\RMBI}[1]{\ensuremath{\check{M}_{#1}^{-1}}}
\newcommand{\pmu}[2]{\ensuremath{p_{\mu_{j}}^{(#1)}(\varepsilon, #2)}}
\newcommand{\pmuz}[2]{\ensuremath{\dot{p}_{\mu_{j}}^{(#1)}(\varepsilon, #2)}}
\newcommand{\px}[3]{\ensuremath{p_{x_{#1}}^{(#2)}(\varepsilon, #3)}}
\newcommand{\pxz}[3]{\ensuremath{\dot{p}_{x_{#1}}^{(#2)}(\varepsilon, #3)}}
\newcommand{\xe}[1]{\ensuremath{x_{#1}(\varepsilon)}}
\newcommand{\cab}[3]{\ensuremath{\mathcal{A}_{>}(#1, #2, #3)}}
\newcommand{\can}[3]{\ensuremath{\mathcal{A}_{<}(#1, #2, #3)}}
\newtheorem{lemma}{Lemma}
\newtheorem{assumption}{Assumption}
\newtheorem{definition}{Definition}
\title{Degeneracy}
\author{Frans J.\ Wessendorp}
%
\begin{document}
\maketitle
\section{Row rank and column rank assumption}
The QP-solver so far assumes nondegeneracy of the quadratic program to be
solved (\cite{Sven}, page 19). For ease of reference we restate the
nondegeneracy assumptions: given the QP
%\begin{equation}
\begin{eqnarray}
\label{def:QP}
(QP)\quad minimize& c^{T}x + x^{T} D x & \nonumber \\
s.t. & \sum_{j=0}^{n-1}a_{ij}x_{j} = b_{i} & i \in E \nonumber \\
& \sum_{j=0}^{n-1}a_{ij}x_{j} \leq b_{i} & i \in I^{\leq} \\
& \sum_{j=0}^{n-1}a_{ij}x_{j} \geq b_{i} & i \in I^{\geq} \nonumber \\
& x_{j} \geq 0 & j \in \{0 \ldots n-1 \}
\nonumber
\end{eqnarray}
%\end{equation}
with $D$ positive semidefinite, where $I:= I^{\leq} \cup I^{\geq}$ and
$\left| E \right| + \left| I \right| = m$,
the following conditions
\begin{assumption} \label{ass:nondegeneracy}
Nondegeneracy
\begin{enumerate}
\item $Rank\left( A \right) = m$
\item The subsystem $A_{G}x_{G} = b$ has only solutions for sets $G \subseteq
\{0 \ldots n-1\}$ with $\left|G \right| \geq m$.
\end{enumerate}
\end{assumption}
must be met. In the following we will show how these
assumptions can be dropped. To this end we will first describe how the auxiliary
problem is set up.
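For illustration, consider the instance (with $n=2$, $D=0$, $E=\{0\}$ and
$I=\emptyset$)
\begin{eqnarray*}
\mbox{minimize} & x_{0} & \\
s.t. & x_{0} + x_{1} = 0 & \\
& x_{j} \geq 0 & j \in \{0, 1\}.
\end{eqnarray*}
Here $Rank\left(A\right) = 1 = m$, so the first assumption holds, but the
subsystem $A_{G}x_{G} = b$ with $G = \emptyset$ reduces to $0 = 0$ and is
solvable although $\left|G\right| = 0 < m$; the second assumption is therefore
violated, and indeed the only feasible point $x = (0,0)^{T}$ is a degenerate
vertex at which both variable bounds and the equality constraint are tight.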
\section{The auxiliary problem}
The auxiliary problem is constructed by augmenting the constraint matrix $A$
with columns corresponding to slack and artificial variables. The slack columns
are added in the standard way. For each of the equality constraints
$\sum_{j=0}^{n-1} {a_{i j}x_{j}} = b_{i}$, $i \in E$, the original matrix $A$
is augmented by an artificial column
\begin{equation} \label{def:art_col}
\tilde{a} = \left\{
\begin{array}{ll}
-e_{i} & \mbox{if $b_{i} < 0$} \\
e_{i} & \mbox{otherwise,}
\end{array}
\right.
\end{equation}
where $e_{i}$ denotes the $i$-th column of the identity matrix.
If the set of inequality constraints with infeasible origin
$I_{inf}:=\{i \in I^{\leq} \left| \right. b_{i} < 0 \} \cup
\{i \in I^{\geq} \left| \right. b_{i} > 0 \}$
is nonempty, the original matrix $A$
is augmented by a special artificial column $\tilde{a}^{s}$, defined as
\begin{equation}\label{def:spec_art_col}
\tilde{a}^{s}_{i} = \left\{
\begin{array}{ll}
-1 & \mbox{if $i \in I^{\leq}, b_{i} < 0$} \\
1 & \mbox{if $i \in I^{\geq}, b_{i} > 0$} \\
0 & \mbox{otherwise}
\end{array}
\right.
\end{equation}
If we denote by $O$ the index set of original variables, by $S$ the index set
of slack variables, by $art$ the index set of artificial variables and by
$\tilde{A}$ the original constraint matrix $A$ augmented in the above way,
the auxiliary problem may be expressed as
\begin{eqnarray*}
\mbox{minimize} & \tilde{c}^{T}x & \\
s.t. & \left(\tilde{A}x\right)_{i} = b_{i} & i \in \{0 \ldots m-1 \} \\
& x_{j} \geq 0 & j \in O \cup S \cup art
\end{eqnarray*}
where
\begin{equation} \label{def:aux_c}
\tilde{c}_{j} = \left\{
\begin{array}{ll}
0 & \mbox{if $j \in O \cup S$} \\
> 0 & \mbox{if $j \in art$}
\end{array}
\right.
\end{equation}
For later use we introduce the bijection $\sigma$ defined as
\begin{equation}
\sigma: S \cup art \setminus \{\tilde{a}^{s}\} \rightarrow I \cup E
\end{equation}
which maps the indices of slack and artificial variables to
the indices of their corresponding inequality and equality constraints.
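As a small illustration of the construction, consider an instance with one
equality constraint $x_{0} = 1$, $E = \{0\}$, and one inequality constraint
$-x_{0} \leq -2$, $I^{\leq} = \{1\}$, so that $b = (1, -2)^{T}$. Assuming the
slack column of the $\leq$-constraint is $e_{1}$ and the columns are ordered
as original, slack, artificial, special artificial, the artificial column for
the equality constraint is $e_{0}$ by Definition~(\ref{def:art_col}) and,
since $I_{inf} = \{1\}$ is nonempty, the special artificial column is
$\tilde{a}^{s} = (0, -1)^{T}$ by Definition~(\ref{def:spec_art_col}), such that
\begin{equation*}
\tilde{A} =
\left(\begin{array}{c|c|c|c}
1 & 0 & 1 & 0 \\
-1 & 1 & 0 & -1
\end{array}
\right),
\quad
O = \{0\}, \quad S = \{1\}, \quad art = \{2, 3\},
\end{equation*}
with $\sigma(1) = 1$ and $\sigma(2) = 0$.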
\subsection{Initialization of the auxiliary problem}
Since only constraints which are satisfied with equality determine the values
of the basic variables with respect to a given basis, only the equality
constraints as well as the currently active inequality constraints are
considered (see also~\cite{Sven}, Section 2.4).
To this end the set of basic variables $B$ is partitioned into original and
slack variables, that is
$B=B_{O} \cup B_{S}$, where $B_O \subseteq O \cup art$,
and the set of inequality constraints $I$ is partitioned as
$I=S_{B} \cup S_{N}$, where $S_{B}:=\sigma(B_{S})$
and
$S_{N}:=\sigma(S \setminus B_{S})$, if $\sigma$ denotes the bijection
$S \rightarrow I$.
The set of active constraints
$C=E \cup S_{N}$ is
introduced, such that a `reduced' basis matrix $\check{A}_{B}$
with respect to $B$ is defined as
\begin{equation}
\label{def:red_basis_phaseI}
\check{A}_{B}:=\tilde{A}_{C, B_{O}}
\end{equation}
Note that for degenerate bases active constraints do not necessarily occur in
the index set $C$ only.
For later use, we introduce $A_{B}$ for the unreduced basis,
\begin{equation}
\label{def:basis_phaseI}
A_{B}:= \tilde{A}_{C \cup S_{B}, B_{O} \cup B_{S}}
\end{equation}
Let $i_{0} \in I_{inf}$ be the index of a constraint that has a most infeasible
origin, that is
\[
\left| b_{i_{0}} \right| \geq \left|b_{i}\right|, \quad i \in I_{inf}
\]
then $B_{O}$, $B_{S}$ and the initial sets of basic and nonbasic constraints
$S_{B}$ and $S_{N}$ are initialized as
\begin{equation}
\begin{array}{ccccccc}
\label{def:headings_init_io}
B_{O}^{(0)} &:=& art && B_{S}^{(0)} &:=& S \setminus
\{\sigma^{-1}\left(i_{0}\right)\} \\
S_{B}^{(0)} &:=& I \setminus \{i_{0}\} && S_{N}^{(0)} & := & \{ i_{0} \}
\end{array}
\end{equation}
If, on the other hand, $I_{inf}=\emptyset$, then $B_{O}$, $B_{S}$, $S_{B}$ and
$S_{N}$ are initialized as
\begin{equation}
\begin{array}{ccccccc}
\label{def:headings_init_fo}
B_{O}^{(0)} &:=& art && B_{S}^{(0)} &:=& S \\
S_{B}^{(0)} &:=&I && S_{N}^{(0)}&:=& \emptyset
\end{array}
\end{equation}
where $art$ does not contain a special artificial variable.
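Continuing the small illustration introduced after the definition of
$\sigma$, we have $I_{inf} = \{1\}$, hence $i_{0} = 1$ and
Definition~(\ref{def:headings_init_io}) applies:
\begin{equation*}
B_{O}^{(0)} = art = \{2, 3\}, \quad
B_{S}^{(0)} = \emptyset, \quad
S_{B}^{(0)} = \emptyset, \quad
S_{N}^{(0)} = \{1\},
\end{equation*}
such that $C = E \cup S_{N}^{(0)} = \{0, 1\}$ and
\begin{equation*}
\check{A}_{B} = \tilde{A}_{C, B_{O}^{(0)}} =
\left(\begin{array}{cc}
1 & 0 \\
0 & -1
\end{array}
\right),
\quad
\check{A}_{B}^{-1}b_{C} = \left(1, 2\right)^{T},
\end{equation*}
so the two artificial variables start out basic with the feasible values $1$
and $2$.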
\subsection{Expelling artificial variables from the Basis}
At the end of phase I some artificial variables may remain in the basis.
In that case, if the original problem is to be feasible, the basis has to be
degenerate.
Chv\'{a}tal~\cite{Chvatal}, Chapter 8, describes a procedure that,
given a system $Ax = b$ and an optimal
basis $B$, computes a subsystem $A'x = b'$ of constraints with $A'$ having full
row rank and a basis $B'$ such that the set of feasible solutions for both
systems is the same. The procedure tries to pivot the artificial variables out of
the basis, the constraints corresponding to the artificial variables that can
not be driven out of the basis in this manner can be removed without changing
the solution set of the system.
Since the proof for the equality of the solution sets of $Ax = b$ and
$A'x = b'$ only works if the special artificial variable can be pivoted out of
the basis we show first that this can always be achieved.
For the sake of notational convenience we assume here that the index set of the
artificial variables $art\setminus \{\tilde{a}^{s}\}$ is defined as
$art\setminus \{\tilde{a}^{s}\}=\{l+1 \ldots l+\left|E\right| \}$,
$l \geq n-1$ and that the basic artificial variable $x_{i+l}$ appears in the
basis heading $B_{O}$ at position $i$.
\subsubsection{The special artificial variable}
\label{sec:spec_art_unpert}
Suppose the special artificial variable is the $k$-th entry in the basis
heading $B$. Then
\[
r = e_{k}^{T} \check{A}_{B}^{-1}
\]
denotes the corresponding row of the basis inverse and
the nonbasic variable $x_{j}$ can be pivoted into the basis
with the special artificial variable leaving iff
\begin{equation} \label{eq:piv_precond}
e_{k}^{T}\check{A}_{B}^{-1}\tilde{A}_{C,j} \neq 0
\end{equation}
This can easily be verified by considering the corresponding eta matrix
whose $k$-th column is $\check{A}_{B}^{-1}\tilde{A}_{C,j}$ and whose determinant
is nonzero iff condition (\ref{eq:piv_precond}) holds.
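Explicitly, writing $q:=\check{A}_{B}^{-1}\tilde{A}_{C,j}$, the eta matrix is
the identity matrix with its $k$-th column replaced by $q$,
\begin{equation*}
E_{k} = I + \left(q - e_{k}\right)e_{k}^{T},
\qquad
\det E_{k} = q_{k} = e_{k}^{T}\check{A}_{B}^{-1}\tilde{A}_{C,j}.
\end{equation*}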
Since $r \check{A}_{B} = e_{k}^{T}$, the definition of
the special artificial column $\tilde{a}^{s}$ and the fact that $B$ is a basis
imply that there exists at least one
$i \in I_{inf} \cap S_{N}$ such that
$r_{i} \neq 0$, which in turn implies that condition
(\ref{eq:piv_precond}) holds for the nonbasic
$j = \sigma^{-1} \left( i \right)$.
\subsubsection{Pivoting the artificial variables out of the basis}
The procedure described in~\cite{Chvatal} is outlined in
Algorithm~(\ref{alg:expel_art_var}). We avoid iterating
over sets that change during the computation; we only use membership tests on
such sets. The primitive $update(j,i)$
updates the reduced basis inverse $\check{A}_{B}^{-1}$ with the entering
variable $x_{j}$ and leaving variable $x_{i}$ and updates the basis heading
accordingly. With $J := \sigma(B_{O} \cap art)$ denoting the index set computed
at the end of Algorithm~(\ref{alg:expel_art_var}), the procedure guarantees
that every solution of
\begin{equation}
\sum_{j=0}^{n-1}a_{ij}x_{j}=b_{i} \quad i \in E \setminus J
\end{equation}
is also a solution of
\begin{equation}
\sum_{j=0}^{n-1}a_{ij}x_{j}=b_{i} \quad i \in E,
\end{equation}
so that both systems have the same set of solutions; the proof of this claim
is omitted here.
\begin{algorithm}
\caption{Expel basic artificial variables from basis}
\label{alg:expel_art_var}
\begin{algorithmic}[0]
\ForAll{$i \in art$}
\If{$i \in B_{O}$}
\State $r \gets e_{i}^{T}\check{A}_{B}^{-1}$
\ForAll{$j \in O \cup S$}
\If{$j \in N$}
\If{$r\tilde{A}_{C,j}\neq 0$}
\State $update(j,i)$
\State \textbf{break}
\EndIf
\EndIf
\EndFor
\EndIf
\EndFor
\State $J \gets \sigma(B_{O} \cap art)$
\end{algorithmic}
\end{algorithm}
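To illustrate the case in which an artificial variable cannot be expelled,
consider the two equality constraints
\begin{equation*}
x_{0} + x_{1} = 1, \qquad 2x_{0} + 2x_{1} = 2,
\end{equation*}
whose second row is redundant, with artificial columns $e_{0}$ and $e_{1}$ for
the variables $x_{2}$ and $x_{3}$. A phase I optimum is attained, for
instance, with basis heading $B_{O} = \{0, 3\}$, that is $x_{0} = 1$ and the
artificial variable $x_{3} = 0$ basic. The row of $\check{A}_{B}^{-1}$
corresponding to $x_{3}$ is $r = (-2, 1)$ and
$r\tilde{A}_{C, 1} = (-2)\cdot 1 + 1 \cdot 2 = 0$ for the only nonbasic
original variable $x_{1}$, so Algorithm~(\ref{alg:expel_art_var}) cannot pivot
$x_{3}$ out of the basis and ends with $J = \{\sigma(3)\} = \{1\}$, the index
of the redundant constraint.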
\subsubsection{Removing the remaining artificial variables and their
constraints}
Let $B^{(k)}$ and $N^{(k)}$ denote the set of indices of basic and nonbasic
variables at the end of Algorithm~(\ref{alg:expel_art_var}).
Since for each remaining basic artificial variable $x_{i}$,
$i \in B_{O}^{(k)} \cap art$,
\begin{equation}
e_{i}^{T}\check{A}_{B^{(k)}}^{-1}\tilde{A}_{C, j} = 0, \quad
j \in N^{(k)} \cap \left( O \cup S \right)
\end{equation}
holds,
we may, according to the procedure described in~\cite{Chvatal}, remove
the artificial variable $x_{i}$ together with its constraint
$\tilde{A}_{\sigma(i), \bullet}$ without changing the set of feasible
solutions.
This can be achieved by applying a slightly modified update
of type U8, defined in Section~6.3.2 of~\cite{Sven}. A $U8(j,i)$ update is an
LP-update and replaces an original variable $x_{i}$, $i \in B_{O}$, in the
basis by a slack variable $x_{j}$, $j \in S \setminus B_{S}$;
thus $S_{N}$ and $B_{O}$, or more generally, the sets
$C=E \cup S_{N}$ and $B_{O}$, each decrease by one element. Consequently the
reduced basis matrix $\check{A}_{B}$ is shrunk by the
row $\left(\check{A}_{B}\right)_{\sigma(j), \bullet}$ and
the column $\left(\check{A}_{B}\right)_{\bullet, i}$. Provided the
update mechanism is general enough to handle variables $x_{j}$, $j \in art$,
in addition to $x_{j}$, $j \in S \setminus B_{S}$, or equivalently,
to remove rows $\left(\check{A}_{B}\right)_{\sigma(j), \bullet}$ with
$\sigma(j) \in E$ in addition to $\sigma(j) \in S_{N}$, we can use the update
$U8(i,i)$ for our purposes.
Of course the update of the basis headings needs appropriate modification
in this case, that is, $B_{O}^{\prime}:=B_{O} \setminus \{i\}$ and
$E^{\prime}:=E \setminus \{ \sigma(i)\}$, if $\check{A}_{B^{\prime}}$ denotes
the basis matrix after the update.
\subsubsection{Dropping the row rank assumption}
For the performance reasons alluded to in the last subsection, the feature
of removing redundant equalities is provided only if the compile time tag
\texttt{Has\_no\_inequalities} has type \texttt{Tag\_false}.
So if the constraint matrix $A$ has
only equality constraints, that is $I=\emptyset$ in
Definition~(\ref{def:QP}), and the
constraint matrix is suspected not to have full row rank, one can define the
compile time tag \texttt{Has\_no\_inequalities} to be of type
\texttt{Tag\_false} at the price of
some small performance penalty. If on the other hand the compile time tag has
type \texttt{Tag\_true}, the solver aborts in case $Rank(A) < m$.
\section{The lexicographic method}
If the set of inequality constraints with infeasible origin
\[
I_{inf}:=\left\{i \in I^{\leq} \left| \right.
l_{c}\left(b_{i} +
\sum_{j=0}^{n-1}a_{ij}\varepsilon^{j+1}\right) < 0 \right\} \cup
\left\{i \in I^{\geq} \left| \right.
l_{c}\left(b_{i} +
\sum_{j=0}^{n-1}a_{ij}\varepsilon^{j+1}\right) > 0\right\}
\]
is nonempty, the original constraint matrix is likewise augmented by a special
artificial column $\tilde{a}^{s}$, defined as
\begin{equation}
\tilde{a}^{s}_{i} =\left\{
\begin{array}{ll}
-1 & \mbox{if $i \in I^{\leq},
l_{c}\left(b_{i} +
\sum_{j=0}^{n-1}a_{ij}\varepsilon^{j+1}\right) < 0$}\\
1 & \mbox{if $i \in I^{\geq},
l_{c}\left(b_{i} +
\sum_{j=0}^{n-1}a_{ij}\varepsilon^{j+1}\right)> 0$}\\
0 & \mbox{otherwise}
\end{array}
\right.
\end{equation}
The auxiliary problem is then defined as
\begin{eqnarray}
\label{def:aux_prob}
\mbox{minimize} & \tilde{c}^{T}x(\varepsilon) & \nonumber \\
s.t. & \left(\tilde{A}x(\varepsilon)\right)_{i} = b_{i} & i \in \{0\ldots m-1\} \\
& x_{j}(\varepsilon) \geq -\varepsilon^{j+1} &
j \in O \cup S \cup art \nonumber
\end{eqnarray}
with $\tilde{c}$ defined as in Definition~\ref{def:aux_c} and the additional
requirement that
\begin{equation}
\label{req:order_eps}
\max_{i \in O \cup S}i < \min_{i \in art}i
\end{equation}
holds.
%Furthermore we require that the index $ind(\tilde{a}^{s})$ is
%the largest one with respect to slack and artificial variables
%\begin{equation}
%\label{req:order_eps_spec_art}
%\max_{i \in S \cup art \setminus \{\tilde{a}^{s} \}}i < ind(\tilde{a}^{s})
%\end{equation}
\marginpar{may the mapping $\sigma: S \rightarrow I$ be arbitrary?}
\subsection{Initialization of the auxiliary problem for the lexicographic
method}
The auxiliary problem is initialized as before, that is, $B_{O}^{(0)}$,
$B_{S}^{(0)}$, $S_{B}^{(0)}$ and $S_{N}^{(0)}$ are defined as in
Definitions~(\ref{def:headings_init_io}) and~(\ref{def:headings_init_fo}),
depending on the feasibility of the origin, the only difference being
that the most infeasible origin $i_{0} \in I_{inf}$, defined by
\begin{equation*}
\left|b_{i_{0}} + \sum_{j=0}^{n-1}a_{i_{0}j}\varepsilon^{j+1} \right|
>
\left|b_{i} + \sum_{j=0}^{n-1}a_{ij}\varepsilon^{j+1} \right|
\quad i \in I_{inf} \setminus \{i_{0}\},
\end{equation*}
is now unique.
\subsection{Resolving ties in phaseI}
\label{sec:Res_ties_phaseI}
\subsubsection{LP-case}
Let $B$ be the current basis and $j \in N$ be the entering variable
and define $\hat{N}:= N \setminus\{j\}$ and
$q:= A_{B}^{-1}\tilde{A}_{\bullet,j}$.
Let $i_{1}, i_{2} \in B$ be
involved in a tie in the unperturbed problem, that is
\begin{equation*}
\min_{i \in B: q_{x_{i}} > 0}
\frac{\left(A_{B}^{-1}b\right)_{x_{i}}}{q_{x_{i}}}
=
\frac{\left(A_{B}^{-1}b\right)_{x_{i_{1}}}}{q_{x_{i_{1}}}}
=
\frac{\left(A_{B}^{-1}b\right)_{x_{i_{2}}}}{q_{x_{i_{2}}}}
\end{equation*}
then, by Definition~(\ref{def:aux_prob}), we have to compare
polynomials in $\varepsilon$ of the following form in the perturbed problem
\begin{eqnarray}
\tilde{p}_{x_{i}}^{(L)}\left(\varepsilon, B\right) & := &
\frac{\varepsilon^{i+1}
- \left(A_{B}^{-1}\tilde{A}_{N}\right)_{x_{i}}
\epsilon_{N}}{q_{x_{i}}} \nonumber \\
\label{def:p_x_i_tilde}
& = &
\frac{\varepsilon^{i+1}
- \left(A_{B}^{-1}\tilde{A}_{N \setminus \{j\}}\right)_{x_{i}}
\epsilon_{N \setminus \{j\}}}{q_{x_{i}}}
+ \varepsilon^{j+1}
\end{eqnarray}
Note that always either $\tilde{p}_{x_{i_{1}}}^{(L)}
\left(\varepsilon, B\right) <
\tilde{p}_{x_{i_{2}}}^{(L)}\left(\varepsilon, B\right)$ or
$\tilde{p}_{x_{i_{1}}}^{(L)}\left(\varepsilon, B\right) >
\tilde{p}_{x_{i_{2}}}^{(L)}\left(\varepsilon, B\right)$ holds, even if
$\xe{i_{1}} = \xe{i_{2}}$,
since the terms $\frac{\varepsilon^{i_{1}+1}}{q_{x_{i_{1}}}}$ and
$\frac{\varepsilon^{i_{2}+1}}{q_{x_{i_{2}}}}$ are unique to
$\tilde{p}_{x_{i_{1}}}^{(L)}\left(\varepsilon, B\right)$ and
$\tilde{p}_{x_{i_{2}}}^{(L)}\left(\varepsilon, B\right)$, respectively.
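In practice the comparison amounts to comparing coefficient sequences: for
all sufficiently small $\varepsilon > 0$ the order of the two polynomials is
decided by the sign of the lowest-order nonvanishing coefficient of their
difference. For instance, with made-up numbers $i_{1}+1 = 2$, $i_{2}+1 = 4$
and $q_{x_{i_{1}}} = q_{x_{i_{2}}} = 1$, the polynomials might read
\begin{equation*}
\tilde{p}_{x_{i_{1}}}^{(L)}\left(\varepsilon, B\right)
= \varepsilon^{j+1} + \varepsilon^{2} - 3\varepsilon^{5},
\qquad
\tilde{p}_{x_{i_{2}}}^{(L)}\left(\varepsilon, B\right)
= \varepsilon^{j+1} + \varepsilon^{4} - 3\varepsilon^{5};
\end{equation*}
their difference is $\varepsilon^{2} - \varepsilon^{4} > 0$ for small
$\varepsilon > 0$, so $x_{i_{2}}$ attains the smaller value and leaves the
basis.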
\subsubsection{QP-case}
In preparation for phaseII, the QP-case in phaseI uses the QP-machinery for
$D=0$. Given the basis heading
$\left[C \cup S_{B}, B_{O} \cup B_{S} \right]$,
the basis matrix $M_{B}$ in phaseI is, according to \cite{Sven},
Section~2.3.1, Equation~2.5, defined as
\begin{equation}
\label{def:M_B_phaseI}
M_{B}
\left(\begin{array}{c}
\lambda \\
\hline
x_{B}^{*}
\end{array}
\right)
=
\left(\begin{array}{c}
b \\
\hline
-c_{B}
\end{array}
\right)
\quad
M_{B}:=
\left(\begin{array}{c|c}
0 & A_{B} \\
\hline
A_{B}^{T} & 0
\end{array}
\right)
\end{equation}
such that $M_{B}^{-1}$ is defined as
\begin{equation}
\label{def:M_B_inv_phaseI}
M_{B}^{-1}:=
\left(\begin{array}{c|c}
0 & \left(A_{B}^{-1}\right)^{T} \\
\hline
A_{B}^{-1} & 0
\end{array}
\right)
\end{equation}
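Indeed, block multiplication confirms this:
\begin{equation*}
M_{B}M_{B}^{-1} =
\left(\begin{array}{c|c}
0 & A_{B} \\
\hline
A_{B}^{T} & 0
\end{array}
\right)
\left(\begin{array}{c|c}
0 & \left(A_{B}^{-1}\right)^{T} \\
\hline
A_{B}^{-1} & 0
\end{array}
\right)
=
\left(\begin{array}{c|c}
A_{B}A_{B}^{-1} & 0 \\
\hline
0 & \left(A_{B}^{-1}A_{B}\right)^{T}
\end{array}
\right)
= I.
\end{equation*}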
Assume $B$ to be the current basis and $j \in N$ the entering variable, and
define $\hat{N}$ as above. Then $q$ is, according to \cite{Sven},
Section~2.3.2, Equation~2.2, defined as
\begin{equation}
\label{def:q_phaseI}
q:= M_{B}^{-1}
\left(\begin{array}{c}
\tilde{A}_{\bullet, j} \\
\hline
0
\end{array}
\right)
=
\left(\begin{array}{c}
0 \\
\hline
A_{B}^{-1}\tilde{A}_{\bullet, j}
\end{array}
\right)
\end{equation}
Using $\tilde{c}_{B}=0$ in Definition~(\ref{def:M_B_phaseI}),
we can express a tie between $i_{1}$ and $i_{2}$ in the unperturbed problem
as
\begin{equation*}
\min_{i \in B: q_{x_{i}}>0}
\frac{
\left(M_{B}^{-1}
\left(\begin{array}{c}
b \\
\hline
0
\end{array}
\right)
\right)_{x_{i}}
}{q_{x_{i}}}
=
\frac{
\left(M_{B}^{-1}
\left(\begin{array}{c}
b \\
\hline
0
\end{array}
\right)
\right)_{x_{i_{1}}}
}{q_{x_{i_{1}}}}
=
\frac{
\left(M_{B}^{-1}
\left(\begin{array}{c}
b \\
\hline
0
\end{array}
\right)
\right)_{x_{i_{2}}}
}{q_{x_{i_{2}}}}
\end{equation*}
such that by Definition~(\ref{def:aux_prob}) we have to compare polynomials
in $\varepsilon$ of the following form in the perturbed problem
\begin{equation}
\label{def:p_x_i_tilde_Q_1}
\tilde{p}_{x_{i}}^{(Q)}\left(\varepsilon, B\right) :=
\frac{
\varepsilon^{i+1}
-\left(M_{B}^{-1}
\left(\begin{array}{c}
\tilde{A}_{N} \\
\hline
0
\end{array}
\right)
\right)_{x_{i}}
\epsilon_{N}
}{q_{x_{i}}}
\end{equation}
which is, using Definitions~(\ref{def:M_B_inv_phaseI})
and~(\ref{def:q_phaseI}), the same as
Definition~(\ref{def:p_x_i_tilde}),
up to a shift in the indexing function.
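Spelled out, Definition~(\ref{def:M_B_inv_phaseI}) gives
\begin{equation*}
M_{B}^{-1}
\left(\begin{array}{c}
\tilde{A}_{N} \\
\hline
0
\end{array}
\right)
=
\left(\begin{array}{c}
0 \\
\hline
A_{B}^{-1}\tilde{A}_{N}
\end{array}
\right),
\end{equation*}
so the $x_{i}$-components of the numerator in
Definition~(\ref{def:p_x_i_tilde_Q_1}) coincide with those in
Definition~(\ref{def:p_x_i_tilde}); the shift in the indexing function merely
accounts for the $\lambda$-components preceding the $x$-components in the
$M_{B}$-system.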
\subsection{Transition to PhaseII}
For the sake of notational convenience we assume here that the index set of the
artificial variables $art\setminus \{\tilde{a}^{s}\}$ is defined as
$art\setminus \{\tilde{a}^{s}\}=\{l+1 \ldots l+\left|E\right| \}$,
$l \geq n-1$ and that the basic artificial variable $x_{i+l}$ appears in the
basis heading $B$ at position $i$.
\subsubsection{Expelling artificial variables from the basis}
Let $N_{-1}:=N \cup \{-1\}$ denote the set of nonbasic variables extended
by $-1$ with $\tilde{A}_{\bullet, -1}:=b$. Define for $i \in B \cap art$
\begin{eqnarray*}
l_{B}\left(i\right):=\min_{j \in R_{i}} j & \text{where}&
R_{i}:=\{ j \in N_{-1} \left| \right.
e_{i}^{T}A_{B}^{-1}\tilde{A}_{\bullet, j} \neq 0 \}
\end{eqnarray*}
\marginpar{Definition restricted to basic artificials only}
\begin{lemma}
\label{lem:art_BxN_zero}
Let $x_{B}(\varepsilon)$ be the optimal solution of the auxiliary problem and
$B \cap art \neq \emptyset$. Then for $i \in B \cap art$ either
$l_{B}\left(i\right) = -1$, or $l_{B}\left(i\right) \geq 0$ and
$e_{i}^{T}A_{B}^{-1}\tilde{A}_{\bullet,j} = 0$
for all $j \in N \setminus art$.
\end{lemma}
\begin{proof}
We only consider the case $l_{B}\left(i\right) \geq 0$ for all
$i \in B \cap art$, since
$l_{B}\left(i\right) = -1$ for some $i \in B \cap art$ implies infeasibility of
the unperturbed problem.
Since $x_{B}(\varepsilon) = A_{B}^{-1}b - A_{B}^{-1}
\tilde{A}_{N}\epsilon_{N}$
and the implicit constraint for each variable $x_{j}(\varepsilon)$ is
$x_{j}(\varepsilon) \geq -\varepsilon^{j+1}$, feasibility requires
for each basic artificial variable $i \in B \cap art$ either
$l_{B}\left(i\right) < i$ and
$e_{i}^{T}A_{B}^{-1}\tilde{A}_{\bullet, l_{B}\left(i\right)} > 0$, or
$l_{B}\left(i\right) > i$. Let
\begin{equation*}
\ell:= \min_{i \in B \cap art} l_{B}\left(i\right),
\end{equation*}
if $\ell \in N \setminus art$,
then by definition of $\tilde{c}_{B}$ and $\ell$,
$\tilde{c}_{B}A_{B}^{-1}\tilde{A}_{\bullet, \ell} > 0$,
in contradiction to optimality which requires
$\tilde{c}_{\ell} \geq
\tilde{c}_{B}A_{B}^{-1}\tilde{A}_{\bullet, \ell}$,
since $\tilde{c}_{\ell}=0$ for
$\ell \in N \setminus art$.
So $\ell \in N \cap art$, which by requirement~(\ref{req:order_eps}) implies
$e_{i}^{T}A_{B}^{-1}\tilde{A}_{\bullet, j}=0$, for
$i \in B \cap art$ and $j \in N \setminus art$.
\end{proof}
\subsubsection{The special artificial variable}
Suppose $ind(\tilde{a}^{s}) \in B_{O}$ in the optimal solution of the auxiliary
problem.
For the special artificial variable in the perturbed case the same remarks
apply as in the unperturbed case, that is, by
Section~(\ref{sec:spec_art_unpert}) there exists
$j \in S \setminus B_{S}$ such that
\begin{equation*}
e_{k}^{T}A_{B}^{-1}\tilde{A}_{\bullet,j} \neq 0
\end{equation*}
holds, if $\tilde{a}^{s}$ appears in $k$-th position in the basis heading
$B$. By Lemma~(\ref{lem:art_BxN_zero}), on the other hand, we have
\begin{equation*}
e_{k}^{T}A_{B}^{-1}\tilde{A}_{\bullet,j} = 0,
\end{equation*}
a contradiction, so $ind(\tilde{a}^{s}) \in N$.
\subsubsection{Removing artificial variables}
By Lemma~(\ref{lem:art_BxN_zero})
\begin{equation}
e_{i}^{T}A_{B}^{-1}\tilde{A}_{N \setminus art}=0, \quad i \in B_{O} \cap art
\end{equation}
holds for the optimal solution of the auxiliary problem, such that by the proof
in \cite{Chvatal}, Chapter~8, the equality constraints
$A_{i}$, $i \in J=\sigma(B_{O} \cap art)$ are linear combinations of the
equality constraints $A_{i}$, $i \in E \setminus J$.
The values of the nonbasic artificial variables $x_{j}$, $j \in N \cap art$,
are increased from $-\varepsilon^{j+1}$ to zero and then $x_{j}$ is removed.
This may render the solution infeasible if
\begin{equation*}
e_{i}^{T}A_{B}^{-1}\tilde{A}_{\bullet, j} > 0
\end{equation*}
for some $i \in B$ and $j \in N \cap art$ with $j < i$, but does not affect the
linear dependence of the constraints $A_{i}$, $i \in J$ on the constraints
$A_{i}$, $i \in E \setminus J$.
Thus by Requirement~(\ref{req:order_eps}) this may only affect
the feasibility of the basic artificial variables
$x_{i}$, $i \in B \cap art$. Since these
variables can be removed together with
their corresponding constraints $A_{\sigma(i)}$, we end up, after their
removal, with a basic feasible
solution to the original perturbed problem. As in the unperturbed case,
the basic artificial variables
$x_{i}$, $i \in B \cap art$, are removed by updates of type $U8$.
\subsection{The lexicographic method in phaseII}
Since the nonbasic variables are no longer zero, the objective function as
well as the values of the basic variables are no longer independent of the
nonbasic variables; for the sake of explicitness we therefore restate the
perturbed variants of (UQP($B$)) and (QP($B$)) and their KKT conditions for
optimality, as well as the perturbed variant of the definition of a QP-basis.
\begin{lemma}[KKT conditions for $(QP(B_{\varepsilon}))$]
\label{lemma:KKT_QP(B)_epsilon}
A feasible solution $x^{*}(\varepsilon) \in \mathbb{R}^{n}$ to the quadratic
program
\begin{eqnarray*}
\mbox{$(QP(B_{\varepsilon}))$} & minimize & c_{B \cup N}^{T}
x_{B \cup N}(\varepsilon)
+ x_{B \cup N}^{T}(\varepsilon)
D_{B \cup N, B \cup N} x_{B \cup N}(\varepsilon) \\
& s.t. & A_{B}x_{B}(\varepsilon) = b - A_{N}x_{N}(\varepsilon) \\
& & I_{N}x_{N}(\varepsilon) = \epsilon_{N} \\
& & x_{B}(\varepsilon) \geq \epsilon_{B}
\end{eqnarray*}
with $D$ symmetric, is optimal iff there exist an $m$-vector
$\lambda(\varepsilon)$ and an $\left|B\right|$-vector
$\mu_{B}(\varepsilon) \geq 0$
such that
\begin{eqnarray}
c_{B}^{T} + 2x_{B}^{*^{\scriptstyle{T}}}(\varepsilon)D_{B,B} +
2\epsilon_{N}^{T}D_{N,B} & = &
-\lambda^{T}(\varepsilon)A_{B} + \mu_{B}^{T}(\varepsilon)I_{B} \\
\mu_{B}^{T}(\varepsilon) \left( -I_{B}x_{B}^{*}(\varepsilon) +
\epsilon_{B} \right) & = & 0
\end{eqnarray}
\end{lemma}
Likewise, the KKT conditions for $(UQP(B_{\varepsilon}))$ are
\begin{lemma}[KKT conditions for $(UQP(B_{\varepsilon}))$]
\label{lemma:KKT_UQP(B)_epsilon}
A feasible solution $x^{*}(\varepsilon) \in \mathbb{R}^{n}$ to the
unconstrained quadratic program
\begin{eqnarray*}
\mbox{$(UQP(B_{\varepsilon}))$} & minimize & c_{B \cup N}^{T}
x_{B \cup N}(\varepsilon)
+ x_{B \cup N}^{T}(\varepsilon) D_{B \cup N, B \cup N}
x_{B \cup N}(\varepsilon) \\
& s.t. & A_{B}x_{B}(\varepsilon) = b - A_{N}x_{N}(\varepsilon) \\
& & I_{N}x_{N}(\varepsilon) = \epsilon_{N}
\end{eqnarray*}
with $D$ symmetric, is optimal iff there exists an $m$-vector
$\lambda(\varepsilon)$ such that
\begin{eqnarray}
c_{B}^{T} + 2x_{B}^{*^{\scriptstyle{T}}}(\varepsilon)D_{B,B} +
2\epsilon_{N}^{T}D_{N,B} & = &
-\lambda^{T}(\varepsilon)A_{B}
\end{eqnarray}
\end{lemma}
\begin{lemma}
\label{lemma:strict}
Any vector $x_{B \cup N}(\varepsilon)$ with
$x_{B}(\varepsilon) > \epsilon_{B}$ satisfying
\begin{eqnarray}
A_{B}x_{B}(\varepsilon) & = & b - A_{N}x_{N}(\varepsilon) \\
x_{N}(\varepsilon) & = & \epsilon_{N}
\end{eqnarray}
is an optimal solution to $(QP(B_{\varepsilon}))$, iff it is an optimal
solution to $(UQP(B_{\varepsilon}))$.
\end{lemma}
\begin{proof}
For $x_{B}(\varepsilon) > \epsilon_{B}$ the second condition of
Lemma~\ref{lemma:KKT_QP(B)_epsilon} implies $\mu_{B}^{T}(\varepsilon)=0$.
Thus, the first condition of Lemma~\ref{lemma:KKT_QP(B)_epsilon} and the
single condition of Lemma~\ref{lemma:KKT_UQP(B)_epsilon} are equivalent.
On the other hand any optimal solution to $(UQP(B_{\varepsilon}))$ with
$x_{B}(\varepsilon) > \epsilon_{B}$ is feasible and optimal to
$(QP(B_{\varepsilon}))$ too, since the feasible region of the latter is
completely contained in the feasible region of $(UQP(B_{\varepsilon}))$.
\end{proof}
And last but not least the perturbed variant of the definition of a $QP$-basis:
\begin{definition}
A subset $B$ of the variables of a quadratic program in standard form defines
a $QP_{\varepsilon}$-basis iff
\begin{enumerate}
\item the unconstrained subproblem
\begin{eqnarray}
\mbox{(UQP($B_{\epsilon}$))} & minimize & c_{B \cup N}^{T}
x_{B \cup N}(\varepsilon)
+ x_{B \cup N}^{T}(\varepsilon)D_{B \cup N, B \cup N}
x_{B \cup N}(\varepsilon) \nonumber\\
\label{eq:QP_eps_basis_feasibility_B}
& s.t. &A_{B} x_{B}(\varepsilon) = b - A_{N}x_{N}(\varepsilon) \\
\label{eq:QP_eps_basis_feasibility_N}
& &I_{N} x_{N}(\varepsilon) = \epsilon_{N}
\end{eqnarray}
has a unique optimal solution $x_{B}^{*}(\varepsilon) > \epsilon_{B}$ and
\item $A_{B}$ has full row rank, $rank(A_{B})=m$.
\end{enumerate}
\end{definition}
In the following subsections we mimic the arguments of \cite{Sven},
Sections~2.3.1 and~2.3.2, for the perturbed problem.
\subsubsection{Pricing}
Testing whether a nonbasic variable \xe{j} can improve the
current solution $x_{B}^{*}(\varepsilon)$ by entering the current
$QP_{\varepsilon}$-basis $B$ is done as follows.
Let $\hat{B}:=B \cup \{j\}$ and consider the subproblem
\begin{eqnarray*}
\mbox{(QP($\hat{B}_{\varepsilon}$))} &minimize& c_{\hat{B} \cup \hat{N}}^{T}
x_{\hat{B} \cup \hat{N}}(\varepsilon) +
x_{\hat{B} \cup \hat{N}}^{T}(\varepsilon)
D_{\hat{B} \cup \hat{N}, \hat{B} \cup \hat{N}}x_{\hat{B} \cup \hat{N}}(\varepsilon)
\\
& s.t. & A_{\hat{B}}x_{\hat{B}}(\varepsilon) = b - A_{\hat{N}}
x_{\hat{N}}(\varepsilon) \\
& & I_{\hat{N}}x_{\hat{N}}(\varepsilon) = \epsilon_{\hat{N}} \\
& & x_{\hat{B}}(\varepsilon) \geq \epsilon_{\hat{B}}
\end{eqnarray*}
By Lemma~\ref{lemma:KKT_QP(B)_epsilon}, applied to the above,
$x_{\hat{B}}^{*}(\varepsilon)$ is an optimal solution
iff there exist vectors $\lambda(\varepsilon)$ and
$\mu(\varepsilon) \geq 0$ such that
\begin{eqnarray}
\label{eq:KKT_lagrange_id}
c_{\hat{B}}^{T} + 2x_{\hat{B}}^{*^{\scriptstyle{T}}}(\varepsilon)D_{\hat{B}, \hat{B}} +
2\epsilon_{\hat{N}}^{T}D_{\hat{N}, \hat{B}}& = &
-\lambda^{T}(\varepsilon)A_{\hat{B}}
+ \mu_{\hat{B}}^{T}(\varepsilon)I_{\hat{B}} \\
\label{eq:KKT_compl_slackness}
\mu_{\hat{B}}^{T}(\varepsilon)
\left(-I_{\hat{B}}x_{\hat{B}}^{*}(\varepsilon)
+ \epsilon_{\hat{B}}\right) & = & 0
\end{eqnarray}
Since $x_{B}^{*}(\varepsilon)> \epsilon_{B}$, $\mu_{B}(\varepsilon)=0$ holds
by~(\ref{eq:KKT_compl_slackness}). Splitting~(\ref{eq:KKT_lagrange_id})
into its $B$ and $j$ components yields
\begin{eqnarray}
\label{eq:KKT_lagrange_id_B}
c_{B}^{T} + {2x_{B}^{*}}^{T}(\varepsilon)D_{B,B}
+ 2\epsilon_{\hat{N}}^{T}D_{\hat{N},B}
+ 2x_{j}^{*}(\varepsilon)D_{B,j}^{T} & = & -\lambda^{T}(\varepsilon) A_{B} \\
\label{eq:KKT_lagrange_id_j}
c_{j} + 2{x_{B}^{*}}^{T}(\varepsilon)D_{B,j}
+ 2x_{j}^{*}(\varepsilon)D_{j,j}
+ 2\epsilon_{\hat{N}}^{T}D_{\hat{N},j} & = &
-\lambda^{T}(\varepsilon) A_{j} + \mu_{j}(\varepsilon)
\end{eqnarray}
Equation~(\ref{eq:KKT_lagrange_id_B}) together with the
feasibility constraints~(\ref{eq:QP_eps_basis_feasibility_B}),
(\ref{eq:QP_eps_basis_feasibility_N}) of
$(UQP(B_{\epsilon}))$ and the fact that $N=\hat{N} \cup \{j\}$ determines
$\lambda(\varepsilon)$, given $x_{B}^{*}(\varepsilon)$ and
$x_{j}^{*}(\varepsilon) = -\varepsilon^{j+1}$, by the linear equation system
\begin{equation}
M_{B}\left(
\begin{array}{c}
\lambda(\varepsilon) \\
\hline
x_{B}^{*}(\varepsilon)
\end{array}
\right)
=
\left(
\begin{array}{c}
b \\
\hline
-c_{B}
\end{array}
\right)
-
\left(
\begin{array}{c}
A_{N} \\
\hline
2D_{B,N}
\end{array}
\right)\epsilon_{N}
\end{equation}
with $M_{B}$ defined as
\begin{equation}
\label{def:M_B}
M_{B}:=\left(
\begin{array}{c|c}
0 & A_{B} \\
\hline
A_{B}^{T} & 2D_{B,B}
\end{array}
\right)
\end{equation}
By the definition of $QP_{\varepsilon}$-basis, $x_{B}^{*}(\varepsilon)$
is the unique optimal
solution to $(UQP(B_{\varepsilon}))$ and $A_{B}$ has full row rank. Thus
$\lambda(\varepsilon)$ is also unique, $M_{B}$ is regular and
therefore $M_{B}^{-1}$ exists. Note
that $M_{B}$ is the same as in the unperturbed problem.
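The test itself then presumably mirrors the unperturbed pricing step
of~\cite{Sven}: evaluating $\mu_{j}(\varepsilon)$ from
Equation~(\ref{eq:KKT_lagrange_id_j}) with
$x_{j}^{*}(\varepsilon) = -\varepsilon^{j+1}$,
\begin{equation*}
\mu_{j}(\varepsilon) =
c_{j} + A_{j}^{T}\lambda(\varepsilon)
+ 2D_{B,j}^{T}x_{B}^{*}(\varepsilon)
- 2D_{j,j}\varepsilon^{j+1}
+ 2D_{\hat{N},j}^{T}\epsilon_{\hat{N}},
\end{equation*}
the nonbasic variable \xe{j} qualifies as an entering variable if
$\mu_{j}(\varepsilon) < 0$, whereas $\mu_{j}(\varepsilon) \geq 0$ for all
nonbasic $j$ certifies optimality of the current basis.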
\subsubsection{Ratio Test Step 1}
Starting with a $QP_{\varepsilon}$-basis $B$ and an entering variable
\xe{j},
we want to find a new basis $B^{\prime} \subseteq B \cup \{j\}$ with
better objective function value.
Define $\hat{B}:=B \cup \{j\}$, then $x_{\hat{B}}^{*}(\varepsilon)$ with
$x_{j}^{*}(\varepsilon)=-\varepsilon^{j+1}$ is the optimal solution to
\begin{eqnarray*}
(UQP_{j}^{t}(\hat{B}_{\varepsilon})) & minimize &
c_{\hat{B} \cup \hat{N}}^{T}x_{\hat{B} \cup \hat{N}}(\varepsilon)
+ x_{\hat{B} \cup \hat{N}}^{T}(\varepsilon)
D_{\hat{B} \cup \hat{N},\hat{B} \cup \hat{N}}
x_{\hat{B} \cup \hat{N}}(\varepsilon) \\
& s.t. & A_{\hat{B}}x_{\hat{B}}(\varepsilon) =
b - A_{\hat{N}}x_{\hat{N}}(\varepsilon) \\
& & I_{\hat{N}}x_{\hat{N}}(\varepsilon) = \epsilon_{\hat{N}} \\
& & \xe{j} = - \varepsilon^{j+1} + t
\end{eqnarray*}
for $t=0$. $(UQP_{j}^{t}(\hat{B}_{\varepsilon}))$ has a unique solution
$x_{\hat{B}}^{*}(\varepsilon, t)$ for each value of t, given by
\begin{equation}
\label{eq:UQP_j_t_opt_explicit}
M_{B}\left(\begin{array}{c}
\lambda\left(\varepsilon, t\right) \\
\hline
x_{B}^{*}\left(\varepsilon, t\right) \\
\end{array}
\right)
=
\left(\begin{array}{c}
b \\
\hline
-c_{B} \\
\end{array}
\right)
-
\left(\begin{array}{c}
A_{N} \\
\hline
2D_{B,N} \\
\end{array}
\right) \epsilon_{N}
-t
\left(\begin{array}{c}
A_{j} \\
\hline
2D_{B,j} \\
\end{array}
\right)
\end{equation}
and $x_{j}^{*}\left(\varepsilon,t\right)= -\varepsilon^{j+1} + t$. This follows
from the KKT conditions given by Lemma~\ref{lemma:KKT_UQP(B)_epsilon} for the
reformulation of $(UQP_{j}^{t}(\hat{B}_{\varepsilon}))$ as
\begin{eqnarray*}
(UQP_{j}^{t}(\hat{B}_{\varepsilon}))
& minimize & c_{B \cup N}^{T} x_{B \cup N}(\varepsilon) +
x_{B \cup N}^{T}(\varepsilon)D_{B \cup N, B \cup N}
x_{B \cup N}(\varepsilon) \\
& s.t. & A_{B}x_{B}(\varepsilon) = b - A_{N}x_{N}(\varepsilon) \\
&& I_{N}x_{N}(\varepsilon) = \epsilon_{N}^{j}
\end{eqnarray*}
with $\epsilon_{N}^{j}:=\epsilon_{N} + te_{j}$, and the regularity of
$M_{B}$.
While increasing $t$ starting from zero, either some basic variable $i \in B$
reaches its bound, $\xe{i}=-\varepsilon^{i+1}$, or a local minimum of the
objective function is reached, that is, $\mu_{j}(\varepsilon)$ in
Equation~(\ref{eq:KKT_lagrange_id_j}) becomes zero.
We will show later that these two events never happen simultaneously for the
perturbed problem.
In order to derive $\mu_{j}\left(\varepsilon, t\right)$, define
\begin{eqnarray}
\label{def:sol_eps_zero_I}
\left(\begin{array}{c}
\lambda\left(\varepsilon, 0\right) \\
\hline
x_{B}^{*}\left(\varepsilon, 0\right)
\end{array}
\right)
&:=&M_{B}^{-1}
\left[
\left(\begin{array}{c}
b \\
\hline
-c_{B}
\end{array}
\right)
-
\left(\begin{array}{c}
A_{N} \\
\hline
2D_{B,N}
\end{array}
\right)\epsilon_{N}
\right]
\\
\left(\begin{array}{c}
q_{\lambda} \\
\hline
q_{x}
\end{array}
\right)
&:=&M_{B}^{-1}
\left(\begin{array}{c}
A_{j} \\
\hline
2D_{B,j}
\end{array}
\right)
\end{eqnarray}
such that Equation~(\ref{eq:UQP_j_t_opt_explicit}) becomes
\begin{equation}
\label{eq:UQP_j_t_opt_short}
\left(\begin{array}{c}
\lambda\left(\varepsilon, t\right) \\
\hline
x_{B}^{*}\left(\varepsilon, t\right)
\end{array}
\right)
=
\left(\begin{array}{c}
\lambda\left(\varepsilon, 0\right) \\
\hline
x_{B}^{*}\left(\varepsilon, 0\right)
\end{array}
\right)
-t
\left(\begin{array}{c}
q_{\lambda} \\
\hline
q_{x}
\end{array}
\right),
\end{equation}
$\mu_{j}\left(\varepsilon, t\right)$ can then, by using
Equations~(\ref{eq:KKT_lagrange_id_j}),(\ref{eq:UQP_j_t_opt_short}) and
$x_{j}^{*}\left(\varepsilon, t\right)= -\varepsilon^{j+1} + t$ be
expressed as
\begin{eqnarray}
\label{eq:mu_j_eps_t}
\mu_{j}\left(\varepsilon, t\right) & = & c_{j} +
A_{j}^{T}\lambda\left(\varepsilon, t\right)
+ 2D_{B,j}^{T}x_{B}^{*}\left(\varepsilon, t\right) +
2D_{j,j}x_{j}^{*}\left(\varepsilon, t\right)
+ 2D_{\hat{N}, j}^{T}\epsilon_{\hat{N}} \nonumber \\
& = & c_{j} + A_{j}^{T}\lambda\left(\varepsilon, 0\right)
+ 2D_{B,j}^{T}x_{B}^{*}\left(\varepsilon, 0\right) +
2D_{j,j}\epsilon_{j}
+ 2D_{\hat{N}, j}^{T}\epsilon_{\hat{N}} + \nonumber \\
& & t\left(2D_{j,j} - A_{j}^{T}q_{\lambda} - 2D_{B,j}^{T}q_{x}
\right) \\
& = & \mu_{j}\left(\varepsilon, 0\right) + t\nu
\nonumber
\end{eqnarray}
where
\begin{equation}
\label{def:nu}
\nu := 2D_{j,j} - A_{j}^{T}q_{\lambda} - 2D_{B,j}^{T}q_{x}
\end{equation}
and
\begin{equation}
\mu_{j}\left(\varepsilon, 0\right) :=
c_{j} + 2D_{N, j}^{T}\epsilon_{N} +
\left(A_{j}^{T} \left|\right. 2D_{B, j}^{T} \right)
\left(\begin{array}{c}
\lambda\left(\varepsilon, 0\right) \\
\hline
x_{B}^{*}\left(\varepsilon, 0\right)
\end{array}
\right)
\end{equation}
Using Definitions~(\ref{def:sol_eps_zero_I}), (\ref{def:nu}) and elementary
algebraic manipulation
$\mu_{j}(\varepsilon,0)$ may be written as
\begin{eqnarray*}
\mu_{j}\left(\varepsilon, 0\right) &=& c_{j} +
\left(A_{j}^{T} \left|\right. 2D_{B, j}^{T} \right)
M_{B}^{-1}
\left(\begin{array}{c}
b \\
\hline
-c_{B}
\end{array}
\right) + \\
&&
\left[2D_{N, j}^{T} -
\left(A_{j}^{T} \left|\right. 2D_{B, j}^{T} \right)
M_{B}^{-1}
\left(\begin{array}{c}
A_{N} \\
\hline
2D_{B,N}
\end{array}
\right)
\right]\epsilon_{N}
\end{eqnarray*}
Finally, setting $\varepsilon=0$ in the above, we obtain $\mu_{j}(0, 0)$, such
that $\mu_{j}(\varepsilon, 0)$ can be written in terms of $\mu_{j}(0,0)$ as
\begin{equation}
\label{eq:mu_j_eps_zero}
\mu_{j}\left(\varepsilon, 0\right) =
\mu_{j}\left(0,0\right) +
\left[
2D_{j, N} - \left(A_{j}^{T} \left| \right. 2D_{B, j}^{T} \right)
M_{B}^{-1}
\left(\begin{array}{c}
A_{N} \\
\hline
2D_{B, N}
\end{array}
\right)
\right]\epsilon_{N}
\end{equation}
When $\mu_{j}\left(\varepsilon, t\right)$ becomes zero for some $t > 0$,
$\lambda\left(\varepsilon, t\right)$ and $\mu_{\hat{B}}(\varepsilon)$ satisfy
the KKT conditions of Lemma~\ref{lemma:KKT_QP(B)_epsilon}, thus
$x_{B}^{*}\left(\varepsilon, t\right)$,
$x_{j}^{*}\left(\varepsilon, t\right)$ is an optimal solution to
$(QP(\hat{B}_{\varepsilon}))$. Lemma (2.7) in~\cite{Sven} then asserts the
additional requirements of uniqueness of the solution as well as full row rank
of $A_{\hat{B}}$ for $\hat{B}$ being the new $QP_{\varepsilon}$-basis.
If the first case happens, we implicitly add the constraint
$\xe{i}=-\varepsilon^{i+1}$
to $(UQP_{j}^{t}(\hat{B}_{\varepsilon}))$ by removing $i$ from the set $B$.
If $M_{B \setminus \{i\}}$ is regular,
we still have a unique optimal solution to
$(UQP_{j}^{t}((\hat{B} \setminus \{i\})_{\varepsilon}))$ for each value of
$t$ and Ratio Test Step 1 is
iterated. Otherwise we proceed with Ratio Test Step 2.
\subsubsection{Ties in Ratio Test Step 1}
\label{sec:Ties_ratio_test_step_1}
Consider two basic variables $i_{1}, i_{2} \in B$ involved in a tie in the
unperturbed problem. Setting $\varepsilon=0$ in
Equation~(\ref{eq:UQP_j_t_opt_short}), this can be expressed as
\begin{equation}
\check{t}\left(0, B\right)=
\frac{\left(\begin{array}{c}
\lambda\left(0, 0 \right) \\
\hline
x_{B}^{*}\left(0, 0\right)
\end{array}
\right)_{x_{i_{1}}}}{q_{x_{i_{1}}}}
=
\frac{\left(\begin{array}{c}
\lambda\left(0, 0 \right) \\
\hline
x_{B}^{*}\left(0, 0\right)
\end{array}
\right)_{x_{i_{2}}}}{q_{x_{i_{2}}}}
\end{equation}
where
\begin{equation}
\check{t}\left(0, B\right):=\min_{i \in B: q_{x_{i}} > 0}
\frac{\left(\begin{array}{c}
\lambda\left(0, 0\right) \\
\hline
x_{B}^{*}\left(0, 0\right)
\end{array}
\right)_{x_{i}}}{q_{x_{i}}}
\end{equation}
According to Definition~(\ref{def:QP_eps}) of the perturbed problem,
Equation~(\ref{eq:UQP_j_t_opt_short}) and
Definition~(\ref{def:sol_eps_zero_I}),
$\check{t}(\varepsilon, B)$
is defined as
\begin{eqnarray}
\label{def:t_min_eps}
\check{t}\left(\varepsilon, B\right) & := &
\min_{i \in B: q_{x_{i}} > 0}
\frac{\left(\begin{array}{c}
\lambda\left(\varepsilon, 0 \right) \\
\hline
x_{B}^{*}\left(\varepsilon, 0 \right)
\end{array}
\right)_{x_{i}}+ \varepsilon^{i+1}}{q_{x_{i}}} \\
&=&
\check{t}\left(0, B \right) +
\min_{i \in B: q_{x_{i}} > 0} \px{i}{Q_{1}}{B}
\end{eqnarray}
where
\begin{eqnarray}
\label{def:p_x_i_Q_1}
\px{i}{Q_{1}}{B} &:=&
\frac{\varepsilon^{i+1} -
\left(M_{B}^{-1}
\left(\begin{array}{c}
A_{N} \\
\hline
2D_{B, N}
\end{array}
\right)
\right)_{x_{i}}\epsilon_{N}}{q_{x_{i}}} \\
&=&
\frac{\varepsilon^{i+1} -
\left(M_{B}^{-1}
\left(\begin{array}{c}
A_{N \setminus \{j\}} \\
\hline
2D_{B, N \setminus \{j\}}
\end{array}
\right)
\right)_{x_{i}}\epsilon_{N \setminus \{j\}}}{q_{x_{i}}}
+ \varepsilon^{j+1}
\end{eqnarray}
Therefore, in order to resolve the tie in the perturbed problem, we have
to compare the polynomials \px{i_{1}}{Q_{1}}{B} and \px{i_{2}}{Q_{1}}{B}.
Again, as in phaseI we always have either
$\px{i_{1}}{Q_{1}}{B} < \px{i_{2}}{Q_{1}}{B}$ or
$\px{i_{1}}{Q_{1}}{B} > \px{i_{2}}{Q_{1}}{B}$, since the
$\frac{\varepsilon^{i+1}}{q_{x_{i}}}$ terms are unique to each
\px{i}{Q_{1}}{B}.
Ties between a basic variable $x_{i}(0)$ reaching its lower bound $0$ and
$\mu_{j}\left(0, t\right)$
becoming zero in the unperturbed problem, that is,
according to Equation~(\ref{eq:mu_j_eps_t}) with $\varepsilon=0$,
\begin{equation*}
\check{t}\left(0, B\right) =
-\frac{\mu_{j}\left(0, 0\right)}{\nu}
\end{equation*}
can be resolved in the perturbed problem,
given the above equality, by comparing
\begin{equation*}
\check{t}(\varepsilon, B)-\check{t}(0, B) = \px{i}{Q_{1}}{B}
\end{equation*}
and the expression
\begin{equation*}
\frac{-\mu_{j}(\varepsilon, 0)+ \mu_{j}(0,0)}{\nu}.
\end{equation*}
Therefore, taking into account Equation~(\ref{eq:mu_j_eps_zero}),
for the latter expression above, the following two
polynomials in $\varepsilon$ have to be compared in order to resolve the tie
\begin{eqnarray}
\px{i}{Q_{1}}{B} & = &
\frac{\varepsilon^{i+1} -
\left(M_{B}^{-1}
\left(\begin{array}{c}
A_{N \setminus \{j\}} \\
\hline
2D_{B, N \setminus \{j\}}
\end{array}
\right)
\right)_{x_{i}}\epsilon_{N \setminus \{j\}}}{q_{x_{i}}}
+ \varepsilon^{j+1} \\
\label{def:p_mu_j_Q_1}
\pmu{Q_{1}}{B} & := &
-\frac{2D_{j, N} -
\left(A_{j}^{T} \left| \right. 2D_{B, j}^{T} \right)
M_{B}^{-1}
\left(\begin{array}{c}
A_{N} \\
\hline
2D_{B,N}
\end{array}
\right)}{\nu}
\epsilon_{N}
\nonumber \\
&=&
-\frac{2D_{j, N \setminus \{j\}} -
\left(A_{j}^{T} \left| \right. 2D_{B, j}^{T} \right)
M_{B}^{-1}
\left(\begin{array}{c}
A_{N \setminus \{j\}} \\
\hline
2D_{B,N \setminus \{j\}}
\end{array}
\right)}{\nu}
\epsilon_{N \setminus \{j\}}
\nonumber \\
&&
+\varepsilon^{j+1}
\end{eqnarray}
The term $\frac{\varepsilon^{i+1}}{q_{x_{i}}}$ is again unique to
\px{i}{Q_{1}}{B}, such that the tie can always
be resolved.
\subsubsection{Ratio Test Step 2}
Let $B$ be the set of basic variables after the last iteration of
Ratio Test Step~1. Since
$M_{B}$ is no longer regular, Equation~(\ref{eq:UQP_j_t_opt_explicit}) no
longer
determines unique solutions to $(UQP_{j}^{t}(\hat{B}_{\varepsilon}))$ for arbitrary
$t$.
Reconsidering the KKT conditions for $(QP(\hat{B}_{\epsilon}))$, that is
Equations~(\ref{eq:KKT_lagrange_id}),(\ref{eq:KKT_compl_slackness}) yields
\begin{equation}
\label{eq:QP_j_mu_opt_explicit}
M_{\hat{B}}
\left(\begin{array}{c}
\lambda\left(\varepsilon\right) \\
\hline
x_{B}^{*}\left(\varepsilon\right) \\
\hline
x_{j}^{*}\left(\varepsilon\right)
\end{array}
\right)
=
\left(\begin{array}{c}
b \\
\hline
-c_{B} \\
\hline
-c_{j}
\end{array}
\right)
-
\left(\begin{array}{c}
A_{\hat{N}} \\
\hline
2D_{B, \hat{N}} \\
\hline
2D_{j, \hat{N}}
\end{array}
\right)\epsilon_{\hat{N}}
+ \mu_{j}\left(\varepsilon\right)
\left(\begin{array}{c}
0 \\
\hline
0 \\
\hline
1
\end{array}
\right)
\end{equation}
In case $M_{\hat{B}}$ is singular, we proceed directly to Step 3. Otherwise,
the system of linear equations above has a unique solution for each value of
$\mu_{j}\left(\varepsilon\right)$. The solutions are determined by a linear
function in $\mu_{j}\left(\varepsilon\right)$, which can be written as
\begin{equation}
\label{eq:QP_j_mu_opt_short}
\left(\begin{array}{c}
\lambda\left(\varepsilon, \mu_{j}\left(\varepsilon\right)\right) \\
\hline
x_{\hat{B}}^{*}\left(\varepsilon,
\mu_{j}\left(\varepsilon\right)\right)
\end{array}
\right)
=
\left(\begin{array}{c}
\lambda\left(\varepsilon, 0\right) \\
\hline
x_{\hat{B}}^{*}\left(\varepsilon, 0\right)
\end{array}
\right)
+ \mu_{j}(\varepsilon)
\left(\begin{array}{c}
p_{\lambda} \\
\hline
p_{x_{\hat{B}}}
\end{array}
\right)
\end{equation}
with
\begin{eqnarray}
\label{def:sol_eps_zero_II}
\left(\begin{array}{c}
\lambda\left(\varepsilon, 0\right) \\
\hline
x_{B}^{*}\left(\varepsilon, 0\right) \\
\hline
x_{j}^{*}\left(\varepsilon, 0\right)
\end{array}
\right)
&:=&M_{\hat{B}}^{-1}
\left[
\left(\begin{array}{c}
b \\
\hline
-c_{B} \\
\hline
-c_{j}
\end{array}
\right)
-
\left(\begin{array}{c}
A_{\hat{N}} \\
\hline
2D_{B, \hat{N}} \\
\hline
2D_{j, \hat{N}}
\end{array}
\right)\epsilon_{\hat{N}}
\right]
\\
\left(\begin{array}{c}
p_{\lambda} \\
\hline
p_{x_{B}} \\
\hline
p_{x_{j}}
\end{array}
\right)
&:=&M_{\hat{B}}^{-1}
\left(\begin{array}{c}
0 \\
\hline
0 \\
\hline
1
\end{array}
\right).
\end{eqnarray}
Any solution
$x_{\hat{B}}^{*}\left(\varepsilon,\mu_{j}\left(\varepsilon\right)\right)$ is
feasible for $(UQP(\hat{B}))$, and it is optimal if
$\mu_{j}\left(\varepsilon\right)=0$.
Let $\check{t}_{1}(\varepsilon, \tilde{B})$ be the value of $t$
for which $M_{B}$ became singular in the last iteration of Ratio Test Step 1
of the perturbed problem, then
$x_{\hat{B}}^{*}(\varepsilon,
\mu_{j}(\varepsilon, \check{t}_{1}(\varepsilon, \tilde{B})))$
is the current feasible solution at the beginning of Ratio Test Step 2.
While growing $\mu_{j}(\varepsilon)$ from
$\mu_{j}(\varepsilon,\check{t}_{1}(\varepsilon, \tilde{B})
)$
towards zero,
again, either one of the remaining basic variables reaches its lower bound or
a local
minimum of the objective function is reached. In the latter case
$\mu_{j}(\varepsilon)$ equals zero and we have found an optimal solution
$x_{\hat{B}}^{*}\left(\varepsilon, 0\right)$ to $(UQP(\hat{B}_{\varepsilon}))$,
which by
Lemma~\ref{lemma:strict} is also an optimal solution to the constrained
problem
$(QP(\hat{B}_{\varepsilon}))$. Uniqueness of the solution follows from the
regularity of $M_{\hat{B}}$, which also implies that $\hat{B}$ is the new basis
in that case.
On the other hand, if some basic variable \xe{k} reaches its lower bound, we
implicitly add the constraint $\xe{k}=-\varepsilon^{k+1}$ to
$(UQP(\hat{B}_{\varepsilon}))$ by removing $k$ from $\hat{B}$. If
$M_{\hat{B} \setminus \{k\}}$ stays regular, we still obtain unique solutions
of Equation~(\ref{eq:QP_j_mu_opt_explicit}) for arbitrary values of
$\mu_{j}(\varepsilon)$. In this case Ratio Test Step 2 is iterated,
otherwise we continue with Step 3.
\subsubsection{Ties in Ratio Test Step 2}
\label{sec:Ties_ratio_test_step_2}
Consider two basic variables $i_{1}, i_{2} \in \hat{B}$ involved in a tie in
the unperturbed problem. Setting $\varepsilon=0$ in
Equation~(\ref{eq:QP_j_mu_opt_short}), this can be expressed as
\begin{equation}
\check{\mu}_{j}(0, \hat{B}) =
\frac{\left(\begin{array}{c}
\lambda\left(0,0\right) \\
\hline
x_{\hat{B}}^{*}\left(0,0\right)
\end{array}
\right)_{x_{i_{1}}}}{p_{x_{i_{1}}}}
=
\frac{\left(\begin{array}{c}
\lambda\left(0,0\right) \\
\hline
x_{\hat{B}}^{*}\left(0,0\right)
\end{array}
\right)_{x_{i_{2}}}}{p_{x_{i_{2}}}}
\end{equation}
where
\begin{equation}
\label{def:hat_mu_j_min_0}
\check{\mu}_{j}(0, \hat{B}) :=
\min_{i \in \hat{B}: p_{x_{i}} < 0}
\frac{\left(\begin{array}{c}
\lambda\left(0,0\right) \\
\hline
x_{\hat{B}}^{*}\left(0,0\right)
\end{array}
\right)_{x_{i}}}{p_{x_{i}}}
\end{equation}
Again, similar to ties among basic variables in Ratio Test Step 1,
by Definition~(\ref{def:QP_eps}) of the perturbed problem,
Equation~(\ref{eq:QP_j_mu_opt_short}) and
Definition~(\ref{def:sol_eps_zero_II}),
$\check{\mu}_{j}(\varepsilon, \hat{B})$ is defined as
\begin{eqnarray}
\label{def:hat_mu_j_min_eps}
\check{\mu}_{j}(\varepsilon, \hat{B}) & := &
\min_{i \in \hat{B}: p_{x_{i}} < 0}
\frac{\left(\begin{array}{c}
\lambda\left(\varepsilon, 0\right) \\
\hline
x_{\hat{B}}^{*}\left(\varepsilon,0\right)
\end{array}
\right)_{x_{i}}+ \varepsilon^{i+1}}{p_{x_{i}}} \\
&=&
\check{\mu}_{j}(0, \hat{B}) +
\min_{i \in \hat{B}: p_{x_{i}} < 0} p_{x_{i}}^{(Q_{2})}
(\varepsilon, \hat{B})
\end{eqnarray}
where
\begin{eqnarray}
\label{def:p_x_i_Q_2}
p_{x_{i}}^{(Q_{2})}(\varepsilon, \hat{B}) &:=&
\frac{\varepsilon^{i+1} -
\left(M_{\hat{B}}^{-1}
\left(\begin{array}{c}
A_{\hat{N}} \\
\hline
2D_{\hat{B}, \hat{N}}
\end{array}
\right)
\right)_{x_{i}}\epsilon_{\hat{N}}}{p_{x_{i}}}
\end{eqnarray}
Therefore in order to resolve the tie in the perturbed problem, we have to
compare the polynomials \px{i_{1}}{Q_{2}}{\hat{B}} and
\px{i_{2}}{Q_{2}}{\hat{B}}. Again, because of the term
$\frac{\varepsilon^{i+1}}{p_{x_{i}}}$ we always have either
$\px{i_{1}}{Q_{2}}{\hat{B}} < \px{i_{2}}{Q_{2}}{\hat{B}}$ or
$\px{i_{1}}{Q_{2}}{\hat{B}} > \px{i_{2}}{Q_{2}}{\hat{B}}$.
Ties between a basic variable $x_{i}(0)$ and $\mu_{j}(0)$ becoming zero
in the unperturbed problem, that is
\begin{equation}
\label{eq:tie_unpert_ratio_test_step_2}
\check{\mu}_{j}(0, \hat{B}) = 0,
\end{equation}
are resolved in the perturbed problem by comparing
$\check{\mu}_{j}(\varepsilon, \hat{B})$ with the constant zero polynomial;
because of the term $\frac{\varepsilon^{i+1}}{p_{x_{i}}}$ we always have
either $\px{i}{Q_{2}}{\hat{B}} < 0$ or $\px{i}{Q_{2}}{\hat{B}} > 0$.
\subsection{Relaxation of the Definition of a QP-basis}
The only place where Lemma~(\ref{lemma:strict}) is used in the last subsection
is Ratio Test Step~2, where it is used to show that the optimal solution
$x_{\hat{B}}^{*}(\varepsilon, 0)$ to $(UQP(\hat{B}_{\varepsilon}))$
is also optimal to the constrained problem $QP(\hat{B}_{\varepsilon})$.
Since only this direction is needed and Lemma~(\ref{lemma:strict}) still holds
for this direction if
$x_{\hat{B}}^{*}(\varepsilon, 0) \geq \epsilon_{\hat{B}}$,
we can relax the definition of a $QP_{\varepsilon}$-basis, that is, we
weaken the first requirement on the optimal solution. Since for the
perturbed problem the first requirement of a $QP_{\varepsilon}$-basis is always
met with strict inequality, we state the relaxed definition of a $QP$-basis for
the unperturbed problem.
\begin{definition}
A subset $B$ of the variables of a quadratic program in standard form defines
a $QP$-basis iff
\begin{enumerate}
\item the unconstrained subproblem
\begin{eqnarray}
\mbox{(UQP($B$))} & minimize & c_{B}^{T}x_{B}
+ x_{B}^{T}D_{B, B}
x_{B} \nonumber\\
\label{eq:QP_basis_feasibility_B}
& s.t. &A_{B} x_{B} = b
\end{eqnarray}
has a unique optimal solution $x_{B}^{*} \geq 0$ and
\item $A_{B}$ has full row rank, $rank(A_{B})=m$.
\end{enumerate}
\end{definition}
\subsection{The lexicographic method in the context of reduced bases}
The polynomials used in the perturbed problem to decide possible ties involve
the full basis inverses $M_{B}^{-1}$ and $A_{B}^{-1}$, respectively. Since
these are not directly at our disposal, we express the full basis inverses in
terms of the reduced ones, $\check{M}_{B}^{-1}$ and $\check{A}_{B}^{-1}$.
\subsubsection{Expanded basis matrix inverse QP-case}
If the basis heading is given as $\left[C, S_{B}, B_{O}, B_{S} \right]$ the
basis matrix $M_{B}$ has the following form,
\begin{equation*}
\label{def:basis_matrix_form}
M_{B}:=
\left(\begin{array}{c|c|c|c}
0 & 0 & A_{C, B_{O}} & A_{C, B_{S}} \\
\hline
0 & 0 & A_{S_{B}, B_{O}} & A_{S_{B}, B_{S}} \\
\hline
A_{C, B_{O}}^{T} & A_{S_{B}, B_{O}}^{T} & D_{B_{O}, B_{O}}
& D_{B_{O}, B_{S}} \\
\hline
A_{C, B_{S}}^{T} & A_{S_{B}, B_{S}}^{T} & D_{B_{S}, B_{O}}
& D_{B_{S}, B_{S}} \\
\end{array}
\right),
\end{equation*}
since $D_{B_{O}, B_{S}} = D_{B_{S}, B_{O}} = 0$ and
$D_{B_{S}, B_{S}} = 0$ as well as $A_{C, B_{S}}=0$, this boils down to
\begin{equation}
\label{def:basis_matrix}
M_{B}:=
\left(\begin{array}{c|c|c|c}
0 & 0 & A_{C, B_{O}} & 0 \\
\hline
0 & 0 & A_{S_{B}, B_{O}} & A_{S_{B}, B_{S}} \\
\hline
A_{C, B_{O}}^{T} & A_{S_{B}, B_{O}}^{T} & D_{B_{O}, B_{O}}
& 0 \\
\hline
0 & A_{S_{B}, B_{S}}^{T} & 0
& 0 \\
\end{array}
\right).
\end{equation}
Note that the block $A_{S_{B}, B_{S}}$ is a signed permutation matrix with
$\pm 1$ nonzero entries and is therefore orthogonal, such that
\begin{equation}
\label{eq:A_S_BxB_S_inv}
A_{S_{B}, B_{S}}^{T} = A_{S_{B}, B_{S}}^{-1}
\end{equation}
holds.
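For instance, with two basic slack variables stemming from one $\leq$- and
one $\geq$-constraint and a suitable ordering of the headings, the block could
read
\begin{equation*}
A_{S_{B}, B_{S}} =
\left(\begin{array}{cc}
0 & -1 \\
1 & 0
\end{array}
\right),
\qquad
A_{S_{B}, B_{S}}^{T}A_{S_{B}, B_{S}} =
\left(\begin{array}{cc}
1 & 0 \\
0 & 1
\end{array}
\right).
\end{equation*}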
In order to compute the blocks of $M_{B}^{-1}$ in terms of $A$, $D$ and
$\check{M}_{B}^{-1}$ we compare the corresponding
components of
\begin{equation}
\check{M}_{B}^{-1}
\left(\begin{array}{c}
b_{C} \\
\hline
-c_{B_{O}}
\end{array}
\right)
=
\left(\begin{array}{c}
\lambda_{C} \\
\hline
x_{B_{O}}
\end{array}
\right),
\quad
M_{B}^{-1}
\left(\begin{array}{c}
b_{C} \\
\hline
b_{S_{B}} \\
\hline
-c_{B_{O}} \\
\hline
-c_{B_{S}}
\end{array}
\right)
=
\left(\begin{array}{c}
\lambda_{C} \\
\hline
\lambda_{S_{B}} \\
\hline
x_{B_{O}} \\
\hline
x_{B_{S}}
\end{array}
\right)
\end{equation}
for any choice of $b$ and $c$. Note that $c_{B_{S}}=0$ and, by \cite{Sven},
Section 2.4, $\lambda_{S_{B}}=0$.
For the first row of blocks of $M_{B}^{-1}$ we obtain, using $c_{B_{S}}=0$,
\begin{eqnarray*}
\left(\check{M}_{B}^{-1}\right)_{C, C} b_{C}
-\left(\check{M}_{B}^{-1}\right)_{C, B_{O}} c_{B_{O}}
&=&
\left(M_{B}^{-1}\right)_{C, C} b_{C}
+\left(M_{B}^{-1}\right)_{C, S_{B}} b_{S_{B}} - \\
&&
\left(M_{B}^{-1}\right)_{C, B_{O}} c_{B_{O}}
\end{eqnarray*}
which yields
\begin{eqnarray}
\label{eq:M_B_inv_exp_CxC}
\left(M_{B}^{-1}\right)_{C,C} &=& \left(\check{M}_{B}^{-1}\right)_{C,C} \\
\label{eq:M_B_inv_exp_CxS_B}
\left(M_{B}^{-1}\right)_{C,S_{B}} &=& 0 \\
\left(M_{B}^{-1}\right)_{C,B_{O}}&=&\left(\check{M}_{B}^{-1}\right)_{C,B_{O}}
\nonumber
\end{eqnarray}
For the third row of blocks of $M_{B}^{-1}$ we obtain likewise
\begin{eqnarray*}
\left(\check{M}_{B}^{-1}\right)_{B_{O}, C} b_{C}
-\left(\check{M}_{B}^{-1}\right)_{B_{O}, B_{O}} c_{B_{O}}
&=&
\left(M_{B}^{-1}\right)_{B_{O}, C} b_{C}
+\left(M_{B}^{-1}\right)_{B_{O}, S_{B}} b_{S_{B}} - \\
&&
\left(M_{B}^{-1}\right)_{B_{O}, B_{O}} c_{B_{O}}
\end{eqnarray*}
which yields
\begin{eqnarray}
\label{eq:M_B_inv_exp_B_OxC}
\left(M_{B}^{-1}\right)_{B_{O},C} &=&
\left(\check{M}_{B}^{-1}\right)_{B_{O},C} \\
\label{eq:M_B_inv_exp_B_OxS_B}
\left(M_{B}^{-1}\right)_{B_{O},S_{B}} &=& 0 \\
\label{eq:M_B_inv_exp_B_OxB_O}
\left(M_{B}^{-1}\right)_{B_{O},B_{O}}&=&
\left(\check{M}_{B}^{-1}\right)_{B_{O},B_{O}}
\end{eqnarray}
For the fourth row of blocks of $M_{B}^{-1}$ we use the fact that
$A_{S_{B},B_{S}}$ is regular, therefore using
Equation~(\ref{eq:A_S_BxB_S_inv})
\begin{equation}
\label{eq:x_B_S}
x_{B_{S}} = A_{S_{B},B_{S}}^{T}\left(b_{S_{B}} -
A_{S_{B}, B_{O}}x_{B_{O}}\right)
\end{equation}
must hold by definition of the slack variables. Therefore comparing
corresponding components in
\begin{eqnarray*}
\lefteqn{A_{S_{B},B_{S}}^{T}\left(b_{S_{B}} -
A_{S_{B}, B_{O}}
\left[\left(\check{M}_{B}^{-1}\right)_{B_{O}, C}b_{C}
- \left(\check{M}_{B}^{-1}\right)_{B_{O}, B_{O}} c_{B_{O}}\right]\right)
= } \\
& & \left(M_{B}^{-1}\right)_{B_{S}, C} b_{C}
+\left(M_{B}^{-1}\right)_{B_{S}, S_{B}} b_{S_{B}}
-\left(M_{B}^{-1}\right)_{B_{S}, B_{O}} c_{B_{O}}
\end{eqnarray*}
yields
\begin{eqnarray}
\label{eq:M_B_inv_exp_B_SxC}
\left(M_{B}^{-1}\right)_{B_{S}, C} &=&
\alpha\left(\check{M}_{B}^{-1}\right)_{B_{O},C} \\
\label{eq:M_B_inv_exp_B_SxS_B}
\left(M_{B}^{-1}\right)_{B_{S}, S_{B}} &=&
A_{S_{B}, B_{S}}^{T} \\
\label{eq:M_B_inv_exp_B_SxB_O}
\left(M_{B}^{-1}\right)_{B_{S}, B_{O}} &=&
\alpha\left(\check{M}_{B}^{-1}\right)_{B_{O},B_{O}}
\end{eqnarray}
if we define $\alpha$ as
\begin{equation}
\label{def:alpha}
\alpha := -A_{S_{B}, B_{S}}^{T}A_{S_{B}, B_{O}}
\end{equation}
For the second row of blocks we take into account that $\lambda_{S_{B}}=0$,
that is,
\begin{equation*}
\left(M_{B}^{-1}\right)_{S_{B}, C} b_{C}
+\left(M_{B}^{-1}\right)_{S_{B}, S_{B}} b_{S_{B}}
-\left(M_{B}^{-1}\right)_{S_{B}, B_{O}} c_{B_{O}}
= 0.
\end{equation*}
This yields, using Equations~(\ref{eq:M_B_inv_exp_CxS_B})
and~(\ref{eq:M_B_inv_exp_B_OxS_B}) and the fact that $M_{B}^{-1}$ is symmetric,
\begin{equation}
\label{eq:M_B_inv_exp_S_BxS_B}
\left(M_{B}^{-1}\right)_{S_{B}, S_{B}} = 0
\end{equation}
In order to obtain the last yet unknown block
$\left(M_{B}^{-1}\right)_{B_{S}, B_{S}}$
we multiply the last row of blocks of $M_{B}^{-1}$ by
the second column of blocks of $M_{B}$ yielding
\begin{equation*}
\alpha\left(\check{M}_{B}^{-1}\right)_{B_{O}, B_{O}} A_{S_{B}, B_{O}}^{T}
+ \left(M_{B}^{-1}\right)_{B_{S}, B_{S}} A_{S_{B}, B_{S}}^{T}
= 0
\end{equation*}
Using Equation~(\ref{eq:A_S_BxB_S_inv}) and
Definition~(\ref{def:alpha}), we obtain
\begin{equation}
\label{eq:M_B_inv_exp_B_SxB_S}
\left(M_{B}^{-1}\right)_{B_{S}, B_{S}} =
\alpha\left(\check{M}_{B}^{-1}\right)_{B_{O}, B_{O}}\alpha^{T}
\end{equation}
Collecting the different blocks, given by
Equations~(\ref{eq:M_B_inv_exp_CxC}), (\ref{eq:M_B_inv_exp_CxS_B}),
(\ref{eq:M_B_inv_exp_S_BxS_B}), (\ref{eq:M_B_inv_exp_B_OxC}),
(\ref{eq:M_B_inv_exp_B_OxS_B}), (\ref{eq:M_B_inv_exp_B_OxB_O}),
(\ref{eq:M_B_inv_exp_B_SxC}), (\ref{eq:M_B_inv_exp_B_SxS_B}),
(\ref{eq:M_B_inv_exp_B_SxB_O}), and~(\ref{eq:M_B_inv_exp_B_SxB_S}),
and taking into account Equation~(\ref{eq:A_S_BxB_S_inv}),
we can finally express $M_{B}^{-1}$ in terms
of $\check{M}_{B}^{-1}$, $A$ and $D$, given the basis heading
$\left[C, S_{B}, B_{O}, B_{S}\right]$, as
\begin{equation}
\label{eq:M_B_inv_exp}
M_{B}^{-1}=
\left(\begin{array}{c|c|c|c}
\left(\check{M}_{B}^{-1}\right)_{C,C} &
0 &
\left(\check{M}_{B}^{-1}\right)_{C,B_{O}} &
\left(\check{M}_{B}^{-1}\right)_{C, B_{O}}\alpha^{T} \\
\hline
0 &
0 &
0 &
A_{S_{B},B_{S}} \\
\hline
\left(\check{M}_{B}^{-1}\right)_{B_{O}, C} &
0 &
\left(\check{M}_{B}^{-1}\right)_{B_{O}, B_{O}} &
\left(\check{M}_{B}^{-1}\right)_{B_{O},B_{O}}\alpha^{T} \\
\hline
\alpha\left(\check{M}_{B}^{-1}\right)_{B_{O},
C} &
A_{S_{B}, B_{S}}^{T} &
\alpha\left(\check{M}_{B}^{-1}\right)_{B_{O},
B_{O}} &
\alpha\left(\check{M}_{B}^{-1}\right)_{B_{O}, B_{O}}\alpha^{T}
\end{array}
\right)
\end{equation}
where
\begin{equation*}
\alpha=-A_{S_{B}, B_{S}}^{T}A_{S_{B}, B_{O}}
\end{equation*}
\subsubsection{Expanded basis matrix inverse LP-case}
If the basis heading is given as $\left[B_{O}, B_{S} \right]$ the
basis matrix $A_{B}$ has the following form,
\begin{equation}
A_{B}:=
\left(\begin{array}{c|c}
A_{C, B_{O}} & A_{C, B_{S}} \\
\hline
A_{S_{B}, B_{O}} & A_{S_{B}, B_{S}}
\end{array}
\right).
\end{equation}
Again, we compare corresponding components of
\begin{equation}
\check{A}_{B}^{-1}b_{C}, \quad \quad
A_{B}^{-1}
\left(\begin{array}{c}
b_{C} \\
\hline
b_{S_{B}}
\end{array}
\right)
\end{equation}
For the first row of blocks of $A_{B}^{-1}$ we obtain,
\begin{equation*}
\left(A_{B}^{-1}\right)_{C, B_{O}}b_{C}
+ \left(A_{B}^{-1}\right)_{C, B_{S}}b_{S_{B}} = \check{A}_{B}^{-1}b_{C}
\end{equation*}
which yields,
\begin{eqnarray}
\label{eq:A_B_inv_exp_CxB_O}
\left(A_{B}^{-1}\right)_{C, B_{O}} &=&\check{A}_{B}^{-1} \\
\label{eq:A_B_inv_exp_CxB_S}
\left(A_{B}^{-1}\right)_{C, B_{S}} &=& 0
\end{eqnarray}
For the second row of blocks of $A_{B}^{-1}$ we obtain,
using Equation~(\ref{eq:x_B_S}),
\begin{equation*}
\left(A_{B}^{-1}\right)_{S_{B}, B_{O}} b_{C}
+ \left(A_{B}^{-1}\right)_{S_{B}, B_{S}} b_{S_{B}}
=
A_{S_{B}, B_{S}}^{T}\left(b_{S_{B}} - A_{S_{B}, B_{O}}
\check{A}_{B}^{-1} b_{C} \right)
\end{equation*}
which yields
\begin{eqnarray}
\label{eq:A_B_inv_exp_S_BxB_O}
\left(A_{B}^{-1}\right)_{S_{B}, B_{O}}
&=&
-A_{S_{B}, B_{S}}^{T}A_{S_{B}, B_{O}}\check{A}_{B}^{-1} \\
\label{eq:A_B_inv_exp_S_BxB_S}
\left(A_{B}^{-1}\right)_{S_{B}, B_{S}}
&=&
A_{S_{B}, B_{S}}^{T}
\end{eqnarray}
Collecting the blocks given by Equations~(\ref{eq:A_B_inv_exp_CxB_O}),
(\ref{eq:A_B_inv_exp_CxB_S}), (\ref{eq:A_B_inv_exp_S_BxB_O})
and~(\ref{eq:A_B_inv_exp_S_BxB_S}) and using
Definition~(\ref{def:alpha}) we obtain $A_{B}^{-1}$ in terms of
$\check{A}_{B}^{-1}$ and $A$ as
\begin{equation}
\label{eq:A_B_inv_exp}
A_{B}^{-1}=
\left(\begin{array}{c|c}
\check{A}_{B}^{-1} & 0 \\
\hline
\alpha\check{A}_{B}^{-1} & A_{S_{B}, B_{S}}^{T}
\end{array}
\right)
\end{equation}
where
\begin{equation*}
\alpha = -A_{S_{B}, B_{S}}^{T}A_{S_{B}, B_{O}}
\end{equation*}
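As a brief consistency check of Equation~(\ref{eq:A_B_inv_exp}), and assuming
(as the construction above implies) that $\check{A}_{B} = A_{C, B_{O}}$,
that $A_{C, B_{S}} = 0$, and that
$A_{S_{B}, B_{S}}^{T}A_{S_{B}, B_{S}} = I$ as provided by
Equation~(\ref{eq:A_S_BxB_S_inv}), block multiplication gives
\begin{equation*}
A_{B}^{-1}A_{B}=
\left(\begin{array}{c|c}
\check{A}_{B}^{-1}A_{C, B_{O}} &
\check{A}_{B}^{-1}A_{C, B_{S}} \\
\hline
\alpha\check{A}_{B}^{-1}A_{C, B_{O}} + A_{S_{B}, B_{S}}^{T}A_{S_{B}, B_{O}} &
\alpha\check{A}_{B}^{-1}A_{C, B_{S}} + A_{S_{B}, B_{S}}^{T}A_{S_{B}, B_{S}}
\end{array}
\right)
=
\left(\begin{array}{c|c}
I & 0 \\
\hline
\alpha - \alpha & I
\end{array}
\right),
\end{equation*}
that is, the identity, which confirms the expression for $A_{B}^{-1}$.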
\section{Evaluating coefficients of the tie breaking polynomials}
In order to compute the coefficients of the tie breaking polynomials we will
need to reference entries of the block matrices involved; for technical reasons
these block matrices may be permuted. We therefore introduce the following
permutations
\begin{equation}
\beta_{O}: B_{O} \rightarrow B_{O}, \quad
\beta_{S}: B_{S} \rightarrow B_{S}
\end{equation}
\begin{equation}
\gamma_{C}: E \cup S_{N} \rightarrow E \cup S_{N}, \quad
\gamma_{S_{B}}: S_{B} \rightarrow S_{B}
\end{equation}
where the permutations are defined with respect to the headings in ascending
order.
An expression that will often be encountered in the next subsections is the
following: given $i \in B_{S}$ we want to compute
$(\alpha)_{\beta_{S}(i)}$ as a subexpression,
\begin{eqnarray}
\left(\alpha\right)_{\beta_{S}(i)}
&=&
-\left(A_{S_{B}, B_{S}}^{T}\right)_{\beta_{S}(i)}A_{S_{B}, B_{O}}
\nonumber \\
&=&
-A_{\sigma(i), i}\left(A_{S_{B}, B_{O}}\right)_{\gamma_{S_{B}}(\sigma(i))}
\nonumber \\
&=&
\label{eq:alpha_beta_S}
-A_{\sigma(i), i}A_{\sigma(i), B_{O}}
\end{eqnarray}
\subsection{PhaseI}
According to Section~\ref{sec:Res_ties_phaseI} the polynomials
$\tilde{p}_{x_{i}}^{(L)}(\varepsilon, B)$ and
$\tilde{p}_{x_{i}}^{(Q)}(\varepsilon, B)$ are equal such that considering
\begin{equation}
\label{eq:p_x_i_tilde_ref}
\tilde{p}_{x_{i}}^{(L)}\left(\varepsilon, B\right) :=
\frac{\varepsilon^{i+1}
- \left(A_{B}^{-1}\tilde{A}_{N}\right)_{x_{i}}
\epsilon_{N}}{q_{x_{i}}}
=
\frac{\varepsilon^{i+1}
- \left(A_{B}^{-1}\tilde{A}_{\hat{N}}\right)_{x_{i}}
\epsilon_{\hat{N}}}{q_{x_{i}}}
+ \varepsilon^{j+1}
\end{equation}
in light of Equation~(\ref{eq:A_B_inv_exp})
will be sufficient for the LP-case as well as the QP-case
in phaseI when evaluating coefficients.
We will only consider the coefficients of
$\varepsilon^{k+1}$ for $k \in \hat{N}= N \setminus \{j\}$ in
$\tilde{p}_{x_{i}}^{(L)}(\varepsilon, B)$,
since the other cases $k=j$ and $k \in B$
are trivial. Furthermore we only evaluate entities that are not evaluated in the
unperturbed problem, such that, taking into account the
Definition~(\ref{def:epsilon}) of $\epsilon$, we merely consider
the subexpression
\begin{equation}
\label{def:tilde_n_p_x_i_L}
\tilde{n}_{x_{i}}^{(L)}(B)[i,k]:=
(A_{B}^{-1}\tilde{A}_{k})_{x_{i}}
\end{equation}
The correct value
of the coefficient is obtained by scaling the subexpression with the factor
$q_{x_{i}}^{-1}$.
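Explicitly, assuming as in Definition~(\ref{def:epsilon}) that the entry of
$\epsilon_{\hat{N}}$ belonging to index $k$ is $\varepsilon^{k+1}$, the
coefficient of $\varepsilon^{k+1}$, $k \in \hat{N}$, in
$\tilde{p}_{x_{i}}^{(L)}(\varepsilon, B)$ is, by
Equation~(\ref{eq:p_x_i_tilde_ref}),
\begin{equation*}
-\frac{\left(A_{B}^{-1}\tilde{A}_{k}\right)_{x_{i}}}{q_{x_{i}}}
= -\frac{\tilde{n}_{x_{i}}^{(L)}(B)[i,k]}{q_{x_{i}}},
\end{equation*}
so that, besides the factor $q_{x_{i}}^{-1}$, the minus sign of
Equation~(\ref{eq:p_x_i_tilde_ref}) carries over to the coefficient.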
We will distinguish the two
possibilities $i \in B_{O}$ and $i \in B_{S}$ for $x_{i}$ as well as the
various possibilities for $k \in \hat{N}$ in phaseI.
\subsubsection{LP/QP-case: $\tilde{p}_{x_{i}}^{(L)}(\varepsilon, B)$}
\paragraph{$\mathbf{i \in B_{O}}$:}
Assuming $i \in B_{O}$ we distinguish
according to Equation~(\ref{eq:A_B_inv_exp})
and the definition of $\tilde{p}_{x_{i}}^{(L)}(\varepsilon, B)$ the
following cases for $k \in \hat{N}$:
\begin{enumerate}
\item $k \in \hat{N} \cap S$:
\begin{eqnarray}
(A_{B}^{-1}\tilde{A}_{k})_{x_{i}} &=&
(A_{B}^{-1})_{x_{i}, C \cup S_{B}}\tilde{A}_{C \cup S_{B}, k}
\nonumber \\
&=&
(A_{B}^{-1})_{x_{i}, C}\tilde{A}_{C, k} +
(A_{B}^{-1})_{x_{i}, S_{B}}\tilde{A}_{S_{B}, k}
\nonumber \\
&&\begin{minipage}{9cm}
because of $\tilde{A}_{S_{B},k}=0$ for $k \in \hat{N} \cap S$ this yields
\end{minipage}
\nonumber \\
&=&
(A_{B}^{-1})_{x_{i}, C}\tilde{A}_{C, k}
\nonumber \\
&=&
\left(\check{A}_{B}^{-1}\right)_{\beta_{O}(i),C}\tilde{A}_{C, k}
\nonumber \\
&=&
\left(\check{A}_{B}^{-1}\right)_{\beta_{O}(i), \gamma_{C}(\sigma(k))}
\tilde{A}_{\sigma(k), k}
\end{eqnarray}
\item $k \in \hat{N} \cap art \setminus \{\tilde{a}^{s}\}$:
\begin{eqnarray}
(A_{B}^{-1}\tilde{A}_{k})_{x_{i}} &=&
(A_{B}^{-1})_{x_{i}, C \cup S_{B}}\tilde{A}_{C \cup S_{B}, k}
\nonumber \\
&=&
(A_{B}^{-1})_{x_{i}, C}\tilde{A}_{C, k} +
(A_{B}^{-1})_{x_{i}, S_{B}}\tilde{A}_{S_{B}, k}
\nonumber \\
&&\begin{minipage}{9cm}
because of $\tilde{A}_{S_{B},k}=0$ for
$k \in \hat{N} \cap art \setminus \{\tilde{a}^{s}\}$ this yields
\end{minipage}
\nonumber \\
&=&
(A_{B}^{-1})_{x_{i}, C}\tilde{A}_{C, k}
\nonumber \\
&=&
\left(\check{A}_{B}^{-1}\right)_{\beta_{O}(i), C}\tilde{A}_{C, k}
\nonumber \\
&=&
\left(\check{A}_{B}^{-1}\right)_{\beta_{O}(i), \gamma_{C}(\sigma(k))}
\tilde{A}_{\sigma(k), k}
\end{eqnarray}
\item $\tilde{A}_{k}=\tilde{a}^{s}$:
\begin{eqnarray}
(A_{B}^{-1}\tilde{A}_{k})_{x_{i}} &=&
(A_{B}^{-1})_{x_{i}, C \cup S_{B}} \tilde{a}_{C \cup S_{B}}^{s}
\nonumber \\
&=&
(A_{B}^{-1})_{x_{i}, C}\tilde{a}_{C}^{s} +
(A_{B}^{-1})_{x_{i}, S_{B}}\tilde{a}_{S_{B}}^{s}
\nonumber \\
&&\begin{minipage}{9cm}
by Equation~(\ref{eq:A_B_inv_exp})
$\left(A_{B}^{-1}\right)_{B_{O}, S_{B}}=0$ holds, so
\end{minipage}
\nonumber \\
&=&
(A_{B}^{-1})_{x_{i}, C}\tilde{a}_{C}^{s}
\nonumber \\
&=&
\left(\check{A}_{B}^{-1}\right)_{\beta_{O}(i),C}\tilde{a}_{C}^{s}
\nonumber \\
&=&
\left(\check{A}_{B}^{-1}\right)_{\beta_{O}(i), E}\tilde{a}_{E}^{s}
+\left(\check{A}_{B}^{-1}\right)_{\beta_{O}(i), S_{N}}\tilde{a}_{S_{N}}^{s}
\nonumber \\
&&\begin{minipage}{9cm}
because of $\tilde{a}^{s}_{E}=0$ this yields
\end{minipage}
\nonumber \\
&=&
\left(\check{A}_{B}^{-1}\right)_{\beta_{O}(i), S_{N}}\tilde{a}_{S_N}^{s}
\end{eqnarray}
\item $k \in \hat{N} \cap O$:
\begin{eqnarray}
(A_{B}^{-1}\tilde{A}_{k})_{x_{i}} &=&
(A_{B}^{-1})_{x_{i}, C \cup S_{B}}\tilde{A}_{C \cup S_{B}, k}
\nonumber \\
&=&
(A_{B}^{-1})_{x_{i}, C}\tilde{A}_{C, k} +
(A_{B}^{-1})_{x_{i}, S_{B}}\tilde{A}_{S_{B}, k}
\nonumber \\
&&\begin{minipage}{9cm}
by Equation~(\ref{eq:A_B_inv_exp})
$\left(A_{B}^{-1}\right)_{B_{O}, S_{B}}=0$ holds, so
\end{minipage}
\nonumber \\
&=&
(A_{B}^{-1})_{x_{i}, C}\tilde{A}_{C, k}
\nonumber \\
&=&
\left(\check{A}_{B}^{-1}\right)_{\beta_{O}(i),C}\tilde{A}_{C,k}
\end{eqnarray}
\end{enumerate}
\paragraph{$\mathbf{i \in B_{S}}$:}
Assuming $i \in B_{S}$ we distinguish
according to Equation~(\ref{eq:A_B_inv_exp})
and the definition of $\tilde{p}_{x_{i}}^{(L)}(\varepsilon, B)$
the following cases for $k \in \hat{N}$:
\begin{enumerate}
\item $k \in \hat{N} \cap S$:
\begin{eqnarray}
(A_{B}^{-1}\tilde{A}_{k})_{x_{i}} &=&
(A_{B}^{-1})_{x_{i}, C \cup S_{B}}\tilde{A}_{C \cup S_{B}, k}
\nonumber \\
&=&
(A_{B}^{-1})_{x_{i}, C}\tilde{A}_{C, k} +
(A_{B}^{-1})_{x_{i}, S_{B}}\tilde{A}_{S_{B}, k}
\nonumber \\
&&\begin{minipage}{9cm}
because of $\tilde{A}_{S_{B}, k}=0$ for $k \in \hat{N} \cap S$ this yields
\end{minipage}
\nonumber \\
&=&
(A_{B}^{-1})_{x_{i}, C}\tilde{A}_{C, k}
\nonumber \\
&=&
\left(\alpha\check{A}_{B}^{-1}\right)_{\beta_{S}(i), C}\tilde{A}_{C, k}
\nonumber \\
&=&
\left(\alpha\check{A}_{B}^{-1}\right)_{\beta_{S}(i), \gamma_{C}(\sigma(k))}
\tilde{A}_{\sigma(k), k}
\nonumber \\
&=&
\left(\alpha\right)_{\beta_{S}(i)}
\left(\check{A}_{B}^{-1}\right)_{\bullet, \gamma_{C}(\sigma(k))}
\tilde{A}_{\sigma(k), k}
\nonumber
\end{eqnarray}
Using Equation~(\ref{eq:alpha_beta_S}) we obtain
\begin{equation}
(A_{B}^{-1}\tilde{A}_{k})_{x_{i}} =
-\tilde{A}_{\sigma(i), i}\tilde{A}_{\sigma(i), B_{O}}
\left(\check{A}_{B}^{-1}\right)_{\bullet, \gamma_{C}(\sigma(k))}
\tilde{A}_{\sigma(k), k}
\end{equation}
\item $k \in \hat{N} \cap art \setminus \{\tilde{a}^{s}\}$:
\begin{eqnarray}
(A_{B}^{-1}\tilde{A}_{k})_{x_{i}} &=&
(A_{B}^{-1})_{x_{i}, C \cup S_{B}}\tilde{A}_{C \cup S_{B}, k}
\nonumber \\
&=&
(A_{B}^{-1})_{x_{i}, C}\tilde{A}_{C, k} +
(A_{B}^{-1})_{x_{i}, S_{B}}\tilde{A}_{S_{B}, k}
\nonumber \\
&&\begin{minipage}{9cm}
because of $\tilde{A}_{S_{B}, k}=0$ for
$k \in \hat{N} \cap art \setminus \{ \tilde{a}^{s} \}$ this yields
\end{minipage}
\nonumber \\
&=&
(A_{B}^{-1})_{x_{i}, C}\tilde{A}_{C, k}
\nonumber \\
&=&
\left(\alpha\check{A}_{B}^{-1}\right)_{\beta_{S}(i),C}\tilde{A}_{C, k}
\nonumber \\
&=&
\left(\alpha\check{A}_{B}^{-1}\right)_{\beta_{S}(i), \gamma_{C}(\sigma(k))}
\tilde{A}_{\sigma(k), k}
\nonumber \\
&=&
\left(\alpha\right)_{\beta_{S}(i)}
\left(\check{A}_{B}^{-1}\right)_{\bullet, \gamma_{C}(\sigma(k))}
\tilde{A}_{\sigma(k), k}
\nonumber
\end{eqnarray}
Using Equation~(\ref{eq:alpha_beta_S}) we obtain
\begin{equation}
(A_{B}^{-1}\tilde{A}_{k})_{x_{i}} =
-\tilde{A}_{\sigma(i), i}\tilde{A}_{\sigma(i), B_{O}}
\left(\check{A}_{B}^{-1}\right)_{\bullet, \gamma_{C}(\sigma(k))}
\tilde{A}_{\sigma(k), k}
\end{equation}
\item $\tilde{A}_{k}=\tilde{a}^{s}$:
\begin{eqnarray}
(A_{B}^{-1}\tilde{A}_{k})_{x_{i}} &=&
(A_{B}^{-1})_{x_{i}, C \cup S_{B}}\tilde{a}_{C \cup S_{B}}^{s}
\nonumber \\
&=&
(A_{B}^{-1})_{x_{i}, C}\tilde{a}_{C}^{s} +
(A_{B}^{-1})_{x_{i}, S_{B}}\tilde{a}_{S_{B}}^{s}
\nonumber \\
&=&
(\alpha\check{A}_{B}^{-1})_{\beta_{S}(i),C}\tilde{a}_{C}^{s}
+(A_{S_{B}, B_{S}}^{T})_{\beta_{S}(i), S_{B}}\tilde{a}_{S_{B}}^{s}
\nonumber \\
&=&
(\alpha\check{A}_{B}^{-1})_{\beta_{S}(i), E}\tilde{a}_{E}^{s}
+(\alpha\check{A}_{B}^{-1})_{\beta_{S}(i), S_{N}}\tilde{a}_{S_{N}}^{s}
+ \tilde{A}_{\sigma(i), i}\tilde{a}_{\gamma_{S_{B}}(\sigma(i))}^{s}
\nonumber \\
&&\begin{minipage}{9cm}
because of $\tilde{a}_{E}^{s}=0$ this yields
\end{minipage}
\nonumber \\
&=&
(\alpha\check{A}_{B}^{-1})_{\beta_{S}(i), S_{N}}\tilde{a}_{S_{N}}^{s}
+ \tilde{A}_{\sigma(i), i}\tilde{a}_{\gamma_{S_{B}}(\sigma(i))}^{s}
\nonumber
\end{eqnarray}
Using Equation~(\ref{eq:alpha_beta_S}) we obtain
\begin{equation}
(A_{B}^{-1}\tilde{A}_{k})_{x_{i}} =
-\tilde{A}_{\sigma(i), i}\tilde{A}_{\sigma(i), B_{O}}
\left(\check{A}_{B}^{-1}\right)_{\bullet, S_{N}}\tilde{a}_{S_{N}}^{s}
+\tilde{A}_{\sigma(i), i}\tilde{a}_{\gamma_{S_{B}}(\sigma(i))}^{s}
\end{equation}
\item $k \in \hat{N} \cap O$:
\begin{eqnarray}
(A_{B}^{-1}\tilde{A}_{k})_{x_{i}} &=&
(A_{B}^{-1})_{x_{i}, C \cup S_{B}}\tilde{A}_{C \cup S_{B}, k}
\nonumber \\
&=&
(A_{B}^{-1})_{x_{i}, C}\tilde{A}_{C, k} +
(A_{B}^{-1})_{x_{i}, S_{B}}\tilde{A}_{S_{B}, k}
\nonumber \\
&=&
(\alpha\check{A}_{B}^{-1})_{\beta_{S}(i),C}\tilde{A}_{C, k}
+ (A_{S_{B}, B_{S}}^{T})_{\beta_{S}(i), S_{B}}\tilde{A}_{S_{B}, k}
\nonumber \\
&=&
\left(\alpha\right)_{\beta_{S}(i)}\check{A}_{B}^{-1}\tilde{A}_{C,k}
+\tilde{A}_{\sigma(i),i}\tilde{A}_{\gamma_{S_{B}}(\sigma(i)),k}
\nonumber
\end{eqnarray}
Using Equation~(\ref{eq:alpha_beta_S}) we obtain
\begin{equation}
(A_{B}^{-1}\tilde{A}_{k})_{x_{i}} =
-\tilde{A}_{\sigma(i), i}\tilde{A}_{\sigma(i), B_{O}}
\check{A}_{B}^{-1}\tilde{A}_{C, k}
+\tilde{A}_{\sigma(i), i}\tilde{A}_{\gamma_{S_{B}}(\sigma(i)), k}
\end{equation}
\end{enumerate}
\subsection{PhaseII}
According to Sections~\ref{sec:Ties_ratio_test_step_1}
and~\ref{sec:Ties_ratio_test_step_2} we have in the QP-case
to consider the polynomials $p_{x_{i}}^{(Q_{1})}(\varepsilon, B)$,
\pmu{Q_{1}}{B} and
$p_{x_{i}}^{(Q_{2})}(\varepsilon, \hat{B})$ in light of
Equation~(\ref{eq:M_B_inv_exp}). The LP-case in phaseII can be omitted,
since only LP-type ties can occur in the LP-case
and these have already been treated in the last section.
\subsubsection{Ratio Test Step 1:
$\px{i}{Q_{1}}{B}$}
For ease of reference, we restate Definition~(\ref{def:p_x_i_Q_1})
of $p_{x_{i}}^{(Q_{1})}(\varepsilon, B)$
\begin{eqnarray*}
p_{x_{i}}^{(Q_{1})}\left(\varepsilon, B\right) &:=&
\frac{\varepsilon^{i+1} -
\left(M_{B}^{-1}
\left(\begin{array}{c}
A_{N} \\
\hline
2D_{B, N}
\end{array}
\right)
\right)_{x_{i}}\epsilon_{N}}{q_{x_{i}}}
\\
&=&
\frac{\varepsilon^{i+1} -
\left(M_{B}^{-1}
\left(\begin{array}{c}
A_{N \setminus \{j\}} \\
\hline
2D_{B, N \setminus \{j\}}
\end{array}
\right)
\right)_{x_{i}}\epsilon_{N \setminus \{j\}}}{q_{x_{i}}}
+ \varepsilon^{j+1}
\end{eqnarray*}
Again, we will only consider the coefficients of
$\varepsilon^{k+1}$ for $k \in \hat{N}= N \setminus \{j\}$ in
$p_{x_{i}}^{(Q_{1})}(\varepsilon, B)$, since the other cases $k=j$ and $k \in B$
are trivial. Furthermore we only evaluate entities that are not evaluated in the
unperturbed problem, such that, taking into account the
Definition~(\ref{def:epsilon}) of $\epsilon$, we merely consider
the subexpression
\begin{equation}
\label{def:n_x_i_Q_1}
n_{x_{i}}^{(Q_{1})}(B)[i,k]:=
\left(M_{B}^{-1}
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{B,k}
\end{array}
\right)
\right)_{x_{i}}
\end{equation}
The correct value of the coefficient is obtained by scaling the subexpression
with the factor $q_{x_{i}}^{-1}$.
According to Equation~(\ref{eq:M_B_inv_exp}) and the definition of
$p_{x_{i}}^{(Q_{1})}(\varepsilon, B)$ we distinguish $i \in B_{O}$ and
$i \in B_{S}$.
\paragraph{$\mathbf{i \in B_{O}}$:}
Assuming $i \in B_{O}$ we distinguish
according to the definition of $p_{x_{i}}^{(Q_{1})}(\varepsilon, B)$
the following cases for $k \in \hat{N}$:
\begin{enumerate}
\item $k \in \hat{N} \cap S$:
\begin{eqnarray}
\label{eq:r1_i_B_O_k_N_S}
\lefteqn{\left(M_{B}^{-1}
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{B, k}
\end{array}
\right)
\right)_{x_{i}}
=} \nonumber \\
&&
\left(M_{B}^{-1}\right)_{x_{i}, C \cup S_{B}}A_{C \cup S_{B}, k}
+2\left(M_{B}^{-1}\right)_{x_{i}, B_{O} \cup B_{S}}D_{B_{O} \cup B_{S}, k}
\nonumber \\
&=&
\left(M_{B}^{-1}\right)_{x_{i}, C}A_{C, k}
+\left(M_{B}^{-1}\right)_{x_{i}, S_{B}}A_{S_{B}, k}
+2\left(M_{B}^{-1}\right)_{x_{i}, B_{O}}D_{B_{O}, k}
\nonumber \\
&&
+2\left(M_{B}^{-1}\right)_{x_{i}, B_{S}}D_{B_{S}, k}
\nonumber \\
&&\begin{minipage}{9cm}
by Equation~(\ref{eq:M_B_inv_exp}) $\left(M_{B}^{-1}\right)_{B_{O}, S_{B}}=0$,
and because of $D_{B_{O}, k}=0$ for $k \in N \cap S$
and $D_{B_{S},k}=0$ this yields
\end{minipage}
\nonumber \\
&=&
\left(M_{B}^{-1}\right)_{x_{i}, C}A_{C, k}
\nonumber \\
&=&
\left(\check{M}_{B}^{-1}\right)_{\beta_{O}(i),C}A_{C, k}
\nonumber \\
&=&
\left(\check{M}_{B}^{-1}\right)_{\beta_{O}(i), \gamma_{C}(\sigma(k))}
A_{\sigma(k), k}
\end{eqnarray}
\item $k \in \hat{N} \cap O$:
\begin{eqnarray}
\label{eq:r1_i_B_O_k_N_O}
\lefteqn{\left(M_{B}^{-1}
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{B, k}
\end{array}
\right)
\right)_{x_{i}}
=} \nonumber \\
&&
\left(M_{B}^{-1}\right)_{x_{i}, C \cup S_{B}}A_{C \cup S_{B}, k}
+2\left(M_{B}^{-1}\right)_{x_{i}, B_{O} \cup B_{S}}D_{B_{O} \cup B_{S}, k}
\nonumber \\
&=&
\left(M_{B}^{-1}\right)_{x_{i}, C}A_{C, k}
+\left(M_{B}^{-1}\right)_{x_{i}, S_{B}}A_{S_{B}, k}
+2\left(M_{B}^{-1}\right)_{x_{i}, B_{O}}D_{B_{O}, k}
\nonumber \\
&&
+2\left(M_{B}^{-1}\right)_{x_{i}, B_{S}}D_{B_{S}, k}
\nonumber \\
&&\begin{minipage}{9cm}
by Equation~(\ref{eq:M_B_inv_exp}) $\left(M_{B}^{-1}\right)_{B_{O}, S_{B}}=0$
and because of $D_{B_{S},k}=0$ this yields
\end{minipage}
\nonumber \\
&=&
\left(M_{B}^{-1}\right)_{x_{i}, C}A_{C, k}
+2\left(M_{B}^{-1}\right)_{x_{i}, B_{O}}D_{B_{O}, k}
\nonumber \\
&=&
\left(\check{M}_{B}^{-1}\right)_{\beta_{O}(i),C}A_{C,k}
+2\left(\check{M}_{B}^{-1}\right)_{\beta_{O}(i), B_{O}}D_{B_{O}, k}
\nonumber \\
&=&
\left(\check{M}_{B}^{-1}\right)_{\beta_{O}(i)}
\left(\begin{array}{c}
A_{C, k} \\
\hline
2D_{B_{O}, k}
\end{array}
\right)
\end{eqnarray}
\end{enumerate}
\paragraph{$\mathbf{i \in B_{S}}$:}
Assuming $i \in B_{S}$ we distinguish
according to the definition of $p_{x_{i}}^{(Q_{1})}(\varepsilon, B)$
the following two cases for $k \in \hat{N}$:
\begin{enumerate}
\item $k \in \hat{N} \cap S$:
\begin{eqnarray}
\label{eq:r1_i_B_S_k_N_S}
\lefteqn{\left(M_{B}^{-1}
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{B, k}
\end{array}
\right)
\right)_{x_{i}}
=} \nonumber \\
&&
\left(M_{B}^{-1}\right)_{x_{i}, C \cup S_{B}}A_{C \cup S_{B}, k}
+2\left(M_{B}^{-1}\right)_{x_{i}, B_{O} \cup B_{S}}D_{B_{O} \cup B_{S}, k}
\nonumber \\
&=&
\left(M_{B}^{-1}\right)_{x_{i}, C}A_{C, k}
+\left(M_{B}^{-1}\right)_{x_{i}, S_{B}}A_{S_{B}, k}
+2\left(M_{B}^{-1}\right)_{x_{i}, B_{O}}D_{B_{O}, k}
\nonumber \\
&&
+2\left(M_{B}^{-1}\right)_{x_{i}, B_{S}}D_{B_{S}, k}
\nonumber \\
&&\begin{minipage}{9cm}
because of $A_{S_{B},k}=0$ and $D_{B_{O}, k}=0$ for
$k \in N \cap S$ and $D_{B_{S}, k}=0$ this yields
\end{minipage}
\nonumber \\
&=&
\left(M_{B}^{-1}\right)_{x_{i}, C}A_{C, k}
\nonumber \\
&=&
\left(\check{M}_{B}^{-1}\right)_{\beta_{S}(i), C}A_{C, k}
\nonumber \\
&=&
\left(\check{M}_{B}^{-1}\right)_{\beta_{S}(i), \gamma_{C}(\sigma(k))}
A_{\sigma(k), k}
\end{eqnarray}
\item $k \in \hat{N} \cap O$:
\begin{eqnarray}
\lefteqn{\left(M_{B}^{-1}
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{B, k}
\end{array}
\right)
\right)_{x_{i}}
=} \nonumber \\
&&
\left(M_{B}^{-1}\right)_{x_{i}, C \cup S_{B}}A_{C \cup S_{B}, k}
+2\left(M_{B}^{-1}\right)_{x_{i}, B_{O} \cup B_{S}}D_{B_{O} \cup B_{S}, k}
\nonumber \\
&=&
\left(M_{B}^{-1}\right)_{x_{i}, C}A_{C, k}
+\left(M_{B}^{-1}\right)_{x_{i}, S_{B}}A_{S_{B}, k}
+2\left(M_{B}^{-1}\right)_{x_{i}, B_{O}}D_{B_{O}, k}
\nonumber \\
&&
+2\left(M_{B}^{-1}\right)_{x_{i}, B_{S}}D_{B_{S}, k}
\nonumber \\
&&\begin{minipage}{8cm}
because of $D_{B_{S}, k}=0$ this yields
\end{minipage}
\nonumber \\
&=&
\left(M_{B}^{-1}\right)_{x_{i}, C}A_{C, k}
+\left(M_{B}^{-1}\right)_{x_{i}, S_{B}}A_{S_{B}, k}
+2\left(M_{B}^{-1}\right)_{x_{i}, B_{O}}D_{B_{O}, k}
\nonumber \\
&=&
\left(\alpha\left(\check{M}_{B}^{-1}\right)_{B_{O}, C}\right)_{\beta_{S}(i)}
A_{C, k}
+\left(A_{S_{B}, B_{S}}^{T}\right)_{\beta_{S}(i)}A_{S_{B},k}
\nonumber \\
&&
+2\left(\alpha\left(\check{M}_{B}^{-1}\right)_{B_{O}, B_{O}}\right)_{
\beta_{S}(i)}D_{B_{O},k}
\nonumber \\
&=&
\left(\alpha\right)_{\beta_{S}(i)}\left(\check{M}_{B}^{-1}\right)_{B_{O}, C}
A_{C, k}
+2\left(\alpha\right)_{\beta_{S}(i)}\left(\check{M}_{B}^{-1}\right)_{B_{O},
B_{O}}D_{B_{O}, k}
\nonumber \\
&&
+A_{\sigma(i), i}A_{\gamma_{S_{B}}(\sigma(i)), k}
\nonumber \\
&=&
\left(\alpha\right)_{\beta_{S}(i)}\left(\check{M}_{B}^{-1}\right)_{B_{O}}
\left(\begin{array}{c}
A_{C,k} \\
\hline
2D_{B_{O},k}
\end{array}
\right)
+A_{\sigma(i), i}A_{\gamma_{S_{B}}(\sigma(i)), k}
\nonumber
\end{eqnarray}
Using Equation~(\ref{eq:alpha_beta_S}) we obtain
\begin{eqnarray}
\label{eq:r1_i_B_S_k_N_O}
\left(M_{B}^{-1}
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{B, k}
\end{array}
\right)
\right)_{x_{i}}
&=&
-A_{\sigma(i),i}A_{\sigma(i), B_{O}}\left(\check{M}_{B}^{-1}\right)_{B_{O}}
\left(\begin{array}{c}
A_{C,k} \\
\hline
2D_{B_{O},k}
\end{array}
\right)
\nonumber \\
&&
+A_{\sigma(i), i}A_{\gamma_{S_{B}}(\sigma(i)), k}
\end{eqnarray}
\end{enumerate}
\subsubsection{Ratio Test Step 1:
\pmu{Q_{1}}{B}}
For ease of reference, we restate Definition~(\ref{def:p_mu_j_Q_1})
of \pmu{Q_{1}}{B}
\begin{eqnarray*}
\pmu{Q_{1}}{B} &:=&
-\frac{2D_{j, N \setminus \{j\}} -
\left(A_{j}^{T} \left| \right. 2D_{B, j}^{T} \right)
M_{B}^{-1}
\left(\begin{array}{c}
A_{N \setminus \{j\}} \\
\hline
2D_{B,N \setminus \{j\}}
\end{array}
\right)}{\nu}
\epsilon_{N \setminus \{j\}}
\\
&&
+\varepsilon^{j+1}
\\
&=&
-\frac{2D_{j, N \setminus \{j\}} -
\left(q_{\lambda}^{T} \left| \right. q_{x}^{T} \right)
\left(\begin{array}{c}
A_{N \setminus \{j\}} \\
\hline
2D_{B,N \setminus \{j\}}
\end{array}
\right)}{\nu}
\epsilon_{N \setminus \{j\}}
+\varepsilon^{j+1}
\end{eqnarray*}
Again, we will only consider the coefficients of
$\varepsilon^{k+1}$ for $k \in \hat{N}= N \setminus \{j\}$ in
\pmu{Q_{1}}{B}, since the other cases $k=j$ and $k \in B$
are trivial. Furthermore we only evaluate entities that are not evaluated in the
unperturbed problem, such that, taking into account the
Definition~(\ref{def:epsilon}) of $\epsilon$
and the fact that $j$ and $B$ remain constant during an iteration of Ratio Test
Step~1 of a given pivot step,
we merely consider the subexpression
\begin{equation}
\label{def:n_mu_j_Q_1}
n_{\mu_{j}}^{(Q_{1})}(B)[j,k]:=
2D_{j, k} -
\left(q_{\lambda}^{T} \left| \right. q_{x}^{T} \right)
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{B,k}
\end{array}
\right)
\end{equation}
The correct value of the coefficient is obtained by scaling the subexpression
with the factor $\nu^{-1}$.
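Explicitly, under the same reading of Definition~(\ref{def:epsilon}) as
before, the coefficient of $\varepsilon^{k+1}$, $k \in \hat{N}$, in
\pmu{Q_{1}}{B} is
\begin{equation*}
-\frac{2D_{j, k} -
\left(q_{\lambda}^{T} \left| \right. q_{x}^{T} \right)
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{B,k}
\end{array}
\right)}{\nu}
= -\frac{n_{\mu_{j}}^{(Q_{1})}(B)[j,k]}{\nu},
\end{equation*}
so that here, too, a minus sign accompanies the scaling factor $\nu^{-1}$.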
We shall first compute the different components of
$\left(q_{\lambda}^{T} \left|\right. q_{x}^{T}\right)$ in terms of
$\check{M}_{B}^{-1}$.
\begin{eqnarray}
\label{eq:q_C}
q_{\lambda_{C}}
&=&
\left(M_{B}^{-1}\right)_{C,C}A_{C,j}
+\left(M_{B}^{-1}\right)_{C, S_{B}}A_{S_{B}, j}
+2\left(M_{B}^{-1}\right)_{C, B_{O}}D_{B_{O}, j}
\nonumber \\
&&
+2\left(M_{B}^{-1}\right)_{C, B_{S}}D_{B_{S}, j}
\nonumber \\
&&\begin{minipage}{9cm}
by Equation~(\ref{eq:M_B_inv_exp}) $\left(M_{B}^{-1}\right)_{C, S_{B}}=0$
and because of $D_{B_{S},j}=0$ this yields
\end{minipage}
\nonumber \\
&=&
\left(\check{M}_{B}^{-1}\right)_{C,C}A_{C,j}
+2\left(\check{M}_{B}^{-1}\right)_{C, B_{O}}D_{B_{O}, j}
\nonumber \\
&=&
\left(\check{M}_{B}^{-1}\right)_{C}
\left(\begin{array}{c}
A_{C, j} \\
\hline
2D_{B_{O}, j}
\end{array}
\right)
\end{eqnarray}
\begin{eqnarray}
q_{\lambda_{S_{B}}}
&=&
\left(M_{B}^{-1}\right)_{S_{B},C}A_{C,j}
+\left(M_{B}^{-1}\right)_{S_{B}, S_{B}}A_{S_{B}, j}
+2\left(M_{B}^{-1}\right)_{S_{B}, B_{O}}D_{B_{O}, j}
\nonumber \\
&&
+2\left(M_{B}^{-1}\right)_{S_{B}, B_{S}}D_{B_{S}, j}
\nonumber \\
&&\begin{minipage}{9cm}
by Equation~(\ref{eq:M_B_inv_exp}) $\left(M_{B}^{-1}\right)_{S_{B}, C}=0$,
$\left(M_{B}^{-1}\right)_{S_{B}, S_{B}}=0$ and
$\left(M_{B}^{-1}\right)_{S_{B}, B_{O}}=0$
and because of $D_{B_{S},j}=0$
this yields
\end{minipage}
\nonumber \\
&=&
0
\end{eqnarray}
\begin{eqnarray}
\label{eq:q_B_O}
q_{x_{B_{O}}}
&=&
\left(M_{B}^{-1}\right)_{B_{O},C}A_{C,j}
+\left(M_{B}^{-1}\right)_{B_{O}, S_{B}}A_{S_{B}, j}
+2\left(M_{B}^{-1}\right)_{B_{O}, B_{O}}D_{B_{O}, j}
\nonumber \\
&&
+2\left(M_{B}^{-1}\right)_{B_{O}, B_{S}}D_{B_{S}, j}
\nonumber \\
&&\begin{minipage}{9cm}
by Equation~(\ref{eq:M_B_inv_exp}) $\left(M_{B}^{-1}\right)_{B_{O}, S_{B}}=0$
and because of $D_{B_{S},j}=0$ this yields
\end{minipage}
\nonumber \\
&=&
\left(\check{M}_{B}^{-1}\right)_{B_{O}, C}A_{C,j}
+2\left(\check{M}_{B}^{-1}\right)_{B_{O}, B_{O}}D_{B_{O}, j}
\nonumber \\
&=&
\left(\check{M}_{B}^{-1}\right)_{B_{O}}
\left(\begin{array}{c}
A_{C, j} \\
\hline
2D_{B_{O}, j}
\end{array}
\right)
\end{eqnarray}
\begin{eqnarray}
q_{x_{B_{S}}}
&=&
\left(M_{B}^{-1}\right)_{B_{S},C}A_{C,j}
+\left(M_{B}^{-1}\right)_{B_{S}, S_{B}}A_{S_{B}, j}
+2\left(M_{B}^{-1}\right)_{B_{S}, B_{O}}D_{B_{O}, j}
\nonumber \\
&&
+2\left(M_{B}^{-1}\right)_{B_{S}, B_{S}}D_{B_{S}, j}
\nonumber \\
&&\begin{minipage}{9cm}
because of $D_{B_{S},j}=0$ this yields
\end{minipage}
\nonumber \\
&=&
\alpha\left(\check{M}_{B}^{-1}\right)_{B_{O}, C}A_{C, j}
+A_{S_{B}, B_{S}}^{T}A_{S_{B}, j}
+2\alpha\left(\check{M}_{B}^{-1}\right)_{B_{O}, B_{O}}D_{B_{O},j}
\nonumber \\
&=&
\alpha\left(\check{M}_{B}^{-1}\right)_{B_{O}}
\left(\begin{array}{c}
A_{C,j} \\
\hline
2D_{B_{O}, j}
\end{array}
\right)
+A_{S_{B}, B_{S}}^{T}A_{S_{B}, j}
\nonumber
\end{eqnarray}
Using the definition of $\alpha$ and $q_{x_{B_{O}}}$ this can be written as
\begin{equation}
q_{x_{B_{S}}}=
-A_{S_{B}, B_{S}}^{T}A_{S_{B}, B_{O}}q_{x_{B_{O}}}
+A_{S_{B}, B_{S}}^{T}A_{S_{B}, j}
\end{equation}
We are now able to compute the coefficients of
\begin{eqnarray}
\left(A_{j}^{T} \left| \right. 2D_{B, j}^{T} \right)
M_{B}^{-1}
\left(\begin{array}{c}
A_{\hat{N}} \\
\hline
2D_{B,\hat{N}}
\end{array}
\right)
&=&
\left(q_{\lambda}^{T} \left| \right. q_{x}^{T} \right)
\left(\begin{array}{c}
A_{\hat{N}} \\
\hline
2D_{B,\hat{N}}
\end{array}
\right)
\nonumber \\
&=&
\left(q_{\lambda_{C}}^{T}\left|\right.
q_{\lambda_{S_{B}}}^{T}\left|\right.
q_{x_{B_{O}}}^{T}\left|\right.
q_{x_{B_{S}}}^{T}
\right)
\left(\begin{array}{c}
A_{C, \hat{N}} \\
\hline
A_{S_{B}, \hat{N}} \\
\hline
2D_{B_{O}, \hat{N}} \\
\hline
2D_{B_{S}, \hat{N}}
\end{array}
\right)
\nonumber \\
&&\begin{minipage}{6cm}
because of $D_{B_{S}, \hat{N}}=0$ and $q_{\lambda_{S_{B}}}=0$ this yields
\end{minipage}
\nonumber \\
&=&
q_{\lambda_{C}}^{T}A_{C, \hat{N}} + 2q_{x_{B_{O}}}^{T}D_{B_{O},\hat{N}}
\nonumber \\
&=&
\left(q_{\lambda_{C}}^{T}\left|\right.q_{x_{B_{O}}}^{T}\right)
\left(\begin{array}{c}
A_{C, \hat{N}} \\
\hline
2D_{B_{O}, \hat{N}}
\end{array}
\right)
\end{eqnarray}
According to Equation~(\ref{eq:M_B_inv_exp}) and the definition of
\pmu{Q_{1}}{B} we distinguish $j \in O \cap N$ and
$j \in S \cap N$.
\paragraph{$\mathbf{j \in O \cap N}$:}
Assuming $j \in O \cap N$ we distinguish according to the definition of
\pmu{Q_{1}}{B} the following two cases for $k \in
\hat{N}$:
\begin{enumerate}
\item $k \in \hat{N} \cap O$:
\begin{eqnarray}
\label{eq:r1_j_O_N_k_N_O}
2D_{j, k}
-\left(q_{\lambda}^{T} \left| \right. q_{x}^{T} \right)
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{B,k}
\end{array}
\right)
&=&
2D_{j, k} - q_{\lambda_{C}}^{T}A_{C, k} - 2q_{x_{B_{O}}}^{T}D_{B_{O},k}
\nonumber \\
&=&
2D_{j, k}
-\left(q_{\lambda_{C}}^{T}\left|\right. q_{x_{B_{O}}}^{T}\right)
\left(\begin{array}{c}
A_{C, k} \\
\hline
2D_{B_{O}, k}
\end{array}
\right)
\end{eqnarray}
\item $k \in \hat{N} \cap S$:
\begin{eqnarray}
\label{eq:r1_j_O_N_k_N_S}
2D_{j,k}
-\left(q_{\lambda}^{T} \left| \right. q_{x}^{T} \right)
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{B, k}
\end{array}
\right)
&=&
2D_{j,k} - q_{\lambda_{C}}^{T}A_{C, k} - 2q_{x_{B_{O}}}^{T}D_{B_{O},k}
\nonumber \\
&&\begin{minipage}{5cm}
because of $D_{B_{O},k}=0$ and $D_{j,k}=0$ for $k \in \hat{N} \cap S$ this yields
\end{minipage}
\nonumber \\
&=&
-q_{\lambda_{C}}^{T}A_{C, k}
\nonumber \\
&=&
-\left(q_{\lambda_{C}}\right)_{\gamma_{C}(\sigma(k))}A_{\sigma(k),k}
\end{eqnarray}
\end{enumerate}
\paragraph{$\mathbf{j \in S \cap N}$:}
Assuming $j \in S \cap N$ we distinguish according to the definition of
\pmu{Q_{1}}{B} the following cases for $k \in \hat{N}$:
\begin{enumerate}
\item $k \in \hat{N} \cap O:$
\begin{eqnarray}
\label{eq:r1_j_S_N_k_N_O}
2D_{j,k}
-\left(q_{\lambda}^{T} \left| \right. q_{x}^{T} \right)
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{B, k}
\end{array}
\right)
&=&
2D_{j,k} - q_{\lambda_{C}}^{T}A_{C, k} - 2q_{x_{B_{O}}}^{T}D_{B_{O},k}
\nonumber \\
&&\begin{minipage}{5cm}
because of $D_{j,k}=0$ for $j \in S \cap N$ this yields
\end{minipage}
\nonumber \\
&=&
- q_{\lambda_{C}}^{T}A_{C, k} - 2q_{x_{B_{O}}}^{T}D_{B_{O},k}
\nonumber \\
&=&
-\left(q_{\lambda_{C}}^{T}\left|\right. q_{x_{B_{O}}}^{T}\right)
\left(\begin{array}{c}
A_{C,k} \\
\hline
2D_{B_{O},k}
\end{array}
\right)
\end{eqnarray}
\item $k \in \hat{N} \cap S:$
\begin{eqnarray}
\label{eq:r1_j_S_N_k_N_S}
2D_{j,k}
-\left(q_{\lambda}^{T} \left| \right. q_{x}^{T} \right)
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{B, k}
\end{array}
\right)
&=&
2D_{j,k} - q_{\lambda_{C}}^{T}A_{C, k} - 2q_{x_{B_{O}}}^{T}D_{B_{O},k}
\nonumber \\
&&\begin{minipage}{5cm}
because of $D_{B_{O},k}=0$ and $D_{j,k}=0$ for $j \in S \cap N$ this yields
\end{minipage}
\nonumber \\
&=&
-q_{\lambda_{C}}^{T}A_{C,k}
\nonumber \\
&=&
-\left(q_{\lambda_{C}}\right)_{\gamma_{C}(\sigma(k))}A_{\sigma(k),k}
\end{eqnarray}
\end{enumerate}
\subsubsection{Ratio Test Step 2: \px{i}{Q_{2}}{\hat{B}}}
For ease of reference, we restate Definition~(\ref{def:p_x_i_Q_2})
of $\px{i}{Q_{2}}{\hat{B}}$
\begin{eqnarray*}
\px{i}{Q_{2}}{\hat{B}} &:=&
\frac{\varepsilon^{i+1} -
\left(M_{\hat{B}}^{-1}
\left(\begin{array}{c}
A_{N \setminus \{j\}} \\
\hline
2D_{\hat{B}, N \setminus \{j\}}
\end{array}
\right)
\right)_{x_{i}}\epsilon_{N \setminus \{j\}}}{p_{x_{i}}}
\end{eqnarray*}
Again, we will only consider the coefficients of
$\varepsilon^{k+1}$ for $k \in \hat{N}= N \setminus \{j\}$ in
\px{i}{Q_{2}}{\hat{B}}, since the other case $k \in \hat{B}$
is trivial. Furthermore we only evaluate entities that are not evaluated in the
unperturbed problem, such that, taking into account the
Definition~(\ref{def:epsilon}) of $\epsilon$,
we merely consider the subexpression
\begin{equation}
\label{def:n_x_i_Q_2}
n_{x_{i}}^{(Q_{2})}(\hat{B})[i,k]:=
\left(
M_{\hat{B}}^{-1}
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{\hat{B}, k}
\end{array}
\right)
\right)_{x_{i}}
\end{equation}
The correct value of the coefficient is obtained by scaling the subexpression
with the factor $p_{x_{i}}^{-1}$.
Since the above subexpression and the corresponding subexpression
in~(\ref{def:n_x_i_Q_1}) for the polynomial \px{i}{Q_{1}}{B} differ as
functions only in the sets $B$ and $\hat{B}$ and their respective
headings, as well as in the scaling factor,
we list the expressions without their derivations.
According to Equation~(\ref{eq:M_B_inv_exp}) and the definition of
\px{i}{Q_{2}}{\hat{B}} we distinguish $i \in \hat{B}_{O}$ and
$i \in \hat{B}_{S}$.
\paragraph{$\mathbf{i \in \hat{B}_{O}}$:}
\begin{enumerate}
\item $k \in \hat{N} \cap S$:
Applying the appropriate changes to Equation~(\ref{eq:r1_i_B_O_k_N_S})
we obtain
\begin{equation}
\label{eq:r2_i_B_O_k_N_S}
\left(
M_{\hat{B}}^{-1}
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{\hat{B}, k}
\end{array}
\right)
\right)_{x_{i}}
=
\left(\check{M}_{\hat{B}}^{-1}\right)_{\hat{\beta}_{O}(i),
\hat{\gamma}_{\hat{C}}(\sigma(k))}
A_{\sigma(k), k}
\end{equation}
\item $k \in \hat{N} \cap O$:
Applying the appropriate changes to Equation~(\ref{eq:r1_i_B_O_k_N_O})
we obtain
\begin{equation}
\label{eq:r2_i_B_O_k_N_O}
\left(
M_{\hat{B}}^{-1}
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{\hat{B}, k}
\end{array}
\right)
\right)_{x_{i}}
=
\left(\check{M}_{\hat{B}}^{-1}\right)_{\hat{\beta}_{O}(i)}
\left(\begin{array}{c}
A_{\hat{C}, k} \\
\hline
2D_{\hat{B}_{O}, k}
\end{array}
\right)
\end{equation}
\end{enumerate}
\paragraph{$\mathbf{i \in \hat{B}_{S}}$:}
\begin{enumerate}
\item $k \in \hat{N} \cap S$:
Applying the appropriate changes to Equation~(\ref{eq:r1_i_B_S_k_N_S})
we obtain
\begin{equation}
\label{eq:r2_i_B_S_k_N_S}
\left(
M_{\hat{B}}^{-1}
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{\hat{B}, k}
\end{array}
\right)
\right)_{x_{i}}
=
\left(\check{M}_{\hat{B}}^{-1}\right)_{\hat{\beta}_{S}(i),
\hat{\gamma}_{\hat{C}}(\sigma(k))}
A_{\sigma(k), k}
\end{equation}
\item $k \in \hat{N} \cap O$:
Applying the appropriate changes to Equation~(\ref{eq:r1_i_B_S_k_N_O})
we obtain
\begin{eqnarray}
\label{eq:r2_i_B_S_k_N_O}
\left(
M_{\hat{B}}^{-1}
\left(\begin{array}{c}
A_{k} \\
\hline
2D_{\hat{B}, k}
\end{array}
\right)
\right)_{x_{i}}
&=&
-A_{\sigma(i),i}A_{\sigma(i), \hat{B}_{O}}
\left(\check{M}_{\hat{B}}^{-1}\right)_{\hat{B}_{O}}
\left(\begin{array}{c}
A_{\hat{C},k} \\
\hline
2D_{\hat{B}_{O},k}
\end{array}
\right)
\nonumber \\
&&
+A_{\sigma(i), i}A_{\hat{\gamma}_{S_{\hat{B}}}(\sigma(i)), k}
\end{eqnarray}
\end{enumerate}
\section{Ratio Tests for the perturbed problem}
The setup of the auxiliary problem and the transition from PhaseI to PhaseII
excepted, Ratio Test Step~1 and Ratio Test
Step~2 are the only parts of the algorithm that differ for the unperturbed and
perturbed problem.
In this section we present a pseudocode description of the ratio tests needed
for Ratio Test Step~1 and Ratio Test Step~2. We will only consider the most
general cases for which Ratio Test Step~1 and Ratio Test Step~2 occur,
that is, for both ratio test steps we consider the QP-case with
inequalities in PhaseII only. Furthermore we will compute the coefficients
of the involved
polynomials $\pmu{Q_{1}}{B}$, $\px{i}{Q_{1}}{B}$ and $\px{i}{Q_{2}}{\hat{B}}$
only when needed. We denote by $\pxz{i}{Q_{1}}{B}[k]$ and
$\pxz{i}{Q_{2}}{\hat{B}}[k]$, respectively,
$0 \leq k \leq \left|O \cup S\right|$,
the coefficients of $\varepsilon^{k}$ in the polynomials
defined by Definitions~(\ref{def:t_min_eps}) and~(\ref{def:hat_mu_j_min_eps}),
such that $\pxz{i}{Q_{1}}{B}[0]=\check{t}(0, B)$ and
$\pxz{i}{Q_{2}}{\hat{B}}[0]=\check{\mu}_{j}(0, \hat{B})$ respectively.
Similarly, we denote by $\pmuz{Q_{1}}{B}[k]$,
$0 \leq k \leq \left|O \cup S\right|$, the coefficient of $\varepsilon^{k}$
in the polynomial
defined by Definition~(\ref{def:hat_mu_j_min_eps}), such that
$\pmuz{Q_{1}}{B}[0]=-\frac{\mu_{j}(0,0)}{\nu}$.
Note that subscripted variable names in the pseudocode snippets of the
following subsections (such as $q_{min}$) denote a single variable name.
\subsection{Ratio Test Step 1}
Ratio Test Step~1 compares, according to
Equation~(\ref{eq:mu_j_eps_t})
and Definition~(\ref{def:t_min_eps}), the smallest $t$,
$\check{t}(\varepsilon, B)$, for which some basic variable is leaving, with
$t=-\frac{\mu_{j}(\varepsilon,0)}{\nu}$, for which $\mu_{j}(\varepsilon,t)=0$.
For reasons of efficiency we factored out the most common case, that is, the
computation of $\check{t}(0, B)$ and $\mu_{j}(0, 0)$.
We distinguish three cases with respect to $\check{t}(0, B)$ and
$\mu_{j}(0, t)$, supposing that $T_{k}$, with
$\emptyset \subset T_{k} \subseteq B$, denotes the set of candidate
leaving variables after consideration of the
coefficients $\pxz{i}{Q_{1}}{B}[j]$, $0 \leq j \leq k$:
\begin{algorithm}
\caption{Perturbed Ratio Test 1, $\check{t}(0, B)$}
\label{alg:ratio_test_step_1_0}
\begin{algorithmic}
\Function{ratio\_test\_1\_\_t\_i\_$\varepsilon$}{$B_{O}, B_{S}$}
\State $T_{0}^{\prime} \gets \emptyset,
\quad x_{min} \gets 1, \quad q_{min} \gets 0$
\ForAll{$i \gets 0, \left|B_{O}\right| - 1$}
\If{$q_{B_{O}}[i] > 0$}
\If{$x_{min}*q_{B_{O}}[i] > x_{B_{O}}[i]*q_{min}$}
\State $x_{min} \gets x_{B_{O}}[i],
\quad q_{min} \gets q_{B_{O}}[i],
\quad T_{0}^{\prime} \gets \{B_{O}[i]\}$
\ElsIf{$x_{min}*q_{B_{O}}[i] = x_{B_{O}}[i]*q_{min}$}
\State $T_{0}^{\prime} \gets T_{0}^{\prime} \cup \{B_{O}[i]\}$
\EndIf
\EndIf
\EndFor
\ForAll{$i \gets 0, \left|B_{S}\right| - 1$}
\If{$q_{B_{S}}[i] > 0$}
\If{$x_{min}*q_{B_{S}}[i] > x_{B_{S}}[i]*q_{min}$}
\State $x_{min} \gets x_{B_{S}}[i],
\quad q_{min} \gets q_{B_{S}}[i],
\quad T_{0}^{\prime} \gets \{B_{S}[i]\}$
\ElsIf{$x_{min}*q_{B_{S}}[i] = x_{B_{S}}[i]*q_{min}$}
\State $T_{0}^{\prime} \gets T_{0}^{\prime} \cup \{B_{S}[i]\}$
\EndIf
\EndIf
\EndFor
\State \textbf{return} $(T_{0}^{\prime}, x_{min}, q_{min})$
\EndFunction
%\Function{ratio\_test\_1\_0\_t\_j}{}
%\EndFunction
\end{algorithmic}
\end{algorithm}
\begin{itemize}
\item $-\frac{\mu_{j}(0, 0)}{\nu} < \check{t}(0, B)$:
According to Equation~(\ref{eq:mu_j_eps_t}) and
Definition~(\ref{def:t_min_eps}) we then have
$-\frac{\mu_{j}(\varepsilon, 0)}{\nu} < \check{t}(\varepsilon, B)$ and a local
optimum is found; according to Lemma~2.7, $B \cup \{j\}$ is the new basis.
\item $-\frac{\mu_{j}(0, 0)}{\nu} = \check{t}(0, B)$:
According to Equation~(\ref{eq:mu_j_eps_t}) and
Definition~(\ref{def:t_min_eps})
both $-\frac{\mu_{j}(\varepsilon, 0)}{\nu} < \check{t}(\varepsilon, B)$ and
$-\frac{\mu_{j}(\varepsilon, 0)}{\nu} > \check{t}(\varepsilon, B)$ are
possible.
We continue comparing the coefficients $\pmu{Q_{1}}{B}[k]$ and
$\px{i}{Q_{1}}{B}[k]$, $1 \leq k \leq \left|O \cup S \right|$,
until $\pmu{Q_{1}}{B}[k] \neq \px{i}{Q_{1}}{B}[k]$ (see the small
illustrative example following this list).
If $-\frac{\mu_{j}(\varepsilon, 0)}{\nu} < \check{t}(\varepsilon, B)$,
a local optimum is found and, according to Lemma~2.7, $B \cup \{j\}$ is the new
basis;
if $-\frac{\mu_{j}(\varepsilon, 0)}{\nu} > \check{t}(\varepsilon, B)$,
we continue computing coefficients
$\px{i}{Q_{1}}{B}[k]$, $1 \leq k \leq \left|O \cup S \right|$, until
$\left|T_{k}\right|=1$. $T_{k}$ then contains the index of the leaving
variable.
\item $-\frac{\mu_{j}(0, 0)}{\nu} > \check{t}(0, B)$:
According to Equation~(\ref{eq:mu_j_eps_t}) and
Definition~(\ref{def:t_min_eps}) we then have
$-\frac{\mu_{j}(\varepsilon, 0)}{\nu} > \check{t}(\varepsilon, B)$. If
$\left|T_{k}\right| > 1$ we continue computing coefficients
$\px{i}{Q_{1}}{B}[k]$, $1 \leq k \leq \left|O \cup S \right|$, until
$\left|T_{k}\right|=1$. $T_{k}$ then contains the index of the leaving
variable.
\end{itemize}
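The following small example, with purely hypothetical numbers, illustrates the
comparison in the second case above: suppose
$-\frac{\mu_{j}(0, 0)}{\nu} = \check{t}(0, B)$ and
\begin{equation*}
\pmuz{Q_{1}}{B}[1]=\pxz{i}{Q_{1}}{B}[1]=0, \qquad
\pmuz{Q_{1}}{B}[2]=3, \qquad \pxz{i}{Q_{1}}{B}[2]=5.
\end{equation*}
The comparison is decided at $k=2$: since $3 < 5$ we have
$-\frac{\mu_{j}(\varepsilon, 0)}{\nu} < \check{t}(\varepsilon, B)$ for all
sufficiently small $\varepsilon > 0$, and a local optimum is found.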
\begin{algorithm}
\caption{Perturbed Ratio Test 1}
\label{alg:ratio_test_step_1_pert}
\begin{algorithmic}
\Function{ratio\_test\_1\_$\varepsilon$}{$B_{O},B_{S},j$}
\State $ratio\_test\_1\_\_q(B_{O},B_{S},j)$
\Comment{Initializes global $q_{B_{O}}$ and $q_{B_{S}}$}
\State $(T_{k}, c_{min}, q_{min})
\gets RATIO\_TEST\_1\_\_T\_I\_\varepsilon(B_{O},B_{S})$
\If{$q_{min}=0$}
\Comment{$\check{t}(\varepsilon, B)=\infty$}
\State \textbf{return} \texttt{unbounded}
\EndIf
\State $(c_{j,k}, \nu)
\gets RATIO\_TEST\_1\_\_T\_J(B_{O}, B_{S},j)$
\If{$c_{j,k}*q_{min} > \nu * c_{min} \wedge \left|T_{k}\right| =1$}
\Comment{$-\frac{\mu_{j}(0, 0)}{\nu}>\check{t}(0, B)
\wedge \left|T_{k}\right|=1$}
\State \textbf{return} $T_{k}$
\ElsIf{$c_{j,k}*q_{min} < \nu * c_{min}$}
\Comment{$-\frac{\mu_{j}(0, 0)}{\nu}<\check{t}(0, B)$}
\State \textbf{return} $\emptyset$
\Else
\Comment{$-\frac{\mu_{j}(0,0)}{\nu}>\check{t}(0,B) \wedge
\left|T_{k}\right|>1 \vee -\frac{\mu_{j}(0,0)}{\nu}=\check{t}(0,B)$}
\State $k \gets 0, \quad leaving \gets c_{j,k}*q_{min} > \nu * c_{min}$
\Repeat
\State $T_{k}^{\prime} \gets \emptyset,
\quad c_{min} \gets 1,
\quad q_{min} \gets 0$
\ForAll{$i \in T_{k}$}
\State $c_{i,k} \gets n_{x_{i}}^{(Q_{1})}(B)[i,k]$
\If{$i < n$} \Comment{$x_{i}$ is original variable}
\State update $(T_{k}^{\prime}, c_{min}, q_{min})$ with the candidate
coefficient $c_{i,k}$ and denominator $q_{B_{O}}[\beta_{O}[i]]$,
analogously to the loops in Algorithm~\ref{alg:ratio_test_step_1_0}
\Else \Comment{$x_{i}$ is slack variable}
\State update $(T_{k}^{\prime}, c_{min}, q_{min})$ with the candidate
coefficient $c_{i,k}$ and denominator $q_{B_{S}}[\beta_{S}[i]]$,
analogously to the loops in Algorithm~\ref{alg:ratio_test_step_1_0}
\EndIf
\EndFor
\If{$\neg leaving$}
\State $c_{j,k} \gets n_{\mu_{j}}^{(Q_{1})}(B)[j,k]$ and compare the
coefficients $\pmuz{Q_{1}}{B}[k]$ and $\pxz{i}{Q_{1}}{B}[k]$
\If{$-\frac{\mu_{j}(\varepsilon, 0)}{\nu} < \check{t}(\varepsilon, B)$
is decided}
\State \textbf{return} $\emptyset$
\ElsIf{$-\frac{\mu_{j}(\varepsilon, 0)}{\nu} > \check{t}(\varepsilon, B)$
is decided}
\State $leaving \gets \mathit{true}$
\EndIf
\EndIf
\State $k \gets k+1, \quad T_{k} \gets T_{k}^{\prime}$
\Until{$\left|T_{k}\right|=1 \wedge leaving$}
\Comment{$-\frac{\mu_{j}(\varepsilon,0)}{\nu}>\check{t}(\varepsilon,B)
\wedge \left|T_{k}\right|=1$}
\State \textbf{return} $T_{k}$
\EndIf
\EndFunction
\end{algorithmic}
\end{algorithm}
The function that computes $\check{t}(0, B)$ is outlined in
Algorithm~(\ref{alg:ratio_test_step_1_0}); the keeping of the set of candidate
leaving variables $T_{0}$ is the only part in which it differs from the
unperturbed variant.
The function \texttt{RATIO\_TEST\_1\_\_T\_J},
which computes $-\frac{\mu_{j}(0,0)}{\nu}$, is omitted here, since it already
occurs in the unperturbed problem.
The function that performs the actual Ratio Test Step~1 by comparing
$\pmuz{Q_{1}}{B}$ and $\pxz{i}{Q_{1}}{B}$
coefficient by coefficient is outlined in
Algorithm~(\ref{alg:ratio_test_step_1_pert}). It returns the unique index of
the leaving variable if
$-\frac{\mu_{j}(\varepsilon, 0)}{\nu} > \check{t}(\varepsilon,B)$, the empty
set if $-\frac{\mu_{j}(\varepsilon, 0)}{\nu} < \check{t}(\varepsilon,B)$
and \texttt{unbounded} if $\check{t}(\varepsilon, B)=\infty$.
Note that, by the remarks of
Section~(\ref{sec:Ties_ratio_test_step_1}), \px{i}{Q_{1}}{B} is unique and
either $\px{i}{Q_{1}}{B} < \pmu{Q_{1}}{B}$ or
$\px{i}{Q_{1}}{B} > \pmu{Q_{1}}{B}$
holds, such that Algorithm~(\ref{alg:ratio_test_step_1_pert}) terminates.
Note that there is an opportunity for improvement; we could distinguish the cases
$\neg leaving \wedge \left|T_{k}\right|=1$, where the index of the potentially
leaving variable $x_{i}$ is known and no minimum among basic variables has to
be determined in order to compute the next coefficient of \px{i}{Q_{1}}{B}, and
$\neg leaving \wedge \left|T_{k}\right|>1$, where the next coefficient of
\px{i}{Q_{1}}{B} is to be computed as a minimum over the index set $T_{k}$.
\subsection{Ratio Test Step 2}
Ratio Test Step~2 determines, according to
Equation~(\ref{eq:QP_j_mu_opt_short}), the value of
$\mu_{j}(\varepsilon)$ for which some
basic variable is leaving. Supposing that $T_{k}$, with
$\emptyset \subset T_{k} \subseteq \hat{B}$, denotes the set
of candidate leaving variables after consideration of the coefficients
$\pxz{i}{Q_{2}}{\hat{B}}[j]$,
$0 \leq j \leq k$,
we distinguish three cases with respect to
$\check{\mu}_{j}(0, \hat{B})$:
\begin{algorithm}
\caption{Perturbed Ratio Test 2, $\check{\mu}_{j}(0,\hat{B})$}
\label{alg:ratio_test_step_2_0}
\begin{algorithmic}
\Function{ratio\_test\_2\_0\_$\varepsilon$}{$B_{O},B_{S}$}
\State $T_{0} \gets \emptyset,
\quad x_{min} \gets 1, \quad p_{min} \gets 0$
\ForAll{$i \gets 0, \left|B_{O}\right| - 1$}
\If{$p_{B_{O}}[i] < 0$}
\If{$x_{min}*p_{B_{O}}[i] < x_{B_{O}}[i]*p_{min}$}
\State $x_{min} \gets x_{B_{O}}[i],
\quad p_{min} \gets p_{B_{O}}[i],
\quad T_{0} \gets \{B_{O}[i]\}$
\ElsIf{$x_{min}*p_{B_{O}}[i] = x_{B_{O}}[i]*p_{min}$}
\State $T_{0} \gets T_{0} \cup \{B_{O}[i]\}$
\EndIf
\EndIf
\EndFor
\ForAll{$i \gets 0, \left|B_{S}\right| - 1$}
\If{$p_{B_{S}}[i] < 0$}
\If{$x_{min}*p_{B_{S}}[i] < x_{B_{S}}[i]*p_{min}$}
\State $x_{min} \gets x_{B_{S}}[i],
\quad p_{min} \gets p_{B_{S}}[i],
\quad T_{0} \gets \{B_{S}[i]\}$
\ElsIf{$x_{min}*p_{B_{S}}[i] = x_{B_{S}}[i]*p_{min}$}
\State $T_{0} \gets T_{0} \cup \{B_{S}[i]\}$
\EndIf
\EndIf
\EndFor
\State \textbf{return} $(T_{0}, x_{min}, p_{min})$
\EndFunction
\end{algorithmic}
\end{algorithm}
\begin{itemize}
\item $\check{\mu}_{j}(0, \hat{B}) > 0$:
According to Definition~(\ref{def:hat_mu_j_min_eps})
$\check{\mu}_{j}(\varepsilon, \hat{B}) > 0$ then holds, so we found an optimal
solution $x_{\hat{B}}^{*}(\varepsilon, 0)$ to $UQP(\hat{B}_{\varepsilon})$,
which by Lemma~(\ref{lemma:strict}) is also an optimal solution to
$QP(\hat{B}_{\varepsilon})$.
\item $\check{\mu}_{j}(0, \hat{B}) = 0$:
According to Definition~(\ref{def:hat_mu_j_min_eps}) and
Definition~(\ref{def:p_x_i_Q_2}) both
$\check{\mu}_{j}(\varepsilon, \hat{B}) > 0$ and
$\check{\mu}_{j}(\varepsilon, \hat{B}) < 0$ are possible.
We compute coefficients $\px{i}{Q_{2}}{\hat{B}}[k]$,
$1 \leq k \leq \left|O \cup S\right|$, until the sign of
$\check{\mu}_{j}(\varepsilon, \hat{B})$ is known (see the small
illustrative example following this list).
If $\check{\mu}_{j}(\varepsilon, \hat{B}) > 0$, we found an optimal solution
$x_{\hat{B}}^{*}(\varepsilon, \hat{B})$ to $UQP(\hat{B}_{\varepsilon})$,
which again by Lemma~(\ref{lemma:strict}) is also an optimal solution to
$QP(\hat{B}_{\varepsilon})$;
if $\check{\mu}_{j}(\varepsilon, \hat{B}) < 0$, we continue computing
coefficients $\px{i}{Q_{2}}{\hat{B}}[k]$ until
$\left|T_{k}\right|=1$. $T_{k}$ then contains the index of the leaving
variable.
\item $\check{\mu}_{j}(0, \hat{B}) < 0$:
According to Definition~(\ref{def:hat_mu_j_min_eps})
$\check{\mu}_{j}(\varepsilon, \hat{B}) < 0$ then holds, so we compute
coefficients $\px{i}{Q_{2}}{\hat{B}}[k]$,
$1 \leq k \leq \left|O \cup S\right|$, until $\left|T_{k}\right|=1$.
$T_{k}$ then contains the index of the leaving
variable.
\end{itemize}
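The following small example, again with purely hypothetical numbers,
illustrates the second case above: suppose
$\check{\mu}_{j}(0, \hat{B}) = 0$ and
\begin{equation*}
\pxz{i}{Q_{2}}{\hat{B}}[1]=0, \qquad \pxz{i}{Q_{2}}{\hat{B}}[2]=-4.
\end{equation*}
Then $\check{\mu}_{j}(\varepsilon, \hat{B}) < 0$ for all sufficiently small
$\varepsilon > 0$, and we continue computing coefficients until
$\left|T_{k}\right|=1$.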
\begin{algorithm}
\caption{Perturbed Ratio Test 2}
\label{alg:ratio_test_step_2_pert}
\begin{algorithmic}
\Function{ratio\_test\_2\_$\varepsilon$}{$B_{O},B_{S},j$}
\State $ratio\_test\_2\_\_p$
\Comment{Initializes global $p_{B_{O}}$ and $p_{B_{S}}$}
\State $(T_{k}, c_{min}, p_{min})
\gets RATIO\_TEST\_2\_0\_\varepsilon(B_{O},B_{S})$
\If{$p_{min} = 0$}
\Comment{$\check{\mu}_{j}(\varepsilon, \hat{B}) = \infty$}
\State \textbf{return} $\emptyset$
\ElsIf{$c_{min} < 0$}
\Comment{$\check{\mu}_{j}(0, \hat{B}) > 0$}
\State \textbf{return} $\emptyset$
\ElsIf{$c_{min} > 0 \wedge \left|T_{k}\right|=1$}
\Comment{$\check{\mu}_{j}(0, \hat{B}) < 0 \wedge
\left|T_{k}\right|=1$}
\State \textbf{return} $T_{k}$
\Else
\Comment{$\check{\mu}_{j}(0, \hat{B}) < 0 \wedge \left|T_{k}\right|>1
\vee \check{\mu}_{j}(0, \hat{B})=0$}
\State $k \gets 0$
\Repeat
\State $T_{k}^{\prime} \gets \emptyset,
\quad c_{min} \gets 1,
\quad p_{min} \gets 0$
\ForAll{$i \in T_{k}$}
\State $c_{i,k} \gets n_{x_{i}}^{(Q_{2})}(\hat{B})[i,k]$
\If{$i < n$} \Comment{$x_{i}$ is original variable}
\State update $(T_{k}^{\prime}, c_{min}, p_{min})$ with the candidate
coefficient $c_{i,k}$ and denominator $p_{B_{O}}[\beta_{O}[i]]$,
analogously to the loops in Algorithm~\ref{alg:ratio_test_step_2_0}
\Else \Comment{$x_{i}$ is slack variable}
\State update $(T_{k}^{\prime}, c_{min}, p_{min})$ with the candidate
coefficient $c_{i,k}$ and denominator $p_{B_{S}}[\beta_{S}[i]]$,
analogously to the loops in Algorithm~\ref{alg:ratio_test_step_2_0}
\EndIf
\EndFor
\State $k \gets k+1, \quad T_{k} \gets T_{k}^{\prime}$
\Until{$\left|T_{k}\right|=1 \wedge c_{min}>0 \vee c_{min}<0$}
\If{$c_{min} < 0$}
\Comment{$\check{\mu}_{j}(\varepsilon, \hat{B}) > 0$}
\State \textbf{return} $\emptyset$
\Else
\Comment{$\check{\mu}_{j}(\varepsilon, \hat{B}) < 0 \wedge
\left|T_{k}\right|=1$}
\State \textbf{return} $T_{k}$
\EndIf
\EndIf
\EndFunction
\end{algorithmic}
\end{algorithm}
The function that computes $\check{\mu}_{j}(0, \hat{B})$ is outlined in
Algorithm~(\ref{alg:ratio_test_step_2_0}); the keeping of the set of
candidate leaving variables $T_{0}$ excepted, it does not differ from the
unperturbed variant.
The function that computes $\check{\mu}_{j}(\varepsilon, \hat{B})$, based on
the value of $\check{\mu}_{j}(0, \hat{B})$, is outlined
in Algorithm~(\ref{alg:ratio_test_step_2_pert}). It returns the unique index of
the leaving variable if $\check{\mu}_{j}(\varepsilon, \hat{B})<0$ and the empty
set otherwise.
Again, by the remarks of
Section~(\ref{sec:Ties_ratio_test_step_1}), \px{i}{Q_{2}}{\hat{B}} is unique and
either $\px{i}{Q_{2}}{\hat{B}} < 0$ or $\px{i}{Q_{2}}{\hat{B}} > 0$
holds, such that Algorithm~(\ref{alg:ratio_test_step_2_pert}) terminates.
Note that there is an opportunity for improvement here as well;
we could distinguish the cases $\left|T_{k}\right|=1$, where the index of
the potentially leaving variable $x_{i}$ is known and no minimum among basic
variables has to be determined in order to compute the next coefficient of
\px{i}{Q_{2}}{\hat{B}}, and
$\left|T_{k}\right|>1$, where the next coefficient of
\px{i}{Q_{2}}{\hat{B}} is to be computed as a minimum over the index set $T_{k}$.
\begin{thebibliography}{99}
\bibitem{Sven} S.~Sch\"{o}nherr. \textit{Quadratic Programming in Geometric
Optimization: Theory, Implementation, and Applications}. Dissertation,
Diss.\ ETH No.\ 14738, ETH Z\"{u}rich, Institute of Theoretical Computer
Science, 2002.
\bibitem{Chvatal} V.~Chv\'{a}tal. \textit{Linear Programming}. W.~H.~Freeman
and Company, New York, Chapter 8, 1983.
\end{thebibliography}
\end{document}