%% LyX 2.3.2-2 created this file. For more info, see http://www.lyx.org/.
%% Do not edit unless you really know what you are doing.
\documentclass[english]{article}
\usepackage[T1]{fontenc}
\usepackage[latin9]{inputenc}
\usepackage{geometry}
\geometry{verbose,tmargin=1cm,bmargin=2cm,lmargin=1cm,rmargin=1cm}
\setlength{\parindent}{0bp}
\usepackage{amsmath}
\usepackage{amssymb}
\PassOptionsToPackage{normalem}{ulem}
\usepackage{ulem}
\makeatletter
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% User specified LaTeX commands.
\date{}
\makeatother
\usepackage{babel}
\begin{document}
\title{Fundamentals of Signal Enhancement and Array Signal Processing\\
Solution Manual}
\author{Lidor Malul 318628005}
\maketitle
\section*{ 6 An Exhaustive Class of Linear Filters}
\subsection*{6.1}
Show that the Wiener filter can be expressed as
\begin{eqnarray*}
\mathbf{h}_{\mathrm{W}} = \left( \mathbf{I}_M - \mathbf{\Phi}_{\mathbf{y}}^{-1} \mathbf{\Phi}_{\mathrm{in}} \right) \mathbf{i}_{\mathrm{i}} .
\end{eqnarray*}
%
\textbf{\uline{Solution}}\textbf{:}
as we know from (6.35):
\[{{h}_{W}}={{\Phi }_{y}}^{-1}{{\Phi }_{x}}{{i}_{i}}\]
where ${{\Phi }_{y}}$ is given by:
\[{{\Phi }_{y}}={{\Phi }_{x}}+{{\Phi }_{in}}\Rightarrow {{\Phi }_{x}}={{\Phi }_{y}}-{{\Phi }_{in}}\]
place this conclusion in (6.35) :
\[{{h}_{W}}={{\Phi }_{y}}^{-1}({{\Phi }_{y}}-{{\Phi }_{in}}){{i}_{i}}=({{\Phi }_{y}}^{-1}{{\Phi }_{y}}-{{\Phi }_{y}}^{-1}{{\Phi }_{in}}){{i}_{i}}=({{I}_{M}}-{{\Phi }_{y}}^{-1}{{\Phi }_{in}}){{i}_{i}}\]
\[
\blacksquare
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% q2
\subsection*{6.2}
Using Woodbury's identity, show that
\begin{eqnarray*}
\mathbf{\Phi}_{\mathbf{y}}^{-1} = \mathbf{\Phi}_{\mathrm{in}}^{-1} - \mathbf{\Phi}_{\mathrm{in}}^{-1} \mathbf{Q}_{\mathbf{x}}'
\left( \mathbf{\Lambda}_{\mathbf{x}}'^{-1} + \mathbf{Q}_{\mathbf{x}}'^H \mathbf{\Phi}_{\mathrm{in}}^{-1} \mathbf{Q}_{\mathbf{x}}' \right)^{-1}
\mathbf{Q}_{\mathbf{x}}'^H \mathbf{\Phi}_{\mathrm{in}}^{-1} .
\end{eqnarray*}
%
\textbf{\uline{Solution}}\textbf{:}
we write ${{\Phi }_{x}}$ with its eigenvalue decomposition:
\[{{\Phi }_{x}}={{Q}_{x}}^{'}{{\Lambda }_{x}}^{'}{{Q}_{x}}^{'H}\]
now we can express ${{\Phi }_{y}}^{-1}$ as:
\[{{\Phi }_{y}}^{-1}={{({{\Phi }_{in}}+{{\Phi }_{x}})}^{-1}}={{({{\Phi }_{in}}+{{Q}_{x}}^{'}{{\Lambda }_{x}}^{'}{{Q}_{x}}^{'H})}^{-1}}\]
Woodbury's identity states that if
${{\Phi }_{in}}$ is an ${M}\times {M}$ invertible matrix,
${{\Lambda }_{x}}^{'}$ is an ${{R}_{x}}\times {{R}_{x}}$ invertible matrix,
${{Q}_{x}}^{'}$ is an ${M}\times {{R}_{x}}$ matrix, and
${{Q}_{x}}^{'H}$ is an ${{R}_{x}}\times {M}$ matrix,
so:
\[
{{({{\Phi }_{in}}+{{Q}_{x}}^{'}{{\Lambda }_{x}}^{'}{{Q}_{x}}^{'H})}^{-1}}={{\Phi }_{in}}^{-1}-{{\Phi }_{in}}^{-1}{{Q}_{x}}^{'}{{({{\Lambda }_{x}}{{^{'}}^{-1}}+{{Q}_{x}}^{'H}{{\Phi }_{in}}^{-1}{{Q}_{x}}^{'})}^{-1}}{{Q}_{x}}^{'H}{{\Phi }_{in}}^{-1} \]
\[
\to {{\Phi }_{y}}^{-1}={{\Phi }_{in}}^{-1}-{{\Phi }_{in}}^{-1}{{Q}_{x}}^{'}{{({{\Lambda }_{x}}{{^{'}}^{-1}}+{{Q}_{x}}^{'H}{{\Phi }_{in}}^{-1}{{Q}_{x}}^{'})}^{-1}}{{Q}_{x}}^{'H}{{\Phi }_{in}}^{-1} \]
\[
\blacksquare
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% Q4 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection*{6.4}
Show that the MVDR filter is given by
\begin{eqnarray*}
\mathbf{h}_{\mathrm{MVDR}} =
\mathbf{\Phi}_{\mathrm{in}}^{-1} \mathbf{Q}_{\mathbf{x}}' \left( \mathbf{Q}_{\mathbf{x}}'^H
\mathbf{\Phi}_{\mathrm{in}}^{-1} \mathbf{Q}_{\mathbf{x}}' \right)^{-1} \mathbf{Q}_{\mathbf{x}}'^H \mathbf{i}_{\mathrm{i}} .
\end{eqnarray*}
\textbf{\uline{Solution}}\textbf{:}
in order to find the MVDR filter we will solve the following minimization:
$ {{\min }_{h}}[{{J}_{n}}(h)+{{J}_{i}}(h)]$ subject to ${{h}^{H}}{{Q}_{x}}^{'}={{i}_{i}}^{T}{{Q}_{x}}^{'}$
using a Lagrange multiplier we define the following function:
\[L(h,\lambda )=f(h)-\lambda g(h)\]
where $\lambda $ is a $1\times {{R}_{x}}$ vector and :
\[
f(h)={{J}_{n}}(h)+{{J}_{i}}(h)={{\Phi }_{vo}}{{h}^{H}}h+{{h}^{H}}{{\Phi }_{v}}h={{h}^{H}}{{\Phi }_{in}}h \]
\[ g(h)={{i}_{i}}^{T}{{Q}_{x}}^{'}-{{h}^{H}}{{Q}_{x}}^{'} \]
now we will find the minimum of L :
\[
\frac{\partial L(h,\lambda )}{\partial h}=2{{\Phi }_{in}}h-{{Q}_{x}}^{'}{{\lambda }^{T}}=0\to h=\frac{1}{2}{{\Phi }_{in}}^{-1}{{Q}_{x}}^{'}{{\lambda }^{T}} \]
\[ \frac{\partial L(h,\lambda )}{\partial \lambda }={{h}^{H}}{{Q}_{x}}^{'}-{{i}_{i}}^{T}{{Q}_{x}}^{'}=0\to {{h}^{H}}{{Q}_{x}}^{'}={{i}_{i}}^{T}{{Q}_{x}}^{'}\to {{Q}_{x}}^{'H}h={{Q}_{x}}^{'H}{{i}_{i}} \]
\[ {{Q}_{x}}^{'H}h=\frac{1}{2}{{Q}_{x}}^{'H}{{\Phi }_{in}}^{-1}{{Q}_{x}}^{'}{{\lambda }^{T}}={{Q}_{x}}^{'H}{{i}_{i}}\to {{\lambda }^{T}}=2{{({{Q}_{x}}^{'H}{{\Phi }_{in}}^{-1}{{Q}_{x}}^{'})}^{-1}}{{Q}_{x}}^{'H}{{i}_{i}} \]
\[ \to h=\frac{1}{2}{{\Phi }_{in}}^{-1}{{Q}_{x}}^{'}{{\lambda }^{T}}={{\Phi }_{in}}^{-1}{{Q}_{x}}^{'}{{({{Q}_{x}}^{'H}{{\Phi }_{in}}^{-1}{{Q}_{x}}^{'})}^{-1}}{{Q}_{x}}^{'H}{{i}_{i}} \]
\[
\blacksquare
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% Q5 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection*{6.5}
Show that the MVDR filter can be expressed as
\begin{eqnarray*}
\mathbf{h}_{\mathrm{MVDR}} =
\mathbf{\Phi}_{\mathbf{y}}^{-1} \mathbf{Q}_{\mathbf{x}}' \left( \mathbf{Q}_{\mathbf{x}}'^H
\mathbf{\Phi}_{\mathbf{y}}^{-1} \mathbf{Q}_{\mathbf{x}}' \right)^{-1} \mathbf{Q}_{\mathbf{x}}'^H \mathbf{i}_{\mathrm{i}} .
\end{eqnarray*}
\textbf{\uline{Solution}}\textbf{:}
the MVDR filter is obtained from the minimization of $[{{J}_{n}}(h)+{{J}_{i}}(h)]$
since $[{{J}_{d}}(h)]$ equals 0:
\[ [{{J}_{n}}(h)+{{J}_{i}}(h)]=[{{J}_{n}}(h)+{{J}_{i}}(h)+{{J}_{d}}(h)]= \]
\[ ={{\phi }_{x1}}+{{h}^{H}}{{\Phi }_{y}}h-{{h}^{H}}{{\Phi }_{x}}{{i}_{i}}-{{i}_{i}}^{T}{{\Phi }_{x}}h \]
after taking the derivative with respect to $h$, all the terms vanish except $\frac{\partial \left( {{h}^{H}}{{\Phi }_{y}}h \right)}{\partial h}$,
so we continue the previous algorithm with:
\[ f(h)={{h}^{H}}{{\Phi }_{y}}h \]
so the result is:
\[h={{\Phi }_{y}}^{-1}{{Q}_{x}}^{'}{{({{Q}_{x}}^{'H}{{\Phi }_{y}}^{-1}{{Q}_{x}}^{'})}^{-1}}{{Q}_{x}}^{'H}{{i}_{i}}\]
\[
\blacksquare
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% Q7 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection*{6.7}
Show that the tradeoff filter can be expressed as
\begin{eqnarray*}
\mathbf{h}_{\mathrm{T},\mu} = \mathbf{\Phi}_{\mathrm{in}}^{-1} \mathbf{Q}_{\mathbf{x}}' \left( \mu \mathbf{\Lambda}_{\mathbf{x}}'^{-1} + \mathbf{Q}_{\mathbf{x}}'^H
\mathbf{\Phi}_{\mathrm{in}}^{-1} \mathbf{Q}_{\mathbf{x}}' \right)^{-1} \mathbf{Q}_{\mathbf{x}}'^H \mathbf{i}_{\mathrm{i}} .
\end{eqnarray*}
\textbf{\uline{Solution}}\textbf{:}
we know that the tradeoff filter is:
\[{{h}_{T,\mu }}={{[{{\Phi }_{x}}+\mu {{\Phi }_{in}}]}^{-1}}{{\Phi }_{x}}{{i}_{i}}\]
we use the eigenvalue decomposition of ${{\Phi }_{x}}$ :
\[{{\Phi }_{x}}={{Q}_{x}}^{'}{{\Lambda }_{x}}^{'}{{Q}_{x}}^{'H}\]
so we get:
\[{{h}_{T,\mu }}={{[{{\Phi }_{x}}+\mu {{\Phi }_{in}}]}^{-1}}{{\Phi }_{x}}{{i}_{i}}={{[\mu {{\Phi }_{in}}+{{Q}_{x}}^{'}{{\Lambda }_{x}}^{'}{{Q}_{x}}^{'H}]}^{-1}}{{Q}_{x}}^{'}{{\Lambda }_{x}}^{'}{{Q}_{x}}^{'H}{{i}_{i}}\]
we will also use the following statement which we prove later:
\[{{(A+UCV)}^{-1}}U={{A}^{-1}}U{{({{C}^{-1}}+V{{A}^{-1}}U)}^{-1}}{{C}^{-1}}\]
where:
A is an ${M}\times {M}$ invertible matrix,
C is an ${{R}_{x}}\times {{R}_{x}}$ invertible matrix,
U is an ${M}\times {{R}_{x}}$ matrix, and
V is an ${{R}_{x}}\times {M}$ matrix.
so we got:
\[
{{h}_{T,\mu }}={{[\mu {{\Phi }_{in}}+{{Q}_{x}}^{'}{{\Lambda }_{x}}^{'}{{Q}_{x}}^{'H}]}^{-1}}{{Q}_{x}}^{'}{{\Lambda }_{x}}^{'}{{Q}_{x}}^{'H}{{i}_{i}}=\frac{1}{\mu }{{\Phi }_{in}}^{-1}{{Q}_{x}}^{'}{{({{\Lambda }_{x}}{{^{'}}^{-1}}+\frac{1}{\mu }{{Q}_{x}}^{'H}{{\Phi }_{in}}^{-1}{{Q}_{x}}^{'})}^{-1}}{{\Lambda }_{x}}{{^{'}}^{-1}}{{\Lambda }_{x}}^{'}{{Q}_{x}}^{'H}{{i}_{i}} \]
\[ {{h}_{T,\mu }}={{\Phi }_{in}}^{-1}{{Q}_{x}}^{'}{{(\mu {{\Lambda }_{x}}{{^{'}}^{-1}}+{{Q}_{x}}^{'H}{{\Phi }_{in}}^{-1}{{Q}_{x}}^{'})}^{-1}}{{Q}_{x}}^{'H}{{i}_{i}} \]
\[
\blacksquare
\]
Proof of the statement we used:
\[
{{(A+UCV)}^{-1}}U=({{A}^{-1}}-{{A}^{-1}}U{{({{C}^{-1}}+V{{A}^{-1}}U)}^{-1}}V{{A}^{-1}})U= \]
\[ ={{A}^{-1}}U-{{A}^{-1}}U{{({{C}^{-1}}+V{{A}^{-1}}U)}^{-1}}V{{A}^{-1}}U={{A}^{-1}}U{{({{C}^{-1}}+V{{A}^{-1}}U)}^{-1}}[{{C}^{-1}}+V{{A}^{-1}}U-V{{A}^{-1}}U]= \]
\[ ={{A}^{-1}}U{{({{C}^{-1}}+V{{A}^{-1}}U)}^{-1}}{{C}^{-1}} \]
\[
\blacksquare
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% Q8 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection*{6.8}
Show that the LCMV filter is given by
\begin{eqnarray*}
\mathbf{h}_{\mathrm{LCMV}} =
\mathbf{\Phi}_{\mathrm{in}}^{-1} \mathbf{C}_{\mathbf{x}\mathbf{v}_1} \left( \mathbf{C}_{\mathbf{x}\mathbf{v}_1}^H
\mathbf{\Phi}_{\mathrm{in}}^{-1} \mathbf{C}_{\mathbf{x}\mathbf{v}_1} \right)^{-1} \mathbf{i}_{\mathrm{c}} .
\end{eqnarray*}
\textbf{\uline{Solution}}\textbf{:}
in order to find the LCMV filter we will solve the following minimization:
$ {{\min }_{h}}[{{J}_{n}}(h)+{{J}_{i}}(h)]$ subject to ${{h}^{H}}{{C}_{xv1}}^{'}={{i}_{i}}$
using a Lagrange multiplier we define the following function:
\[L(h,\lambda )=f(h)-\lambda g(h)\]
where $\lambda $ is a $1\times {{R}_{x}}$ vector and :
\[ f(h)={{J}_{n}}(h)+{{J}_{i}}(h)={{\Phi }_{vo}}{{h}^{H}}h+{{h}^{H}}{{\Phi }_{v}}h={{h}^{H}}{{\Phi }_{in}}h \]
\[g(h)={{i}_{i}}-{{h}^{H}}{{C}_{xv1}}^{'}\]
now we will find the minimum of L :
\[ \frac{\partial L(h,\lambda )}{\partial h}=2{{\Phi }_{in}}h-{{C}_{xv1}}^{'}{{\lambda }^{T}}=0\to h=\frac{1}{2}{{\Phi }_{in}}^{-1}{{C}_{xv1}}^{'}{{\lambda }^{T}} \]
\[ \frac{\partial L(h,\lambda )}{\partial \lambda }={{h}^{H}}{{C}_{xv1}}^{'}-{{i}_{i}}^{T}{{C}_{xv1}}^{'}=0\to {{h}^{H}}{{C}_{xv1}}^{'}={{i}_{i}}^{T}{{C}_{xv1}}^{'}\to {{C}_{xv1}}^{'H}h={{C}_{xv1}}^{'H}{{i}_{i}} \]
\[ {{C}_{xv1}}^{'H}h=\frac{1}{2}{{C}_{xv1}}^{'H}{{\Phi }_{in}}^{-1}{{C}_{xv1}}^{'}{{\lambda }^{T}}={{C}_{xv1}}^{'H}{{i}_{i}}\to {{\lambda }^{T}}=2{{({{C}_{xv1}}^{'H}{{\Phi }_{in}}^{-1}{{C}_{xv1}}^{'})}^{-1}}{{C}_{xv1}}^{'H}{{i}_{i}} \]
\[ \to {h}_{LCMV}=\frac{1}{2}{{\Phi }_{in}}^{-1}{{C}_{xv1}}^{'}{{\lambda }^{T}}={{\Phi }_{in}}^{-1}{{C}_{xv1}}^{'}{{({{C}_{xv1}}^{'H}{{\Phi }_{in}}^{-1}{{C}_{xv1}}^{'})}^{-1}}{{C}_{xv1}}^{'H}{{i}_{i}}
\]
\[
\blacksquare
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% Q10 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection*{6.10}
Show that the LCMV filter can be expressed as
\begin{eqnarray*}
\mathbf{h}_{\mathrm{LCMV}} =
\mathbf{Q}_{\mathbf{v}_1}'' \mathbf{\Phi}_{\mathrm{in}}'^{-1} \mathbf{Q}_{\mathbf{v}_1}''^H \mathbf{Q}_{\mathbf{x}}'
\left( \mathbf{Q}_{\mathbf{x}}'^H \mathbf{Q}_{\mathbf{v}_1}''
\mathbf{\Phi}_{\mathrm{in}}'^{-1} \mathbf{Q}_{\mathbf{v}_1}''^H \mathbf{Q}_{\mathbf{x}}' \right)^{-1} \mathbf{Q}_{\mathbf{x}}'^H \mathbf{i}_{\mathrm{i}} .
\end{eqnarray*}
\textbf{\uline{Solution}}\textbf{:}
in order to find the LCMV filter, expressed through $a$, we will solve the following minimization:
$ {{\min }_{h}}[{{J}_{n}}(a)+{{J}_{i}}(a)]$ subject to ${{i}_{i}}^{T}{{Q}_{x}}^{'}={{a}^{H}}{{Q}_{v1}}^{''H}{{Q}_{x}}^{'}$
using a Lagrange multiplier we define the following function:
\[L(a,\lambda )=f(a)-\lambda g(a)\]
where $\lambda $ is a $1\times {{R}_{x}}$ vector and :
\[ f(a)={{J}_{n}}(a)+{{J}_{i}}(a)={{\Phi }_{vo}}{{a}^{H}}a+{{a}^{H}}{{\Phi }_{v}}a={{a}^{H}}{{\Phi }_{in}}a \]
\[ g(a)={{i}_{i}}^{T}{{Q}_{x}}^{'}-{{a}^{H}}{{Q}_{v1}}^{''H}{{Q}_{x}}^{'} \]
now we will find the minimum of L :
\[ \frac{\partial L(a,\lambda )}{\partial a}=2{{\Phi }_{in}}a-{{Q}_{v1}}^{''H}{{Q}_{x}}^{'}{{\lambda }^{T}}=0\to a=\frac{1}{2}{{\Phi }_{in}}^{-1}{{Q}_{v1}}^{''H}{{Q}_{x}}^{'}{{\lambda }^{T}} \]
\[ \frac{\partial L(a,\lambda )}{\partial \lambda }=0\to g(a)=0\to {{a}^{H}}{{Q}_{v1}}^{''H}{{Q}_{x}}^{'}={{i}_{i}}^{T}{{Q}_{x}}^{'}\to {{Q}_{v1}}^{''}{{Q}_{x}}{{^{'}}^{H}}a={{Q}_{x}}^{'H}{{i}_{i}} \]
\[ {{Q}_{v1}}^{''}{{Q}_{x}}{{^{'}}^{H}}a=\frac{1}{2}{{Q}_{v1}}^{''}{{Q}_{x}}{{^{'}}^{H}}{{\Phi }_{in}}^{-1}{{Q}_{v1}}^{''H}{{Q}_{x}}^{'}{{\lambda }^{T}}={{Q}_{x}}^{'H}{{i}_{i}}\to {{\lambda }^{T}}=2{{({{Q}_{v1}}^{''}{{Q}_{x}}{{^{'}}^{H}}{{\Phi }_{in}}^{-1}{{Q}_{v1}}^{''H}{{Q}_{x}}^{'})}^{-1}}{{Q}_{x}}^{'H}{{i}_{i}} \]
\[ \to {{a}_{LCMV}}={{\Phi }_{in}}^{-1}{{Q}_{v1}}^{''H}{{Q}_{x}}^{'}{{({{Q}_{v1}}^{''}{{Q}_{x}}{{^{'}}^{H}}{{\Phi }_{in}}^{-1}{{Q}_{v1}}^{''H}{{Q}_{x}}^{'})}^{-1}}{{Q}_{x}}^{'H}{{i}_{i}} \]
\[
\blacksquare
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% Q11 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection*{6.11}
Show that the maximum SINR filter with minimum distortion is given by
\begin{align*}
\mathbf{h}_{\mathrm{mSINR}} &= \frac{\mathbf{t}_1 \mathbf{t}_1^H \mathbf{\Phi}_{\mathbf{x}} \mathbf{i}_{\mathrm{i}}}{\lambda_1} \\
&= \mathbf{t}_1 \mathbf{t}_1^H \mathbf{\Phi}_{\mathrm{in}} \mathbf{i}_{\mathrm{i}} .
\end{align*}
\textbf{\uline{Solution}}\textbf{:}
we know the maximum SINR filter is given by:
\[{{h}_{mSINR}}={{t}_{1}}\varsigma \]
where $\varsigma$ is an arbitrary complex number, determined by solving the following minimization:
\[ {{J}_{d}}({{h}_{mSINR}})={{\Phi }_{x1}}+{{\lambda }_{1}}{{\left| \varsigma \right|}^{2}}-{{\varsigma }^{*}}{{t}_{1}}^{H}{{\Phi }_{x}}{{i}_{i}}-\varsigma {{i}_{i}}^{T}{{\Phi }_{x}}{{t}_{1}} \]
\[ \frac{\partial {{J}_{d}}}{\partial {{\varsigma }^{*}}}=2{{\lambda }_{1}}\varsigma -{{t}_{1}}^{H}{{\Phi }_{x}}{{i}_{i}}-{{({{i}_{i}}^{T}{{\Phi }_{x}}{{t}_{1}})}^{H}}=0 \]
\[ 2{{\lambda }_{1}}\varsigma -{{t}_{1}}^{H}{{\Phi }_{x}}{{i}_{i}}-{{t}_{1}}^{H}{{\Phi }_{x}}{{i}_{i}}=0\to \varsigma =\frac{{{t}_{1}}^{H}{{\Phi }_{x}}{{i}_{i}}}{{{\lambda }_{1}}} \]
so the maximum SINR filter is:
\[{{h}_{mSINR}}=\frac{{{t}_{1}}{{t}_{1}}^{H}{{\Phi }_{x}}{{i}_{i}}}{{{\lambda }_{1}}}\]
\[
\blacksquare
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% Q13 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection*{6.13}
Show that the output SINR can be expressed as
\begin{align*}
\mathrm{oSINR}\left( \mathbf{a} \right) &= \frac{ \mathbf{a}^H \mathbf{\Lambda} \mathbf{a} } { \mathbf{a}^H \mathbf{a} } \\
&= \frac{ \sum_{i=1}^{R_x} \left| a_i \right|^2 \lambda_i }{ \sum_{m=1}^{M} \left| a_m \right|^2 } .
\end{align*}
\textbf{\uline{Solution}}\textbf{:}
let's remember the definition of oSINR:
\[oSINR=\frac{{{h}^{H}}{{\Phi }_{x}}h}{{{h}^{H}}{{\Phi }_{in}}h}\]
where $h$ is written in the basis formed by the columns of $T$:
\[ h=Ta \]
from (6.83) and (6.84):
\[ {{T}^{H}}{{\Phi }_{x}}T=\Lambda \]
\[ {{T}^{H}}{{\Phi }_{in}}T={{I}_{M}} \]
we use all of that and substituting at the definition of oSINR:
\[ \frac{{{h}^{H}}{{\Phi }_{x}}h}{{{h}^{H}}{{\Phi }_{in}}h}=\frac{{{a}^{H}}{{T}^{H}}{{\Phi }_{x}}Ta}{{{a}^{H}}{{T}^{H}}{{\Phi }_{in}}Ta}=\frac{{{a}^{H}}\Lambda a}{{{a}^{H}}{{I}_{M}}a} \]
\[ \to oSINR=\frac{{{a}^{H}}\Lambda a}{{{a}^{H}}a} \]
\[
\blacksquare
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% Q14 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection*{6.14}
Show that the transformed identity filter, $\mathbf{i}_{\mathbf{T}}$, does not affect the observations, i.e., $z= \mathbf{i}_{\mathbf{T}}^H \mathbf{T}^H \mathbf{y} = y_1$ and $\mathrm{oSINR}\left( \mathbf{i}_{\mathbf{T}} \right) = \mathrm{iSINR}$.
\textbf{\uline{Solution}}\textbf{:}
we know that z is :
\[z={{a}^{H}}{{T}^{H}}y\]
for $a={{i}_{T}}$ we get:
\[ z={{i}_{T}}^{H}{{T}^{H}}y={{({{T}^{-1}}{{i}_{i}})}^{H}}{{T}^{H}}y={{i}_{i}}^{H}{{T}^{-H}}{{T}^{H}}y={{i}_{i}}^{H}y \]
\[ \to z={{y}_{1}} \]
\[
\blacksquare
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% Q16 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection*{6.16}
Show that the MSE can be expressed as
\begin{align*}
J\left( \mathbf{a} \right) = \left( \mathbf{a} - \mathbf{i}_{\mathbf{T}} \right)^H \mathbf{\Lambda}
\left( \mathbf{a} - \mathbf{i}_{\mathbf{T}} \right) + \mathbf{a}^H \mathbf{a} .
\end{align*}
\textbf{\uline{Solution}}\textbf{:}
as we know from (6.83):
\[ {{T}^{H}}{{\Phi }_{x}}T=\Lambda \to {{\Phi }_{x}}={{T}^{-H}}\Lambda {{T}^{-1}} \]
\[ {{\phi }_{x1}}={{i}_{i}}^{H}{{\Phi }_{x}}{{i}_{i}}\to {{\phi }_{x1}}={{i}_{i}}^{H}{{T}^{-H}}\Lambda {{T}^{-1}}{{i}_{i}} \]
now we will simplify the MSE from section 6.15:
\[J(a)={{\phi }_{x1}}-{{a}^{H}}\Lambda {{i}_{T}}-{{i}_{T}}^{H}\Lambda a+{{a}^{H}}(\Lambda +{{I}_{M}})a\]
as we prove before:
\[ {{\phi }_{x1}}={{i}_{i}}^{H}{{T}^{-H}}\Lambda {{T}^{-1}}{{i}_{i}}={{({{T}^{-1}}{{i}_{i}})}^{H}}\Lambda \left( {{T}^{-1}}{{i}_{i}} \right) \]
\[ \to {{\phi }_{x1}}={{i}_{T}}^{H}\Lambda {{i}_{T}} \]
\[ \to J(a)={{\phi }_{x1}}-{{a}^{H}}\Lambda {{i}_{T}}-{{i}_{T}}^{H}\Lambda a+{{a}^{H}}(\Lambda +{{I}_{M}})a={{i}_{T}}^{H}\Lambda {{i}_{T}}-{{a}^{H}}\Lambda {{i}_{T}}-{{i}_{T}}^{H}\Lambda a+{{a}^{H}}\Lambda a+{{a}^{H}}{{I}_{M}}a \]
\[ ={{a}^{H}}\Lambda (a-{{i}_{T}})-{{i}_{T}}^{H}\Lambda (a-{{i}_{T}})+{{a}^{H}}a=({{a}^{H}}\Lambda -{{i}_{T}}^{H}\Lambda )(a-{{i}_{T}})+{{a}^{H}}a= \]
\[ =({{a}^{H}}-{{i}_{T}}^{H})\Lambda (a-{{i}_{T}})+{{a}^{H}}a={{(a-{{i}_{T}})}^{H}}\Lambda (a-{{i}_{T}})+{{a}^{H}}a \]
\[ J(a)={{(a-{{i}_{T}})}^{H}}\Lambda (a-{{i}_{T}})+{{a}^{H}}a \]
\[
\blacksquare
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% Q17 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection*{6.17}
Show that the maximum SINR filter with minimum MSE is given by
\begin{eqnarray*}
\mathbf{h}_{\mathrm{mSINR,2}} = \frac{\lambda_1}{1+\lambda_1} \mathbf{t}_1 \mathbf{t}_1^H \mathbf{\Phi}_{\mathrm{in}} \mathbf{i}_{\mathrm{i}} .
\end{eqnarray*}
\textbf{\uline{Solution}}\textbf{:}
first of all we know from the definition of T :
\[ (1).T{{i}_{i}}={{t}_{1}} \]
\[ (2).{{T}^{H}}{{\Phi }_{in}}T={{I}_{M}} \]
\[ \to {{i}_{i}}^{T}={{i}_{i}}^{T}{{I}_{M}}={{i}_{i}}^{T}{{T}^{H}}{{\Phi }_{in}}T={{(T{{i}_{i}})}^{H}}{{\Phi }_{in}}T={{t}_{1}}^{H}{{\Phi }_{in}}T \]
\[ \to {{i}_{i}}^{T}{{T}^{-1}}={{t}_{1}}^{H}{{\Phi }_{in}}T{{T}^{-1}}={{t}_{1}}^{H}{{\Phi }_{in}} \]
as we know about ${{a}_{mSINR}}$ and the conclusions we shown before:
\[ {{a}_{mSINR}}=\frac{{{\lambda }_{1}}}{1+{{\lambda }_{1}}}{{i}_{i}}{{i}_{i}}^{T}{{T}^{-1}}{{i}_{i}}=\frac{{{\lambda }_{1}}}{1+{{\lambda }_{1}}}{{i}_{i}}{{t}_{1}}^{H}{{\Phi }_{in}}{{i}_{i}} \]
\[ {{h}_{mSINR}}=T{{a}_{mSINR}}=\frac{{{\lambda }_{1}}}{1+{{\lambda }_{1}}}T{{i}_{i}}{{t}_{1}}^{H}{{\Phi }_{in}}{{i}_{i}} \]
now we use the identity (1) that we mention earlier:
\[{{h}_{mSINR}}=\frac{{{\lambda }_{1}}}{1+{{\lambda }_{1}}}{{t}_{1}}{{t}_{1}}^{H}{{\Phi }_{in}}{{i}_{i}}\]
\[
\blacksquare
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% Q19 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection*{6.19}
Show that the Wiener filter can be expressed as
\begin{eqnarray*}
\mathbf{h}_{\mathrm{W}} = \sum_{i=1}^{R_x} \frac{\lambda_i}{1+\lambda_i}
\mathbf{t}_i \mathbf{t}_i^H \mathbf{\Phi}_{\mathrm{in}} \mathbf{i}_{\mathrm{i}} .
\end{eqnarray*}
\textbf{\uline{Solution}}\textbf{:}
first of all, from the definition of T we know that, for every column index $i$:
\[ (1).\ T{{i}_{i}}={{t}_{i}} \]
\[ (2).\ {{T}^{H}}{{\Phi }_{in}}T={{I}_{M}} \]
\[ \to {{i}_{i}}^{T}={{i}_{i}}^{T}{{I}_{M}}={{i}_{i}}^{T}{{T}^{H}}{{\Phi }_{in}}T={{(T{{i}_{i}})}^{H}}{{\Phi }_{in}}T={{t}_{i}}^{H}{{\Phi }_{in}}T \]
\[ \to {{i}_{i}}^{T}{{T}^{-1}}={{t}_{i}}^{H}{{\Phi }_{in}}T{{T}^{-1}}={{t}_{i}}^{H}{{\Phi }_{in}} \]
as we know about ${{a}_{W}}$, and using the conclusions shown above:
\[ {{a}_{W}}=\sum\limits_{i=1}^{{{R}_{X}}}{\frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}}{{i}_{i}}{{i}_{i}}^{T}{{T}^{-1}}{{i}_{i}}}=\sum\limits_{i=1}^{{{R}_{X}}}{\frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}}{{i}_{i}}{{t}_{i}}^{H}{{\Phi }_{in}}{{i}_{i}}} \]
\[ {{h}_{W}}=T{{a}_{W}}=T\sum\limits_{i=1}^{{{R}_{X}}}{\frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}}{{i}_{i}}{{t}_{i}}^{H}{{\Phi }_{in}}{{i}_{i}}}=\sum\limits_{i=1}^{{{R}_{X}}}{\frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}}T{{i}_{i}}{{t}_{i}}^{H}{{\Phi }_{in}}{{i}_{i}}} \]
now we use identity (1) mentioned above:
\[{{h}_{W}}=\sum\limits_{i=1}^{{{R}_{X}}}{\frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}}{{t}_{i}}{{t}_{i}}^{H}{{\Phi }_{in}}{{i}_{i}}}\]
\[
\blacksquare
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% Q20 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection*{6.20}
Show that with the Wiener filter $\mathbf{h}_{\mathrm{W}}$, the MMSE is given by
\begin{align*}
J\left( \mathbf{h}_{\mathrm{W}} \right) &= \mathbf{i}_{\mathbf{T}}^H \mathbf{\Lambda} \mathbf{i}_{\mathbf{T}} -
\sum_{i=1}^{R_x} \frac{\lambda_i^2}{1+\lambda_i} \left| \mathbf{i}_{\mathbf{T}}^H \mathbf{i}_i \right|^2 \\
&= \sum_{i=1}^{R_x} \frac{\lambda_i}{1+\lambda_i} \left| \mathbf{i}_{\mathbf{T}}^H \mathbf{i}_i \right|^2 .
\end{align*}
\textbf{\uline{Solution}}\textbf{:}
As was shown before:
\[J({{h}_{W}})=J({{a}_{W}})\]
we also know :
\[ {{a}_{W}}=\sum\limits_{i=1}^{{{R}_{X}}}{\frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}}}{{i}_{i}}{{i}_{i}}^{T}{{i}_{T}} \]
\[ {{a}_{W}}^{H}=\sum\limits_{i=1}^{{{R}_{X}}}{\frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}}}{{i}_{T}}^{H}{{i}_{i}}{{i}_{i}}^{T} \]
so we will calculate $J({{a}_{W}})$ :
\[ J({{a}_{W}})={{({{a}_{w}}-{{i}_{T}})}^{H}}\Lambda ({{a}_{w}}-{{i}_{T}})+{{a}_{W}}^{H}{{a}_{W}}= \]
\[ ={{i}_{T}}^{H}\Lambda {{i}_{T}}+{{a}_{W}}^{H}\Lambda {{a}_{W}}-{{i}_{T}}^{H}\Lambda {{a}_{W}}-{{a}_{W}}^{H}\Lambda {{i}_{T}}+{{a}_{W}}^{H}{{a}_{W}} \]
now let's calculate each part separately:
\[ {{a}_{W}}^{H}{{a}_{W}}=\sum\limits_{i=1}^{{{R}_{X}}}{(\frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}}}{{)}^{2}}{{i}_{T}}^{H}{{i}_{i}}{{i}_{i}}^{T}{{i}_{i}}{{i}_{i}}^{T}{{i}_{T}}=\sum\limits_{i=1}^{{{R}_{X}}}{(\frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}}}{{)}^{2}}{{\left| {{i}_{T}}^{H}{{i}_{i}} \right|}^{2}} \]
\[ {{i}_{T}}^{H}\Lambda {{a}_{W}}=\sum\limits_{i=1}^{{{R}_{X}}}{\frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}}}{{i}_{T}}^{H}\Lambda {{i}_{i}}{{i}_{i}}^{T}{{i}_{T}}=\sum\limits_{i=1}^{{{R}_{X}}}{\frac{{{\lambda }_{i}}^{2}}{1+{{\lambda }_{i}}}}{{i}_{T}}^{H}{{i}_{i}}{{i}_{i}}^{T}{{i}_{i}}{{i}_{i}}^{T}{{i}_{T}}=\sum\limits_{i=1}^{{{R}_{X}}}{\frac{{{\lambda }_{i}}^{2}}{1+{{\lambda }_{i}}}}{{\left| {{i}_{T}}^{H}{{i}_{i}} \right|}^{2}} \]
\[ {{a}_{W}}^{H}\Lambda {{i}_{T}}=\sum\limits_{i=1}^{{{R}_{X}}}{\frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}}}{{i}_{T}}^{H}{{i}_{i}}{{i}_{i}}^{T}\Lambda {{i}_{T}}=\sum\limits_{i=1}^{{{R}_{X}}}{\frac{{{\lambda }_{i}}^{2}}{1+{{\lambda }_{i}}}}{{i}_{T}}^{H}{{i}_{i}}{{i}_{i}}^{T}{{i}_{i}}{{i}_{i}}^{T}{{i}_{T}}=\sum\limits_{i=1}^{{{R}_{X}}}{\frac{{{\lambda }_{i}}^{2}}{1+{{\lambda }_{i}}}}{{\left| {{i}_{T}}^{H}{{i}_{i}} \right|}^{2}} \]
\[ {{a}_{W}}^{H}\Lambda {{a}_{W}}=\sum\limits_{i=1}^{{{R}_{X}}}{(\frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}}}{{)}^{2}}{{\lambda }_{i}}{{i}_{T}}^{H}{{i}_{i}}{{i}_{i}}^{T}{{i}_{i}}{{i}_{i}}^{T}{{i}_{T}}=\sum\limits_{i=1}^{{{R}_{X}}}{(\frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}}}{{)}^{2}}{{\lambda }_{i}}{{\left| {{i}_{T}}^{H}{{i}_{i}} \right|}^{2}} \]
We will put everything into our expression:
\[ J({{a}_{W}})={{i}_{T}}^{H}\Lambda {{i}_{T}}+\sum\limits_{i=1}^{{{R}_{X}}}{\left( {{\left( \frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}} \right)}^{2}}+{{\lambda }_{i}}{{\left( \frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}} \right)}^{2}}-2\frac{{{\lambda }_{i}}^{2}}{1+{{\lambda }_{i}}} \right)}{{\left| {{i}_{T}}^{H}{{i}_{i}} \right|}^{2}}={{i}_{T}}^{H}\Lambda {{i}_{T}}+\sum\limits_{i=1}^{{{R}_{X}}}{\left( {{\left( \frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}} \right)}^{2}}(1+{{\lambda }_{i}})-2(1+{{\lambda }_{i}}){{\left( \frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}} \right)}^{2}} \right)}{{\left| {{i}_{T}}^{H}{{i}_{i}} \right|}^{2}}= \]
\[ ={{i}_{T}}^{H}\Lambda {{i}_{T}}+\sum\limits_{i=1}^{{{R}_{X}}}{\left( \frac{{{\lambda }_{i}}^{2}}{1+{{\lambda }_{i}}}-2\frac{{{\lambda }_{i}}^{2}}{1+{{\lambda }_{i}}} \right)}{{\left| {{i}_{T}}^{H}{{i}_{i}} \right|}^{2}}={{i}_{T}}^{H}\Lambda {{i}_{T}}-\sum\limits_{i=1}^{{{R}_{X}}}{\left( \frac{{{\lambda }_{i}}^{2}}{1+{{\lambda }_{i}}} \right)}{{\left| {{i}_{T}}^{H}{{i}_{i}} \right|}^{2}} \]
finally let's simplify the expression:
\[J({{h}_{W}})={{i}_{T}}^{H}\Lambda {{i}_{T}}-\sum\limits_{i=1}^{{{R}_{X}}}{\left( \frac{{{\lambda }_{i}}^{2}}{1+{{\lambda }_{i}}} \right)}{{\left| {{i}_{T}}^{H}{{i}_{i}} \right|}^{2}}=\sum\limits_{i=1}^{{{R}_{X}}}{{{\lambda }_{i}}{{\left| {{i}_{T}}^{H}{{i}_{i}} \right|}^{2}}}-\sum\limits_{i=1}^{{{R}_{X}}}{\left( \frac{{{\lambda }_{i}}^{2}}{1+{{\lambda }_{i}}} \right)}{{\left| {{i}_{T}}^{H}{{i}_{i}} \right|}^{2}}=\sum\limits_{i=1}^{{{R}_{X}}}{\left( {{\lambda }_{i}}-\frac{{{\lambda }_{i}}^{2}}{1+{{\lambda }_{i}}} \right)}{{\left| {{i}_{T}}^{H}{{i}_{i}} \right|}^{2}}=\sum\limits_{i=1}^{{{R}_{X}}}{\frac{{{\lambda }_{i}}}{1+{{\lambda }_{i}}}}{{\left| {{i}_{T}}^{H}{{i}_{i}} \right|}^{2}}\].
\[
\blacksquare
\]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%% Q22 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection*{6.22}
Show that the class of filters $\mathbf{a}_Q$ compromises in between large values of the output SINR and small values of the MSE, i.e.,
\[ (a)\ iSINR\le oSINR({{a}_{{{R}_{X}}}})\le oSINR({{a}_{{{R}_{X}}-1}})\le \cdots \le oSINR({{a}_{1}})={{\lambda }_{1}} \]
\[ (b)\ J({{a}_{{{R}_{X}}}})\le J({{a}_{{{R}_{X}}-1}})\le \cdots \le J({{a}_{1}}) \]
\textbf{\uline{Solution}}\textbf{:}
first of all we will use the following property:
\\
Let ${{\lambda }_{1}}\ge {{\lambda }_{2}}\ge \cdots \ge {{\lambda }_{M}}\ge 0$, then:
\[\frac{\sum\limits_{i=1}^{M}{{{\left| {{a}_{i}} \right|}^{2}}{{\lambda }_{i}}}}{\sum\limits_{i=1}^{M}{{{\left| {{a}_{i}} \right|}^{2}}}}\le \frac{\sum\limits_{i=1}^{M-1}{{{\left| {{a}_{i}} \right|}^{2}}{{\lambda }_{i}}}}{\sum\limits_{i=1}^{M-1}{{{\left| {{a}_{i}} \right|}^{2}}}}\le \cdots \le \frac{\sum\limits_{i=1}^{2}{{{\left| {{a}_{i}} \right|}^{2}}{{\lambda }_{i}}}}{\sum\limits_{i=1}^{2}{{{\left| {{a}_{i}} \right|}^{2}}}}\le {{\lambda }_{1}}\]
now we can define a class of filters that have the following form:
\[{{a}_{Q}}=\sum\limits_{q=1}^{Q}{\frac{{{\lambda }_{q}}}{1+{{\lambda }_{q}}}{{i}_{q}}{{i}_{q}}^{T}{{T}^{-1}}{{i}_{i}}}\]
where $1\le Q\le {{R}_{X}}$
we can easily see that:
\[ {{h}_{1}}={{h}_{mSINR,2}} \]
\[ {{h}_{{{R}_{X}}}}={{h}_{W}} \]
from the property we shown earlier it is easy to see that:
\[iSINR\le oSINR({{a}_{{{R}_{X}}}})\le oSINR({{a}_{{{R}_{X}}-1}})\le \cdots \le oSINR({{a}_{1}})={{\lambda }_{1}}\]
\[
\blacksquare
\]
now it is easy to compute the MSE:
\[J({{a}_{Q}})={{i}_{T}}^{H}\Lambda {{i}_{T}}-\sum\limits_{q=1}^{Q}{\frac{{{\lambda }_{q}}^{2}}{1+{{\lambda }_{q}}}}{{\left| {{i}_{T}}^{H}{{i}_{q}} \right|}^{2}}=\sum\limits_{q=1}^{Q}{\frac{{{\lambda }_{q}}}{1+{{\lambda }_{q}}}}{{\left| {{i}_{T}}^{H}{{i}_{q}} \right|}^{2}}+\sum\limits_{i=Q+1}^{{{R}_{X}}}{{{\lambda }_{i}}}{{\left| {{i}_{T}}^{H}{{i}_{i}} \right|}^{2}}\]
so finally we can deduce that:
\[J({{a}_{{{R}_{X}}}})\le J({{a}_{{{R}_{X}}-1}})\le \cdots \le J({{a}_{1}})\]
\[
\blacksquare
\]
\end{document}