\documentclass[11pt]{article}
\usepackage{verbatim}
\usepackage{amsmath,amsfonts}
\usepackage[mathscr]{eucal}
\usepackage{amssymb,amsmath}
\usepackage{latexsym}
\usepackage{amsfonts}
\usepackage{amscd}
\usepackage{amsthm}
\newcommand {\C}{\mathbb {C}}
\newcommand {\R}{\mathbb {R}}
\newcommand {\T}{\mathbb {T}}
\newcommand {\D}{\mathcal {D}}
\newcommand {\I}{\mathcal {I}}
\newcommand {\G}{\mathcal {G}}
\newcommand {\N}{\mathcal {N}}
\newcommand {\Z}{\mathbb {Z}}
\topmargin=-0.20in
\textheight=8.5in
\begin{document}
\noindent{\bf Math 273}:
\noindent{\bf Homework \#5: due on Dec. 3rd or during the week of finals, and no later than December 10. You can leave your homework at my office, in my mailbox, or with Babette Dalton from MS 8719.}
{\bf [1]} Assume that $V$ and $V^*$ are normed vector spaces in duality. Let
$F:V\rightarrow \overline{\R}$ and let $F^*:V^*\rightarrow \overline{\R}$ be the polar or conjugate of $F$. We define $F^*$ by
$$F^*(u^*)=\sup_{u\in V}\Big\{\langle u^*,u\rangle-F(u)\Big\}.$$
Show
(i) $F^*(0)=-\inf_{u\in V} F(u)$.
(ii) $(\lambda F)^*(u^*)=\lambda F^*(\frac{1}{\lambda}u^*)$ for every $\lambda >0$.
\medbreak
{\bf [2]} Let $V=V^*=\R^n$. Let $Q$ be a symmetric positive definite $n\times n$ matrix, $b\in\R^n$, and consider $f(x):=\frac{1}{2}\langle x,Qx\rangle+\langle b,x\rangle$, for all $x\in\R^n$. Find the polar (or the conjugate) $f^*$ and deduce that, in particular, $\frac{1}{2}\|\cdot\|^2$ is its own polar (or conjugate).
\medbreak
{\bf [3]} Let $F:V\rightarrow \R$ and $F^*$ its polar. Then $u^*\in \partial F(u)$ if and only if $F(u)+F^*(u^*)=\langle u^*,u\rangle$.
\medbreak
{\bf [4]} Show that the polar $F^*$ is convex.
\medbreak
{\bf [5]} Let $F:\R^n\rightarrow\R$ be continuous and coercive with respect to the Euclidean norm $\|x\|$, for $x\in\R^n$. Then there is $x_0\in\R^n$ such that $F(x_0)=\inf_{x\in \R^n} F(x)$. If in addition $F$ is strictly convex, then the minimizer $x_0$ is unique.
\medbreak
{\bf [6]} Let $f\in \R^{N^2}$ be given, and let $u\in\R^{N^2}$ be an unknown minimizer of the functional (already seen before)
$$E(w)=\sum_{i,j=0}^{N-1}|\nabla w_{i,j}|^2+\lambda \sum_{i,j=0}^{N-1}(w_{i,j}-f_{i,j})^2,$$
for $w\in \R^{N^2}$, where
$$\nabla w_{i,j}=
\begin{pmatrix}
(D_xw)_{i,j}\\
(D_yw)_{i,j}
\end{pmatrix}
=
\begin{pmatrix}
w_{i+1,j}-w_{i,j}\\
w_{i,j+1}-w_{i,j}
\end{pmatrix},
$$
for $(i,j)\in \{0,\dots,N-1\}^2$ (we assume that all vectors $f$, $u$, $w$ are periodic outside of their support).
(a) Find the adjoint operators $D_x^*$ and $D^*_y$ of $D_x$ and $D_y$.
(b) Find a linear operator $B:\R^{N^2}\rightarrow\R^{N^2}$, a $c\in \R^{N^2}$, and $C(f)$, independent of $w$, such that for all $w\in \R^{N^2}$,
$$E(w)=\langle Bw,w\rangle+\langle c,w\rangle+C(f).$$
(c) Show that $B$ is self-adjoint.
(d) Find the Gateaux differential of $E(w)$ in the direction $v$ and thus give a necessary (and sufficient) condition for $u$ to be a minimizer, by setting this differential to zero (as the zero functional).
\bigbreak
\noindent{\bf Optional Problems}
\medbreak
{\bf [7]} Note that a function $F:V\rightarrow \overline{\R}$, with $V$ a normed vector space, is lower semi-continuous (l.s.c.) on $V$ by the equivalent definition:
$$\forall a\in \R: \ \ \Big\{u\in V| \ F(u)\leq a\Big\} \mbox{ is closed.}$$
Using this, show that $F$ is l.s.c. iff its epigraph is closed (hint: consider
the function on $V\times \R$ defined by $f(u,a)=F(u)-a$).
\medbreak
{\bf [8]} Show that the function
$$f(x)=\frac{\|Ax-b\|_2^2}{1-x^\top x}$$
is convex on $\{x: \ \|x\|_2\leq 1\}$ (hint: show that its epigraph is a convex set).
\medbreak
{\bf [9]} Infimal convolution. Let $f_1$, . . . , $f_m$ be convex functions on $\R^n$. Their infimal convolution,
denoted $g = f_1 \circ \dots \circ f_m$
(several other notations are also used), is defined as
$g(x) = \inf\Big\{f_1(x_1) + \dots + f_m(x_m) \ \Big| \ x_1 + \dots + x_m = x\Big\}$,
with the natural domain (i.e., defined by $g(x) < \infty$).
(The name ``convolution'' presumably comes from the
observation that if we replace the sum above with the product, and the infimum above with integration, then we obtain the normal convolution.)
(a) Show that $g$ is convex.
(b) Show that $g^*= f_1^*+\dots+f_m^*$.
\medbreak
{\bf [10]} {\it Approximate total variation denoising}
Let $y\in \R^n$ be a given (corrupted, noisy) vector, $x\in \R^n$ is the denoised vector to be computed by minimizing the 1D-discrete TV function with data fidelity term:
$$f(x)=\|x-y\|^2_2 + \lambda \sum_{i=1}^{n-1}|x_{i+1}-x_i|.$$
We can make the second term twice differentiable by approximation with regularization:
$$TV(x)=\sum_{i=1}^{n-1}|x_{i+1}-x_i| \approx \sum_{i=1}^{n-1}\Big(\sqrt{\epsilon^2 +(x_{i+1}-x_i)^2}-\epsilon\Big),$$
where $\epsilon>0$ is a small parameter.
Apply the gradient descent method and Newton's method for the minimization using the approximate formula for TV (similar to the previous length-minimization problem). Define a noisy vector $y$ as data.
\bigbreak
{\bf [11]} Solve a constrained optimization problem of your choice using the Augmented Lagrangian Method.
\end{document}