\documentclass[12pt]{article}
\def\rit{ \hbox{\it I\hskip -2pt R} }
\topmargin=-0.5in
\textheight=8.5in
\begin{document}
{\bf Math 273}: {\bf Homework \#2, due on Monday, October 25}
\medbreak
\noindent{\bf [1]} Consider the minimization problem
$$\inf_{u}F(u)=\int_{x_0}^{x_1} L(x,u(x),u'(x),u''(x))dx,$$
with $u(x_0)=u_0$, $u(x_1)=u_1$, $u'(x_0)=U_0$, $u'(x_1)=U_1$ given, and $L$ is a sufficiently smooth
function. Obtain the Euler--Lagrange equation of the minimization problem that is satisfied by a smooth optimal $u$. Choose test functions $v$ in $C^{\infty}[x_0,x_1]$ that satisfy $v(x_0)=v(x_1)=v'(x_0)=v'(x_1)=0$, and proceed as in HW1, problem [5] (you should obtain a fourth-order differential equation).
\medbreak
\noindent{\bf [2]} Consider the 1D length functional minimization problem
$$\mbox{Min}_u F(u)=\int_{0}^1 L(u'(x))dx, \mbox{ or }\mbox{Min}_{u}\int_0^1 \sqrt{1+(u'(x))^2}dx,
$$
over functions $u:[0,1]\rightarrow \rit$ with boundary conditions $u(0)=0$, $u(1)=1$.
(a) Find the exact solution of the problem.
(b) Show that the functional $u\mapsto F(u)$ is convex.
% NOTE(review): the source was garbled here (problem [2](c) truncated and problem
% [3]'s statement lost); reconstructed from the surviving fragments and the Notes
% on the pseudo-inverse below -- please verify against the original handout.
(c) Consider a discrete version of the problem: let
$$x_0=0<x_1<\cdots<x_N=1,\quad x_i=ih,\quad h=\frac{1}{N},$$
be a uniform grid on $[0,1]$, let $u_i$ approximate $u(x_i)$ (with $u_0=0$, $u_N=1$), and let
$$F_h(u_1,\dots,u_{N-1})=\sum_{i=1}^{N}h\sqrt{1+\Big(\frac{u_i-u_{i-1}}{h}\Big)^2}.$$
Show that the discrete functional $F_h$ is convex, and find its minimum points.
\medbreak
\noindent{\bf [3]} Let $A$ be a symmetric $n\times n$ matrix, $b\in\rit^n$, and consider the quadratic function
$$q(x)=\frac{1}{2}x^tAx-b^tx.$$
Show that the following three statements:
(i) the problem $\inf\{q(x):\ x\in \rit^n\} > -\infty $
(ii) $A\geq O$ and $b\in \mbox{Im} A$.
(iii) the problem $\inf\{q(x):\ x\in \rit^n\}$ has a solution
\noindent are equivalent. When they hold, characterize the set of minimum points of $q$, in terms of the pseudo-inverse of $A$.
\medbreak
\noindent{\bf [4]} Recall the BFGS update formula for the Hessian approximation:
$$B_{k+1}=B_k-\frac{B_ks_ks_k^tB_k}{s_k^tB_ks_k}+\frac{y_ky_k^t}{y_k^ts_k}$$
(where $B_k$ is symmetric and positive definite), and the formula to directly update the inverse of Hessian approximation:
$$H_{k+1}=(I-\rho_ks_ky_k^t)H_k(I-\rho_ky_ks_k^t)+\rho_ks_ks_k^t$$
(where $H_k$ is symmetric and positive definite, as inverse of $B_k$, and $\rho_k=\frac{1}{y_k^ts_k}$).
Using the following Sherman--Morrison--Woodbury formula, show that $H_{k+1}$ is the inverse of $B_{k+1}$.
If $A$ is an $n\times n$ nonsingular matrix, and $a,b$ vectors in $\rit^n$, let $\overline{A}=A+ab^t$. Then the following (SMW) formula holds:
$$\mbox{(SMW)} \ \ \ \ \ \ \ \overline{A}^{-1}=A^{-1}-\frac{A^{-1}ab^tA^{-1}}{1+b^tA^{-1}a}.$$
{\bf Notes:}
\medbreak
$\bullet$ If $A$ is a symmetric (or self-adjoint) linear operator on $X$, then
$(\mbox{Im} A)^{\perp}=\mbox{Ker}A$. Let $p_{\mbox{Im}A}$ be the operator of orthogonal projection onto $\mbox{Im}A$. For given $y\in X$, there is a unique $x=x(y)$ in $\mbox{Im}A$ such that $Ax=p_{\mbox{Im}A}y$. Furthermore, the mapping $y\mapsto x(y)$ is linear. This mapping is called the pseudo-inverse, or generalized inverse of $A$.
\medbreak
$\bullet$ Let $\Omega$ be an open and bounded subset of $\rit^d$, with Lipschitz-continuous (or sufficiently smooth) boundary $\partial\Omega$. Let $\vec{n}=(n_1,n_2,...,n_d)$ be the exterior unit normal to $\partial\Omega$.
Recall the following fundamental Green's formula, or integration by parts formula: given two functions $u,v$ (with $u$, $v$, and all their 1st order partial derivatives belonging to $L^2(\Omega)$, or $u,v\in H^1(\Omega)$), then
$$\int_{\Omega}u v_{x_i} dx=-\int_{\Omega} u_{x_i} v dx +\int_{\partial\Omega} u v n_i dS.$$
\end{document}