\documentclass[twoside]{article}
\usepackage{amssymb} % used for R in Real numbers
\pagestyle{myheadings}
\markboth{\hfil On multi-lump solutions \hfil EJDE--1998/29}
{EJDE--1998/29\hfil Robert Magnus\hfil}
\begin{document}
\title{\vspace{-1in}\parbox{\linewidth}{\footnotesize\noindent
{\sc Electronic Journal of Differential Equations},
Vol. {\bf 1998}(1998), No.~29, pp. 1--24. \newline
ISSN: 1072-6691. URL: http://ejde.math.swt.edu or http://ejde.math.unt.edu
\newline ftp 147.26.103.110 or 129.120.3.113 (login: ftp)}
\vspace{\bigskipamount} \\
On multi-lump solutions to the non-linear Schr\"{o}dinger equation
\thanks{ {\em 1991 Mathematics Subject Classifications:} 35J60, 35Q55.
\hfil\break\indent
{\em Key words and phrases:} Non-linear Schr\"{o}dinger equation,
semi-classical bound state, \hfil\break\indent nonlinear-elliptic equation.
\hfil\break\indent
\copyright 1998 Southwest Texas State University and University of
North Texas. \hfil\break\indent
Submitted June 23, 1998. Published November 15, 1998.} }
\date{}
\author{Robert Magnus}
\maketitle
\begin{abstract}
We present a new approach to proving the existence of semi-classical bound
states of the non-linear Schr\"{o}dinger equation which are concentrated near
a finite set of non-degenerate critical points of the potential function.
The method is based on considering a system of non-linear elliptic equations.
The positivity of the solutions is considered. It is shown how the same method
yields ``multi-bump'' solutions ``homoclinic'' to an equilibrium point for
non-autonomous Hamiltonian equations. The method provides a calculable
asymptotic form for the solutions in terms of a small parameter.
\end{abstract}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\section{Introduction}
In this paper we study a system of non-linear elliptic equations which can yield
the existence of multilump solutions to the non-linear Schr\"{o}dinger equation
(NLS)
$$
i\hbar \frac{\partial \psi}{\partial t} = -\frac{\hbar^2}{2}\Delta \psi
+V(x)\psi -\gamma |\psi|^{p-1}\psi\,.
$$
If we seek standing wave solutions
over the whole of $n$-dimensional Euclidean space ${\mathbb R}^n$
of the form $\psi = e^{-iEt/\hbar}
v(x)$, we find that the function $v(x)$ satisfies the equation
$$
-\frac{\hbar^2}{2}\Delta v +(V(x)-E)v - \gamma|v|^{p-1}v=0.
$$
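Indeed, substituting $\psi = e^{-iEt/\hbar}v(x)$ into the equation and
cancelling the common factor $e^{-iEt/\hbar}$ gives
$$
Ev = -\frac{\hbar^2}{2}\Delta v +V(x)v -\gamma |v|^{p-1}v\,,
$$
which rearranges to the equation above.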
Now set $\hbar^2/2=\epsilon^2$, rename $V(x)-E$ as $V(x)$
and put $\gamma=1$ to obtain the equation
$$
-\epsilon^2\Delta v + V(x)v-|v|^{p-1}v=0\,. \eqno{(1)}
$$
We shall assume that $V(x)$ is bounded from below by a positive constant.
Floer and A. Weinstein \cite{FW} showed that, given a
non-degenerate critical point $b$ of $V(x)$, equation (1) has a
positive solution for all sufficiently small $\epsilon$ which
concentrates at $b$, in the sense that, as $\epsilon\to 0$ the
solution tends to 0 uniformly in the complement of any given
neighbourhood of $b$. (We omit some technical conditions on $V(x)$
which the reader can look up in \cite{FW}). Y.~G.~Oh \cite{Oh1}
showed that a similar result holds in which $b$ is replaced by
finitely many non-degenerate critical points $b_1$, \dots, $b_m$.
These are the multi-lump (-bump or -hump) solutions. Oh showed that
they are positive, and furthermore, that the corresponding standing wave
solution is unstable if there is more than one hump.
The method used by Oh is a generalization of the method of Floer and
Weinstein. Although the main idea, based on Liapunov-Schmidt
splitting, is simple enough, the details are rather difficult,
involving many subtle estimates. In this paper we propose an alternative
method. This has several points of contact with the previous method,
but we hope the reader will agree that it is somewhat simpler.
Moreover it provides a computable asymptotic form for the solution.
This will be clarified in the course of the paper.
In a recent paper \cite{ABC}, Ambrosetti, Badiale and Cingolani showed
how to obtain single-hump states by an attractive method which is
simpler than that of Floer and Weinstein. It is not clear whether
their method can be used to prove the results presented in this paper.
A number of treatments have appeared based on variational principles
and they typically do not require non-degeneracy of the critical
points of the potential function \cite{Gui1, Gui2, PFP1, PFP2, Rab}.
As in previous approaches we transform the independent variable,
setting $y= x/\epsilon$. Thus, renaming $y$ as $x$ and dropping the
absolute-value signs, we have the equation in the form in which we shall
treat it
$$
-\Delta v +V(\epsilon x)v -v^p=0. \eqno{(2)}
$$
Let $b_1$, \dots, $b_m$ be non-degenerate critical points of $V(x)$.
Now we seek solutions that are concentrated near
to the points $b_1/\epsilon$, \dots, $b_m/\epsilon$.
These points draw apart as
$\epsilon$ tends to 0. Let $v_1$, \dots, $v_m$ be approximate
single-hump solutions with
$v_k$ concentrated near to $b_k/\epsilon$. Then approximately
$$
-\Delta\Big(\sum v_i\Big) +V(\epsilon x)\Big(\sum v_i\Big)
-\Big(\sum v_i\Big)^p \approx \sum \Big({-\Delta v_i} +V(\epsilon x)v_i
-v_i^p\Big)\approx 0
$$
for small $\epsilon$. The non-linear operator is approximately
additive here because the products of distinct functions among
$v_1$, \dots, $v_m$ become small as their maxima draw apart.
We exploit this by writing $m$ equations, one for each $v_i$, and
coupling them by products of the variables $v_1,\ldots,v_m$, in such
a way that the sum $v_1+\cdots+ v_m$ satisfies (2).
In fact there is an obvious way to write $m$ equations so that the sum
$v_1+\cdots+ v_m$ satisfies (2), namely
$$
-\Delta v_i +V(\epsilon x)v_i -\Big(\sum_{k=1}^m v_k\Big)^{p-1} v_i
=0, \quad i=1,\ldots,m.
$$
Unfortunately there is a hidden degeneracy here which causes technical
problems (see section 2.1 and the condition $ND$).
These can be overcome by distributing the polynomial
$\Big(\sum_{k=1}^m v_k\Big)^{p}$ in a different fashion over the
right-hand sides of the $m$ equations.
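To indicate the source of the degeneracy (anticipating the notation of
section 2.1): with this choice the non-linearity in the $i^{\rm th}$
equation is
$$
G_i(v)=\Big(\sum_{k=1}^m v_k\Big)^{p-1} v_i = v_i^p+g_i(v)v_i,
\qquad g_i(v)=\Big(\sum_{k=1}^m v_k\Big)^{p-1}-v_i^{p-1},
$$
so that the coefficient of $v_k^{p-1}$ in $g_i$ equals $1$ for every
$k\not=i$. As we shall see (sections 2.1 and 6.3), the value $1$ is
precisely the lowest point of the exceptional set $\Sigma$, so the
non-degeneracy condition $ND$ fails for this choice.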
We propose therefore to study a system of elliptic equations
$$
-\Delta v_i +V(\epsilon x)v_i -G_i(v)=0, \quad i=1,\ldots,m,
$$
where $v=(v_1,\ldots,v_m)$, and the functions $G_i$ are homogeneous
polynomials of degree $p$ in the $m$ variables $v_1$, \dots, $v_m$
with real coefficients.
The results which we obtain concern existence of
solutions of such systems. We also investigate the positivity
of the solutions in a systematic way.
To obtain multilump solutions of the
non-linear Schr\"{o}dinger equation we can choose the functions
$G_i$ so that
$$
\sum_{i=1}^m G_i(v)= \Big(\sum_{i=1}^m v_i\Big)^p
$$
and such that the non-degeneracy condition referred to above is
satisfied.
We are going to assume a polynomial non-linearity throughout this
paper. This means that $p$ will be an integer greater than 1, and
imposes some restriction as we will also need the upper bound $p <
(n+2)/(n-2)$. The reason for taking $p$ to be an integer is to
facilitate an algebraic treatment of functions of sums of the form
$G_i(u+v)$. It is likely that a homogeneous non-linearity of
non-integer degree can be treated by an analytic method (similar to a
Taylor expansion). We shall not attempt this here.
Combined with the upper bound, the integrality of $p$ means that $p$
is not restricted from above if $n=1$ or
2; for $n=3$ we have $p=2$, 3 or 4; for $n=4$ or 5 we have only $p=2$;
while for $n>5$ there are no admissible cases.
We shall show how the same approach can be used to study the equation
$$
-\Delta v +v-(1+\epsilon h(x))v^p=0
$$
where $h$ is a bounded measurable function.
We view this as a small perturbation of
$$
-\Delta v +v-v^p=0\,,
$$
which possesses a spherically symmetric positive
solution $\phi$ in the space $H^2({\mathbb R}^n)$.
All translates of the function $\phi$ are solutions. However the
perturbation breaks the translational symmetry of the equation and by
\cite{Mag3} we
should look for solutions near to a translate $\phi(x-c)$ where $c\in
{\mathbb R}^n$ is a critical point of the function
$$
F(s)= \int h(x)\phi(x+s)^{p+1}\,d^nx.
$$
Such a solution has one hump. We assume that $h$ is periodic and
find solutions near a linear combination of translates $\sum_{i=1}^m
\phi(x-c_i)$ provided the separations $\|c_i-c_j\|$ are large enough.
In one dimension ($n=1$) these are the homoclinic solutions
investigated in \cite{ZR} and \cite{Sere} by variational methods.
Our solutions are like homoclinic solutions in that they decay at
infinity and we are able to give calculable
asymptotic forms.
Here is a brief summary of the contents of the sections. In section~2
we study the existence of solutions to the system of elliptic
equations; in section~3 we apply this to the non-linear
Schr\"{o}dinger equation; in section 4 we consider positivity of the
solutions of the system of elliptic equations studied in section~2,
with an application to the non-linear Schr\"{o}dinger equation; in
section~5 we study multi-dimensional ``homoclinics''; in section~6 we provide
a few technical results needed in previous sections,
including a version of the implicit
function theorem adapted to our needs.
\section{A system of equations}
\subsection{Hypotheses and statement of theorem}
We consider the system
$$
-\Delta v_i +V(\epsilon x)v_i -G_i(v)=0, \quad i=1,\ldots,m
\eqno{(3)}
$$
where $v=(v_1,\ldots,v_m)$ and the functions $G_i$ are homogeneous
polynomials of degree $p$ in the $m$ variables $v_1$, \dots, $v_m$
with real coefficients. We seek solutions $v_i(x)$ belonging to the
real Hilbert space $H^2({\mathbb R}^n)$, hereafter abbreviated to $H^2$, of
real-valued functions on all of ${\mathbb R}^n$ which have square-integrable
derivatives up to the second order.
We assume that $V$ is a bounded, $C^2$, real-valued function, with
bounded first and second derivatives. These conditions can
be relaxed somewhat (Oh employs weaker conditions), but they
allow our method to run smoothly. We assume that there exists
$\delta>0$ such that $V(x)>\delta$ for all $x$.
We emphasize that we are concerned with real-valued solutions. If the
non-linear term in (2) appears as $|v|^{p-1}v$ then for every solution $v$ we
obtain another by multiplying by a phase-factor $e^{i\alpha}$. It then
makes sense to seek complex-valued solutions which look like a sum of
lumps with possibly different phase factors. This may be taken up in
another paper.
If we assume that $2\le p< \infty$ in case $n=1$, 2, 3 or 4 and $1\le p \le
n/(n-4)$ in case $n\ge 5$, then the Sobolev Embedding
Theorem \cite{Ad} guarantees that the space $H^2$ is continuously embedded in
$L^{2p}$. Thus if $v\in H^2$ then $G_i(v)\in L^2$.
However we have to make the further restriction that $p<(n+2)/(n-2)$.
This is to ensure the existence of the positive, radially symmetric
solution $v=\phi(x)$ to the equation
$$
-\Delta v + v - v^p =0\,.
$$
The existence and uniqueness of this solution was
shown by M. Weinstein \cite{MW} in the cases $n=1$ and $n=3$;
uniqueness for general $n$ was subsequently proved by Kwong \cite{Kw}.

Let $b_1$, \dots, $b_m$ be distinct non-degenerate critical points of
$V$. For $i=1,\ldots,m$ set $a_i=V(b_i)>0$ and
$$
u_i(x)= a_i^{\frac{1}{p-1}}\phi(\sqrt{a_i}\,x)\,,
$$
so that $u_i$ is a positive solution of $-\Delta u +a_iu -u^p=0$.
We write the non-linearities in the form
$$
G_i(v)= v_i^p+g_i(v)v_i\,,
$$
where $g_i$ is a homogeneous polynomial of degree $p-1$ every monomial
of which contains at least one variable $v_k$ with $k\not=i$, and we
denote by $\lambda_{ik}$ the coefficient of $v_k^{p-1}$ in $g_i(v)$.
Let $\Sigma$ be the set of numbers $\lambda$ for which the equation
$-\Delta v+v-\lambda\phi(x)^{p-1}v=0$ has a non-trivial solution in
$H^2$ (see section 6.3). We impose throughout the non-degeneracy
condition
$$
ND:\qquad \lambda_{ik}\not\in\Sigma \quad\hbox{for all } i\not=k\,.
$$
\begin{theorem}
Under the above hypotheses the system (3) has, for each sufficiently
small $\epsilon>0$, a solution
$v=(v_1,\ldots,v_m)$, which depends continuously
on $\epsilon$ in the $H^2$-norm, and is
such that $v_i(\cdot +\frac{b_i}{\epsilon})$ tends to $u_i$ in the
$H^2$-norm as $\epsilon\to 0$.
We have moreover the following asymptotic information. The solution
has the form
$$
v_i(x)=
u_i\Big(x-\frac{b_i}{\epsilon}+s_i\Big)+
\epsilon^2w_i\Big(x-\frac{b_i}{\epsilon}+s_i\Big)
$$
where $s_i$ is a vector in ${\mathbb R}^n$ and the function $w_i$ is orthogonal in $L^2$ to
the partial derivatives of $u_i$. Both $s_i$ and $w_i$ depend on
$\epsilon$ in such a way that:
{\rm(i)} $\lim_{\epsilon\to 0+} s_i=0$;
{\rm(ii)} $\lim_{\epsilon\to 0+}w_i =\eta_i$ in $H^2$;
\noindent
where $\eta_i$ is the unique solution of
$$
-\Delta \eta_i(x) +a_i\eta_i(x) -pu_i(x)^{p-1}\eta_i(x)
= -\frac12V''(b_i)(x,x)u_i(x)
$$
which is orthogonal to the partial derivatives of $u_i$.
\end{theorem}
Note that the function $\eta_i$ is in principle calculable and it is
to that extent that we consider the asymptotic form of the solution
calculable.
\subsection{First part of the proof}
In this subsection we give the proof of Theorem 1 apart from a technical lemma,
and derive much other useful information about the asymptotic form of $v$ as
$\epsilon\to 0$.
The system of equations (3) defines a non-linear operator
$$
v\mapsto F(\epsilon,v) : (H^2)^m\to (L^2)^m
$$
which depends on a parameter $\epsilon>0$. Our strategy is to solve
the equations for sufficiently small $\epsilon$ by means of a
substitution, careful consideration of the limiting problem as
$\epsilon\to 0$, and the implicit function theorem.
Introduce the
subspaces
$$
W_k= \Big\{w\in H^2: \int w D_ju_k = 0,\quad j=1,\ldots, n\Big\}
$$
and set
$$
W=W_1\times\cdots\times W_m\,.
$$
Introduce the variable vectors $s_1,\ldots,s_m$, each in ${\mathbb R}^n$, and let
$$
\xi_k= -\frac{b_k}{\epsilon}+s_k\,.
$$
Note that $\|\xi_i-\xi_j\|\to \infty$ as $\epsilon \to 0$ provided $i\not=j$.
We let $s=(s_1,\ldots,s_m)$ but we emphasize that each component of the
$m$-tuple $s$ is a vector in ${\mathbb R}^n$.
We shall now use the substitution
$$
v_i= u_i(x+\xi_i)+\epsilon^2w_i(x+\xi_i), \quad i=1,\ldots,m
$$
where $w_i\in W_i$.
The independent variables are the functions $w_i\in W_i$ and the vectors
$s_i$ implicit in $\xi_i$. We shall prove the existence of a solution
for each sufficiently small $\epsilon$, and as $\epsilon\to 0$ we shall
see that $s\to 0$ and $w_i\to \eta_i$ in the norm topology of $H^2$,
where $\eta_i$ are the functions referred to in Theorem~1.
We make the substitution and, for each $i$, translate the $i^{\rm th}$
equation, replacing $x$ by $x-\xi_i$. Divide each equation by
$\epsilon^2$. The result is a new operator equation
$f(\epsilon,s,w)=0$ involving an operator
$$
(s,w)\mapsto f(\epsilon,s,w) : ({\mathbb R}^n)^m\times W \to (L^2)^m
$$
which we proceed to describe.
To ease the exposition we split the description into three parts:
\begin{description}
\item[(A)] terms not involving $w=(w_1,\ldots,w_m)$;
\item[(B)] terms linear in $w$;
\item[(C)] terms quadratic or higher in $w$.
\end{description}
We consider each part separately with a view to taking the limit as
$\epsilon\to 0$.
\medskip
\noindent
{\bf(A)} Before division by $\epsilon^2$ the $i^{\rm th}$ component is
as follows:
$$
\displaylines{\quad
-\Delta u_i(x)+V(\epsilon(x-\xi_i))u_i(x) - G_i\Big(u_1(x+\xi_1-\xi_i),
\ldots, u_m(x+\xi_m-\xi_i)\Big)
\hfill\cr \hfill =
\Big(V(\epsilon(x-\xi_i))-a_i\Big)u_i(x) + u_i(x)^p - G_i\Big(u_1(x+\xi_1-\xi_i),
\ldots, u_m(x+\xi_m-\xi_i)\Big)
\quad\cr\hfill =
\Big(V(\epsilon(x-\xi_i))-a_i\Big)u_i(x) - g_i\Big(u_1(x+\xi_1-\xi_i),
\ldots, u_m(x+\xi_m-\xi_i)\Big)\cdot u_i(x).
\quad\cr }
$$
The second term in the last line consists of sums of monomials. In
each we have the unshifted factor $u_i(x)$ together with at least one
other {\em shifted\/} factor of the form $u_k(x+\xi_k-\xi_i)$ for
which $k\not=i$. Such a monomial tends to 0 in the $L^2$-norm as
$\epsilon\to 0$. In fact the convergence to 0 is faster than that of
any power of $\epsilon$ because of the exponentially fast decrease of
the function $\phi$ at infinity.
Division by $\epsilon^2$ leads therefore to the limit
$$
\frac12 V''(b_i)(x-s_i,x-s_i) u_i(x),
$$
where the second derivative $V''(b_i)$ is regarded as a
symmetric, bilinear form. Note that the limit is attained in the
$L^2$-norm thanks to the boundedness of the second derivatives of $V$.
But in fact, owing to the rapid decrease of $u_i(x)$ we get the same
result if the second derivatives of $V$ have polynomial growth.
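Explicitly, since $\epsilon(x-\xi_i)=b_i+\epsilon(x-s_i)$ and
$V'(b_i)=0$, Taylor's formula with Lagrange remainder gives, for some
$\theta=\theta(x)\in(0,1)$,
$$
\epsilon^{-2}\Big(V(\epsilon(x-\xi_i))-a_i\Big)
=\frac12 V''\big(b_i+\theta\epsilon(x-s_i)\big)(x-s_i,x-s_i)\,,
$$
and since $V''$ is bounded and continuous while $u_i$ decays rapidly,
multiplication by $u_i(x)$ and dominated convergence give the limit in
$L^2$.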
The expression (A) defines a mapping
$$
f_0:{\mathbb R}_+ \times ({\mathbb R}^n)^m \to (L^2)^m
$$
where ${\mathbb R}_+$ denotes the interval $[0,\infty[$ and the $i^{\rm th}$ component
of $f_0$ is given by
$$
\displaylines{\quad
\Big(f_0(\epsilon, s)\Big)_i=
\epsilon^{-2}\Big(V(\epsilon(x-\xi_i))-a_i\Big)u_i(x)
\hfill\cr\hfill
{} - \epsilon^{-2}g_i\Big(u_1(x+\xi_1-\xi_i),
\ldots, u_m(x+\xi_m-\xi_i)\Big)\cdot u_i(x)
\quad\cr}
$$
if $\epsilon>0$, and
$$
\Big(f_0(0, s)\Big)_i =
\frac12 V''(b_i)(x-s_i,x-s_i) u_i(x)\,.
$$
Note that the derivative of $f_0(\epsilon, s)$ with respect to $s$
converges to the corresponding derivative of $f_0(0, s)$ as
$\epsilon\to 0$. Convergence occurs in the uniform operator topology
(the operator norm) thanks to the boundedness of the second
derivatives of $V$ (and, as before, polynomial growth would suffice).
\medskip
\noindent
{\bf (B)} After division by $\epsilon^2$ the $i^{\rm th}$ component comprises
the following terms:
$$
\displaylines{\quad
-\Delta w_i(x) +V(\epsilon(x-\xi_i))w_i(x) -pu_i(x)^{p-1}w_i(x)
\hfill\cr\hfill
{}-g_i\Big(u_1(x+\xi_1-\xi_i),\ldots, u_m(x+\xi_m-\xi_i)\Big)w_i(x)
\hfill\cr\hfill
-\sum_{k=1}^m\Big(D_{v_k}g_i\Big)\Big(u_1(x+\xi_1-\xi_i),\ldots, u_m(x+\xi_m-\xi_i)\Big)
u_i(x)w_k(x+\xi_k-\xi_i).
\quad\cr}
$$
(Note that $D_{v_k}g_i$ denotes the partial derivative of $g_i$ with respect to the variable $v_k$.)
This expression may be thought of as
$$
f_1(\epsilon, s) w
$$
where $ f_1(\epsilon, s)$ is a linear mapping from
$W$ to $(L^2)^m$ for each $\epsilon>0$ and $s$.
The main difficulty we have to face is the fact that $f_1(\epsilon,
s)$ does not behave well in the operator norm as $\epsilon\to 0$, as
we now proceed to see.
The last two terms in the expression consist of sums of monomials of the
form
$$
u_{k_1}(x+\xi_{k_1}-\xi_i)\cdots u_{k_{p-1}}(x+\xi_{k_{p-1}}-\xi_i)w_i(x)
$$
and of the form
$$
u_{k_1}(x+\xi_{k_1}-\xi_i)\cdots u_{k_{p-2}}(x+\xi_{k_{p-2}}-\xi_i)
u_i(x)w_k(x+\xi_k-\xi_i)
$$
Let us consider these as linear maps acting on the functions $w_i$.
A monomial of the first kind defines a linear map that tends to 0 with
$\epsilon$ in the operator norm provided at least two distinct shifts
are present to cause the function multiplying $w_i(x)$ to converge uniformly
to 0. This occurs unless $k_1=\cdots=k_{p-1}$. Similarly a
monomial of the second kind defines a linear map that tends to 0 with
$\epsilon$ in the operator norm unless $k_1=\cdots=k_{p-2}=i$.
Throwing out terms that tend to 0 in the operator norm leaves
$$\displaylines{\quad
-\Delta w_i(x) +V(\epsilon(x-\xi_i))w_i(x) -pu_i(x)^{p-1}w_i(x)
\hfill\cr\hfill
-\sum_{k=1,k\not=i}^m\lambda_{ik}u_k(x+\xi_k-\xi_i)^{p-1}w_i(x)
\hfill\cr\hfill
-\sum_{k=1,k\not=i}^m\gamma_{ik}u_i(x)^{p-1}w_k(x+\xi_k-\xi_i)
\quad\cr}
$$
for certain constants $\gamma_{ik}$. This defines a linear map acting
on the functions $w_i$ but it is plain that it does not attain a limit in the
norm topology, but only in the strong operator topology, as $\epsilon\to
0$.
In fact the strong operator limit is the linear mapping $f_1(0,s)$
given by
$$
\Big(f_1(0,s)w\Big)_i= -\Delta w_i(x) +a_iw_i(x) -pu_i(x)^{p-1}w_i(x).
$$
Note that it is independent of $s$.
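To illustrate the phenomenon (ignoring for the moment the restriction
to the subspace $W$), consider a single term of the last sum as a map
$T_\tau:H^2\to L^2$, $T_\tau w = u_i(x)^{p-1}w(x+\tau)$, where
$\tau=\xi_k-\xi_i$ and $|\tau|\to\infty$. For each fixed $w$ we have
$T_\tau w\to 0$ in $L^2$, but
$$
T_\tau\big(u_i(\cdot-\tau)\big)=u_i^{\,p},
\qquad\hbox{so that}\qquad
\|T_\tau\|\ge \frac{\|u_i^{\,p}\|_{L^2}}{\|u_i\|_{H^2}}>0
$$
for every $\tau$; the operator norms do not tend to $0$.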
\medskip
\noindent
{\bf(C)} This may be written as $\epsilon^2f_2(\epsilon,s,w)$
and tends to zero, along with any derivatives it possesses, as
$\epsilon\to 0$. The convergence is uniform for $s$ and $w$ in bounded sets.
\medskip
The limiting problem is the following.
$$\displaylines{\quad
\frac12 V''(b_i)(x-s_i,x-s_i) u_i(x)
-\Delta w_i(x) +a_iw_i(x) -pu_i(x)^{p-1}w_i(x)=0,
\hfill\cr\hfill
i=1,\ldots,m.\quad\cr}
$$
It has a non-degenerate solution $s_i=0$, $w_i=\eta_i$, $i=1,\ldots, m$,
where $\eta_i(x)$ is the unique solution in $W_i$ of
$$
-\Delta \eta_i(x) +a_i\eta_i(x) -pu_i(x)^{p-1}\eta_i(x)
= -\frac12V''(b_i)(x,x)u_i(x)
$$
(see section 6.2).
For $\epsilon>0$ our problem takes the form
$$
f(\epsilon,s,w)=
f_0(\epsilon, s)
+f_1(\epsilon,s)w
+\epsilon^2f_2(\epsilon,s,w)=0\,. \eqno{(4)}
$$
At this point we would like to apply the implicit function
theorem to derive a solution for all sufficiently small
$\epsilon>0$. But this requires that the derivative w.r.t.~$(s,w)$
converges in the operator-norm as $\epsilon\to 0$. This fails for
terms (B). However we can still use the implicit function theorem
via a modification which is discussed in the appendix (see Theorem 4
in section 6).
For convenience let us denote the space $({\mathbb R}^n)^m\times W$ by $E$
and the space $(L^2)^m$ by $F$. Define an operator-valued function
$A:{\mathbb R}_+\to L(E,F)$ given by
$$
A(\epsilon)(\sigma,z)= D_sf_0(0,0)\sigma +f_1(\epsilon,0)z. \eqno{(5)}
$$
To apply Theorem 4 we have to
check the following properties of $A$.
\begin{description}
\item{(1)} $A$ is continuous for $\epsilon>0$ w.r.t.~the strong
operator-topology. (This is needed to ensure that the solution depends
continuously on $\epsilon$; see Theorem~4.)
\item{(2)} The limit $\lim_{\epsilon\to
0}A(\epsilon)=D_{(s,w)}f(0,0,\eta)$ is attained in the strong
operator topology. (The importance of $s=0$, $w=\eta$ is that it is
the solution of the limiting problem. Compare condition (c) of
Theorem 4.)
\item{(3)} The limit $\lim_{\epsilon\to 0,s\to
0,w\to\eta}\Big(A(\epsilon)-D_{(s,w)}f(\epsilon,s,w)\Big)=0$ is attained in
the operator-norm topology. (Compare condition (d) of Theorem 4.)
\item{(4)} There exist $M>0$ and $\epsilon_0>0$ such that
$A(\epsilon)$ is invertible for $0\le\epsilon<\epsilon_0$ and
$\|A(\epsilon)^{-1}\|\le M$. (Compare condition (e) of Theorem 4.)
\end{description}
Properties (1), (2) and (3) follow from the discussion of the terms
(A), (B) and (C) above; property (4) is proved in the next subsection.
Granted these properties, Theorem 4 shows that for each sufficiently
small $\epsilon>0$ equation (4) has a unique solution
$(s,w)$ which tends to $(0,\eta)$ as $\epsilon\to 0$
and depends continuously on $\epsilon$.
\subsection{Proof of property 4}
It suffices to show that if we have sequences
$\epsilon_\nu\in{\mathbb R}_+$,
$\sigma_\nu \in ({\mathbb R}^n)^m$ and
$ z_\nu\in W$ %,\quad s_\nu\in ({\mathbb R}^n)^m
such that
$$
\epsilon_\nu\to 0,
\quad \|\sigma_\nu\|_{({\mathbb R}^n)^m}
+\|z_\nu\|_{(H^2)^m}\le 1
$$
whilst
$$
A(\epsilon_\nu)(\sigma_\nu, z_\nu) =
D_sf_0(0,0)\sigma_\nu +f_1(\epsilon_\nu,0)z_\nu \to 0
$$
in the norm topology of $(L^2)^m$, then a subsequence of
$(\sigma_\nu, z_\nu)$ tends to 0 in the norm topology of
$({\mathbb R}^n)^m\times W$. This will prove that $A(\epsilon)$ is injective
for sufficiently small $\epsilon$ and that its inverse has a uniform
bound. Using the fact that $A(\epsilon)$ is a Fredholm operator
of index 0 we see that $A(\epsilon)$ is invertible in the normal sense.
The $i^{\rm th}$ component of $D_sf_0(0,0)\sigma
+f_1(\epsilon,s)z$ can be written as
$$
\displaylines{\quad
-V''(b_i)(x,\sigma_i)u_i(x) -\Delta
z_i(x)+V(\epsilon(x-\xi_i))z_i(x)
\hfill\cr\hfill
{}-pu_i(x)^{p-1}z_i(x)
-\sum_{k=1,k\not=i}^m \lambda_{ik}u_k(x+\xi_k-\xi_i)^{p-1}z_i(x)
\hfill\cr\hfill
-\sum_{k=1,k\not=i}^m \gamma_{ik}u_i(x)^{p-1}z_k(x+\xi_k-\xi_i)
+J_i(\epsilon,s)z
\quad (6)\cr}
$$
where the remainder term is expressed in terms of an operator
$J_i(\epsilon,s)$ which tends to 0 in norm with $\epsilon$. This term
may be safely discarded.
Introduce sequences as above with subscript $\nu\in{\mathbb N}$. Recalling that $s=0$
we have
$$
\xi_{\nu,k}= -\frac{b_k}{\epsilon_\nu}\,.
$$
Recall that in a Hilbert space a bounded sequence has a weakly
convergent subsequence. By going to a subsequence we may assume that
\begin{description}
\item[(a)] $\lim_{\nu\to\infty}\sigma_\nu=\sigma_{\infty}$;
\item[(b)] $\lim_{\nu\to\infty}z_{\nu,i}= z_{\infty,i}$ weakly in $W_i$
for each $i$;
\item[(c)] $\lim_{\nu\to\infty}z_{\nu,i}(\cdot+\xi_{\nu,i}-\xi_{\nu,j}) =
y_{ij}$ weakly in $H^2$ for each pair $i\not=j$.
\end{description}
We recall the following facts. If a sequence is weakly convergent in
$H^2$ it is convergent in the sense of distributions. Its restriction to
a bounded set is norm convergent in $L^2$ on that set. If we multiply
by a fixed function which tends to 0 at infinity the resulting sequence is
norm convergent in $L^2$ over all of ${\mathbb R}^n$.
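To justify the third fact, suppose $w_\nu\to w$ weakly in $H^2$ and
$\chi$ is bounded with $\chi(x)\to 0$ as $|x|\to\infty$. For any $R>0$
$$
\|\chi (w_\nu-w)\|_{L^2({\mathbb R}^n)}\le
\Big(\sup_{|x|>R}|\chi(x)|\Big)\|w_\nu-w\|_{L^2({\mathbb R}^n)}
+\|\chi\|_{\infty}\|w_\nu-w\|_{L^2(|x|\le R)}\,;
$$
the first term is small for large $R$ (weakly convergent sequences are
bounded), and the second tends to $0$ by the preceding fact.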
Let $j\not=i$ and translate the expression (6) by replacing $x$ by
$x+\xi_i -\xi_j$. The resulting expression
$$\displaylines{\quad
-V''(b_i)(x+\xi_{\nu,i} -\xi_{\nu,j},\sigma_{\nu,i})u_i(x+\xi_{\nu,i}
-\xi_{\nu,j})
\hfill\cr\hfill
{} -\Delta z_{\nu,i}(x+\xi_{\nu,i} -\xi_{\nu,j})
+V(\epsilon_\nu(x-\xi_{\nu,j}))z_{\nu,i}(x+\xi_{\nu,i}-\xi_{\nu,j})
\hfill\hfill\hfill\cr\hfill
{}-pu_i(x+\xi_{\nu,i} -\xi_{\nu,j})^{p-1}z_{\nu,i}(x+\xi_{\nu,i} -\xi_{\nu,j})
\hfill\cr\hfill\hfill\hfill
-\sum_{k=1,k\not=i}^m
\lambda_{ik}u_k(x+\xi_{\nu,k}-\xi_{\nu,j})^{p-1}
z_{\nu,i}(x+\xi_{\nu,i} -\xi_{\nu,j})
\hfill\cr\hfill
-\sum_{k=1,k\not=i}^m \gamma_{ik}u_i(x+\xi_{\nu,i}
-\xi_{\nu,j})^{p-1}
z_{\nu,k}(x+\xi_{\nu,k}-\xi_{\nu,j})
\quad\cr}
$$
tends to 0 in $L^2$. It therefore tends to 0
in the sense of distributions; but, recalling that $j\not=i$, this
implies that
$$
-\Delta y_{ij}(x) +a_{j}y_{ij}-\lambda_{ij}u_j(x)^{p-1}y_{ij}(x)=0\,.
$$
This is where the assumption that $\lambda_{ij}\not\in\Sigma$ is brought into
play. It implies that $y_{ij}=0$. In particular the distribution limit
of $z_{\nu,k}(\cdot+\xi_{\nu,k}-\xi_{\nu,i})$ is 0 if $k\not=i$.
Next we consider the distribution limit without translation.
Using what we have just proved we obtain
$$
V''(b_i)(x,\sigma_{\infty,i})u_i(x)
-\Delta z_{\infty,i}(x) +a_i
z_{\infty,i}(x)-pu_i(x)^{p-1}z_{\infty,i}(x) =0.
$$
From this we deduce that $\sigma_{\infty,i}=0$ and
$z_{\infty,i}=0$. (The non-degeneracy of $b_i$ is needed here; the
calculation needed to verify this is similar to that in section~6.2.)
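In outline: pairing the last equation in $L^2$ with $D_ku_i$, using
the self-adjointness of $-\Delta+a_i-pu_i^{\,p-1}$ and the identity
$(-\Delta+a_i-pu_i^{\,p-1})D_ku_i=0$, we obtain
$$
\int V''(b_i)(x,\sigma_{\infty,i})\,u_i(x)D_ku_i(x)\,dx
=-\frac12\Big(\int u_i^2\Big) V''(b_i)(e_k,\sigma_{\infty,i})=0,
\qquad k=1,\ldots,n,
$$
so $\sigma_{\infty,i}=0$ by the non-degeneracy of $V''(b_i)$. The
equation then reduces to $(-\Delta+a_i-pu_i^{\,p-1})z_{\infty,i}=0$
with $z_{\infty,i}\in W_i$, and since the kernel of this operator is
spanned by the functions $D_ju_i$ (the fact underlying section 6.2)
while $W_i$ is orthogonal to them, $z_{\infty,i}=0$.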
We now have that the weak limit of $z_{\nu,i}(\cdot+\xi_{\nu,i}
-\xi_{\nu,j})$ is 0 for any pair $(i,j)$. Since $u_i$ decays at infinity
we have that both
$$
u_i(\cdot)^{p-1}z_{\nu,j}(\cdot+\xi_{\nu,j} -\xi_{\nu,i})
$$
and
$$
u_i(\cdot+\xi_{\nu,i} -\xi_{\nu,j})^{p-1}z_{\nu,j}(\cdot)
$$
tend to 0 in $L^2$. Hence, also
$$
-\Delta z_{\nu,i}+V(\epsilon_\nu(x-\xi_{\nu,i}))z_{\nu,i}
$$
tends to 0 in $L^2$. By Wang's Lemma \cite{Wang} (see Appendix
section 6.4)
this implies that $z_{\nu,i}$ tends to 0 in the
$H^2$-norm, as required.
\section{Deductions from Theorem 1}
\subsection{Multi-lump solutions of NLS}
We seek a solution to
$$
-\Delta v +V(\epsilon x)v - v^p=0
$$
for which $v=\sum_{i=1}^m v_i$ and $v_i$ is near $a_i^{\frac{1}{p-1}}
\phi(\sqrt{a_i}(x-\frac{b_i}{\epsilon}))$.
We write
$$
\Big(\sum_{i=1}^m v_i\Big)^p = \sum_{i=1}^m G_i(v)=
\sum_{i=1}^m (v_i^p+g_i(v)v_i)
$$
where the functions $g_i$ are chosen so that the constants
$\lambda_{ij}$ fall outside $\Sigma$ (see Section 2 for the definition
of $\Sigma$). In fact if $p\ge 3$ we can arrange things so that
$\lambda_{ij}=0$. For
$$
\Big(\sum_{i=1}^m v_i\Big)^p= \sum_{i=1}^m v_i^p
+\sum_{i\not=j}pv_iv_j^{p-1}
+ \hbox{ other monomials.}
$$
If $p\ge 3$ we can split this into the sum of $m$ polynomials.
We group $v_iv_j^{p-1}$ with $v_j^p$. For any other monomial
choose one of its variables $v_k$ arbitrarily
and group it with $v_k^p$.
If a concise prescription is required we could use the following,
although it does not recommend itself above any other method.
Using the usual multi-indices we write
$$
\Big(\sum_{i=1}^m v_i\Big)^p=\sum_{|\alpha|=p}b_\alpha v^\alpha.
$$
For each multi-index $\alpha$ let $m(\alpha)$ be the highest subscript
at which the maximum coordinate occurs, that is,
$$
m(\alpha)=\max\{j:\alpha_j=\max\alpha\}.
$$
Then we set
$$
G_i(v)=\sum_{m(\alpha)=i}b_\alpha v^\alpha\,.
$$
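For example, with $m=2$ and $p=3$ this prescription gives
$$
G_1(v)=v_1^3+3v_1^2v_2,\qquad G_2(v)=v_2^3+3v_1v_2^2,
$$
so that $g_1(v)=g_2(v)=3v_1v_2$ contains no pure power of $v_2$ or
$v_1$, and hence $\lambda_{12}=\lambda_{21}=0\not\in\Sigma$.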
This works only for $p\ge 3$ and so we are left with the
case $p=2$. To handle this we choose $g_i(v)=\sum_{j=1}^m
\alpha_{ij}v_j$ where $\alpha_{ii}=0$, $\alpha_{ij}+\alpha_{ji}=2$ for
$i\not=j$ and $\alpha_{ij}\not\in\Sigma$.
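For instance, with $m=2$ one may take $\alpha_{12}=\frac12$ and
$\alpha_{21}=\frac32$ (assuming $\frac32\not\in\Sigma$; the value
$\frac12$ lies below the whole of $\Sigma$), which gives
$$
G_1(v)=v_1^2+\tfrac12 v_1v_2,\qquad
G_2(v)=v_2^2+\tfrac32 v_1v_2,\qquad
G_1(v)+G_2(v)=(v_1+v_2)^2\,.
$$
This is the choice used in section 4.3 below.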
\subsection{Multilump solutions with sign}
Now we seek real-valued solutions to
$$
-\Delta v +V(\epsilon x)v - v^p=0
$$
for which $v=\sum_{i=1}^m y_i$ and $y_i$ is near $\kappa_ia_i^{\frac{1}{p-1}}
\phi(\sqrt{a_i}(x-\frac{b_i}{\epsilon}))$, where $\kappa_i=\pm 1$.
Let us assume that $p$ is odd. Then we seek solutions $v_i$ to the
system
$$
-\Delta v_i +V(\epsilon x)v_i -v_i^p -g_i(v)v_i=0, \quad i=1,\ldots,m
$$
for which $v_i$ is near $a_i^{\frac{1}{p-1}}
\phi(\sqrt{a_i}(x-\frac{b_i}{\epsilon}))$, choosing the polynomials
$g_i(v)$ so that
$$
\Big(\sum_{i=1}^m \kappa_iv_i\Big)^p = \sum_{i=1}^m (\kappa_iv_i^p+\kappa_ig_i(v)v_i)
$$
and so that $\lambda_{ij}\not\in\Sigma$. This is clearly possible since
$p$ is odd.
The required solution is then $\sum_{i=1}^m\kappa_iv_i$.
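Indeed, multiplying the $i^{\rm th}$ equation by $\kappa_i$ and summing
over $i$ gives, with $y=\sum_{i=1}^m\kappa_iv_i$,
$$
-\Delta y+V(\epsilon x)y-\sum_{i=1}^m\big(\kappa_iv_i^p+\kappa_ig_i(v)v_i\big)
=-\Delta y+V(\epsilon x)y-y^p=0\,.
$$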
\section{Positivity}
\subsection{Positive solutions of the system}
In this section we prove that solutions of (3) are positive under
appropriate conditions. Throughout the section we let $v_i$ denote the
solutions whose existence was established in section 2.
They depend on $\epsilon$ but this dependence will not be explicitly
indicated. We maintain all the conditions of section 2. In
particular we recall the non-degeneracy condition $ND$. In the
following general result we impose a further restriction on the
constants~$\lambda_{ij}$.
\begin{theorem}
Assume that the constants $\lambda_{ij}$ all satisfy $\lambda_{ij}<
1$. Then the solutions $v_i$ are all positive and without zeros.
\end{theorem}
We begin by noting that $v_i$ satisfies the linear differential
equation $L_\epsilon v_i=0$, where
$$
L_\epsilon y := -\Delta y +V(\epsilon x)y - \big(v_i^{p-1}+g_i(v)\big)y\,.
$$
Our strategy is the usual one of showing that $v_i$ is the
ground-state eigenfunction of the operator $L_\epsilon$; in other
words $L_\epsilon$ has no negative eigenvalues if $\epsilon$ is
sufficiently small. For operators of this kind it is known that the
ground state is positive up to a numerical factor (see, for example,
\cite{RS}).
\begin{lemma} There exist $\epsilon_0>0$ and $\rho_0>0$ such that for
$0<\epsilon<\epsilon_0$ the eig\-en\-value $0$ of $L_\epsilon$ is
simple and is the only eigenvalue in the interval
${]}{-\rho_0,\rho_0}{[}$.
\end{lemma}
\noindent Note: For the lemma the assumption that $\lambda_{ij}<1$ is
not needed.
\medskip
\noindent{\it Proof of Lemma 1.}
Consider the linear mapping
$$
T_\epsilon : (\lambda,z)\mapsto\lambda v_i(x)+L_\epsilon z
$$
from ${\mathbb R}\times Z_\epsilon$ to $L^2$ where
$$
Z_\epsilon=\Big\{y\in H^2:\int yv_i=0\Big\}.
$$
Here we have
$$
v_i(x)=u_i(x+\xi_i)+\epsilon^2w_i(x+\xi_i)
$$
where, as before,
$$
\xi_i= -\frac{b_i}{\epsilon}+s_i\,,
$$
$w_i$ and $s_i$ having values, depending on $\epsilon$, which give a
solution of the system~(3).
We shall show that $T_\epsilon$ is invertible for all sufficiently small
$\epsilon$ and that its inverse satisfies a bound in norm independent of
$\epsilon$.
It suffices to show that if we have sequences
$\epsilon_\nu\in{\mathbb R}_+$,
$\lambda_\nu \in {\mathbb R}$
and $ z_\nu\in Z_{\epsilon_\nu}$
such that
$$
\epsilon_\nu\to 0, \quad |\lambda_\nu|+\|z_\nu\|_{H^2}\le 1
$$
whilst
$$
\lambda_\nu v_{\nu,i}(x)+ L_{\epsilon_\nu}z_\nu \to 0 \eqno{(7)}
$$
in $L^2$ as $\nu\to\infty$, then a subsequence of $(\lambda_\nu,
z_\nu)$ converges to 0 in norm. Note that we have written $v_\nu$ for
the solution of (3) with $\epsilon=\epsilon_\nu$; we use likewise the
notation $\xi_{\nu,i}$. Going to a subsequence we may suppose that the
following limits exist in the weak topology of $H^2$:
$$
\lim_{\nu\to\infty}z_\nu(\cdot-\xi_{\nu,j})=
z_{\infty,j}\quad j=1,\ldots,m;
$$
also that
$$
\lim_{\nu\to\infty}\lambda_\nu=\lambda_\infty.
$$
Shifting the left-hand side of (7) by replacing $x$ by $x-\xi_{\nu,i}$,
taking the distribution limit and recalling the form of $v(x)$ for
small $\epsilon$ we find
$$
\lambda_\infty u_i(x)-\Delta z_{\infty,i}(x)+a_iz_{\infty,i}(x)-u_i(x)^{p-1}z_{\infty,i}(x)=0.
$$
Since $\int z_\nu v_{\nu,i}=0$ we have that $\int z_{\infty,i} u_i=0$ and we
deduce that
$\lambda_\infty=0$ and $z_{\infty,i}=0$.
Shifting the left-hand side of (7) by replacing $x$ by $x-\xi_{\nu,j}$
where $j\not=i$, and
taking the distribution limit we find
$$
-\Delta z_{\infty,j}(x) +a_jz_{\infty,j}(x) -\lambda_{ij}u_j(x)^{p-1}z_{\infty,j}(x)=0
$$
which, since $\lambda_{ij}\not\in\Sigma$, implies $z_{\infty,j}=0$.
Returning to (7) we see that
$$
-\Delta z_\nu +V(\epsilon_\nu x)z_\nu \to 0
$$
in the $L^2$-norm. By Wang's Lemma (section 6.4) this implies $z_\nu\to 0$ in
the $H^2$-norm.
Thus there exist $\epsilon_0>0$ and $K>0$ such that $T_\epsilon$ is
invertible for $0<\epsilon<\epsilon_0$ and $\|T_\epsilon^{-1}\|\le K$.
It follows that there exists $\rho_0>0$ such that the map
$$
(\lambda,z)\mapsto T_\epsilon(\lambda,z)+\rho z
$$
from ${\mathbb R}\times Z_\epsilon$ to $L^2$ is invertible for
$0<\epsilon<\epsilon_0$ and $|\rho|<\rho_0$. Suppose a value of $\rho$
in this range is an eigenvalue of $L_\epsilon$ with eigenfunction $y$.
Then
$$
-\Delta y+V(\epsilon x)y -(v_i(x)^{p-1}+g_i(v(x)))y = \rho y\,.
$$
Write
$$
y= \lambda v_i + z
$$
where $z\in Z_\epsilon$.
Since $L_\epsilon v_i=0$ we have that
$$
T_\epsilon(-\rho\lambda,z)-\rho z=0\,.
$$
But then $\rho\lambda=0$ and $z=0$ whence $\rho=0$. Thus
the only eigenvalue in the range $|\rho|<\rho_0$ is 0
provided $0<\epsilon<\epsilon_0$.
This ends the proof.
\bigskip
\noindent{\it Proof of Theorem 2.}
We prove that for all sufficiently small $\epsilon>0$ the operator
$L_\epsilon$ has no negative eigenvalues. Suppose the contrary holds.
Then we can find sequences
$$
\epsilon_\nu\to 0,\quad \lambda_\nu<0,\quad y_\nu\in H^2
$$
such that $\|y_\nu\|_{H^2}=1$ and
$$L_{\epsilon_\nu} y_\nu = -\Delta y_\nu +V(\epsilon_\nu x)y_\nu -
(v_{\nu,i}^{p-1}+g_i(v_\nu))y_\nu
=\lambda_\nu y_\nu.
$$
It is clear that $\lambda_\nu$ is bounded below, and by the lemma
$\lambda_\nu \le -\rho_0<0$ for sufficiently large $\nu$.
Going to a subsequence we may assume that
$$
\lambda_\nu\to\lambda_\infty<0, \quad y_\nu(\cdot-\xi_{\nu,k})\to z_k
$$
weakly in $H^2$, for $k=1$, \dots, $m$.
Replace $x$ by $x-\xi_{\nu,k}$ and take the limit in the sense of
distributions. For $k\not=i$ we obtain
$$
-\Delta z_k+a_kz_k -\lambda_{ik}u_k(x)^{p-1}z_k = \lambda_{\infty}z_k.
$$
But the operator $-\Delta +a_k -u_k(x)^{p-1}$ has no negative
spectrum, $\lambda_{ik}<1$ and $\lambda_\infty<0$. We deduce that
$z_k=0$. (For future reference we note
that this would also be true if $\lambda_{ik}=1$.) For $k=i$ we obtain
$$
-\Delta z_i+a_iz_i -u_i(x)^{p-1}z_i = \lambda_{\infty}z_i\,.
$$
From this we deduce that $z_i=0$ (note that here we need the fact that
$\lambda_\infty$ is strictly negative).
Now we consider the limit in $L^2$. We find that
$$
-\Delta y_\nu +(V(\epsilon_\nu x)-\lambda_\nu)y_\nu \to 0
$$
in the $L^2$-norm. By Wang's Lemma (section 6.4) this
implies that $y_\nu\to 0$ in the $H^2$-norm,
which is a contradiction.
\bigskip
For future reference we note that without using Lemma 1 the arguments of
the last paragraphs show that if $L_\epsilon$ has negative eigenvalues
then the lowest eigenvalue tends to 0 as $\epsilon\to 0$.
This even works if $\lambda_{ij}=1$.
\subsection{Deductions from Theorem 2}
Theorem 2 indicates that in the case $p\ge 3$
the multilump solution to
$$
-\Delta v +V(\epsilon x)v - v^p=0
$$
for which $v=\sum_{i=1}^m v_i$ and $v_i$ is near $a_i^{\frac{1}{p-1}}
\phi(\sqrt{a_i}(x-\frac{b_i}{\epsilon}))$ is positive. In these cases
we can arrange for $\lambda_{ij}$ to be 0 and the individual components
$v_i$ are all positive.
The case $p=2$ is somewhat different. For simplicity let us consider the
case of two humps.
Here we take $\lambda_{12}
<1$ and $\lambda_{21}>1$. Theorem 2 indicates that $v_1$ is positive
but says nothing about $v_2$.
In fact in this case the function $v_2$ cannot be positive.
Recall that $v=(v_1,v_2)$ satisfies the system
$$
\begin{array}{r@{\hspace{.15cm}} l}
-\Delta v_1 +V(\epsilon x)v_1-(v_1+\lambda_{12}v_2)v_1&=0\cr
-\Delta v_2 +V(\epsilon x)v_2-(v_2+\lambda_{21}v_1)v_2&=0\cr
\end{array}
$$
where $\lambda_{12}+\lambda_{21}=2$ and $\lambda_{12}\not=1$.
Multiply the first equation by $v_2$, the second by $v_1$, subtract and
integrate. The result is $\int v_1^2v_2+v_2^2v_1=0$ so that $v_2$ cannot
be everywhere positive.
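In detail, the terms involving $\Delta$ and $V(\epsilon x)$ cancel
after integration by parts, leaving
$$
0=\int\Big[(v_2+\lambda_{21}v_1)-(v_1+\lambda_{12}v_2)\Big]v_1v_2
=(\lambda_{21}-1)\int\big(v_1^2v_2+v_1v_2^2\big),
$$
since $1-\lambda_{12}=\lambda_{21}-1$; as $\lambda_{21}\not=1$ the
integral must vanish.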
\subsection{The case $m=2$, $p=2$}
Even though $v_2$ is not everywhere positive, more subtle arguments
suffice to show
that the sum $v_1+v_2$ is positive. Similar arguments can handle the
case of more than two humps.
For technical reasons we shall suppose that $n$ (the dimension of the
ambient space) is at most 3. This is because we need $H^2$ to be
embedded in the space of bounded continuous functions.
Let $v=(v_1,v_2)$ be the solution of the system
$$
\begin{array}{r@{\hspace{.15cm}} l}
-\Delta v_1 +V(\epsilon x)v_1-(v_1+\frac12v_2)v_1&=0\cr
\noalign{\smallskip}
-\Delta v_2 +V(\epsilon x)v_2-(v_2+\frac32v_1)v_2&=0\cr
\end{array}
$$
for small $\epsilon>0$ given by Theorem 1. We shall not indicate the
dependence on $\epsilon$ explicitly. The function $v_1+v_2$ satisfies
the equation $-\Delta y+V(\epsilon x)y-y^2=0$.
Define the operator
$$
S_\epsilon y: = -\Delta y +V(\epsilon x)y-(v_1(x)+v_2(x))y.
$$
We shall show that $S_\epsilon$ has no negative eigenvalues if
$\epsilon$ is sufficiently small. So $v_1+v_2$ is positive, being the
ground state of a Schr\"{o}dinger operator.
Introduce the linear mapping (cf.~the proof of Lemma 1)
$$
T_\epsilon :(\lambda,\mu,z)\mapsto \lambda v_1 +\mu v_2 + S_\epsilon z
$$
from ${\mathbb R}^2\times Z_\epsilon$ to $L^2$,
where $Z_\epsilon$ now denotes the space
$$
Z_\epsilon = \Big\{y\in H^2: \int yv_1=\int yv_2=0\Big\}.
$$
\begin{lemma}
$T_\epsilon$ is invertible for all sufficiently small
$\epsilon>0$ and its inverse
satisfies a bound in norm independent of $\epsilon$.
\end{lemma}
\noindent
{\it Proof.} It suffices to show that if we have sequences
$\epsilon_\nu\in{\mathbb R}_+$,
$\lambda_\nu \in {\mathbb R}$, $\mu_\nu \in {\mathbb R}$
and $ z_\nu\in Z_{\epsilon_\nu}$
such that
$$
\epsilon_\nu\to 0,\quad |\lambda_\nu|+|\mu_\nu|+\|z_\nu\|_{H^2}\le 1
$$
whilst
$$
\lambda_\nu v_{\nu,1}+\mu_\nu v_{\nu,2}
+S_{\epsilon_\nu}z_\nu \to 0
$$
in $L^2$ as $\nu\to\infty$, then a subsequence of $(\lambda_\nu,
\mu_\nu, z_\nu)$ converges to 0 in norm. We have written
$(v_{\nu,1},v_{\nu,2})$ for the solution corresponding to the value
$\epsilon_\nu$. A similar notation, $\xi_{\nu,1}$, $\xi_{\nu,2}$, is
used for the relevant shifts.
By going to a subsequence we may assume that the limits
$$
\lim_{\nu\to\infty}z_\nu(\cdot+\xi_{\nu,1})=z_{\infty,1},\quad
\lim_{\nu\to\infty}z_\nu(\cdot+\xi_{\nu,2})=z_{\infty,2}
$$
exist in the weak $H^2$-topology. Following the proof of Lemma 1 we
now deduce that $\lambda_\nu\to 0$, $\mu_\nu\to 0$, $z_{\infty,1}=0$
and $z_{\infty,2}=0$.
We then find $z_\nu\to 0$ in the norm topology of
$H^2$ using the same argument as in Lemma 1. This ends the proof.
\bigskip
We note that the arguments of the last paragraphs of subsection 4.1 suffice
to show that, if $S_\epsilon$ has negative eigenvalues, then the lowest
eigenvalue must tend to 0 as $\epsilon\to 0$. It suffices therefore
to prove the following lemma.
\begin{lemma}
There exist $\epsilon_1>0$ and $\rho_1>0$ such that $S_\epsilon$ has
no eigenvalues in the interval ${]}{-\rho_1},0{[}$ for
$0<\epsilon<\epsilon_1$.
\end{lemma}
\noindent{\it Proof.}
Suppose that $S_\epsilon$ has a negative eigenvalue. Let $\rho$ be its
{\em lowest} eigenvalue and choose a {\em positive} normalized
eigenfunction $y$.
Write $y=\lambda v_1 + \mu v_2 + z$ where $z\in Z_\epsilon$. Since
$v_1+v_2$ is an eigenfunction with eigenvalue 0 we have
$$
0=\int y(v_1+v_2) = \lambda\int v_1(v_1+v_2) + \mu \int v_2(v_1+v_2).
$$
If $\epsilon$ is sufficiently small both the integrals are positive; in
fact
$$
\lim_{\epsilon\to 0}\int v_i(v_1+v_2)=\int u_i^2,
\quad i=1,2.
$$
We conclude that $\lambda$ and $\mu$ have opposite signs.
Let $\beta(\epsilon)=\int v_1(v_1+v_2)$ and $\gamma(\epsilon)= \int
v_2(v_1+v_2)$.
Then $\lambda\beta(\epsilon)+\mu\gamma(\epsilon)=0$. We shall assume
that $\lambda\ge 0$ and $\mu\le 0$. A similar argument will dispose of
the other possibility.
Substituting $y=\lambda v_1+\mu v_2 +z $ into the equation $S_\epsilon y=
\rho y$ we find
$$
-\frac12(\lambda-\mu)v_1v_2+S_\epsilon z -\rho\lambda v_1-\rho\mu v_2
-\rho z=0.
$$
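Here we have used the identities, obtained directly from the system
satisfied by $(v_1,v_2)$,
$$
S_\epsilon v_1=\Big(v_1+\tfrac12 v_2\Big)v_1-(v_1+v_2)v_1=-\tfrac12 v_1v_2,
\qquad
S_\epsilon v_2=\Big(v_2+\tfrac32 v_1\Big)v_2-(v_1+v_2)v_2=\tfrac12 v_1v_2\,.
$$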
Hence applying Lemma 2 and again assuming $\epsilon$ sufficiently small
we can write
$$
(-\rho \lambda,-\rho\mu,z)=T_\epsilon^{-1}
\Big(\rho z+\frac12(\lambda-\mu)v_1v_2\Big)\,.
$$
It follows that
$$
|\rho\lambda|+|\rho\mu|+\|z\|_{H^2} \le K\|\rho z\|_{L^2}+
\frac12K|\lambda-\mu|\cdot \|v_1v_2\|_{L^2}.
$$
Now assume that $|\rho|<1/(2K)$.
We deduce
$$
\frac12\|z\|_{L^2}\le \Big(\frac12K\|v_1v_2\|_{L^2}-|\rho|\Big)(|\lambda|+|\mu|)\,.
$$
If $\epsilon$ is small enough the right-hand side becomes negative,
which is a contradiction. The problem is that how small $\epsilon$
should be depends on $\rho$. We need to deduce a contradiction from
making $\epsilon$ small in a way not depending on $\rho$.
Dropping $\rho$ from the inequality we may write
$$
\frac12\|z\|_{L^2}\le\frac12K\|v_1v_2\|_{L^2}(|\lambda|+|\mu|)=
\frac12K\|v_1v_2\|_{L^2}\left(\frac{\gamma(\epsilon)}{\beta(\epsilon)}+
1\right)|\mu|. \eqno{(8)}
$$
Consider the ball $I_\epsilon$ of volume 1 centred at $-\xi_2$, the
point of maximum of $u_2(x+\xi_2)$.
We know that $v_i(\cdot-\xi_i)\to u_i$ in the
$H^2$-norm. It is here that we need to limit the number of dimensions
to 3, for this implies that $v_i(\cdot-\xi_i)\to u_i$ uniformly. So we
can find $M>0$ such that $v_2(x)>M$ for all $x\in I_\epsilon$,
and for any $\delta>0$ we can ensure that
$|v_1(x)|<\delta$ for all $x\in I_\epsilon$ provided only that
$\epsilon$ is small enough. Fix $\delta$ so that
$M-\frac{\gamma(\epsilon)}{\beta(\epsilon)}\delta>0$ for
sufficiently small $\epsilon$. Since $y\ge 0$ we have
$$
\lambda v_1(x) + \mu v_2(x) + z(x)\ge 0
$$
which implies
$$
z(x)\ge -\lambda v_1(x) + |\mu|v_2(x)\ge -\lambda\delta +|\mu|M
= \Big(M-\frac{\gamma(\epsilon)}{\beta(\epsilon)}\delta\Big)|\mu|
$$
for all $x\in I_\epsilon$.
Integrating we deduce
$$
\|z\|_{L^2}\ge
\Big(M-\frac{\gamma(\epsilon)}{\beta(\epsilon)}\delta\Big)|\mu|.
$$
But this is inconsistent with (8) if $\epsilon$ is small enough.
This concludes the proof that $v_1+v_2$ is positive.
\section{Application to homoclinics}
The equation
$$
-\Delta v + v -v^p=0
$$
considered over all of ${\mathbb R}^n$ has a manifold of positive solutions.
These may be described as the
$n$-parameter family of functions $\phi(x-c)$
parametrized by \hbox{$c\in{\mathbb R}^n$}, where
$\phi$ is the positive, radially-symmetric solution introduced in
section~2.
We perturb the equation to
$$
-\Delta v + v -(1+\epsilon h(x))v^p=0 \eqno{(9)}
$$
where $h$ is measurable and periodic in ${\mathbb R}^n$.
Now we can seek multi-bump solutions looking like linear combinations
of translates of $\phi$.
The method of section~2 carries through
easily enough if we assume that $h$ has bounded
second derivatives. (This is much stronger than is needed.
Another approach is possible which does not even require $h$ to be
continuous, and yet gives more precise asymptotic information.
The calculations are too unwieldy to present here.)
Let
$$
F(s)= \int h(x) \phi(x+s)^{p+1}\,d^nx
$$
for $s\in{\mathbb R}^n$. Let $c_1$, \dots, $c_m$ be non-degenerate critical
points of $F$ (not necessarily distinct).
We seek solutions to (9) near to $\sum_{i=1}^m
\phi(x+k_i+c_i)$, where the vectors $k_i$ are periods of $h$ for which
the separations $\|k_i-k_j\|$ are sufficiently large.
We consider therefore a system
$$
-\Delta v_i + v_i - (1+\epsilon h(x))G_i(v)=0, \quad i=1,\ldots,
m,
$$
where the polynomials $G_i(v)$ satisfy the same conditions as in
section 2. We use the substitution
$$
v_i = \phi(x+s_i+k_i) + \epsilon w_i(x+s_i+k_i)
$$
where, for each $i$, $s_i$ is a variable vector in ${\mathbb R}^n$, $k_i$ is a
period of $h$, and the function $w_i$ belongs to the subspace
$$
W_i= \Big\{w\in H^2: \int w D_j\phi = 0,\quad j=1,\ldots, n\Big\}.
$$
We let
$$
W=W_1\times\cdots\times W_m\,.
$$
Make the substitution, translate the $i^{\rm th}$ equation by
replacing $x$ by $x-s_i-k_i$, divide by $\epsilon$ with a view to
taking the
limit as $\epsilon\to 0$ and $\|k_i-k_j\|\to\infty$, $i\not=j$.
The result of this is
$$
\displaylines{\quad
\epsilon^{-1}g_i\Big(\phi(x+s_1+k_1-s_i-k_i),\ldots,
\phi(x+s_m+k_m-s_i-k_i)\Big)\phi(x)
\hfill\cr\hfill
{}-h(x-s_i)\phi(x)^p
-\Delta w_i(x) +w_i(x) -p\phi(x)^{p-1}w_i(x)
\hfill\cr\hfill
-\sum_{j=1,j\not=i}^m\lambda_{ij}\phi(x+s_j+k_j-s_i-k_i)^{p-1}w_i(x)
\hfill\cr\hfill
-\sum_{j=1,j\not=i}^m\gamma_{ij}\phi(x)^{p-1}w_j(x+s_j+k_j-s_i-k_i)
\quad}
$$
where we have thrown out all terms of order $\epsilon$ and all terms
containing a product of two distinct translates of $\phi$, with the
exception that we
have retained the first term since it also
involves division by $\epsilon$.
As usual the details of the limit are a bit tricky. The first term
will converge exponentially fast to 0 provided the separations
$\|k_i-k_j\|$ do not grow too slowly compared to $1/\epsilon$. With
this proviso we obtain the limiting problem in the variables $s_i$, $w_i$:
$$
-h(x-s_i)\phi(x)^p -\Delta w_i + w_i -p\phi(x)^{p-1}w_i=0,\quad
i=1,\ldots,m\,.
$$
This has the non-degenerate solution $s_i=c_i$, $w_i=\eta_i$, $
(i=1,\ldots,m)$, where $\eta_i$ is the unique solution in $W_i$ of
$$
-\Delta w_i + w_i -p\phi(x)^{p-1}w_i= h(x-c_i)\phi(x)^p.
$$
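The role of the critical points of $F$ is transparent here: for a given
$s_i$ the equation for $w_i$ is solvable precisely when its right-hand
side is orthogonal in $L^2$ to the functions $D_k\phi$, which span the
kernel of $-\Delta+1-p\phi^{\,p-1}$ (cf.\ section 6.2), and a change of
variable shows that
$$
\int h(x-s_i)\phi(x)^pD_k\phi(x)\,dx=\frac{1}{p+1}D_kF(s_i),
\qquad k=1,\ldots,n,
$$
so the admissible values of $s_i$ are exactly the critical points of $F$.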
The existence of solutions for sufficiently small $\epsilon$ follows
much as in section~2. We have to use the implicit function theorem in
the form given in section 6.1 (Theorem 4). Let $A(\epsilon)$ be the operator from
$({\mathbb R}^n)^m\times W$ to $(L^2)^m$ for which the $i^{\rm th}$ component
of $A(\epsilon)(\sigma, z)$
is
$$
\displaylines{\quad
(\nabla h(x-c_i)\cdot \sigma_i)\phi(x)^p
-\Delta z_i(x) +z_i(x) -p\phi(x)^{p-1}z_i(x)
\hfill\cr\hfill
-\sum_{j=1,j\not=i}^m\lambda_{ij}\phi(x+s_j+k_j-s_i-k_i)^{p-1}z_i(x)
\hfill\cr\hfill
-\sum_{j=1,j\not=i}^m\gamma_{ij}\phi(x)^{p-1}z_j(x+s_j+k_j-s_i-k_i).
\quad\cr}
$$
The condition that $\lambda_{ij}\not\in\Sigma$
is used, as in section 2.3,
to verify that $\|A(\epsilon)^{-1}\|$ has a uniform upper
bound as $\epsilon\to 0$. Another detail to note is that as the $k_i$
are periods of $h$ they depend discontinuously on $\epsilon$.
The other conditions of Theorem 4 are straightforward to verify.
\section{Appendix}
\subsection{The implicit function theorem}
Let $E$ and $F$ be real Banach spaces and let $f:{\mathbb R}_+\times E\to F$,
where ${\mathbb R}_+$ denotes the interval $[0,\infty[$. We write
$f_\epsilon(x) = f(\epsilon,x)$ to emphasize the distinct role of
$\epsilon$ as a small parameter. Assume that $f_\epsilon$ is
differentiable for each $\epsilon\ge 0$.
For reasons which should be clear from section 5 we do not assume that
$f$ is a continuous function of $\epsilon$.
We say that a
solution $x_0$ of $f_\epsilon(x)=0$ is non-degenerate if the
derivative $Df_\epsilon(x_0)$ is an invertible linear mapping of $E$ onto
$F$. The following is the implicit function theorem (in a
slightly non-standard form, but with the standard proof).
\begin{theorem}
Make the assumptions:
{\rm(a)} the equation $f_0(x)=0$ has a
non-degenerate solution $x_0$;
{\rm(b)} the limit\/ $\lim_{\epsilon\to 0,x\to x_0}f_\epsilon(x)$
is $0$;
{\rm(c)} the limit\/ $\lim_{\epsilon\to
0,x\to x_0}Df_\epsilon(x)=Df_0(x_0)$ is attained in the
operator-norm topology.
Then for each sufficiently small
$\epsilon>0$ the equation $f_\epsilon(x)=0$ has a unique solution
near to $x_0$, which tends to $x_0$ as $\epsilon\to 0$.
If $f$ is jointly continuous in $\epsilon$ and $x$
then the solution depends continuously on $\epsilon$.
\end{theorem}
In the problems treated in this paper the third condition (c) fails.
The limit is only attained in the strong operator topology.
In this case we can use the following.
\begin{theorem}
Assume as before that:
{\rm(a)} the equation $f_0(x)=0$ has a
non-degenerate solution $x_0$; and
{\rm(b)} the limit\/ $\lim_{\epsilon\to 0,x\to x_0}f_\epsilon(x)$
is $0$.
\noindent Assume in addition that there exists an operator-valued function
$A:[0,\epsilon_0[\to L(E,F)$ such that:
{\rm(c)} the limit $\lim_{\epsilon\to 0}A(\epsilon)=A(0)=Df_0(x_0)$ is
attained in the strong operator topology;
{\rm(d)} the limit\/ $\lim_{\epsilon\to 0,x\to x_0}\Big(A(\epsilon)-
Df_\epsilon(x)\Big)=0$
is attained in the operator-norm topology.
{\rm(e)} $A(\epsilon)$ is invertible for $0\le \epsilon<\epsilon_0$ and
there exists a constant $M$ such that its inverse satisfies
$\|A(\epsilon)^{-1}\|\le M$ for $0\le\epsilon<\epsilon_0$.

\noindent Then for each sufficiently small
$\epsilon>0$ the equation $f_\epsilon(x)=0$ has a unique solution
near to $x_0$, which tends to $x_0$ as $\epsilon\to 0$.
If $A$ is continuous in $]0,\epsilon_0[$ (w.r.t.~the
strong operator-topology) and $f$ is jointly continuous in $\epsilon$
and $x$ then the solution depends continuously on $\epsilon$.
\end{theorem}
To prove Theorem 4 we apply Theorem 3 to the problem
$$
A(\epsilon)^{-1}f_\epsilon(x)=0\,.
$$
Condition (b) of Theorem 3
follows from the fact that the limit $\lim_{\epsilon\to
0}A(\epsilon)^{-1}=Df_0(x_0)^{-1}$ is attained in the strong
operator topology. To verify (c) of Theorem 3 we have
$$
\|A(\epsilon)^{-1}Df_\epsilon(x) - I\|\le
\|A(\epsilon)^{-1}\|\cdot\|Df_\epsilon(x)-A(\epsilon)\|
\le M\|Df_\epsilon(x)-A(\epsilon)\|\to 0
$$
as $x\to x_0$ and $\epsilon\to 0$.
\subsection{The solution of the limiting problem}
Here we solve the limiting problem
from section 2.2:
$$
\displaylines{\quad
\frac12 V''(b_i)(x-s_i,x-s_i) u_i(x)
-\Delta w_i(x) +a_iw_i(x) -pu_i(x)^{p-1}w_i(x)=0,
\hfill\cr\hfill
i=1,\ldots,m.\quad\cr}
$$
Recall that $s_i$ is a vector in ${\mathbb R}^n$ for each $i$.
By the Fredholm alternative we must have
$$
\frac12\int V''(b_i)(x-s_i,x-s_i)u_i(x)D_ku_i(x)\,dx = 0
$$
for $k=1,\ldots,n$, $i=1,\ldots, m$. Since $u_i$ is an even
function we have that $D_ku_i$ is an odd function and the condition
reduces to
$$
V''(b_i)\left(s_i, \int xu_i(x)D_ku_i(x)\,dx\right) =0\,.
$$
The integral $\int xu_i(x)D_ku_i(x)\,dx$ is a vector whose $j^{\rm
th}$ component is the integral $\int x_ju_i(x)D_ku_i(x)\,dx.$ Now $D_ku_i$ is an odd
function of $x_k$, but an even function of $x_j$ for $j\not=k$. Hence
$\int x_ju_i(x)D_ku_i(x)\,dx=0$ unless $j=k$, in which case integration
by parts gives $\int x_ku_i(x)D_ku_i(x)\,dx=-\frac12\int u_i(x)^2\,dx
\not=0$. Hence the Fredholm alternative gives
$$
V''(b_i)(s_i,e_k)=0
$$
for $k=1,\ldots,n$, $i=1,\ldots, m$, where $e_k$ is the $k^{\rm
th}$ standard basis vector of ${\mathbb R}^n$. Since $V''(b_i)(x,y)$ is
a non-degenerate, symmetric, bilinear form we obtain the non-degenerate
solutions $s_i=0$, $i=1,\ldots, m$. For these values of $s_i$ we can
solve the limiting problem for the functions $w_i$, uniquely, in the
spaces $W_i$. The solutions are the functions $\eta_i$.
\subsection{A weighted eigenvalue problem}
The set $\Sigma$ of complex numbers $\lambda$, for which the
equation
$$
-\Delta v + v - \lambda \phi(x)^{p-1}v=0
$$
has a non-trivial solution in $H^2$, can be viewed as the set of
reciprocals of the spectrum of a compact, self-adjoint operator.
Let $A$ be the operator $-\Delta +1$. Then $A$ is a positive,
self-adjoint operator in $L^2({\mathbb R}^n)$ with spectrum $[1,\infty[$. Let
$A^{\frac12}$ be its positive, self-adjoint square-root and let
$v=A^{-\frac12}y$ where $y\in L^2({\mathbb R}^n)$. We write the weighted
eigenvalue problem as
$$
y= \lambda \Big(A^{-\frac12}\phi^{\,p-1}A^{-\frac12}\Big)y, \quad 0\not=y \in L^2.
$$
Now the operator $A^{-\frac12}\phi^{\,p-1}A^{-\frac12}$ is clearly
self-adjoint and positive, but it is also compact. The
reason is that $A^{-\frac12}$ can be viewed as a bounded operator from
$L^2$ to $H^1$, while the multiplication operator $\phi^{\,p-1}$ is
compact from $H^1$ to $L^2$ since the function $\phi$ decays at
infinity. It follows that $\Sigma$ is a sequence of positive numbers
tending to infinity.
We can easily show that the lowest eigenvalue is 1, corresponding to
the eigenfunction $\phi$. Let $L$ be the self-adjoint operator
$$
Lu = -\Delta u +u -\phi(x)^{p-1}u\,.
$$
The lowest eigenvalue of $L$ is 0 and corresponds to the positive ground
state $\phi$. Hence, using the inner-product and norm in $L^2$, we have
$$
0\le \langle Lu,u \rangle = \langle Au,u\rangle
-\langle\phi^{\,p-1}u,u\rangle
$$
for all $u$ in the domain of $L$.
Putting $u=A^{-\frac12}y$ and using self-adjointness we have
$$
0\le \|y\|^2-\langle A^{-\frac12}\phi^{\,p-1}A^{-\frac12}y,y\rangle.
$$
From this it follows that the lowest eigenvalue of the weighted
eigenvalue problem is 1.
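Indeed, writing $K=A^{-\frac12}\phi^{\,p-1}A^{-\frac12}$, the last
inequality says that $\langle Ky,y\rangle\le\|y\|^2$ for $y$ in a dense
subspace of $L^2$, hence for all $y\in L^2$, so every eigenvalue $\mu$
of $K$ satisfies $\mu\le1$, and hence every $\lambda=1/\mu$ in $\Sigma$
satisfies $\lambda\ge1$. The value $\lambda=1$ is attained: since
$A\phi=\phi^{\,p}$ we have
$$
K\big(A^{\frac12}\phi\big)=A^{-\frac12}\phi^{\,p-1}\phi
=A^{-\frac12}\phi^{\,p}=A^{-\frac12}A\phi=A^{\frac12}\phi\,,
$$
and $A^{\frac12}\phi$ belongs to $L^2$ because $\phi\in H^1$.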
\subsection{Wang's Lemma}
If $V(x)$ is bounded and satisfies $V(x) > \delta>0$ for some constant
$\delta$, then
$$
\|-\Delta v + V(\epsilon x)v\|_{L^2({\mathbb R}^n)} \ge K\|v\|_{H^2({\mathbb R}^n)}
$$
for all $v\in H^2$ and for some constant $K>0$ independent of $v$
and $\epsilon$. The proof is very short and is in the appendix of
\cite{Wang}.
\bigskip
\begin{thebibliography}{99}
\frenchspacing
\bibitem{Ad}
Adams, R. A.\ \ {\it Sobolev spaces.} Academic Press,
New York, London, Toronto, Sydney, San Francisco, 1975.
\bibitem{ABC}
Ambrosetti, A., Badiale, M. and Cingolani, S.\ \ Semiclassical states of
nonlinear Schr\"{o}dinger equations. To appear in Arch. Rat. Mech. Anal.
\bibitem{ZR}
Coti Zelati, V. and Rabinowitz, P. H.\ \ Homoclinic orbits for second
order Hamiltonian systems possessing superquadratic potentials.
J. Amer. Math. Soc. 4, 693--727, (1991).
\bibitem{FW} Floer, A. and Weinstein, A.\ \ Nonspreading wave packets
for the cubic Schr\"{o}dinger equation with a bounded potential. J.
Funct. Anal. 69, 397--408 (1986).
\bibitem{Gui1} Gui, C.\ \ Existence of multi-bump solutions for
nonlinear Schr\"{o}dinger equations via variational
method. Commun. Partial Differ. Equations 21, 787--820 (1996).
\bibitem{Gui2} Gui, C.\ \ Multi-bump solutions for nonlinear
Schr\"{o}dinger equations. C. R. Acad. Sci. Paris 322, 133--138
(1996).
\bibitem{Kw}
Kwong, M. K.\ \ Uniqueness of positive solutions of $\Delta u -u
+u^p=0$ in ${\mathbb R}^n$. Arch. Rational Mech. Anal. 105, 243--266 (1989).
\bibitem{Mag3}
Magnus, R. J.\ \ On perturbations of a translationally-invariant
differential equation.
Proc. Roy. Soc. Edin. 110A, 1--25 (1988).
\bibitem{Oh1}
Y. G. Oh.\ \ On positive multi-lump bound states of
nonlinear Schr\"{o}dinger equations under multiple well potential.\ \
Comm. Math. Phys. 131, 223--253 (1990).
\bibitem{PFP1}
Del Pino, M. and Felmer, P. L.\ \ Semi-classical states for nonlinear
Schr\"{o}dinger equations. J. Funct. Anal. 149, 245--265 (1997).
\bibitem{PFP2}
Del Pino, M. and Felmer, P. L.\ \
Multi-peak bound states for nonlinear Schr\"{o}dinger equations.
Ann. Inst. Henri Poincar\'{e}, Anal. non lin\'{e}aire 15, 127--149 (1998).
\bibitem{Rab}
Rabinowitz, P. H.\ \
On a class of nonlinear Schr\"{o}dinger
equations. Z. Angew. Math. Phys. 43, 270--291 (1992).
\bibitem{RS}
Reed, M. and Simon, B.\ \ {\it Methods of Modern Mathematical Physics II, IV.}
New York, Academic Press 1978.
\bibitem{Sere}
S\'{e}r\'{e}, E.\ \ Existence of infinitely many homoclinic orbits in
Hamiltonian systems. Math. Zeit. 209, 27--42 (1992).
\bibitem{Wang}
Wang, X.\ \ On concentration of positive bound states of nonlinear
Schr\"{o}d\-inger equations. Comm. Math. Phys. 153, 229--244 (1993).
\bibitem{MW}
Weinstein, M.\ \ Modulational stability of the ground
states of nonlinear Schr\"{o}dinger equations. SIAM J. Math. Anal. 16,
567--576 (1985).
\end{thebibliography}
\bigskip
\noindent
{\sc Robert Magnus}\\
The University Science Institute,\\
Dunhaga 3, 107--Reykjavik, Iceland.\\
E-mail: {\tt robmag@raunvis.hi.is}
\end{document}