%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%% This is a LaTeX R9P0 file %%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\documentclass[a4paper,11pt,leqno]{article}
\documentstyle[11pt,leqno]{article}
%\usepackage{amstex}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% inputs for unix UNIX %%%
%\usepackage{amsmath}
%\usepackage{amssymb} \usepackage{theorem}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\documentstyle[11pt]{article}
%\usepackage{amstex}
%\input AMSSYMB.STY
%\input amsmath
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% for PC-Zanolin
%\input amssym.def
%\input amssym.tex
%\def\boldsymbol\delta{\bf \delta}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Minimal re-implementation of AmS-TeX's \Cal and \Bbb (calligraphic and
% blackboard-bold letters) using the raw msbm fonts, so that amssymb need
% not be loaded.  Copied from AmS-TeX; do not modify lightly.
\font\tenmsb=msbm10 \font\sevenmsb=msbm7 \font\fivemsb=msbm5
% Guard: if AmS-TeX itself is already loaded, stop reading this block
% (\endinput) so the definitions are not made twice.
\catcode`\@=11 \ifx\amstexloaded@\relax\catcode`\@=\active
\endinput\else\let\amstexloaded@\relax\fi
\def\spaces@{\space\space\space\space\space}
\def\spaces@@{\spaces@\spaces@\spaces@\spaces@\spaces@}
\def\space@.{\futurelet\space@\relax} \space@.
% Error reporting helper in AmS-TeX style.
\def\Err@#1{\errhelp\defaulthelp@\errmessage{AmS-TeX error: #1}}
\def\relaxnext@{\let\next\relax} \def\accentfam@{7}
\def\noaccents@{\def\accentfam@{0}}
% \Cal{X}: calligraphic letter (math family 2); errors outside math mode.
\def\Cal{\relaxnext@\ifmmode\let\next\Cal@\else \def\next{\Err@{Use
\string\Cal\space only in math mode}}\fi\next}
\def\Cal@#1{{\Cal@@{#1}}} \def\Cal@@#1{\noaccents@\fam\tw@#1}
% \Bbb{X}: blackboard-bold letter (msbm family); errors outside math mode.
\def\Bbb{\relaxnext@\ifmmode\let\next\Bbb@\else \def\next{\Err@{Use
\string\Bbb\space only in math mode}}\fi\next}
\def\Bbb@#1{{\Bbb@@{#1}}} \def\Bbb@@#1{\noaccents@\fam\msbfam#1}
% Declare the msbm family at text/script/scriptscript sizes.
\newfam\msbfam \textfont\msbfam=\tenmsb \scriptfont\msbfam=\sevenmsb
\scriptscriptfont\msbfam=\fivemsb
% NOTE(review): \sgn and \rot were each defined twice in the original source
% (first with \newcommand and \hspace padding, then silently overwritten by
% the plain \def versions below).  Only the definitions that actually took
% effect are kept, so the typeset output is unchanged.
% Blackboard-bold shorthands (\RR is drawn with slashed I's instead of msbm).
\def\SS{{\Bbb S}}
\def\CC{{\Bbb C}}
\def\RR{{I\!\!R}}
\def\ZZ{{\Bbb Z}}
%\def\eop{\mbox{ \vrule height7pt width7pt depth0pt}}
\def\NN{{\Bbb N}}
% Upright operator names.  NOTE(review): \deg and \det override the standard
% LaTeX math operators of the same name (same appearance, but no limits
% handling); kept as in the original.
\def\deg{{\mbox{\rm deg}}}
\def\sgn{{\mbox{\rm sgn}}}
\def\det{{\mbox{\rm det}}}
\def\rot{{\mbox{\rm rot}}}
% One/two-letter author shorthands.  NOTE(review): several of these overwrite
% standard LaTeX commands -- \d (dot-under accent), \b (bar-under accent),
% \t (tie accent), \a (tabbing accent), \o, \O, \L (letters o-slash, O-slash,
% L-stroke), \P (pilcrow) and \SS -- so those must not be used with their
% usual meaning anywhere in this document.
\def\fr{\partial}
\def\m{\medskip}
\def\n{\noindent}
\def\sub{\subseteq}
\def\sups{\supseteq}
\def\R{\bold R}
\def\N{\bold N}
\def\P{\bold Z^+}
\def\Q{\bold Q}
% Lowercase Greek shorthands.
\def\a{\alpha}
\def\b{\beta}
\def\d{\delta}
\def\e{\eta}
\def\eps{\varepsilon}
\def\f{\phi}
\def\g{\gamma}
\def\l{\lambda}
\def\o{\omega}
\def\r{\rho}
\def\s{\sigma}
\def\t{\tau}
\def\th{\theta}
% Uppercase Greek shorthands.
\def\D{\Delta}
\def\F{\Phi}
\def\G{\Gamma}
\def\L{\Lambda}
\def\O{\Omega}
\def\Th{\Theta}
% Delimiters and arrows.
\def\V{\Vert}
\def\la{\langle}
\def\ra{\rangle}
\def\rr{\rightrightarrows}
\def\da{\downarrow}
\def\ua{\uparrow}
%\renewcommand{\theequation}{\thesection.\arabic{equation}}
%\@addtoreset{equation}{section}
%%%This one is added to avoid using \scout
% \qed: end-of-proof marker (filled 2mm square, pushed flush right).
\newcommand{\qed}{\mbox{}\nolinebreak\hfill\rule{2mm}{2mm}\par\medbreak}
% \Proof: small-caps "Proof" heading followed by a horizontal gap.
\newcommand{\Proof}{{\sc Proof}\hspace{5 mm}}
% Theorem-like environments; each keeps its own independent counter.
\newtheorem{Theorem}{Theorem}
\newtheorem{Lemma}{Lemma}
\newtheorem{Corollary}{Corollary}
\newtheorem{Remark}{Remark}
\newtheorem{Definition}{Definition}
\newtheorem{Proposition}{Proposition}
%\textwidth18cm
%\hoffset-2.2cm
\begin{document}
\title{On a class of bounded trajectories for some non-autonomous
systems }
\author{A. Gavioli*\\
{\small Dipartimento di Matematica Pura ed Applicata}\\
{\small Via Campi, 213b, 41100 Modena, Italy}\\
L. Sanchez**\\
{\small Faculdade de Ci\^encias da Universidade de Lisboa}\\
{\small CMAF, Avenida Professor Gama Pinto, 2, 1649-003 Lisboa, Portugal}}
%\dedicatory{}%
%\subjclass{}%
%\smallskip
%\centerline{\small Fac. Ci\^encias de Lisboa and Centro de Matem\'atica e Aplica\c c\~oes Fundamentais}
%\centerline{\small Av. Prof. Gama Pinto 2, 1649-003 Lisboa, Portugal}
\maketitle
\smallskip
{\small
\begin{quote}
{\sl Abstract.} We prove, by variational arguments, the existence of a solution to the boundary value problem in the half line
\begin{equation}\label{intro}
\left\{
\begin{array}{l}
\ddot x+c\dot x=a(t)V'(x)\,\, \\
\\
x(0)=0,\,\,\,x(+\infty)=1.
\end{array}
\right.
\end{equation}
where $c\geq 0$ and $a$ belongs to a certain class of positive functions. The existence of such a solution in the case $c=0$ means that the system (\ref{intro})
behaves in a significantly different way from its autonomous counterpart.
Math. subject classification: 34B40, 34C37.
Keywords: bounded solution, heteroclinic, non-autonomous equation.\\
*Supported by CNR, Italy.
\\ **Supported by GRICES and Funda\c c\~ao para a Ci\^encia e Tecnologia, program POCTI (Portugal/FEDER-EU).
\end{quote}
$~$
\\
%\\
%\\
%AMS (MOS) Subject Classification: Primary 34 C 25, 58 F 22
}
\section{Introduction}
Consider a smooth scalar potential $V(x),\,\,x\in\RR,\,$ which is positive in $]0,1[$ and such that $V(0)=V(1)=0,$ a scalar function $a(t)$ which has a positive infimum in $\RR$ and, finally, let $c\geq 0.$
In this paper we are concerned with the existence of solutions to the following boundary value problem on the interval $]0,\infty[$:
\begin{equation}\label{eq}
\ddot x+c\dot x=a(t)V'(x)\,\,
\end{equation}
\begin{equation}\label{bc}
x(0)=0,\,\,\,x(+\infty)=1.
\end{equation}
More precise assumptions on the data will be given below. We would like first to make some comments about the problem.
Our interest in this problem has two motivations, according to whether $c=0$ or $c>0$. The first is that, in the case $c=0$, if $a$ and $V$ are even functions, our problem is equivalent to that of finding a heteroclinic connection between non-consecutive equilibria $\pm1$ of a potential $V$ having three minima at the same level; such a problem has no solution if $a$ is a constant because energy is conserved. Hence it is meaningful to investigate conditions on the time dependence of the coefficient $a(t)$ under which the mentioned connection appears. Striking differences between autonomous and non-autonomous systems have been investigated by other authors, as an example see \cite{cr}. It follows from our results that with respect to (\ref{intro}) there are differences of this kind for $c=0$ but not for $c>0$. In fact, in the case $c>0$ a solution of our boundary value problem exists in the autonomous case as well.
Several authors have considered the problem of finding trajectories
between equilibria of non-autonomous equations: we refer the reader to the
recent paper by Malaguti, Marcelli and Partsvania \cite{mmp} and the references therein.
We shall present two existence theorems, under distinct sets of conditions on the data. It turns out that the way the (increasing) function $a(t)$ approaches its limit plays an important role in the sufficient conditions. The approach is variational in both cases.
More precisely, in Theorem 1 problem (\ref{intro}) will be solved in a situation where weak regularity assumptions on $V$ and its minima are assumed; while with respect to $a(t)$ it is required that $a(t)$ tends to its limit $l$ in such a way that, if $l<\infty$, $l-a(t)$ is slower than $1/t$. On the other hand, in Theorem 2, dealing with the case $c=0$ only, we prove the existence of solutions for a wider class of functions $a(t)$, while confining ourselves to the class of $C^2$ potentials $V$.
The description of our assumptions follows.
\begin{itemize}
\item[($H_1$)] $V\in C^1(\RR)$ is a non negative function, $V(0)=V(1)=0$ and $V>0$ in $]0,1[$.
\item[($H_2$)] There exist $\delta>0$ and $A_1$, $A_2>0$ such that $A_1 x^2\leq V(x)\leq A_2 x^2$ for $|x|<\delta$.
\item[($H_3$)] The function $a: [0,+\infty[\to]0,+\infty[$ is such that
there exists $t_0\ge 0$ with the property that $a$ is increasing in $[t_0,+\infty[$.
\end{itemize}
\begin{Theorem}
Assume that ($H_1$) holds and $a: [0,+\infty[\to]0,+\infty[$ is a continuous function.
If $c=0$ assume that ($H_2$) and ($H_3$) hold as well and in addition that $\eta:=\inf_{t\ge0}a(t)>0$ and
$l:=\lim_{t\to+\infty}a(t)$ has the property
\begin{equation}\label{gap}
\lim_{t\to+\infty}t(l-a(t))=+\infty
\end{equation}
then the boundary value problem (\ref{eq})-(\ref{bc}) has at least one solution that takes values in $[0,1]$.
\end{Theorem}
\begin{Remark}
If $l\in\RR^+\,$ it is easy to check that (\ref{gap}) holds if $a(t)$ is of the form $\, a(t)=l-\frac{\gamma}{(1+t)^\beta},\,\, 0<\gamma<l,\,\,0<\beta<1$.
% NOTE(review): a span of the original text was lost in this source; the
% constraints on $\gamma,\beta$ and the transition sentence below have been
% reconstructed from the context (condition (\ref{gap}) forces $\beta<1$).
Indeed, for any $C, D, E>0$ there exists $\sigma_0>0$ such that if $0<\sigma<\sigma_0$
$$
a\left(\frac{C}{\sigma}+D\right)+E\sigma<l.
$$
\end{Remark}
\begin{Theorem}
% NOTE(review): part of the statement of this theorem was lost in the source
% (the stripped span contained \end{Remark} and \begin{Theorem} as well); the
% hypotheses on $V$ have been reconstructed from their later use, since
% $\mu=\sqrt{\e V''(0)}$ below requires $V\in C^2$ and $V''(0)>0$.
Let $c=0$ and assume that ($H_1$) holds with $V\in C^2(\RR)$ and $V''(0)>0$. Let $a:\RR\to]0,+\infty[$ be a function of bounded variation
with $\e:=\inf_{t\ge 0}a(t)>0$ satisfying ($H_3$) and the property
\begin{equation}\label{gap2}
\lim_{t\to+\infty}(l-a(t))e^{2\mu t}=+\infty,
\end{equation}
where $l:=\lim_{t\to+\infty}a(t)$ and $\mu=\sqrt{\e V''(0)}$.
Then the boundary value problem (\ref{eq})-(\ref{bc}) has at least one solution taking values in $[0,1]$.
\end{Theorem}
\begin{Corollary}
Let $c=0$, $a$ and $V$ be even functions satisfying the assumptions of Theorem 1 or Theorem 2. Moreover assume that $1$ is an isolated minimizer of $V$. Then equation (\ref{eq}) has a heteroclinic solution connecting the equilibria $-1$ and $1$.
\end{Corollary}
\
{\bf Aknowledgement.} The authors are indebted to Alessandro Margheri for many fruitful discussions on the subject of this paper.
\section{A comparison between autonomous and non-autonomous problems}
First, note that if $\,c=0\,$ and $a(t)$ is constant, then
problem (\ref{intro}), which corresponds now to an autonomous
equation, has no solution. Then as a next step and still maintaining $c=0,$ the simplest non-autonomous system one can discuss is one where $a(t)$ is a ``bang-bang'' function with only one switch.
More precisely, if
$0<a<b$ and $T>0,\,$ we define
\begin{equation}\label{v}
a(t):=\left\{
\begin{array}{l}
a,\quad 0\leq t\leq T\\
\\
b,\quad t> T.
\end{array}
\right.
\end{equation}
Then, consider a $C^1$ potential $V(x)$ as above and which is locally bounded from above by $Ax^2$ and $A(x-1)^2,\,\,A>0,$ respectively around $x=0$ and $x=1.$
We note that problem (\ref{intro}) has a solution if and only if there exists a solution of $\ddot x=aV'(x)$ on $[0,T],$ with $x(0)=0$ such that the corresponding solution curve $(x(t),\dot x(t))$ in the phase plane $(x,\dot x)$ intersects at time $T$ the heteroclinic orbit between $(0,0)$ and $(0,1)$ corresponding to the equation $\ddot x=bV'(x).$
Set $\xi=x(T)\in]0,1[.\,$ By the conservation of energy, the heteroclinic solution of the second equation satisfies $\,\dot x=\sqrt{2bV(x)},\,$ whereas the solution of the first equation with $\,x(0)=0\,$ satisfies $\,\dot x=\sqrt{2aV(x)+C}$ for some constant $C$. Then, imposing that the two solution curves intersect at time $T$ in the phase plane, we get
\begin{equation}\label{conser}
\,C=2(b-a)V(\xi)\,
\end{equation}
and, if our problem admits a solution, then the following representation holds for $T=T(\xi):$
\begin{equation}\label{T}
T(\xi)=\int_0^\xi \frac{dx}{\sqrt{2aV(x)+2(b-a)V(\xi)}}
\end{equation}
By the quadratic growth of $\,V(x)\,$ in a neighbourhood of $\,x=0,\,$ there exists a constant $\underline c$ such that
\begin{equation}
\underline c\int_0^{\xi} \frac{dx}{\sqrt{2a x^2+2(b-a){\xi}^2}}\leq T(\xi)
\end{equation}
for any sufficiently small $\xi>0.$
Since
% NOTE(review): the denominator on the right-hand side read $\sqrt b$ in the
% source; evaluating the integral ($\int_0^{\xi}dx/\sqrt{x^2+k^2\xi^2}
% =\log((1+\sqrt{1+k^2})/k)$ with $k^2=(b-a)/a$) gives $\sqrt{b-a}$.
$$\int_0^{\xi} \frac{dx}{\sqrt{2a x^2+2(b-a){\xi}^2}}=\frac{1}{\sqrt{2a}}\log\left(\frac{\sqrt a +\sqrt b}{\sqrt{b-a}}\right)$$
we infer that $T(\xi)$ is bounded away from zero in a right neighbourhood of $\xi=0.$ In a similar way it can be shown that $T(\xi)\to+\infty$ as $\xi\to 1^-$.
Then, since $T(\xi)$ is a continuous function of $\xi,$ we conclude that there exists $T_0>0$ such that $T(]0,1[)= [T_0,+\infty[$ or $T(]0,1[)= ]T_0,+\infty[$ and therefore problem $(\ref{intro})$ has no solution if $T<T_0$ (while it has at least one solution whenever $T>T_0$).
\medskip
In the simple example above the switch time $T_0 $ for the function $a(t)$ actually depends on $V(x)$ through $\underline c.$
This suggests that, generally speaking, despite the fact that the variables $\,x\,$ and $\,t\,$ are separate in the right-hand side of our equation, the conditions given on $a(t)$ to solve problem (\ref{intro}) when $c=0$ may naturally involve the potential $V.$ This is unlike the heteroclinic problem $\,x(-\infty)=0,\,\,\,x(+\infty)=1\,$ associated to the same equation for the class of potentials considered above. In fact, in this case a general result guarantees existence of solutions if $\,\lim_{|t|\to+\infty}a(t)= l\in\RR,\,$ and $\,a(t)\leq l\,$ with strict inequality holding on a set of positive measure. See \cite{bs}.
We now turn to consider the features of the case $c>0$. If we again
take $a$ to be a constant, the problem (\ref{intro}) may be related to a kind
of boundary value problem that arises in the theory of travelling
waves for reaction-diffusion equations (see \cite{aw}). Indeed, if one looks for
strictly monotone solutions then (\ref{intro}) is easily transformed into a
first order problem for the new unknown function $\psi=\Phi^2$ where
$\Phi$ describes the graph of the curve $\dot x=\Phi(x)$ in the phase
plane. See \cite{bs, gk, mm}. The new formulation may be written as
\begin{equation}\label{ordine1}
\left\{
\begin{array}{l}
\psi'=2(aV'(x)-c\sqrt \psi)\,\, \\
\\
\psi(1)=0,\,\,\,\psi(x)>0\;\; \forall x\in[0,1[.
\end{array}
\right.
\end{equation}
It is not difficult to conclude, directly from phase-plane analysis or by studying (\ref{ordine1}), that for any $c>0$ and $a>0$ constant, the problem (\ref{intro}) has a solution. In fact if we consider, for $\epsilon>0$, the Cauchy problem
\begin{equation}\label{Cauchy}
\left\{
\begin{array}{l}
\psi'=2(aV'(x)-c\sqrt{\psi_+ +\epsilon})\,\, \\
\\
\psi(1)=0,\,\,\,
\end{array}
\right.
\end{equation}
it turns out that it has a solution in $[0,1]$ that stays above
$2aV(x)$. Then, by taking the limit as $\epsilon\to 0$, our claim
follows. From our main results we shall see that the solution still exists when $a$ depends on $t$.
\section{Proof of the main results}
{\bf Proof of theorem 1.}
We shall minimize the functional
\begin{equation}\label{fun}
{\cal F}(x)=\int_0^{+\infty} e^{ct} \left(\frac{\dot x(t)^2}{2}+a(t)V(x(t))\right )\,dt
\end{equation}
in the functional space
$$X:=\{ x\in C([0,+\infty[)\cap H^1_{loc}([0,+\infty[) \, : \, x(0)=0,\,\,\,x(+\infty)=1\}.$$
Let ${\cal I}= \inf_X {\cal F}$ and consider $x_n\in X$ such that ${\cal F}(x_n)\to {\cal I}.$
For $t\geq 0$ we define $x_0(t):=\min\{t, 1\}\in X, $ and we have ${\cal I} \leq K:={\cal F}(x_0)=\int_0^1 e^{ct}\left(\frac{1}{2}+a(t) V(t)\right)dt.$
% NOTE(review): the integrand was garbled in the source ("(1+a(t)) V(t))");
% it has been rewritten as $\frac12\dot x_0^2+a(t)V(x_0)$ evaluated on $x_0$.
It is clear that, for any $M>0$, $(x_n)_n$ is bounded in $H^1(0,M)$
and $(e^{ct/2}\dot x_n)_n$ is bounded in $L^2(0,+\infty)$.
Then we can take a subsequence, still denoted by $(x_n)_n$, which converges
to some absolutely continuous function $x$ uniformly on compact sets and
in such a way that
$$
e^{ct/2}\dot x_n\rightharpoonup e^{ct/2}\dot x
\;\;\;\mbox{in}\;\;\; L^2(0,+\infty).
$$
Moreover we may assume $0\le x_n(t)\le 1$, since otherwise we could replace
$x_n$ with $\min(\max(x_n,0),1)$, still obtaining a minimizing sequence.
{\bf Case 1.} $c>0$.
Let $T>0$: from the Cauchy-Schwarz inequality we obtain, for any $t\ge T$,
$$
|x_n(t)-1|\le\int_T^{+\infty}|\dot x_n(s)|\,ds\le
\frac{e^{-cT/2}}{\sqrt c}(\int_T^{+\infty}e^{cs}\dot x_n(s)^2\,ds)^{1/2}\le
\frac{e^{-cT/2}}{\sqrt c}(2K)^{1/2}.
$$
Hence, for any $\delta>0$, we can choose $T>0$ such that
$$
|x_n(t)-1|\le\delta \quad \forall t\ge T,\; \forall n.
$$
Then the limit function $x$ satisfies the same inequality, and since $\delta$ is arbitrary, $x\in X$. Moreover, by the weak lower semicontinuity
of the $L^2$-norm and Fatou's Lemma, we have
$$
{\cal F}(x)\le\liminf_n {\cal F}(x_n)=\cal I,
$$
so that $x$ actually minimizes $\cal F$ in $X$. Then, by standard arguments, $x$ is a solution of (\ref{eq})-(\ref{bc}).
\
{\bf Case 2.} $c=0$.
Let us define, for fixed $\alpha\in ]0,\min(\delta,1/4)[$,
$$
t_1(n)=\max\{t\ge 0|\, x_n(t)\le\alpha\},\quad
t_2(n)=\min\{t\ge t_1(n)|\, x_n(t)\ge 1-\alpha\},
$$
which will be simply denoted by $t_1$ and $t_2$.
Then from the Schwarz inequality we obtain
$$
1-2\alpha\le(\int_{t_1}^{t_2}\,dt)^{1/2}
(\int_{t_1}^{t_2}\dot x_n(t)^2\,dt)^{1/2}\le
(2K)^{1/2}\sqrt{t_2-t_1},
$$
so that
$$
t_2-t_1\ge\frac{(1-2\alpha)^2}{2K}.
$$
Now we can find $t_3$ and $t_4$ (depending on $n$) such that
$x_n(t_3)=1/4$, $x_n(t_4)=3/4$,
$t_1<t_3<t_4<t_2$ and $1/4\le x_n(t)\le 3/4$ for every $t\in[t_3,t_4]$.
% NOTE(review): a span of the original text was lost here (the definition of
% $\Delta$, a bound for $t_2-t_1$, and the choice of $T$); it has been
% reconstructed so as to match the estimates used below.
Arguing as above, $t_4-t_3\ge 1/(8K)$, so that
$\int_{t_1}^{t_2}V(x_n(t))\,dt\ge\Delta:=\frac{1}{8K}\min_{1/4\le x\le 3/4}V(x)>0$;
moreover, for $\alpha$ small, $K\ge \eta A_1\alpha^2 (t_2-t_1)$, whence
$t_2-t_1\le K/(\eta A_1\alpha^2)$. By ($H_3$) and (\ref{gap}) we can choose $T\ge t_0$ such that
\begin{equation}\label{gap3}
a(T)>a\left(\frac{K}{\eta A_1 \alpha^2}+t_0\right)+\frac{1}{\Delta}\left(\frac{1}{2t_0}+\frac{A_2 M_0t_0}{3}\right)\alpha^2,
\end{equation}
where $M_0=\sup_{0\le t\le t_0}a(t)$.
We will show that if $t_1>T,$ then we may replace the function $x_n$ of the minimizing sequence with a function $v_n$ for which $t_1\leq T.$
For consider the function:
\begin{equation}\label{vn}
v_n(t):=\left\{
\begin{array}{l}
\alpha t/t_0,\quad 0\leq t\leq t_0\\
\\
x_n(t+t_1-t_0)\quad t\geq t_0.
\end{array}
\right.
\end{equation}
We have
\begin{eqnarray}\nonumber
{\cal F}(v_n)&=&\frac{\alpha^2}{2t_0}+\int_{t_0}^{+\infty} \frac{\dot x_n(t+t_1-t_0)^2}{2}\,dt+\int_0^{t_0} a(t)V(\alpha t/t_0)\,dt+
\int_{t_0}^{+\infty} a(t)V(x_n(t+t_1-t_0))\,dt\\\nonumber
&=&\frac{\alpha^2}{2t_0}+\int_{t_1}^{+\infty} \frac{\dot x_n(t)^2}{2}\,dt+\int_0^{t_0} a(t)V(\alpha t/t_0)\,dt+
\int_{t_1}^{+\infty} a(t-t_1+t_0)V(x_n(t))\,dt.
\end{eqnarray}
Then, if $t_1>T,$ taking into account that $a(t)$ is increasing in $[t_0,+\infty[$ and (\ref{gap3}), it follows that
\begin{eqnarray}\nonumber
{\cal F}(v_n)-{\cal F}(x_n) &\leq& \frac{\alpha^2}{2t_0}+\int_0^{t_0}a(t)V(\alpha t/t_0)\,dt+
\int_{t_1}^{+\infty} \left(a(t-t_1+t_0)-a(t)\right)V(x_n(t))\,dt\\\nonumber
&\leq&\frac{\alpha^2}{2t_0}+\frac{M_0 A_2\alpha^2 t_0}{3}-
\int_{t_1}^{t_2} \left(a(t)-a(t-t_1+t_0)\right)V(x_n(t))\,dt\\\nonumber
&\leq&\left(\frac{1}{2t_0}+\frac{M_0 A_2 t_0}{3}\right)
\alpha^2-\Delta \left(a(t_1)-a(t_2-t_1+t_0)\right)\\\nonumber
&\leq&\left(\frac{1}{2t_0}+\frac{M_0 A_2 t_0}{3}\right)\alpha^2-
\Delta \left(a(T)-a(\frac{K}{\eta A_1\alpha^2}+t_0)\right)\leq 0.
\end{eqnarray}
Then, for any $x_n$ we may assume $t_1\leq T$: hence
$x_n(t)\ge \alpha$ for any $n$ and any $t\ge T$, and by pointwise convergence we get also $x(t)\ge\alpha$ for any $t\ge T$.
Now, as in case 1), by the weak lower semicontinuity we obtain
$$
{\cal F}(x)\le\liminf_n {\cal F}(x_n)=\cal I.
$$
Since ${\cal F}(x)<+\infty$ and $V>0$ in $]0,1[$, it is not difficult to see, using the arguments of Rabinowitz \cite{ra}, Prop. 3.11, in this simpler situation, that $\lim_{t\to +\infty}x(t)$ exists and is equal to 1. Then $x\in X$, so that it minimizes ${\cal F}$ in $X$. The fact that $x$ takes values in $[0,1]$ is a straightforward consequence of the construction of the minimizing sequence.
\qed
\
{\bf Proof of theorem 2.}
For $I\subset [0,+\infty[$ let
$$
{\cal F}(x,I):=\int_I \left(\frac{\dot x(t)^2}{2}+a(t)V(x(t))\right )\,dt.
$$
Consider the equation
\begin{equation}\label{tagg1}
\ddot x=a(t)V'(x)\end{equation}
with the boundary condition (\ref{bc}).
We put:
\begin{equation}
X(\xi)=\{x\in X|\,x(0)=\xi,\,\,x(+\infty)=1\},\quad\xi\in\RR.
\end{equation}
$X$ will be endowed with the norm
$x\mapsto (|x(0)|^2+\Vert \dot x\Vert_2^2)^{1/2}$.
\medskip\noindent
We shall make use of the following three Lemmas.
\medskip
\begin{Lemma} $(l-a(t))\g(t)^2\to+\infty$ as $t\to+\infty$, where
$\g$ is the solution of the Cauchy problem
\begin{equation}\label{tagg3}
\left\{
\begin{array}{l}
\g''(t)=a(t)V''(0)\g(t)
\\
\g(t_0)=0,\,\g'(t_0)=1
\end{array}
\right.
\end{equation}
\end{Lemma}
\Proof From (\ref{gap2}) we obviously get, as $t\to+\infty$,
\begin{equation}\label{tagg4}
(l-a(t))\r(t)^2\to+\infty,
\end{equation}
where $\r(t)=(e^{\mu(t-t_0)}-e^{-\mu(t-t_0)})/4\mu$.
On the other hand, since $\rho''(t)=\mu^2\rho(t)$,
$\rho(t_0)=0$, $\rho'(t_0)=1/2$, it is easy to see that $\g(t)>\r(t)$ for $t>t_0$. Indeed, from the initial conditions we obtain the assertion in a right neighbourhood of $t=t_0$. By contradiction, let $\t$ be the first point
after $t_0$ at which $\g(\t)=\r(\t)$. Then we must have $\g''(\s)<\r''(\s)$ at some $\s\in\,]t_0,\t[$, which is impossible, since there $\g''=a V''(0)\g\ge\mu^2\g>\mu^2\r=\r''$.
% NOTE(review): the previous sentence reconstructs a span lost in the source.
Hence $\g>\r>0$ on $]t_0,+\infty[$, so that $\g^2>\r^2$, and our claim follows from (\ref{tagg4}).
\qed
\medskip
\begin{Lemma} For any $\xi\in ]0,1]$ ${\cal F}$ attains its minimum on the class $X(\xi)$.
\end{Lemma}
\Proof
Let $(y_k)_k$ be a minimizing sequence for ${\cal F}$ on $X(\xi)$, and let $t_1>t_0$ be fixed.
For any $k\in\ZZ^+$ the following properties may be assumed to hold:
(a) $0\le y_k(t)\le 1$,
(b) $y_k$ solves (\ref{tagg1}) on $J:=[0,t_1]$
\noindent
Indeed, if these conditions are not satisfied it is enough to replace $y_k$, respectively:
(a) by $\min(\max(y_k,0),1)$
(b) on $J$ by a function which minimizes
${\cal F}(\cdot,J)$ on $y_k+H^1_0(J)$;
\noindent
It is easy to check that
${\cal F}$ does not increase after these procedures. Furthermore, $(y_k)_k$ is bounded in
$H^1(J)$: then we can suppose,
up to a subsequence, that $(y_k)_k$ converges uniformly on $J$
to some absolutely continuous function $y$ and that $\dot y_k\to \dot
y$ weakly in $L^2(J)$. Then, on the interval $J$ where $y_k$ solves
(\ref{tagg1}), $\ddot y_k$ is uniformly bounded: since the sequence
$(\dot y_k)_k$ is bounded in $L^2$, we conclude that it is actually
bounded in $H^1(J)$ and, even more so, in $L^{\infty}(J)$. Then we may
suppose that $(\dot y_k(0))_k$ converges, so that the continuous
dependence on initial data of the solutions of a differential equation
ensures that the limit function $y$ solves (\ref{tagg1}) on $J$ (and
also the $C^1$- convergence on that interval). Furthermore, $0\le
y(t)\le 1$. On the other hand, if $y$ vanishes at $t_0$, we should also get $\dot y(t_0)=0$, since $t_0$ is
in the interior of $J$ and $y$ cannot take negative values. But the conditions $y(t_0)=\dot y(t_0)=0$, together with (\ref{tagg1}), would imply $y(t)\equiv 0$ on $J$, in contrast with $y(0)=\xi>0$. Hence $y(t_0)>0$, and we can find $\delta>0$ such that $y_k(t_0)\ge\delta$ for large $k$'s. We then redefine $y_k$
by replacing its restriction to $S:=[t_0,+\infty[$ by the shifted function
$t\mapsto y_k(t+\t_k)$, where $\t_k\ge t_0$ is the last point such that
$y_k(\t)=y_k(t_0)$. Since $a$ is increasing in $S$, this operation does not increase
the value of ${\cal F}$. If we still denote by $y_k$ the modified functions, we can actually suppose that $y_k(t)\ge y_k(t_0)$ for any $t\in S:=[t_0,+\infty[$.
Now we can apply the arguments of theorem 1 and take a subsequence, still denoted by $(y_k)_k$, which converges to some absolutely continuous function $x$ uniformly on compact sets and
in such a way that $\dot y_k\rightharpoonup \dot x$ in $L^2(0,+\infty)$. Of course,
$x\equiv y$ in $J$. By pointwise convergence,
\begin{equation}\label{stimadalbasso}
x(t)\ge\delta \;\;\; \mbox{for all} \;\;\; t\in S.
\end{equation}
Furthermore, ${\cal F}(x)<+\infty$: again by the arguments at the end of
the proof of Theorem 1, this entails that $x(+\infty)\in\{0,1\}$.
But (\ref{stimadalbasso}) excludes the case $x(+\infty)=0$, and actually $x\in X(\xi)$. Now, thanks again to the weak lower semicontinuity of ${\cal F}$, $x$ minimizes ${\cal F}$ on $X(\xi)$. Of course, $x$ takes values in $[0,1]$.
\qed
Now, let $\xi_i\to 0^+$ as $i\to+\infty$, and apply the previous Lemma on the class $X(\xi_i)$ for any $i\in\ZZ^+$, so as to
get functions $x_i$ such that
$$
{\cal F}(x_i)\le {\cal F}(y) \quad{\mbox{for any}}\quad y\in X(\xi_i)
$$
As before we can suppose, up to a subsequence, that $(x_i)_i$ converges
uniformly on compact sets to some function $x\in X$. Furthermore, the same arguments as in the proof of the previous Lemma allow to suppose that
\begin{equation}\label{tagg5}
\dot x_i(t_0)\to \dot x(t_0).
\end{equation}
Then $x$ solves (\ref{tagg1}), like $x_i$, and the following properties hold: $0\le x(t)\le 1$, $x(t)\ge x(t_0)$ on $S$, $x(0)=0$, $x(+\infty)\in\{0,1\}$.
\medskip
\begin{Lemma} $x(t_0)>0$
\end{Lemma}
\Proof
Let us suppose, by contradiction, $x(t_0)=0$: since $x\ge 0$, we have $\dot x(t_0)=0$ as well, and from (\ref{tagg1}) we actually get $x(t)\equiv 0$. In particular, $x_i(t_0)<r$ for large $i$, where $r>0$ is such that $V''>0$ in $[0,r]$. Furthermore $t_i\to+\infty$ as $i\to+\infty$, where $t_i$ is the first time at which $x_i$ reaches the value $r$.
We put $\r_i=x_i(t_0)$, $\e_i=\dot x_i(t_0)$ and recall that each $x_i$ solves (\ref{tagg1}). Then, for any $\t\ge t_0$:
\begin{eqnarray}\nonumber
\frac12\dot x_i(\t)^2-\frac12\dot x_i(t_0)^2&=
\int_{t_0}^{\t}\dot x_i(s)\ddot x_i(s)\,ds=
\int_{t_0}^{\t}a(s)V'(x_i(s))\dot x_i(s)\,ds=
\\\nonumber
&=[a(s)V(x_i(s))]_{t_0}^{\t}-\int_{t_0}^{\t}V(x_i(s))\,da(s).
\end{eqnarray}
Since $\dot x_i(\t)\to 0$ and $V(x_i(\t))\to V(1)=0$ as $\t\to+\infty$, we get
$$
\frac12\e_i^2=a(t_0)V(x_i(t_0))+\int_{t_0}^{+\infty}V(x_i(s))\,da(s)
\ge\int_t^{t_i}V(x_i(s))\,da(s),
$$
for any $t\in[t_0,t_i]$. Now, let us denote by $t\mapsto\f(t;\xi,\e)$ the solution of (\ref{tagg1}) which fulfils the conditions $x(t_0)=\xi$, $\dot x(t_0)=\e$, so as to write $x_i(t)=\f(t;\r_i,\e_i)$. For $t\ge t_0$, and as long as
$\f(t;\xi,\e)\le r$, it is easy to check that the function
$V(\f(t;\xi,\e))$ is increasing with respect to all its arguments, so that $V(x_i(s))=V(\f(s;\r_i,\e_i))\ge W(t,\e_i)$ for any $s\in [t,t_i]$, where we put $W(s,\e)=V(\f(s;0,\e))$. Then
\begin{equation}\label{tagg6}
\frac12\ge\frac{W(t,\e_i)}{\e_i^2}(a(t_i)-a(t)),
\end{equation}
and we can let $i\to+\infty$. Since, by virtue of (\ref{tagg5}),
$\e_i\to \dot x(t_0)=0$, we look for the behaviour of $W(t,\e)/\e^2$ as $\e\to 0^+$, which depends on the partial derivatives of $W$ (hence of $\f$) with respect to $\e$. To this end we apply well-known results on differentiability with respect to initial data of the solution of a differential equation, which hold because the differential of the map $(x,y)\mapsto f(t,x,y)=(y,a(t)V'(x))$ is bounded uniformly with respect to $t$. Since $\f(t;0,0)\equiv 0$, the evolution of $\g(t)=\f'_{\e}(t;0,0)$ is ruled by (\ref{tagg3}). Hence
$$
\frac{\fr W}{\fr\e}(t,0)=V'(0)\g(t)=0,\quad
\frac{\fr^2 W}{\fr\e^2}(t,0)=V''(0)\g(t)^2,
$$
so that
$$
\lim_{\e\to 0^+}\frac{W(t,\e)}{\e^2}=\frac12 V''(0)\g(t)^2
$$
Now (\ref{tagg6}) entails $1\ge V''(0)\g(t)^2(l-a(t))$, in contrast with Lemma 1.
Then $x(t_0)>0$, as claimed.
\qed
{\bf Conclusion of the proof of Theorem 2.}
Since $x(t)\ge x(t_0)>0$ for $t\ge t_0$, the previous arguments show that
$x(+\infty)=1$, so that $x\in X$. Now, let $y\in X$, $i\in\ZZ^+$:
we can modify $y$ by putting $y_i=y+u_i$, where $u_i(0)=\xi_i$, $u_i\equiv 0$ in $[1,+\infty[$, $\dot u_i\equiv -\xi_i$
in $[0,1]$ , so that $y_i\in X(\xi_i)$ and
$\eps_i:=|{\cal F}(y_i)-{\cal F}(y)|\to 0$ as $i\to+\infty$. Then
${\cal F}(x_i)\le {\cal F}(y_i)\le {\cal F}(y)+\eps_i$, and the lower limit as $i\to+\infty$ yields ${\cal F}(x)\le {\cal F}(y)$. Hence $x$ minimizes ${\cal F}$ on $X$. Of course, $x$ takes values in $[0,1]$.
\
{\bf Proof of Corollary 1.} Under the assumptions of Corollary 1, (\ref{eq})-(\ref{bc}) with $c=0$ has a solution $x(t)$ taking values in $[0,1]$. Then the function
$$
w(t)=\left\{
\begin{array}{l}
x(t)\quad t\ge 0\\
\\
-x(-t)\quad t< 0.
\end{array}
\right.
$$
is a solution of (\ref{eq}) such that
$$
\lim_{t\to\pm\infty}w(t)=\pm 1.
$$
This is indeed a heteroclinic solution because
$\lim_{t\to\pm\infty}\dot w(t)=0$. In fact, integrating (\ref{eq}) between $0$ and $t>0$ we have
$$
\dot x(t)-\dot x(0)=\int_0^ta(s)V'(x(s))\,ds.
$$
Since there exists a sequence $t_n\to+\infty$ such that $\dot x(t_n)\to 0$
and the integrand in the right-hand side does not change sign in a neighbourhood of $+\infty$, we conclude that
$\int_0^{+\infty}a(s)V'(x(s))\,ds$ converges. Therefore
$\lim_{t\to+\infty}\dot x(t)=0$.
\
{\bf Final Remarks.} 1) As is shown by example (\ref{v}), condition
(\ref{gap2}) is not necessary for the existence of solutions to
problem (\ref{eq})-(\ref{bc}). We can also generalize this example, by simply requiring that $a(t)\equiv b$ for $t\ge T$. Then suitable computations show that ${\cal F}$ attains its minimum on $X$, provided that
the solution of the linear Cauchy problem
$$
\left\{
\begin{array}{l}
\g''(t)=a(t)V''(0)\g(t)
\\
\g(0)=0,\,\g'(0)=1
\end{array}
\right.
$$
satisfies the inequality $\dot\gamma(T)<\sqrt{bV''(0)}\gamma (T)$.
2) Suppose that $V$ has only one critical point in $]0,1[$. Then we assert that the solution of (\ref{eq})-(\ref{bc}) found in Theorem 1 or in Theorem 2 is monotone increasing. In order to see this, we argue by contradiction.
If $x(t)$ is not monotone, we can find $0\le s_1<s_2<s_3<s_4$ such that $x(s_2)=\max_{[s_1,s_4]}x>\min_{[s_1,s_4]}x=x(s_3)$, $x(s_4)=x(s_2)$ and $x(s_1)=x(s_3)$. Then, replacing
$x|_{[s_1,s_3]}$ or $x|_{[s_2,s_4]}$ respectively with the constants
$x(s_3)$ or $x(s_2)$ we would obtain a smaller value of the functional ${\cal F}$.
\begin{thebibliography}{99}
\bibitem{bs}
D. Bonheure, L. Sanchez
\emph{Heteroclinic orbits for some classes of second and fourth order differential equations}, Handbook of Differential Equations III, A. Ca\~nada, P. Drabek and A. Fonda (eds) (to appear)
\bibitem{aw} D. G. Aronson and H. F. Weinberger,
\emph{Multidimensional nonlinear diffusion arising in population genetics},
Adv. Math, \textbf{30} (1978) 33-76.
\bibitem{cr} V. Coti Zelati and P. H. Rabinowitz, \emph{Heteroclinic solutions between stationary points at different energy levels}, Top. Meth. Nonlinear Analysis, \textbf{17} (2001) 1-21.
%\bibitem{pFbP032}
% P. Fijalkowski, B. Przeradski,
% \emph{On a boundary value problem for a nonlocal elliptic equation},
% J. Appl. Anal., \textbf{9} (2003) Nš2 201-209.
%\bibitem{mK64}
% M. Krasnoselskii,
% ``Positive Solutions of Operator Equations'',
%Noordhoff, Gorningen
% 1964.
\bibitem{gk} B. H. Gilding and R. A. Kersner ``Travelling waves in nonlinear diffusion-convection reaction'', Birkhauser
Verlag, Basel, 2004, Progress in Nonlinear Differential Equations and their Applications, 60.
\bibitem{mm}
L. Malaguti and C. Marcelli,
\emph{Travelling wavefronts in reaction-diffusion equations with convection effects and non-regular terms},
Math. Nachr., \textbf{242} (2002) 148-164.
\bibitem{mmp}
L. Malaguti, C. Marcelli and N. Partsvania,
\emph{On transitional solutions of second order nonlinear differential equations}, J. Math. Anal. Appl. {\bf 303} (2005), 258-273.
\bibitem{ra}
P. H. Rabinowitz,
\emph{Periodic and heteroclinic orbits for a periodic hamiltonian system}, Ann. Inst. Henri Poincar\'e, {\bf 6}-5 (1989),
331-346.
\end{thebibliography}
\end{document}
% NOTE(review): everything from here to the end of the file comes after
% \end{document} and is therefore never processed by LaTeX; it appears to be
% a leftover fragment of another draft, and it duplicates the shorthand
% macros already defined in the preamble above.  Kept untouched.
\def\fr{\partial}
\def\m{\medskip}
\def\n{\noindent}
\def\sub{\subseteq}
\def\sups{\supseteq}
\def\R{\bold R}
\def\N{\bold N}
\def\P{\bold Z^+}
\def\Q{\bold Q}
\def\a{\alpha}
\def\b{\beta}
\def\d{\delta}
\def\e{\eta}
\def\eps{\varepsilon}
\def\f{\phi}
\def\g{\gamma}
\def\l{\lambda}
\def\o{\omega}
\def\r{\rho}
\def\s{\sigma}
\def\t{\tau}
\def\th{\theta}
\def\D{\Delta}
\def\F{\Phi}
\def\G{\Gamma}
\def\L{\Lambda}
\def\O{\Omega}
\def\Th{\Theta}
\def\V{\Vert}
\def\la{\langle}
\def\ra{\rangle}
\def\rr{\rightrightarrows}
\def\da{\downarrow}
\def\ua{\uparrow}
\section{}
Let $V\in C^2(\RR)$ be an even 1-periodic function such that $V(0)=0$,
$V''(0)>0$, $V>0$ on $\RR\setminus\ZZ$. Let $a:\RR\to]0,+\infty[$ be even and measurable. We are interested in heteroclinic solutions of the equation $\ddot x=a(t)V'(x)$ which connect the equilibrium points $x=-1$ and $x=1$. Since $a$ and $V$ are even, the given problem is easily reduced to the following one:
\begin{equation}\label{tag1}
\left\{
\begin{array}{l}
\ddot x(t)=a(t)V'(x(t))\,\, \\
\\
x(0)=0,\,\,\,x(+\infty)=1
\end{array}
\right.
\end{equation}
because the odd extension $x$ of a solution $x^+$ of (\ref{tag1}) to the whole real axis solves again the equation and $x(-\infty)=-1$. We remark that the problem above has no solution in the autonomous case. From now on we suppose that,
for some $t_0\ge 0$, $a$ is increasing on $[t_0,+\infty[$, with bounded variation on $[0,+\infty[$. Then:
$$
K:=\int_0^{+\infty}|da|<+\infty,\quad
l:=\lim_{t\to+\infty}a(t)<+\infty,\quad
A:=\sup_{t\ge 0} a(t)<+\infty,
$$
where $da$ and $|da|$ denote respectively the distributional derivative of $a$ and its total variation. We also put
$$
M=\max V, \quad \a =\inf_{t\ge 0} a(t),\quad
\mu=\sqrt{\a V''(0)}.
$$
From now on we suppose $\a>0$ (then $\mu>0$) and
\begin{equation}\label{tag2}
\lim_{t\to+\infty}(l-a(t))e^{2\mu t}=+\infty
\end{equation}
\begin{Proposition} Under the assumptions above, problem (1) has at least
one solution.
\end{Proposition}
\medskip
\Proof
For any $\e\ge 0$, let us consider the solution of the Cauchy problem
\begin{equation}\label{tag3}
\left\{
\begin{array}{l}
\ddot x(t)=a(t)V'(x(t))
\\
x(0)=0,\,\dot x(0)=\e
\end{array}
\right.
\end{equation}
For any $\xi>0$ we denote by $\t(\xi,\e)$ the first time $\t$ at which
$x(\t)=\xi$, if such a time exists. Otherwise we put
$\t(\xi,\e)=+\infty$. Since $\e=0$ yields $x(t)\equiv 0$, the continuous dependence on initial data allows us to find, for any $T>0$, $\eps>0$, a value $\e(T,\eps)$ such that, for any $\e\le\e(T,\eps)$, the corresponding
solution fulfils the inequality $|x(t)|\le\eps$ for any $t\in [0,T]$.
In particular, we get $\t(\xi,\e)>T$ whenever $\e\le\e(T,\xi/2)$, so that,
for any $\xi>0$, $\t(\xi,\e)\to+\infty$ as $\e\to 0^+$.
Furthermore, for any $\t>0$,
\begin{eqnarray}\nonumber
\frac12x'(\t)^2-\frac12x'(0)^2&=&
\int_0^{\t}\dot x(t)\ddot x(t)dt=
\int_0^{\t}a(t)V'(x(t))\dot x(t)dt=
\\\nonumber
&=&[a(t)V(x(t))]_{t=0}^{t=\t}-\int_0^{\t}V(x(t))\,da(t).
\end{eqnarray}
Since $V(x(0))=V(0)=0$, by putting $W(t,\e)=V(x(t))$ we get
\begin{equation}\label{tag4}
\frac12 \dot x(\t)^2=\frac12\e^2+
a(\t)W(\t,\e)-\int_0^{\t}W(t,\e)\,da(t).
\end{equation}
Now we are going to put forward some properties of the function $W$. Since $V''(0)>0$, we can find $\xi_0\in]0,1[$ such that $V'(x)>0$ for any $x\in ]0,\xi_0[$. It is easy to check that
$\t_0(\e):=\t(\xi_0,\e)$ is finite, but we already saw that
\begin{equation}\label{tag5}
\t_0(\e)\to+\infty\quad{\mbox{as}}\quad \e\to 0^+.
\end{equation}
Furthermore
\begin{equation}\label{tag6}
\frac{\fr W}{\fr t}(t,\e)>0,\quad
{\mbox{for}}\quad \e>0,\quad 0<t<\t_0(\e).
\end{equation}
Indeed, for $0<t<\t_0(\e)$ we have $0<x(t)<\xi_0$, so that $V'(x(t))>0$. On the other hand,
$\dot x(0)=\e>0$ and $\ddot x(t)=a(t)V'(x(t))>0$, so that $\dot x(t)>0$. Hence the left-hand side of (\ref{tag6}), which is nothing but $V'(x(t))\dot x(t)$, is actually
positive. Now, let us point out the dependence of $x$ on $\e$, by putting
$x(t)=\f(t,\e)$. Since $\f(t,0)\equiv 0$, the function
$\g(t)=\frac{\partial\f}{\partial\e}(t,0)$ solves the variational problem
\begin{equation}\label{tag7}
\left\{
\begin{array}{l}
\g''(t)=a(t)V''(0)\g(t)
\\
\g(0)=0,\,\g'(0)=1
\end{array}
\right.
\end{equation}
Furthermore $W(t,\e)=V(\f(t,\e))$, so that
\begin{eqnarray}\label{tag8}
\frac{\fr W}{\fr\e}(t,0)&=&V'(0)\g(t)=0
\\
\frac{\fr^2 W}{\fr\e^2}(t,0)&=&V''(0)\g(t)^2.
\end{eqnarray}
The arguments above follow from well-known results on differentiability
with respect to initial data of the solution of a differential equation,
which hold, in this case, even if $a(t)$ is not continuous, since the
differential of the map $(x,y)\mapsto f(t,x,y)=(y,a(t)V'(x))$
is bounded uniformly with respect to $t$. From (\ref{tag8}) we get
\begin{equation}\label{tag9}
\lim_{\e\to 0^+}\frac{W(t,\e)}{\e^2}=\frac12 V''(0)\g(t)^2
\end{equation}
Now, a simple analysis of the given equation in the phase plane $xx'$
shows that three cases are possible for $\e$:
\medskip
1) $\t(1,\e)<+\infty$, that is to say: there exists
$\t=\t^-(\e)=\t(1,\e)>0$ such that $\dot x(\t)=0$ and $x(t)<1$ for any
$t\in [0,\t]$.
2) $x(t)\to 1$ and $\dot x(t)\to 0$ as $t\to+\infty$
3) There exists $\t=\t^+(\e)>0$ such that $x(\t)=1$ and $\dot x(t)>0$ for any
$t\in [0,\t]$.
\medskip
Let us respectively denote by $C^-$, $C^0$ and $C^+$ the (mutually disjoint) subsets of $]0,\infty[$ consisting of those $\e>0$ for which the three cases above respectively occur.
Our claim is that
$C^0\ne\emptyset$.
In fact, it is easy to see that $\e\in C^+$ whenever $\e$ is sufficiently large and $C^-$, $C^+$ are open. We claim that
\begin{equation}\label{tag10}
\inf C^+ > 0.
\end{equation}
Assuming that (\ref{tag10}) is proved, it follows that $C^0\cup C^-\neq\emptyset$ and, by a connectedness argument, that $C^0\neq\emptyset$.
%:::::::::::::::::::::::::::::::::::::::::::::::::::::::
%Let us suppose, by contradiction, $C^0=\emptyset$. Then $C^-\cup C^+=]0,+\infty[$, and the following relations, that we are %going to show later,
%\begin{equation}\label{tag10}
%\sup C^- < +\infty,\quad \inf C^+ > 0,
%\end{equation}
%respectively entail $C^+\ne\emptyset$, $C^-\ne\emptyset$: since
%$C^-$ and $C^+$ are easily shown to be open, we should argue that
%$]0,+\infty[$ is not connected, a contradiction: then actually $C^0\ne\emptyset$.
%In order to prove the first statement of (\ref{tag10}), we take $\e\in C^-$ and put $\t=\t^-(\e)$ in (\ref{tag4}), so as to %let the left-hand side vanish and get
%$$
%\frac12\e^2\le M(A+K).
%$$
%Then $\sup C^-<+\infty$.
%:::::::::::::::::::::::::::::::::::::::::::
To prove the claim, let us take $\e\in C^+$ and put $\t=\t^+(\e)$ in (\ref{tag4}). Since $W(\t^+(\e),\e)=0$ we get
\begin{equation}\label{tag11}
\frac12\e^2\ge\int_0^{\t^+(\e)}W(t,\e)\,da(t).
\end{equation}
If, by contradiction, $\inf C^+=0$, we should get from (\ref{tag11})
\begin{equation}\label{tag12}
\limsup_{\e\to 0^+}\frac1{\e^2}\int_0^{\t^+(\e)}W(t,\e)da(t)\le\frac12.
\end{equation}
But (\ref{tag12}) cannot hold, as the following arguments show.
Indeed, since we are supposing $\inf C^+=0$ and (\ref{tag5}) holds, we can
take $\e$ as small as to ensure $\t_0(\e)>t_0$. Then, for any
$s\in [t_0,\t_0(\e)]$,
\begin{eqnarray}\label{tag12a}
\int_0^{\t^+(\e)}&W(t,\e)da(t)
\ge\int_0^{t_0}W(t,\e)da(t)+\int_s^{\t_0(\e)}W(t,\e)da(t)\ge
\\\nonumber
&\ge-W(t_0,\e)\int_0^{t_0}|da|+W(s,\e)\int_s^{\t_0(\e)}da\ge
\\\nonumber
&\ge
-KW(t_0,\e)+(a(\t_0(\e))-a(s))W(s,\e),
\end{eqnarray}
where the second inequality holds because, thanks to (\ref{tag6}),
$W(t_0,\e)\le W(t,\e)\le W(\t_0(\e),\e)$ for any
$t\in [t_0,\t_0(\e)]$. Now, it is easy to see that $\g(t)>\r(t)$
for $t>0$, where
$\r(t)=(e^{\mu t}-e^{-\mu t})/4\mu$ solves the linear Cauchy problem
\begin{equation}
\left\{
\begin{array}{l}
\rho''(t)=\mu^2\rho(t)
\\
\rho(0)=0,\,\rho'(0)=1/2
\end{array}
\right.
\end{equation}
Indeed, from the initial conditions we argue the assertion in a right
neighbourhood of $t=0$. By contradiction, let $\t$ be the first point
at which $\g(\t)=\r(\t)$. Then it should be $\g'(\t)\le\r'(\t)$, which is
impossible: since $\g''(t)=a(t)V''(0)\g(t)\ge\mu^2\g(t)>\mu^2\r(t)=\r''(t)$
on $]0,\t[$, the difference $\g'-\r'$ is increasing there, with
$\g'(0)-\r'(0)=1/2>0$. Hence $\g>\r>0$ on $]0,+\infty[$, so that $\g^2>\r^2$.
Since $\r^2(t)e^{-2\mu t}$ has a finite nonzero limit as $t\to+\infty$, from (\ref{tag2}) we get
\begin{equation}\label{tag14}
\lim_{t\to+\infty}(l-a(t))\g(t)^2=+\infty
\end{equation}
Now, if we divide the extreme sides of (\ref{tag12a}) by $\e^2$ and take the
lower limit as $\e\to 0^+$ we get, thanks to (\ref{tag5}) and (\ref{tag9}),
\begin{eqnarray}\label{tag15}
\liminf_{\e\to 0^+}\frac1{\e^2}\int_0^{\t^+(\e)}W(t,\e)da(t)\ge\\\nonumber
-\frac12KV''(0)\g(t_0)^2+\frac12(l-a(s))V''(0)\g(s)^2.
\end{eqnarray}
Since $s$ is arbitrary large, we can take the limit as $s\to+\infty$,
and argue from (\ref{tag14}) that the left-hand side of (\ref{tag15}) is $+\infty$, in contrast with (\ref{tag12}). Then actually $\inf C^+>0$, and our claim is proved.
\bigskip
Now we are going to deal with a case which is not included in (\ref{tag2}), namely when $a(t)$ is definitively constant. So, let us suppose that
\begin{equation}\label{tag16}
a(t)\equiv l\quad{\mbox{for}}\quad t\ge t_0
\end{equation}
Under this assumption the condition $x(+\infty)=1$ of problem (\ref{tag1}) can be replaced by
\begin{equation}\label{tag17}
\frac12x'(t_0)^2-lV(x(t_0))=0.
\end{equation}
Indeed, on the phase plane $xy$, the field
$f(t,x,y)=(y,a(t)V'(x))$ becomes autonomous for $t\ge t_0$, and (\ref{tag17}) can be written in the form
$(x(t_0),\dot x(t_0))\in S$, where $S$ is the stable manifold of the
equilibrium $(1,0)$ with respect to the field $f(x,y)=(y,lV'(x))$:
this actually means $x(+\infty)=1$.
Now we put $\nu=\sqrt{lV''(0)}$ and state the following assumption on the function $\g(t)=\f'_{\e}(t,0)$ we introduced before:
\begin{equation}\label{tag18}
\g'(t_0)<\nu\g(t_0).
\end{equation}
Of course, in order to verify (\ref{tag18}), we need to solve (\ref{tag7}), or at least evaluate in some way its solution. We remark, however, that in the particular case of a bang-bang function which takes the constant value
$\a$ on $[0,t_0]$ and the constant value $l$ on $]t_0,+\infty[$, the solution of (\ref{tag7}) is $\g(t)=\sinh(\mu t)/\mu$ for $t\le t_0$, so that (\ref{tag18}) is equivalent to
% NOTE(review): the sentence above was reconstructed from a garbled span in
% the source; please check it against the intended statement.
\begin{equation}\label{tag19}
t_0>\frac1{2\mu}\log\frac{\nu+\mu}{\nu-\mu}=
\frac1{2\sqrt{\a V''(0)}}\log\frac{\sqrt l+\sqrt{\a}}{\sqrt l-\sqrt{\a}}.
\end{equation}
\begin{Proposition}
Under assumptions (\ref{tag16}) and (\ref{tag18}), problem (\ref{tag1}) has at least one solution.
\end{Proposition}
\Proof We put
$\psi(t,\e)=\frac12 \dot x(t)^2-lV(x(t))=\frac12\frac{\partial\f}{\partial t}(t,\e)^2-lW(t,\e)$,
where $x(t)=\f(t,\e)$
solves (\ref{tag3}) and $W$ is defined as in (\ref{tag4}). Let us differentiate twice $\psi$ with respect to $\e$ at $\e=0$: since we already got (\ref{tag8}), we only need to compute
\begin{eqnarray}\label{tag20}
\frac{\fr}{\fr\e}\frac12(\frac{\fr\phi}{\fr t})^2=
\frac{\fr\phi}{\fr t}\frac{\fr^2\phi}{\fr\e\fr t},
\quad
\frac{\fr^2}{\fr\e^2}\frac12(\frac{\fr\phi}{\fr t})^2=
(\frac{\fr^2\phi}{\fr\e\fr t})^2+
\frac{\fr\phi}{\fr t}\frac{\fr^3\phi}{\fr\e^2\fr t}.
\end{eqnarray}
But $\f(t,0)=\f'_t(t,0)=0$, so that, for $\e=0$, the right-hand sides of (\ref{tag20}) are respectively $0$ and $\g'(t)^2$. Now we get, from (\ref{tag9}),
$$
\frac{\fr\psi}{\fr\e}(t,0)=0,\quad
\frac{\fr^2\psi}{\fr\e^2}(t,0)=\g'(t)^2-lV''(0)\g(t)^2.
$$
Since $\psi(t,0)=0$ and $\nu^2=lV''(0)$, the function $\G(\e):=\psi(t_0,\e)$ fulfils the following conditions
\begin{equation}\label{tag21}
\G(0)=\G'(0)=0,\quad \G''(0)=\g'(t_0)^2-\nu^2\g(t_0)^2<0,
\end{equation}
where the last inequality follows from (\ref{tag18}), since $\g$ and $\g'$ are
easily shown to be positive on $]0,+\infty[$.
On the other hand, since $a(t_0)=l$, by putting $\t=t_0$ in (\ref{tag4})
we get
$$
\G(\e)=\frac12\e^2-\int_0^{t_0}W(t,\e)\,da(t)\ge
\frac12\e^2-MK,
$$
so that $\G(\e)>0$ for large $\e$'s: now (\ref{tag21}) ensures that
$\G(\e_0)=0$ for some $\e_0>0$, and the function $x(t)=\f(t,\e_0)$
fulfils (\ref{tag17}). Thanks to the previous arguments, $x$ actually solves (\ref{tag1}).
\qed
Then, the following property holds: if $\,\,0\leq s_0 < s_1\,$ and $\,v(t):= x(t+s_1-s_0),\,\,\, t\geq s_0,\,$ we have
\begin{equation}\label{dis}
\,{\cal F}(v,[s_0,+\infty[)<
{\cal F}(x,[s_1,+\infty[).
\end{equation}
For observe that the change of variable $\tau=t+s_1-s_0$ leads to
$$\,{\cal F}(v,[s_0,+\infty[)=e^{-(s_1-s_0)}\int_{s_1}^{+\infty} e^{c\tau} \frac{\dot x(\tau)^2}{2}+
\int_{s_1}^{+\infty} e^{c(\tau-s_1+s_0)} a(\tau-s_1+s_0)V(x(\tau))\,d\tau$$
and (\ref{dis}) follows from assumption (A).
Now, without loss of generality, we can make the following assumptions on the minimizing sequence $x_n$:
\medskip
$a)$ for each $n,\,$ we have $\,0\leq x_n(t)\leq 1,\,\,t\in[0,+\infty[;$
$b)$ for each $n,\,$ $\,{\cal F}(x_n)\leq K;$
$c)$ each $x=x_n$ has a unique transition interval $[t_1,t_2]$ such that $0\leq x_n(t)\leq\alpha$ if $t\in [0,t_1],\,$ $x(t_1)=\alpha,\,$ $\alpha\leq x(t)\leq 1-\alpha$\, if $t\in [t_1,t_2],\,$ $\,x(t_2)=1-\alpha,$ and $1-\alpha\leq x_n(t)\leq 1$, if $t\in [t_2, +\infty]$ (this follows from (\ref{dis})).
\medskip
Next we provide some estimates on the length of the interval $[t_1,t_2].$
From
$$
1-2\alpha=\int_{t_1}^{t_2}\dot x(t)\,dt\leq \left (\int_{t_1}^{t_2}e^{-ct}\,dt\right )^{\frac{1}{2}}
\left (\int_{t_1}^{t_2}e^{ct}\dot x(t)^2\,dt\right )^{\frac{1}{2}}\leq e^{-\frac{ct_1}{2}}\sqrt{t_2-t_1}\,(2K)^{\frac{1}{2}}
$$
it follows that
\begin{equation}\label{liminf}
t_2-t_1\geq \frac{e^{ct_1}}{2K}.
\end{equation}
On the other hand, if we let $\,\underline V(\alpha):= \min_{[\alpha,1-\alpha]} V(x),$ we have
$$\int_{t_1}^{t_2}e^{ct}V(x(t))a(t)\,dt \geq e^{ct_1} a(t_1)(t_2-t_1) \underline V(\alpha)$$
so that,
\begin{equation}\label{limsup}
t_2-t_1\leq \frac{K e^{-ct_1}}{a(0) \underline V(\alpha)}.
\end{equation}
We will now show that we may assume $t_1$ bounded from above. If $c>0,$ this property follows since (\ref{liminf}) and (\ref{limsup}) imply that
$$
\frac{e^{2ct_1}}{2K}\leq\frac{K}{a(0)\underline V(\alpha)}.
$$
Consider now the case $c=0.$
% NOTE(review): a passage of the argument for the case $c=0$ (beginning
% ``Let $t_1\dots$'') appears to be missing from the source at this point.
For any $B>0$ the sequence $x_n$ is bounded in $H^1[0,B].$ Therefore, without loss of generality we may assume
$x_n\rightharpoonup x\,$ in $H^1[0,B]$ and $x_n\to x$ uniformly on $[0,B].$
As $\left(\int_0^{B} e^{ct} \frac{x_n'(t)^2}{2}\,dt\right)^{\frac{1}{2}}$ is a norm in $H^1[0,B],$ we have
$$\int_0^{B} e^{ct} \frac{\dot x(t)^2}{2}\,dt\leq \liminf_{n\to +\infty} \int_0^{B} e^{ct} \frac{x_n'(t)^2}{2}\,dt$$
and, by Fatou's Lemma,
$$\int_0^{B} e^{ct} a(t)V(x(t))\,dt\leq \liminf_{n\to +\infty} \int_0^{B} e^{ct}a(t)V(x_n(t))\,dt,$$
so that $$\,{\cal F}(x,[0,B])\leq \liminf_{n\to +\infty}{\cal F}(x_n,[0,B])\leq \liminf_{n\to +\infty}{\cal F}(x_n)={\cal I},\quad \forall B>0.$$
On the other hand, $x(0)=0,$ and from $x_n(t)\geq 1-\alpha,\,\,\,t\geq t_2,$ we get
$x(t)\geq 1-\alpha,\,\,\,t\geq t_2.$ Since ${\cal F}(x) < +\infty, $
\begin{Remark} It is clear from the previous proof that Theorem 1 still holds replacing condition $iii)$ with the weaker, but cumbersome:
$iii') $ if $c=0,$ then there exists $0<\alpha<\min\{\frac{1}{4},\delta\}$ such that
\begin{equation}
a\left(\frac{K}{a(0)\underline V(\alpha)}+1\right)+\frac{1}{\Delta}\left(\frac{1}{2}+\frac{A_2a(1)}{3}\right)\alpha^2