\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{cite,amssymb}
\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2015 (2015), No. 114, pp. 1--16.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2015 Texas State University - San Marcos.}
\vspace{9mm}}
\begin{document}
\title[\hfilneg EJDE-2015/114\hfil Structure of the solution set]
{Structure of the solution set to differential inclusions with impulses
at variable times}
\author[A. Grudzka, S. Ruszkowski \hfil EJDE-2015/114\hfilneg]
{Agata Grudzka, Sebastian Ruszkowski}
\address{Agata Grudzka \newline
Faculty of Mathematics and Computer Science,
Nicolaus Copernicus University, \newline
Chopina 12/18, 87-100 Toru\'{n}, Poland}
\email{agata33@mat.uni.torun.pl}
\address{Sebastian Ruszkowski \newline
Faculty of Mathematics and Computer Science,
Nicolaus Copernicus University, \newline
Chopina 12/18, 87-100 Toru\'{n}, Poland}
\email{sebrus@mat.uni.torun.pl}
\thanks{Submitted October 22, 2014. Published April 28, 2015.}
\subjclass[2010]{34A37, 34A60, 34K45}
\keywords{Solution set; impulsive differential inclusions;
variable times; \hfill\break\indent $R_\delta$-set; topological structure}
\begin{abstract}
A topological structure of the solution set to differential inclusions with
impulses at variable times is investigated. In order to do that an appropriate
Banach space is defined. It is shown that the solution set is an
$R_{\delta}$-set. Results are new also in the case of differential equations
with impulses at variable times.
\end{abstract}
\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{example}[theorem]{Example}
\allowdisplaybreaks
\section{Introduction} \label{section:introduction}
Impulsive differential equations and inclusions have a lot of applications
in diverse fields. The moments of impulses can be chosen in various ways:
randomly, fixed beforehand, or determined by the state of the system.
Problems with fixed impulse times have been investigated recently in
\cite{BeRu, CarRub, DjebaliGor, DjebaliGorOua, GabGru, ObuYao},
and results concerning the structure of solution sets were summarised
in \cite{DjebaliGorOuaBook}.
Problems with impulses at variable times bring many more difficulties,
and up to now only existence theorems were available \cite{BeBeOu,BeOu,BeOu2}.
Our results develop this research area. We need sophisticated
assumptions that guarantee a fixed number of impulses, and then we have
to handle the estimates more carefully than in the case of fixed jump
times. As a result we show that the solution set is an $R_{\delta}$-set.
It is worth mentioning that this result is new also in the case of differential
equations with impulses at variable times that have no uniqueness of solutions and
therefore have a nontrivial solution set.
There are many motivations to study the structure of solution sets of
differential equations and inclusions. One of them is considering the
Poincar\'{e} translation operator and discussing the problem of the
existence of periodic solutions \cite{KryPla,CS,HX}.
To study the structure of the solution set we need a space of functions
which contains the solutions of the given problem. Obviously, the standard
Banach space with the supremum norm is insufficient for impulsive problems
in which the times of jumps depend on the state. A B-topology on spaces
of solutions of impulsive differential inclusions was introduced in
\cite{AK}; however, it is only a Hausdorff topology.
We use this concept to create a Banach space that induces the same topology
on common functions and is suitable for the considered problem.
In Section \ref{section:preliminaries} we describe the problem,
introduce the suitable Banach space
$CJ_m([0,a])$ and recall useful theorems.
In Section \ref{section:main} we present the main results of the paper.
The main idea is included in Theorem \ref{twr:dokladniejedenskokINKLUZJA},
in which we show that the solution set for the problem with exactly one
jump is an $R_{\delta}$-set. In Theorem \ref{twr:drugieskonWym} we
use this result to prove by induction an analogous statement for any fixed
number of jumps. We also provide the reader with a transparent example.
\section{Preliminaries} \label{section:preliminaries}
We study the problem
\begin{equation}\label{eq:zagadnienieINKLUZJA}
\begin{gathered}
\dot{y}(t)\in{F(t,y(t))}, \quad \text{for $t\in[0,a]$, $t\neq{\tau_j(y(t))}$,
$j=1,\dots,m$}, \\
y(0)=y_0, \\
y(t^{+})=y(t)+I_j(y{(t)}), \quad \text{for $t={\tau_j}(y(t))$, $j=1,\dots,m$},
\end{gathered}
\end{equation}
where $F:[0,a]\times\mathbb{R}^N\multimap\mathbb{R}^N$ is a given multivalued map,
$I_j:\mathbb{R}^N\to\mathbb{R}^N$, $j=1,\dots,m$, are given impulse functions,
and $\tau_j\in{C^1(\mathbb{R}^N,\mathbb{R})}$ with
$0<\tau_j(y)<a$ for every $y\in\mathbb{R}^N$; by $\Sigma_j$ we denote the
hypersurface $\{(t,y)\in[0,a]\times\mathbb{R}^N : t=\tau_j(y)\}$, $j=1,\dots,m$.

Recall that a compact set is called an $R_{\delta}$-set if it is an
intersection of a decreasing sequence of compact absolute retracts.
We will use the following characterisation.
\begin{theorem}[\cite{Hyman}] \label{twr:Hyman}
Let $A$ be a nonempty compact subset of a complete metric space $X$.
The following conditions are equivalent:
\begin{enumerate}
\item [(a)] $A$ is an $R_{\delta}$-set,
\item [(b)] for every ${\epsilon}>0$ the set $A$ is contractible
in $O_{\epsilon}(A)=\{x\in{X} \mid \operatorname{dist}(x,A)<{\epsilon}\}$,
\item [(c)] $A$ is an intersection of a decreasing sequence $\{A_n\}$
of compact contractible spaces,
\item [(d)] $A$ is an intersection of a decreasing sequence $\{A_n\}$
of closed contractible spaces, such that $\beta(A_n)\to 0$, where
$\beta$ is the Hausdorff measure of noncompactness.
\end{enumerate}
\end{theorem}
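For instance, every convex compact set $A\subset\mathbb{R}^N$ is an
$R_{\delta}$-set: it suffices to take in (c) the decreasing sequence of
compact convex (hence contractible) sets
$$
A_n:=\{x\in\mathbb{R}^N : \operatorname{dist}(x,A)\leq 1/n\},
$$
for which $\bigcap_{n\geq 1}A_n=A$.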
A multimap $F\colon{X}\multimap{E}$, where $E$ is a real Banach space,
is called upper hemicontinuous (uhc) if for every functional
$p\in{E^*}$ the function
$X{\ni}x\mapsto\sigma_{F(x)}(p):=\sup_{y{\in}F(x)}\langle{p,y}
\rangle\in\mathbb{R} \cup\{+\infty\}$ is upper semicontinuous (usc).
\begin{theorem}[Convergence theorem \cite{AE}] \label{twr:convergenceinkluzje}
Let $E$ and $E'$ be Banach spaces, let
$(T,\Omega,\mu)$ be a measure space,
and let the multivalued map $F: T\times E\multimap E'$ have closed and convex
values such that for a.e. $t\in T$ the map $F(t,\cdot) : E\multimap E'$ is
uhc. Let $(u_n : T\to E)$ be a sequence of functions such that
$u_n\to u$ in $L^p(T,E)$ and let the sequence $(w_n)\subset L^p(T,E')$,
$1\leq p<\infty$, be such that $w_n\rightharpoonup{w}$ in $L^p(T,E')$.
If for a.e. $t\in T$ and for arbitrary $\varepsilon>0$, there exists
$N\in\mathbb{N}$ such that
$$
w_n(t)\in \operatorname{cl}\operatorname{conv} B(F(t,B(u_n(t),\varepsilon)),\varepsilon)
$$
for $n>N$, then $w(t)\in F(t,u(t))$ for a.e. $t\in T$.
\end{theorem}
We recall the Arzela-Ascoli Theorem:
\begin{theorem}\label{twr:Arzela-Ascoliklasyczne}
If the family $\mathcal{F}\subset{C([a,b],\mathbb{R}^N)}$ of continuous functions
is equicontinuous and uniformly bounded, then every sequence in $\mathcal{F}$
has a subsequence that converges uniformly.
\end{theorem}
A piecewise absolutely continuous function $y : [0,a]\to{\mathbb{R}^N}$ is a solution
of the problem with impulses \eqref{eq:zagadnienieINKLUZJA} if:
\begin{enumerate}
\item[(a)] $y(0)=y_0$,
\item[(b)] there exists a function $f\in{L^1([0,a],\mathbb{R}^N)}$ such that
$f(t)\in{F(t,y(t))}$ for a.e. $t\in[0,a]$ and
$y(t)=y_0+\sum_{j:\,t_j<t}I_j(y(t_j))+\int_{0}^{t}f(s)ds$, where
$t_j=\tau_{j}(y(t_j))$,
\item[(c)] the function $y$ is left continuous at $t={\tau}_j(y(t))\in[0,a]$
and the limit $y(t^+)$ exists and $y(t^+)=y(t)+I_j(y(t))$
for $t={\tau}_j(y(t))$, $j=1,\dots,m$.
\end{enumerate}
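In particular, for $m=1$ and a solution with a single jump at
$t_1=\tau_1(y(t_1))$, condition (b) takes the form
$$
y(t)=\begin{cases}
y_0+\int_0^t f(s)ds, & t\in[0,t_1],\\
y_0+I_1(y(t_1))+\int_0^t f(s)ds, & t\in(t_1,a].
\end{cases}
$$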
\section{Structure of the solution set}\label{section:main}
We assume the following conditions on the multivalued perturbation
$F:[0,a]\times{\mathbb{R}^N}\multimap{\mathbb{R}^N}$:
\begin{itemize}
\item[(F0)] $F$ has compact and convex values,
\item[(F1)] $F(\cdot,y):[0,a]\multimap{\mathbb{R}^N}$ has a measurable selection
for every $y\in\mathbb{R}^N$,
\item[(F2)] $F$ is H-usc almost uniformly with respect to $t$, i.e.
for every $y\in\mathbb{R}^N$ and $\varepsilon>0$ there exists $\delta>0$ such that
for a.e. $t\in[0,a]$ and for all $x\in\mathbb{R}^N$ if $\|y-x\|<\delta$,
then $\sup_{\varphi\in{F(t,x)}} d(\varphi,F(t,y))<\varepsilon$,
\item[(F3)] $F$ has a sublinear growth, i.e., there exists
$\alpha\in{L^1([0,a])}$ such that
$$
\sup_{\varphi\in{F(t,y)}}\|\varphi\|\leq\alpha(t)(1+\|y\|) \quad \text{for a.e. }
t\in[0,a] \text{ and } y\in \mathbb{R}^N.
$$
\end{itemize}
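For instance, for $N=1$ and a constant $c>0$ the map
$$
F(t,y):=[-c(1+|y|),\,c(1+|y|)]
$$
satisfies (F0)--(F3): its values are compact intervals (hence convex), it does
not depend on $t$, the Hausdorff distance between $F(t,x)$ and $F(t,y)$ is at
most $c|x-y|$, and (F3) holds with $\alpha\equiv c$.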
Moreover, we assume the following hypotheses about impulse functions:
\begin{itemize}
\item[(H1)] $I_j\in{C(\mathbb{R}^N,\mathbb{R}^N)}$, $j=1,\dots,m$,
\item[(H2)] $\tau_j\in{C^1(\mathbb{R}^N,\mathbb{R})}$, $j=1,\dots,m$, \\
for $j=1,\dots,m-1$ and all $y\in\mathbb{R}^N$ we have
\begin{gather*}
0<\tau_j(y)<\tau_{j+1}(y)<a ,
\end{gather*}
and for $j=1,\dots,m$, $l>j$ and all $y\in\mathbb{R}^N$
\begin{gather*}
\tau_j(y+I_j(y))\leq\tau_j(y)<\tau_l(y+I_j(y)) ,
\end{gather*}
\item[(H3)] there exists $p>0$ such that for a.e. $t\in[0,a]$
$$
\sup_{\varphi\in{F(t,y)}}{\tau}_j'(y)\cdot \varphi-1\leq{-p}<0
\quad \text{for all } y\in{\mathbb{R}^N}, j=1,\dots,m .
$$
\end{itemize}
Note that if ${\tau}'_j(y)=0$ for $j=1,\dots,m$, then the problem is
reduced to a problem with a fixed impulse time.
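For instance, for $N=1$, $m=1$, $a=1$ and $F(t,y)=[-(1+|y|),1+|y|]$, the
function $\tau(y)=\frac{1}{2}-\frac{1}{\pi}\arctan y$ satisfies {\rm (H3)}:
since $\tau'(y)=-\frac{1}{\pi(1+y^2)}$ and $1+|y|\leq\frac{3}{2}(1+y^2)$,
we have
$$
\sup_{\varphi\in F(t,y)}\tau'(y)\cdot\varphi-1
\leq\frac{1+|y|}{\pi(1+y^2)}-1\leq\frac{3}{2\pi}-1<0 .
$$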
Assumption (F2) together with the compactness of the values of $F$ implies
that $F(t,\cdot)$ is usc for a.e. $t\in[0,a]$: given $y\in\mathbb{R}^N$ and
$\varepsilon>0$, the $\delta$ from (F2) yields
$F(t,x)\subset O_{\varepsilon}(F(t,y))$ whenever $\|x-y\|<\delta$.
\begin{theorem}\label{twr:dokladniejedenskokINKLUZJA}
Let the assumptions {\rm (F0)--(F3)} hold, and {\rm (H1)-(H3)} hold for $m=1$.
Then every solution of the problem \eqref{eq:zagadnienieINKLUZJA}, where $m=1$,
meets $\Sigma_1$ exactly once and the solution set $S$ of this problem
is an $R_\delta$-set in the space $CJ_1([0,a])$.
\end{theorem}
\begin{proof}
To simplify notation we write $I$ and $\tau$ instead of $I_1$ and $\tau_1$.
We will proceed in several steps.
\smallskip
\noindent\textbf{Step 1.}
A Lipschitz selection and the uniqueness of the jump.
For each $n$ let $\{B(y,r_n(y))\}_{y\in{\mathbb{R}^N}}$ be an open covering
of the space $\mathbb{R}^N$ by open balls with radii $r_n(y)\leq{\frac{1}{n}}$,
such that for every $x\in B(y,r_n(y))$ we have
\begin{equation} \label{eq:szacowanie}
\sup_{\varphi\in{F(t,x)}}d(\varphi,F(t,y))<\frac{1}{n}.
\end{equation}
There exists locally finite open point-star refinement
$\mathcal{U}_n=\{U_{n,s}\}_{s\in{S}}$ of the cover
$\{B(y,r_n(y))\}_{y\in{\mathbb{R}^N}}$, i.e. for every
$y\in{\mathbb{R}^N}$ there exists $x_{y,n}\in\mathbb{R}^N$ such that
$\operatorname{st}(y,\mathcal{U}{_n})\subset{B(x_{y,n},r_n(x_{y,n}))}$.
We can choose it in a way that $\mathcal{U}_{n+1}$ is a refinement of
the cover $\mathcal{U}_{n}$. Let $\{\lambda_s\}_{s\in{S}}$ be a
locally Lipschitz partition of unity subordinated to the cover
$\mathcal{U}_n$, i.e. for every $s\in{S}$ the function
$\lambda_s:\mathbb{R}^N\to[0,1]$
satisfies the local Lipschitz condition.
For every $s\in{S}$ we fix $y_s\in U_{n,s}$ and let $q_s$ be a measurable
selection of ${F}(\cdot,y_s)$, which exists by (F1). We define
the function $g_n:[0,a]\times{\mathbb{R}^N}\to{\mathbb{R}^N}$ in the following way
$$
g_n(t,y):=\sum_{s\in{S}}\lambda_s(y)\cdot{q_s(t)}.
$$
The set $S(y):=\{s\in{S} | {\lambda}_s(y)\neq{0}\}$ is finite.
If $\lambda_s(y)>0$, so $s\in{S(y)}$, then
$y\in\operatorname{supp}\lambda_s\subset U_{n,s}
\subset{\operatorname{st}(y,\mathcal{U}_n)}$.
There exists ${x_{y,n}}\in\mathbb{R}^N$ such that
$\operatorname{st}(y,\mathcal{U}{_n})\subset B(x_{y,n},r_n(x_{y,n}))$.
We know that $y_s\in{\operatorname{st}(y,\mathcal{U}_n)}$.
We obtain
\begin{align*}
g_n(t,y)
&=\sum_{s\in{S}}\lambda_s(y)\cdot{q_s(t)}\in\operatorname{conv}
F(t,\operatorname{st}(y,\mathcal{U}{_n}))\\
&\subset G_n(t,y):=\operatorname{cl\, conv}
F(t,\operatorname{st}(y,\mathcal{U}{_n})).
\end{align*}
Moreover,
$$
{G_n(t,y)}\subset\operatorname{cl\,conv}F(t,B(x_{y,n},r_n(x_{y,n}))),
$$
so from the inequality \eqref{eq:szacowanie} we have
\begin{equation} \label{eq:GnsiedziwOtoczceF}
G_n(t,y)\subset \operatorname{cl}O_{1/n}(F(t,x_{y,n})).
\end{equation}
We have
$$
F(t,y)\subset\cap_{n\geq{1}}G_n(t,y).
$$
From usc (the map $F$ has compact values) we obtain that for every $y\in\mathbb{R}^N$
and for every $\varepsilon>0$ there exists $\delta>0$ such that
$$
F(t,B(y,\delta))\subset{O_{\varepsilon}}(F(t,y)).
$$
We have
\begin{align*}
\cap_{n\geq{1}}G_n(t,y)&\subset \cap_{n\geq{1}}
\operatorname{cl}O_{1/n}(F(t,x_{y,n}))\\
&\subset\cap_{n\geq{1}}\operatorname{cl}O_{1/n}
\left(F\left(t,B(y,2r_n(x_{y,n}))\right)\right)\\
&\subset\cap_{n\geq{1}}\operatorname{cl}O_{\frac{1}{n}
+\varepsilon}\left(F(t,y)\right)=F(t,y).
\end{align*}
We obtain
$$
F(t,y)=\cap_{n\geq{1}}G_n(t,y).
$$
We have
$$
G_{n+1}(t,y)\subset G_n(t,y).
$$
Let the Nemitski\u{i} (substitution) operator
$P_{G_n} : CJ_1([0,a])\multimap L^1([0,a],\mathbb{R}^N)$ be defined by
$$
P_{G_n}(y):=\{\phi\in{L^1([0,a],\mathbb{R}^N)} | \phi(t)\in{G_n(t,y(t))}
\quad \text{ for a.e. } t\in[0,a] \}.
$$
Let $S_n$ denote the set of solutions of the problem
\begin{equation}\label{eq:zagadnieniezGnvariabletimes}
\begin{gathered}
\dot{y}(t)\in{G_n(t,y(t))}, \quad \text{for $t\in[0,a]$, $t\neq{\tau(y(t))},$} \\
y(0)=y_0, \\
y(t^{+})=y(t)+I(y{(t)}), \quad \text{for $t={\tau}(y(t))$}.
\end{gathered}
\end{equation}
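Note that $F(t,y)\subset G_n(t,y)$ for every $n$, so each solution of
\eqref{eq:zagadnienieINKLUZJA} (with $m=1$) solves
\eqref{eq:zagadnieniezGnvariabletimes} as well; in particular $S\subset S_n$
for every $n\in\mathbb{N}$.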
It is obvious that the sets $S_n$ are nonempty, because the problem
\begin{equation}\label{eq:zagadnieniegnvariabletimes}
\begin{gathered}
\dot{y}(t)=g_n(t,y(t)), \quad \text{for $t\in[0,a]$, $t\neq{\tau(y(t))},$} \\
y(0)=y_0, \\
y(t^{+})=y(t)+I(y{(t)}), \quad \text{for $t={\tau}(y(t))$},
\end{gathered}
\end{equation}
for every $n\in\mathbb{N}$ has exactly one solution (the map $g_n(t,\cdot)$ is
locally Lipschitz, so the solution between consecutive jumps exists and is
unique).
\smallskip
\noindent\textbf{Step 1a.}
We denote by $t^j_{y_n}$ the time of the $j$-th jump of the function $y_n$,
and if the function $y_n$ has fewer than $j$ jumps we take $t^j_{y_n}=a$.
Let $y_n$ be an arbitrary solution of the system
\eqref{eq:zagadnieniezGnvariabletimes} for $0\leq t\leq t^2_{y_n}$.
For $t\leq{t^1_{y_n}}$ we obtain the following form of the solution
$$
y_n(t)=y_0+\int_{0}^{t}\phi_n(s)ds,
$$
where $\phi_n\in P_{G_n}(y_n)$.
There exists a selection $f_n$ (not necessarily measurable) of the multivalued
map $F(\cdot,x_{y,n}(\cdot))$, where we write $x_{y,n}(t)$ for $x_{y_n(t),n}$,
such that for a.e. $t$ we have
$\|\phi_n(t)-f_n(t,x_{y,n}(t))\| \leq \frac{1}{n}$.
We have
$$
\|\phi_n(t)\|\leq \frac{1}{n}+\|f_n(t,x_{y,n}(t))\|.
$$
From the assumption $(F3)$ we obtain
$$
\|f_n(t,x_{y,n}(t))\|\leq \alpha(t)\big(1+\|x_{y,n}(t)\|\big)
\leq \alpha(t)\Big(1+\|y_n(t)\|+\frac{1}{n}\Big).
$$
So
$$
\|y_n(t)\|\leq \|y_0\|+\int_{0}^{t}\Big(\alpha(s)
\Big(1+\|y_n(s)\|+\frac{1}{n}\Big)+\frac{1}{n}\Big)ds.
$$
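Recall Gronwall's inequality: if a bounded function $u\geq 0$ satisfies
$u(t)\leq C+\int_0^t\beta(s)u(s)ds$ on $[0,a]$ with $C\geq 0$ and
$0\leq\beta\in L^1([0,a])$, then
$$
u(t)\leq C e^{\int_0^t\beta(s)ds}, \quad t\in[0,a].
$$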
From Gronwall inequality, we have
\begin{equation} \label{eq:wspOgr}
\sup_{t\in[0,t^1_{y_{n}}]}\|y_n(t)\|
\leq{\Big(\|y_0\|+
\int_{0}^{a}2\alpha(s)ds+\frac{a}{n}\Big)e^{\int_0^a\alpha(s)ds}}:=K.
\end{equation}
By the continuity of $I$ there exists a constant $c>0$ such
that $\|I(y_n(t^1_{y_n}))\|\leq c$ for all $n$.
Next, for $t^1_{y_n}<t\leq t^2_{y_n}$, the same reasoning applied to
$y_n(t)=y_n(t^1_{y_n})+I(y_n(t^1_{y_n}))+\int_{t^1_{y_n}}^{t}\phi_n(s)ds$
gives
\begin{equation} \label{eq:wspOgrPosokou}
\sup_{t\in[0,t^2_{y_n}]}\|y_n(t)\|
\leq\Big(K+c+\int_{0}^{a}2\alpha(s)ds+\frac{a}{n}\Big)
e^{\int_0^a\alpha(s)ds}=:\bar{K}.
\end{equation}
\smallskip
\noindent\textbf{Step 1b.}
Using assumption (H3), assumption (F2) and inclusion
\eqref{eq:GnsiedziwOtoczceF}, one shows that there exist $N_0\in\mathbb{N}$
and $p'>0$ such that for every $n>N_0$, a.e. $t\in[0,a]$, every $y$ with
$\|y\|\leq\bar{K}$ and every $\phi_n\in{G_n(t,y)}$
$$
{\tau}'(y)\cdot \phi_n-1<-p'.
$$
\smallskip
\noindent\textbf{Step 1c.}
Let us fix $y_n$, where $n>N_0$, the solution of the problem
\eqref{eq:zagadnieniezGnvariabletimes}.
We define the function $w_n:[0,a]\to\mathbb{R}$ by
$$
w_n(t):=\tau(y_n(t))-t.
$$
The function $w_n$ takes the value $0$ at every time at which the function
$y_n$ has a jump.
By the condition (H2) with $m=1$, we obtain $w_n(0)=\tau(y_0)>{0}$ and
$w_n(a)=\tau(y_n(a))-a<0$. By the definition of the first jump time we have
$$
w_n(t)=\tau(y_n(t))-t>0,\quad \text{for }
t\in[0,t^1_{y_n}).
$$
By assumption (H2) with $m=1$ we obtain that
$$
w_n({t^1_{y_n}}^+)=\tau(y_n({t^1_{y_n}}^+))-{t^1_{y_n}}
= \tau\left(y_n(t^1_{y_n})+I(y_n(t^1_{y_n}))\right)-t^1_{y_n}\leq{0}.
$$
For a.e. $t\geq{t^1_{y_n}}$ we have
$$
w'_n(t)={\tau}'(y_n(t))\cdot{y_n'(t)}-1={\tau}'(y_n(t))\cdot{\phi}_n(t)-1<-p'<0,
$$
where $\phi_n\in{P_{G_n}(y_n)}$.
The function $w_n$ is decreasing on $[t^1_{y_n},a]$, hence $y_n$ hits
the hypersurface $\Sigma_1$ exactly once; the time of this jump is denoted
by $t_{y_n}$.
\smallskip
\noindent\textbf{Step 2.}
Now we show that each sequence $(y_n)$, where $y_n\in{S_n}$,
has a subsequence convergent to a solution $\tilde{y}$
of the problem \eqref{eq:zagadnienieINKLUZJA}.
There exists exactly one jump, so from the previous estimations we have
$$
\|y_n(t)\|\leq\bar{K}.
$$
Consequently, the values of solutions of the problem
\eqref{eq:zagadnienieINKLUZJA} are contained in a ball $\text{cl\,}B(0,\bar{K})$,
which is convex, so in particular we know that function
$g_n|_{[0,a]\times\operatorname{cl}\,B(0,\bar{K})}$ has integrable
Lipschitz constant $\Lambda$.
For $t0$ there exists $N_0$ such that for $n_k>N_0$
we have $t_*-\varepsilon0$
and $00$ such that for all
$n_k>N_0$ and $tN_0}$ is equicontinuous and by
the inequality \eqref{eq:wspOgr} uniformly bounded.
By Arzela-Ascoli Theorem \ref{twr:Arzela-Ascoliklasyczne}
(possibly going to the subsequences) we can assume that
$y_{n_{k}}\to{y_{\varepsilon}}$ on $[0,t_*-\varepsilon]$, where
$y_{\varepsilon}$ is a continuous function. This can be done in such a way
that for any ${\varepsilon}_1>{\varepsilon}_2>0$ the functions
$y_{{\varepsilon}_1}$, $y_{{\varepsilon}_2}$ fulfil the condition
$y_{\varepsilon_2}|_{[0,t_*-{\varepsilon}_1]}=y_{{\varepsilon}_1}.$
For ${\varepsilon}\searrow{0}$ we obtain an extension of the function
$y_{\varepsilon}$, i.e. the function $y_* : [0,t_*) \to \mathbb{R}^N$,
where $y_{n_k}$ converges pointwise to $y_{*}$.
Moreover
\begin{equation}
\label{eq:calkOgranphi_n} \|\phi_{n_k}(t)\|
\leq{\alpha(t)\Big(1+\|x_{y_{n_k},n_k}(t)\|\Big)}+\frac{1}{n_k}
\leq \alpha{(t)}(2+\bar{K})+1.
\end{equation}
We know that:
\begin{itemize}
\item ${\phi}_{n_k}(t)\in \operatorname{cl\,conv} F\left(t,\operatorname{st}(y_{n_k}(t),\mathcal{U}_{n_k})\right)
\subset{\operatorname{cl}}O_{1/n_k}
\Big(F\Big(t,B(y_{n_k}(t),\frac{1}{n_k})\Big)\Big)$,
by inclusion \eqref{eq:GnsiedziwOtoczceF},
\item $y_{n_k}(t)\to y_{\varepsilon}(t)$ a.e. on $[0,t_*-\varepsilon]$,
\item ${\phi}_{n_k}\in{L^1([0,t_*-\varepsilon],\mathbb{R}^N)}$,
\item by the estimate \eqref{eq:calkOgranphi_n} the sequence $({\phi}_{n_k})$
is uniformly integrable, hence relatively weakly compact in $L^1$, so
(passing to a subsequence) ${\phi}_{n_{k_l}}\rightharpoonup{\phi}$
on $[0, t_*-\varepsilon]$.
\end{itemize}
Thus by Theorem \ref{twr:convergenceinkluzje} we obtain
${\phi}(t)\in F(t,y_{\varepsilon}(t))$ for a.e.
$t\in[0,t_*-\varepsilon]$.
By analogy, we conclude that ${\phi}(t)\in F(t,y_*(t))$ a.e. on $[0,t_*)$.
By weak convergence
${\phi}_{n_{k_l}}\rightharpoonup{\phi}$ on $[0, t_*-\varepsilon]$,
for $\Psi(\phi_n):=\int_{0}^{t}\phi_n(s)ds$ we have
\begin{align*}
\int_{0}^{t}\phi(s)ds
&=\Psi(\phi)=\lim_{k\to\infty}\Psi(\phi_{n_k})
=\lim_{k\to\infty}\int_{0}^{t}{\phi}_{n_k}(s)ds\\
&=\lim_{k\to\infty}{y_{n_k}(t)}-y_0=y_*(t)-y_0.
\end{align*}
For an increasing sequence $(s_n)$ convergent to $t_*$ we have
$\|y_*(s_n)-y_*(s_m)\|\leq\big|\int_{s_m}^{s_n}\big(\alpha(s)(2+\bar{K})+1\big)ds\big|$,
so the limit $y_*(t_*):=\lim_{n\to\infty}y_*(s_n)$ exists and extends $y_*$
to $[0,t_*]$.
Let $\epsilon>0$. The function $\alpha(s)(2+\bar{K})+1$ is integrable,
so we can choose $t_{\epsilon}<t_*$ such that
$\int_{t_{\epsilon}}^{t_*}2\big(\alpha(s)(2+\bar{K})+1\big)ds\leq\frac{\epsilon}{2}$
and $K_0$ such that for $n_k>K_0$ we have
$\|y_*(t_{\epsilon})-y_{n_k}(t_{\epsilon})\|<\frac{\epsilon}{2}$.
For $t_{y_{n_k}}\leq t_*$ we can estimate
\begin{align*}
&\|y_{n_k}(t_{y_{n_k}})-y_*(t_{*})\|\\
&\leq \|y_{n_k}(t_{\epsilon})+\int_{t_{\epsilon}}^{t_{y_{n_k}}}\phi_{n_k}(s)ds
-y_*(t_{\epsilon})-\int_{t_{\epsilon}}^{t_*}\phi(s)ds\|\\
&\leq \|y_{n_k}(t_{\epsilon})-y_*(t_{\epsilon})\|
+\int_{t_{\epsilon}}^{t_{y_{n_k}}}\|\phi_{n_k}(s)\|ds
+\int_{t_{\epsilon}}^{t_*}\|\phi(s)\|ds\\
&\leq \|y_{n_k}(t_{\epsilon})-y_*(t_{\epsilon})\|
+\int_{t_{\epsilon}}^{t_{y_{n_k}}}\left(\alpha(s)(2+\bar{K})+1\right)ds
+\int_{t_{\epsilon}}^{t_*}\left(\alpha(s)(2+\bar{K})+1\right)ds\\
&\leq \frac{\epsilon}{2}+\int_{t_{\epsilon}}^{t_*}2(\alpha(s)(2+\bar{K})+1)ds
\leq \epsilon.
\end{align*}
For $t_{y_{n_k}}>t_*$ we obtain
\begin{align*}
\|y_*(t_*)-y_{n_k}(t_{y_{n_k}})\|
&\leq \|y_*(t_*)-y_*(t_*-\varepsilon)\|
+\|y_*(t_*-\varepsilon)-y_{n_k}(t_*-\varepsilon)\|\\
&\quad +\|y_{n_k}(t_*-\varepsilon)-y_{n_k}(t_{y_{n_k}})\|,
\end{align*}
but it is easy to see that
\begin{align*}
\|y_{n_k}(t_*-\varepsilon)-y_{n_k}(t_{y_{n_k}})\|
&=\|\int_{t_*-\varepsilon}^{t_{y_{n_k}}}\phi_{n_k}(s)ds\|
\leq\int_{t_*-\varepsilon}^{t_{y_{n_k}}}\|\phi_{n_k}(s)\|ds\\
&\leq \int_{t_*-\varepsilon}^{t_{y_{n_k}}} \big(\alpha(s)(2+K)+1\big)ds\\
&= (2+K)\int_{t_*-\varepsilon}^{t_{y_{n_k}}} \alpha(s)ds
+t_{y_{n_k}}-t_*+\varepsilon ,
\end{align*}
so
\begin{align*}
\|y_*(t_*)-y_{n_k}(t_{y_{n_k}})\|
&\leq \|y_*(t_*)-y_*(t_*-\varepsilon)\|+\|y_*(t_*-\varepsilon)
-y_{n_k}(t_*-\varepsilon)\|\\
&\quad +(2+K)\int_{t_*-\varepsilon}^{t_{y_{n_k}}} \alpha(s)ds
+t_{y_{n_k}}-t_*+\varepsilon\\
&\to \|y_*(t_*)-y_*(t_*-\varepsilon)\|+(2+K)
\int_{t_*-\varepsilon}^{t_*} \alpha(s)ds +\varepsilon,
\end{align*}
as $k\to \infty$.
From the arbitrariness of $\epsilon$ and $\varepsilon$ we obtain
$\|y_*(t_*)-y_{n_k}(t_{y_{n_k}})\|\to 0$.
Summarising, we have that
$y_{n_k}(t_{y_{n_k}})\to y_*(t_*)$.
By the continuity of $\tau$ we obtain
$\tau(y_*(t_*))-t_*=\lim_{{n_k}\to\infty}(\tau(y_{n_k}(t_{y_{n_k}}))-t_{y_{n_k}})=0$
which means that $t_*$ is the time of jump for $y_*$.
\smallskip
\noindent\textbf{Step 2b.}
We repeat a similar reasoning on the part of the interval $[0,a]$ after the
jump.
From inequality \eqref{eq:wspOgrPosokou} and estimate \eqref{eq:calkOgranphi_n}
we conclude that the family $\{y_{n_k}\}$ is equicontinuous and equibounded
on $[t_*+\varepsilon,a]$.
Therefore, by the Arzela-Ascoli Theorem \ref{twr:Arzela-Ascoliklasyczne}
(passing to a subsequence if needed) we can assume that
$y_{n_k}\to{y^{\varepsilon}}$ on $[t_*+\varepsilon,a]$, where
$y^{\varepsilon}$ is a continuous function, and, as before, we extend
it to a continuous function $y^* : (t_*,a] \to \mathbb{R}^N$ with $y_{n_k}$
convergent pointwise to $y^*$.
We know that:
\begin{itemize}
\item ${\phi}_{n_k}(t)\in \operatorname{cl\,conv}
F\big(t,\operatorname{st}(y_{n_k}(t),\mathcal{U}_{n_k})\big)
\subset{\operatorname{cl}}\,O_{1/n_k}\Big(F\big(t,B(y_{n_k}(t),\frac{1}{n_k})\big)
\Big)$.
\item $y_{n_k}(t)\to y^{\varepsilon}(t)$ a.e. on $[t_*+\varepsilon,a]$,
\item ${\phi}_{n_k}\in{L^1([t_*+\varepsilon,a],\mathbb{R}^N)}$,
\item by the estimate \eqref{eq:calkOgranphi_n} the sequence $({\phi}_{n_k})$
is uniformly integrable, hence relatively weakly compact in $L^1$, so
${\phi}_{n_{k_l}}\rightharpoonup{\phi}$ on
$[t_*+\varepsilon,a]$.
\end{itemize}
Again, by Theorem \ref{twr:convergenceinkluzje} we obtain
${\phi}(t)\in F(t,y^{\varepsilon}(t))$ for a.e.
$t\in{[t_*+\varepsilon,a]}$, and hence
${\phi}(t)\in F(t,y^*(t))$ a.e. on $(t_*,a]$.
By weak convergence
${\phi}_{n_{k_l}}\rightharpoonup{\phi}$ on $[t_*+\varepsilon,a]$ for
$\Psi(\phi_n):=\int_{t}^{a}\phi_n(s)ds$ we have
\begin{align*}
\int_{t}^{a}\phi(s)ds
&=\Psi(\phi)=\lim_{k\to\infty}\Psi(\phi_{n_k})
=\lim_{k\to\infty}\int_{t}^{a}{\phi}_{n_k}(s)ds\\
&=\lim_{k\to\infty}\big({y_{n_k}(a)}-y_{n_k}(t)\big)=y^*(a)-y^*(t).
\end{align*}
For a decreasing sequence $(s_n)$ convergent to $t_*$ we obtain, in the same
way as before, that the right limit $y^*(t_*^+):=\lim_{n\to\infty}y^*(s_n)$
exists and $y^*(t_*^+)=y_*(t_*)+I(y_*(t_*))$.
We define $\tilde{y}:[0,a]\to\mathbb{R}^N$ by $\tilde{y}(t):=y_*(t)$ for
$t\in[0,t_*]$ and $\tilde{y}(t):=y^*(t)$ for $t\in(t_*,a]$.
For $t>t_*$ we have
$$
\tilde{y}(t)=\tilde{y}(t_*)+I(\tilde{y}(t_*))+\int_{t_*}^t \phi(s)ds
=y_0+\int_0^{t_*}\phi(s)ds+ I(\tilde{y}(t_*))+\int_{t_*}^t \phi(s)ds,
$$
where $\phi(t)\in F(t,\tilde{y}(t))$ for a.e. $t\in[0,a]$, so $\tilde{y}$ is a
solution of the problem \eqref{eq:zagadnienieINKLUZJA}, hence
$\tilde{y}\in{S}$. The function $\tilde{y}$ is
the limit of the sequence $(y_{n_k})$ in the space $CJ_1([0,a])$.
\smallskip
\noindent\textbf{Step 3.}
We show, for every $n\in\mathbb{N}$, the contractibility of the set
$\operatorname{cl} S_n$.
Fix $n$ such that we can define $p'$ (see Step 1.) and take
$\bar{y}\in\operatorname{cl}\,S_{n}$. We divide the interval
$[0,1]$ into two halves.
Let $r\in[0,\frac{1}{2}]$. We consider the problem
\begin{equation}
\begin{gathered}
\dot{y}(t)=g_n(t,y(t)), \quad \text{for } t\in[a-2r(a-t_{\bar{y}}),a] ,
t\neq{\tau(y(t))} , \\
y(t)=\bar{y}(t), \quad \text{for } t\in[0,a-2r(a-t_{\bar{y}})] ,\\
y(t^+)=y(t)+I({y}(t)), \quad \text{for } t=\tau(y(t)) .
\end{gathered} \label{eq:impulDlaselekcjigSKwym}
\end{equation}
Here $g_n$ is the locally Lipschitz selection of the map $G_n$ defined in
Step 1.
There exists exactly one solution of this problem; we denote it by
$y^{2}_{\bar{y},r}$. Then
$y^2_{\bar{y},r}\in\operatorname{cl}\,S_n$.
Next, for $r\in(1/2,1]$, we consider the problem
\begin{equation}
\begin{gathered}
\dot{y}(t)=g_n(t,y(t)), \quad \text{for } t\in[t^{\bar{y},r},a] \text{, } t\ne\tau(y(t)) , \\
y(t)=\bar{y}(t), \quad \text{for } t\in[0,t^{\bar{y},r}] ,\\
y(t^+)=y(t)+I({y}(t)), \quad \text{for } t=\tau(y(t)) .
\end{gathered}
\label{eq:mimpulDlaselekcjigSKwym}
\end{equation}
where $t^{\bar{y},r}:=t_{\bar{y}}-2(r-\frac{1}{2})t_{\bar{y}}$.
There exists exactly one solution of this problem, denoted by
$y^{1}_{\bar{y},r}$, which also belongs to $\operatorname{cl}\,S_n$.
Finally we consider the function
$h:[0,1]\times\operatorname{cl} S_n \to \operatorname{cl}S_n$ given by
\begin{equation}
h(r,\bar{y}):=\begin{cases}
y^2_{\bar{y},r}, & r\in[0,\frac{1}{2}],\\
y^1_{\bar{y},r}, & r\in(\frac{1}{2},1].
\end{cases}
\end{equation}
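Note that for $r=0$ the interval $[a-2r(a-t_{\bar{y}}),a]$ reduces to the
point $\{a\}$, so $h(0,\bar{y})=\bar{y}$, while for $r=1$ we have
$t^{\bar{y},1}=0$, so $h(1,\bar{y})$ is the unique solution of problem
\eqref{eq:zagadnieniegnvariabletimes} and does not depend on $\bar{y}$.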
Now, we show that the function $h$ is continuous.
Due to the continuous dependence of solutions on initial conditions
\cite{Hale} we know that the function $h$ is continuous on
$[0,1/2)\times{\operatorname{cl}\,S_n}$ and left continuous
on $\{1/2\}\times\operatorname{cl}\,S_n$.
Let $((r_k,\bar{y}_k))_k$ be a sequence convergent to
$(\frac{1}{2}^+,\bar{y})$.
We know that if $t_{\bar{y}}<t_{h(r_k,\bar{y}_k)}$, then
$\tau(h(r_k,\bar{y}_k)(t_{\bar{y}}))-t_{\bar{y}}>0$ and we have
$$
t_{\bar{y}}-\tau(h(r_k,\bar{y}_k)(t_{\bar{y}}))
= \int_{t_{\bar{y}}}^{t_{h(r_k,\bar{y}_k)}}(\tau(h(r_k,\bar{y}_k)(\cdot))
-\cdot)'(\theta)d\theta < -p'(t_{h(r_k,\bar{y}_k)}-t_{\bar{y}}),
$$
so
\begin{equation}\label{eq:t}
t_{h(r_k,\bar{y}_k)}<t_{\bar{y}}
+\frac{1}{p'}\big(\tau(h(r_k,\bar{y}_k)(t_{\bar{y}}))-t_{\bar{y}}\big).
\end{equation}
The case $t_{h(r_k,\bar{y}_k)}\leq t_{\bar{y}}$ is treated analogously, so
$t_{h(r_k,\bar{y}_k)}\to t_{\bar{y}}$ as $k\to\infty$.
For $t_0\in[0,a]$ we use the truncation
\begin{equation}
{\mathbf{1}}_{t>t_0}(y(t))=
\begin{cases}
y(t), & \text{for } t>t_0, \\
0, & \text{for } t\leq{t_0}.
\end{cases}
\end{equation}
We define the function $\varrho_{\bar{y}_k,r_k} : [0,a]\to\mathbb{R}^N$ by
\begin{align*}
\varrho_{\bar{y}_k,r_k}(t)
&:=y^1_{\bar{y}_k,r_k}(t)- {\mathbf{1}}_{t>t_{y^1_{\bar{y}_k,r_k}}}
(I(y^1_{\bar{y}_k,r_k}(t_{y^1_{\bar{y}_k,r_k}})))\\
&\quad -\Big(y^2_{\bar{y},1/2}(t)-{\mathbf{1}}_{t>t_{\bar{y}}}
(I(y^2_{\bar{y},1/2}(t_{\bar{y}})))\Big),
\end{align*}
i.e. the difference between $y^1_{\bar{y}_k,r_k}$ and $y^2_{\bar{y},1/2}$
with the changes caused by the jumps removed; we denote these modified
functions by $\check{y}^1_{\bar{y}_k,r_k}$ and $\check{y}^2_{\bar{y},1/2}$,
so that $\varrho_{\bar{y}_k,r_k}
=\check{y}^1_{\bar{y}_k,r_k}-\check{y}^2_{\bar{y},1/2}$.
It is easy to see that
$t_{\bar{y}}=t_{y^2_{\bar{y},1/2}}$.
For $t^{\bar{y}_k,r_k}\leq t\leq t_{\bar{y}}$ we obtain
\begin{align*}
\|\varrho_{\bar{y}_k,r_k}(t)\|
&\leq \|\bar{y}-\bar{y}_k\|+\int_{t^{\bar{y}_k,r_k}}^t
\|g_n(s,{y^1_{\bar{y}_k,r_k}}(s))-\bar{\phi}_n(s)\|ds\\
&\leq\|\bar{y}-\bar{y}_k\|+\int_{t^{\bar{y}_k,r_k}}^{t}
\left(\|\bar{\phi}_n(s)\|+\|g_n(s,y^1_{\bar{y}_k,r_k}(s))\|\right)ds\\
&\leq\|\bar{y}-\bar{y}_k\|+\int_{t^{\bar{y}_k,r_k}}^{t}2
\Big(\alpha(s)(1+\bar{K})+\frac{1}{n}\Big)ds\\
&=:z_{\bar{y}_k,r_k}(t)
\end{align*}
and for $t> t_{\bar{y}}$ we have
\begin{align*}
&\|\varrho_{\bar{y}_k,r_k}(t)\|\\
&\leq z_{\bar{y}_k,r_k}(t_{\bar{y}})
+ \int_{t_{\bar{y}}}^{t}\|g_n(s,{y^1_{\bar{y}_k,r_k}}(s))
-g_n(s,{y^2_{\bar{y},1/2}}(s))\|ds\\
&\leq z_{\bar{y}_k,r_k}(t_{\bar{y}})
+\int_{t_{\bar{y}}}^{t} \Lambda(s)\|{y^1_{\bar{y}_k,r_k}}(s)
-{y^2_{\bar{y},1/2}}(s)\|ds
\\
&=z_{\bar{y}_k,r_k}(t_{\bar{y}})
+\int_{t_{\bar{y}}}^{t}\Lambda(s)\|{\check{y}^1_{\bar{y}_k,r_k}}(s)
+{\mathbf{1}}_{s>t_{y^1_{\bar{y}_k,r_k}}}I(y^1_{\bar{y}_k,r_k}
(t_{y^1_{\bar{y}_k,r_k}})) -{\check{y}^2_{\bar{y},1/2}}(s)
\\
&\quad -I({y}^2_{\bar{y},1/2}(t_{\bar{y}}))\|ds
\\
&\leq z_{\bar{y}_k,r_k}(t_{\bar{y}})+
\Big|\int_{t_{\bar{y}}}^{t_{y^1_{\bar{y}_k,r_k}}}
\Lambda(s)\|I({y}^2_{\bar{y},1/2}(t_{\bar{y}}))\|ds\Big|
\\
&\quad +\int_{\max\{t_{\bar{y}},t_{y^1_{\bar{y}_k,r_k}}\}}^t
\Lambda(s)\|I(y^1_{\bar{y}_k,r_k}(t_{y^1_{\bar{y}_k,r_k}}))
-I({y}^2_{\bar{y},1/2}(t_{\bar{y}}))\|ds
\\
&\quad +\int_{t_{\bar{y}}}^{t}\Lambda(s)\|{\check{y}^1_{\bar{y}_k,r_k}}(s)
-{\check{y}^2_{\bar{y},1/2}}(s)\|ds,
\end{align*}
hence by the Gronwall inequality we obtain
\begin{align*}
\|\varrho_{\bar{y}_k,r_k}(t)\|
&\leq \Big(z_{\bar{y}_k,r_k}(t_{\bar{y}})
+\Big|\int_{t_{\bar{y}}}^{t_{y^1_{\bar{y}_k,r_k}}}
\Lambda(s)\|I({y}^2_{\bar{y},1/2}(t_{\bar{y}}))\|ds\Big|\\
&\quad +\int_{\max\{t_{\bar{y}},t_{y^1_{\bar{y}_k,r_k}}\}}^t
\Lambda(s)\|I(y^1_{\bar{y}_k,r_k}(t_{y^1_{\bar{y}_k,r_k}}))
- I({y}^2_{\bar{y},1/2}(t_{\bar{y}}))\|ds\Big)\\
&\quad\times \exp{\int_{t_{\bar{y}}}^{t}\Lambda(s)ds}
=:z_{\bar{y}_k,r_k}(t).
\end{align*}
By previous convergences and continuity of $I$, we obtain
\begin{gather}\label{eq:jumps}
\| I(h(r_k,\bar{y}_k)(t_{h(r_k,\bar{y}_k)}))
-I(h(1/2,\bar{y})(t_{h(1/2,\bar{y})}))\|\to 0, \\
\label{eq:supy}
\sup_{t\in[0,a]}\|\varrho_{\bar{y}_k,r_k}(t)\|\leq z_{\bar{y}_k,r_k}(a)\to 0.
\end{gather}
Summing up, by \eqref{eq:supy}, \eqref{eq:t} and \eqref{eq:jumps},
if $(r_k,\bar{y}_k)$ converges to $(\frac{1}{2}^+,\bar{y})$,
then $y^1_{\bar{y}_k,r_k}$ converges to $y^2_{\bar{y},\frac{1}{2}}$
in norm in the space $CJ_1([0,a])$.
The function $h$ is therefore continuous on
$[0,1]\times{\operatorname{cl}\,S_n}$, hence it is a homotopy.
By the definition of $h$ we have
$h(0,\bar{y})=\bar{y}$ and $h(1,\bar{y})=y^1_{\bar{y},1}$, so
${\operatorname{cl}\,S_n}$ is a contractible set.
\smallskip
\noindent\textbf{Step 4.}
We show that the properties required in Theorem \ref{twr:Hyman} are fulfilled.
The sets $\operatorname{cl}\,S_n$ are contractible by Step 3.
If $x\in \cap_{n\in\mathbb{N}} \operatorname{cl}S_n$, then
$x\in \operatorname{cl}S_n$ for every $n$. Therefore, there exists a
sequence $(d_n)\subset\mathbb{R}_+$ convergent to $0$ and $y_n\in{S_n}$
such that $y_n\in B(x,d_n)$ (the ball in $CJ_1([0,a])$).
Hence $y_n\to x$ in the space $CJ_1([0,a])$. Moreover, by Step 2 a
subsequence $(y_{n_k})$ converges to a solution of problem
\eqref{eq:zagadnienieINKLUZJA}, where $m=1$, so $x\in S$. We obtain
$$
S\subset\cap_{n\in\mathbb{N}}\, S_n\subset\cap_{n\in\mathbb{N}}\operatorname{cl} S_n\subset S,
$$
so $S=\cap_{n\in\mathbb{N}}\operatorname{cl} S_n$.
We will show that $\sup\{d(z,S)\mid z\in{S_n}\}\to 0$ as $n\to\infty$.
Assume that there exist $\varepsilon>0$ and a sequence $(y_n)$
such that $y_n\in{S_n}$ and $d(y_n,S)\geq\varepsilon$.
From Step 2 we know that this sequence has a subsequence $(y_{n_k})$
such that $y_{n_k}\to{\tilde{y}}\in{S}$,
so $d(y_{n_k},S)\to{0}$. This contradicts the choice of the sequence
$(y_n)$, hence $\sup\{d(z,S)\mid z\in{S_n}\}\to{0}$. Therefore
$\sup\{d(z,S)| z\in{\operatorname{cl}{S_n}}\}\to{0}$.
We obtain
$S_n\subset S + B(0,p_n)$, where $p_n:=\sup_{z\in S_n}d(z,S)\to 0$ with
$n\to\infty$.
The compactness of $S$ implies
$$
\beta(\operatorname{cl}\, S_n)=\beta(S_n)\leq \beta(S)+p_n=p_n,
$$
so $\beta(\operatorname{cl} S_n)\to 0$.
Summing up, we can use theorem \ref{twr:Hyman}, which implies that
the set $S$ is an $R_{\delta}$-set.
\end{proof}
We will use this theorem to prove the more general case.
\begin{theorem} \label{twr:drugieskonWym}
Let the assumptions {\rm (F0)--(F3), (H1)--(H3)} hold. Then every
solution of the problem \eqref{eq:zagadnienieINKLUZJA} for every
$j=1,\dots,m$ meets $\Sigma_j$ exactly once and the solution set $S$
of this problem is an $R_\delta$-set in the space $CJ_m([0,a])$.
\end{theorem}
\begin{proof}
We show that we can divide the interval $[0, a]$ into $m$ disjoint parts,
each of them containing exactly one jump.
Then we are able to use the reasoning of Theorem
\ref{twr:dokladniejedenskokINKLUZJA} on every such part, which completes the proof.
By analogy to Step 1. in the proof of Theorem \ref{twr:dokladniejedenskokINKLUZJA}
we define a multivalued map $G_n:[0,a]\times\mathbb{R}^N\multimap\mathbb{R}^N$ and consider
the problem
\begin{equation}\label{eq:zagadnieniezGnvariabletimesk}
\begin{gathered}
\dot{y}(t)\in{G_n(t,y(t))}, \quad \text{for $t\in[0,a]$, $t\neq{\tau_j(y(t))}$,
$j=1,\dots,m,$} \\
y(0)=y_0, \\
y(t^{+})=y(t)+I_j(y{(t)}), \quad \text{for $t={\tau}_j(y(t))$, $j=1,\dots,m$}.
\end{gathered}
\end{equation}
We denote by $t^j_{y_n}$ the time of $j$-th jump for the function
$y_n:[0,a]\to\mathbb{R}^N$. If the function $y_n$ has less than $j$ jumps we
take $t^j_{y_n}:=a$.
Let $y_n$ be an arbitrary solution of the problem
\eqref{eq:zagadnieniezGnvariabletimesk} for $0\leq{t}\leq{t^2_{y_n}}$.
By analogy to Step 1a in the proof of Theorem \ref{twr:dokladniejedenskokINKLUZJA}
we show that there exists a constant $\bar{K}$ such that
$$
\sup_{t\in[0,t^2_{y_n}]}\|y_n(t)\|\leq{\bar{K}}.
$$
Next, proceeding similarly to Step 1b in the proof of Theorem
\ref{twr:dokladniejedenskokINKLUZJA}, we show that there exists
a constant $p'>0$ such that
$$
{\tau}'_j(y)\cdot\phi_n-1<-p'
$$
for all $j=1,\dots,m$ and all sufficiently large $n$,
where $\bar{y}$ is a fixed function with values in
$\operatorname{cl}\,B(0,\bar{K})$, $t$ is such that the assumption
(H3) is satisfied, $\bar{y}(t)=y$ and $\phi_n\in{G_n(t,\bar{y}(t))}$.
We define the function $w_{n,j}:[0,a]\to\mathbb{R}$ in the following way:
$$
w_{n,j}(t):=\tau_j(y_n(t))-t, \quad j=1,\dots,m.
$$
Let us fix a solution $y_n$ of problem \eqref{eq:zagadnieniezGnvariabletimesk}.
Now we prove by induction that the $j$-th time of
jump is zero of the function $w_{n,j}$.
\noindent\textbf{Basis.} By assumption (H2) we have:
\begin{itemize}
\item[(a)] $w_{n,j}(0)=\tau_j(y_0)>0$,
\item[(b)] $w_{n,j}(a)={\tau}_j(y_n(a))-a<0$,
\item[(c)] $w_{n,j}(t)=\tau_j(y_n(t))-t
<\tau_{j+1}(y_n(t))-t=w_{n,j+1}(t)$ for all $t\in[0,a]$.
\end{itemize}
If there were no impulses, then $w_{n,j}(t)\neq{0}$ on $[0,a]$ and, by its
definition, the function $w_{n,j}$ would be continuous for every $j$, which
contradicts (a) and (b).
Hence there is at least one jump time.
Let $t^1_{y_n}>0$ denote the first jump time. Then for every $j$
$$
w_{n,j}(t)=\tau_j(y_n(t))-t>0,\quad \text{for } t\in[0,t^1_{y_n}).
$$
By the assumption (H2) we obtain
$$
w_{n,j}({t^1_{y_n}}^+)=\tau_j(y_n({t^1_{y_n}}^+))
-{t^1_{y_n}}= \tau_j(y_n(t^1_{y_n})+I_j(y_n(t^1_{y_n})))-t^1_{y_n}\leq{0}.
$$
For a.e. $t\geq{t^1_{y_n}}$ we have
$$
w'_{n,j}(t)={\tau}'_j(y_n(t))\cdot{y_n'(t)}-1
={\tau}_j'(y_n(t))\cdot{\phi}_n(t)-1<-p'<0.
$$
Hence $w_{n,j}$ is a decreasing function on $[t^1_{y_n},a]$, so
for every $j$ the solution $y_n$ meets $\Sigma_j$ exactly once.
By (c) we know that the first jump time $t^1_{y_n}$ is a zero of the
function $w_{n,1}$.
\noindent\textbf{Inductive step.}
We assume that the $j$-th jump time is a zero of $w_{n,j}$, $j<m$.
By assumption (H2) we have
\[
w_{n,l}({t^j_{y_n}}^+)
=\tau_l\big(y_n(t^j_{y_n})+I_j(y_n(t^j_{y_n}))\big)-t^j_{y_n}
>\tau_j(y_n(t^j_{y_n}))-{t^j_{y_n}}=0
\]
for $j\geq 1$, $l>j$.
We consider the interval $J:=(t^j_{y_n},a]$. By analogy to Step 1 in the proof
of Theorem \ref{twr:dokladniejedenskokINKLUZJA} we have
$w_{n,i}(t)\neq{0}$, $t\in{J}$, $i\leq{j}$, as long as there is
no jump caused by $w_{n,l}$ for $l>j$.
There has to be at least one jump after $t^j_{y_n}$; we
denote its time by $\tilde{t}$. By (c) there are no jumps before the jump
caused by $w_{n,j+1}$, so $\tilde{t}=t^{j+1}_{y_n}$.
Since both the basis and the inductive step have been proved, by mathematical
induction there are exactly $m$ jumps, one for each
$\Sigma_j$, $j=1,\dots,m$.
Next we proceed similarly to the proof of Theorem
\ref{twr:dokladniejedenskokINKLUZJA}.
Let $(t^1_*, t^2_*, \dots, t^m_*)$ be the limit of (a subsequence of)
the sequence $((t^1_{y_n}, t^2_{y_n}, \dots, t^m_{y_n}))$.
We can show that on $[0, t^1_*]$ there exists a subsequence of
$(y_{n_k})$ such that
$I_1(y_{n_{k_l}}(t^1_{y_{n_{k_l}}}))\to{I_1(y_*(t^1_*))}$
and $\tau_1(y_*(t_*^1))=t^1_*$, so
$$
\tau_2(y_*((t_*^1)^+))=\tau_2(y_*(t_*^1)+I_1(y_*(t_*^1)))>\tau_1(y_*(t_*^1))=t_*^1.
$$
By the assumptions (H1) and (H2) there exists $\varepsilon_1>0$ such that
there is only one jump time of $y_*$ on $[0,t^1_*+2\varepsilon_1]$
and we obtain that for all sufficiently large $l$ there is only
one jump time of $y_{n_{k_l}}$ on $[0,t^1_*+\varepsilon_1]$.
By analogy we find a subsequence and $\varepsilon_2>0$ such that there
is exactly one jump on $[t_*^1+\varepsilon_1, t_*^2+\varepsilon_2]$,
and so on. On every such interval we
proceed with the reasoning from Theorem \ref{twr:dokladniejedenskokINKLUZJA}.
\end{proof}
The following example concerns an inclusion with exactly one jump,
but it can easily be adapted to a multi-jump case.
\begin{example} \label{examp1} \rm
There are two trust funds with interest rates (dependent on time and the amount
of money) $\alpha(t,y_1)\in A(t,y_1)$ and $\beta(t,y_2)\in B(t,y_2)$
respectively, where $A$ and $B$ are (multivalued) investment plans.
It is possible to transfer money once (in or out) without losing interest.
\end{example}
We start both trust funds with the same amount of money and transfer
money from the worse deposit to the better one once it becomes clear which
one is better.
The amount of money that we wish to transfer is proportional to the
difference in incomes.
This situation can be represented in the form of the following differential
inclusion:
\begin{gather}
\dot{(y_1,y_2)}(t)\in F(t,(y_1,y_2)(t)),\quad
\text{for $t\in[0,1]$, $t\neq{\tau((y_1,y_2)(t))}$}, \\
(y_1,y_2)(0)=(y_0,y_0), \\
(y_1,y_2)(t^{+})=(y_1,y_2)(t)+I((y_1,y_2)(t)),
\quad \text{for $t={\tau}((y_1,y_2)(t))$}
\end{gather}
with
$$
F(t,y_1,y_2)=A(t,y_1)\times B(t,y_2),
$$
where $A$ and $B$ fulfill assumptions (F1) and (F2) (for example continuous)
and have interval values with $A(t,y),\; B(t,y)\subset[-6|y|,6|y|]$,
$$
I(y_1,y_2)=\begin{cases}
(-y_1,y_1), & 0\leq (1+\frac{1}{\rho}) y_1\leq y_2 ,\\
(y_2,-y_2), & 0\leq (1+\frac{1}{\rho}) y_2\leq y_1 ,\\
\big(\rho(y_1-y_2),\rho(y_2-y_1)\big), & \text{otherwise},
\end{cases}
$$
where $\rho>0$ is the proportionality factor of the transfer, and
$$
\tau(y_1,y_2)=\frac{1}{2}-\frac{1}{\pi}\arctan(y_1+y_2).
$$
For every $\alpha\in{A(t,y_1)}$ we have
$$
\alpha\leq 6|y_1|\leq\frac{3}{2}+6y_1^2
=(1-p)\pi\Big(\frac{1}{2}+2y_1^2\Big),
\quad\text{where } p=1-3/\pi>0 .
$$
We have an analogous inequality for $\beta$,
therefore we obtain
\begin{align*}
\sup_{\varphi\in{F(t,y_1,y_2)}}{\tau}_j'(y_1,y_2)\cdot \varphi
&= \sup_{\alpha\in{A(t,y_1)}}\sup_{\beta\in{B(t,y_2)}}
\frac{\alpha+\beta}{-\pi(1+(y_1+y_2)^2)}\\
&\leq (1-p)\frac{1+2y_1^2+2y_2^2}{1+(y_1+y_2)^2}\leq{1-p}<1
\end{align*}
All assumptions of Theorem \ref{twr:dokladniejedenskokINKLUZJA}
are satisfied, therefore the solution set of this problem is an $R_\delta$-set.
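Note also that, with $I$ and $\tau$ as above, every admissible impulse
preserves the sum $y_1+y_2$, so the value of $\tau$ is not changed by the
jump.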
\begin{thebibliography}{00}
\bibitem{AK} Akhmet, M.;
\emph{Principles of Discontinuous Dynamical Systems},
Springer, New York, 2010.
\bibitem{AE} Aubin, J.-P.; Ekeland, I.;
\emph{Applied nonlinear analysis}, Wiley, New York, 1984.
\bibitem{BeBeOu} Belarbi, A.; Benchohra, M.; Ouahab, A.;
\emph{Nonconvex-valued impulsive functional differential inclusions
with variable times}, Nonlinear Oscillations, Vol. 10, No 4 (2007).
\bibitem{BeOu} Benchohra, M.; Ouahab, A.;
\emph{Impulsive neutral functional differential equations with variable times},
Nonlinear Anal. 55, 679-693 (2003).
\bibitem{BeOu2} Benchohra, M.; Ouahab, A.;
\emph{Impulsive neutral functional differential inclusions with variable times},
Electronic Journal of Differential Equations, Vol. 2003, No. 67, pp. 1-12.
\bibitem{BeRu} Benedetti, I.; Rubbioni, P.;
\emph{Existence of solutions on compact and non-compact intervals
for semilinear impulsive differential inclusions with delay},
Topol. Methods Nonlinear Anal. 32, 227-245 (2008).
\bibitem{CarRub} Cardinali, T.; Rubbioni, P.;
\emph{On the existence of mild solutions of semilinear evolution differential
inclusions}, J. Math. Anal. Appl. 308, 620-635 (2005).
\bibitem{CS} Cardinali, T.; Servadei, R.;
\emph{On the existence of solutions for nonlinear impulsive periodic
viable problems}, Cent. Eur. J. Math. 2 (2004), no. 4, 573–583.
\bibitem{DjebaliGor} Djebali, S.; G\'{o}rniewicz, L.; Ouahab, A.;
\emph{Filippov-Wa\.{z}ewski theorems and structure of solution sets
for first order impulsive semilinear functional differential inclusions},
Topol. Methods Nonlinear Anal. 32, 261-312 (2008).
\bibitem{DjebaliGorOua} Djebali, S.; G\'{o}rniewicz, L.; Ouahab, A.;
\emph{Topological structure of solution sets for impulsive differential
inclusions in Fr\'{e}chet spaces}, Nonlinear Anal. 74, 2141-2169 (2011).
\bibitem{DjebaliGorOuaBook} Djebali, S.; G\'{o}rniewicz, L.; Ouahab, A.;
\emph{Solution Sets for Differential Equations and Inclusions},
De Gruyter Series in Nonlinear Analysis and Applications 18,
Berlin - New York, (2013).
\bibitem{GabGru} Gabor, G.; Grudzka, A.;
\emph{Structure of the solution set to impulsive functional differential
inclusions on the half-line}, Nonlinear Differential Equations and
Applications 19 (2012), 609-627.
\bibitem{Hale} Hale, J. K.;
\emph{Theory of functional differential equations},
Springer-Verlag, New York, 1977.
\bibitem{HX} He, Y.; Xing, Y.;
\emph{Poincaré map and periodic solutions of first-order impulsive differential
equations on Moebius stripe},
Abstr. Appl. Anal. 2013, Art. ID 382592, 11 pp.
\bibitem{Hyman} Hyman, D. M.;
\emph{On decreasing sequences of compact absolute retracts},
Fund. Math. 64 (1969), 91-97.
\bibitem{KryPla} Kryszewski, W.; Plaskacz, S.;
\emph{Periodic solutions to impulsive differential inclusions with constraints},
Nonlinear Anal., 65:1794-1804, 2006.
\bibitem{ObuYao} Obukhovskii, V.; Yao, J.-C.;
\emph{On impulsive functional differential inclusions with Hille-Yosida
operators in Banach spaces}, Nonlinear Anal, 73(6), 1715-1728 (2010).
\end{thebibliography}
\end{document}