\documentclass[reqno]{amsart} \AtBeginDocument{{\noindent\small {\em Electronic Journal of Differential Equations}, Vol. 2005(2005), No. 42, pp. 1--42.\newline ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu \newline ftp ejde.math.txstate.edu (login: ftp)} \thanks{\copyright 2005 Texas State University - San Marcos.} \vspace{9mm}}
\begin{document}
\title[\hfilneg EJDE-2005/42\hfil Nonlinear evolution equations] {Nonlinear evolution equations}
\author[C.-Y. Lin\hfil EJDE-2005/42\hfilneg] {Chin-Yuan Lin}
\dedicatory{In memory of my mother, Liu Gim}
\address{Chin-Yuan Lin \hfill\break Department of Mathematics \\ National Central University \\ Chung-Li, Taiwan}
\email{cylin@math.ncu.edu.tw}
\date{}
\thanks{Submitted December 2, 2004. Published April 7, 2005.}
\subjclass[2000]{47H15, 34G05}
\keywords{Dissipative operators; operator semigroups; evolution operators}
\begin{abstract}
Nonlinear evolution equations are studied under various conditions. The methods used are based on the theory of difference equations. The results presented here are illustrated with examples.
\end{abstract}
\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{example}[theorem]{Example}
\allowdisplaybreaks

\section{Introduction} \label{S:A}
In this section, we review some background material needed later. Let \(\omega \in \mathbb{R}\) and \(M \geq 1\). Let \(X\) be a real Banach space with the norm \(\|\cdot\|\). Let \(B : D(B) \subset X \to X\) be a linear operator satisfying the following two assumptions:
\begin{itemize}
\item[(i)] \(B\) is closed and has domain \(D(B)\) dense in \(X\).
\item[(ii)] The resolvent set \(\rho(B)\) of \(B\) contains \((\omega, \infty)\), and
\[ \|(I - \lambda B)^{-n}\| \leq M(1 - \lambda \omega)^{-n} \]
for \(\lambda > 0\), \(\lambda \omega < 1\), and \(n = 1, 2, 3, \dots\).
\end{itemize}
It is well-known \cite{Gol,Pa} that \(B\) generates a \(C_{0}\) semigroup \(T(t)\), \(t \geq 0\), that \(\|T(t)\| \leq Me^{\omega t}\), and that \(T(t)u_{0}\) for \(u_{0} \in D(B)\) is the unique classical solution to the Cauchy problem
\begin{equation} \label{E:A}
\frac{d}{dt}u = Bu, \quad t > 0, \quad u(0) = u_{0}.
\end{equation}
Here, by a \(C_{0}\) semigroup \(T(t)\) it is meant that \(T(t)\), \(t \geq 0\), is a family of bounded linear operators on \(X\) such that \(T(0) = I\), \(T(t + s) = T(t)T(s)\) for \(t, s \geq 0\), and \(\lim_{t \to 0}T(t)x = x\) for \(x \in X\). The above result is proved in \cite{Gol} and \cite[Page 19]{Pa} by applying the Hille-Yosida theorem, combined with a renorming technique. This result is due (independently) to Feller, Phillips, and Miyadera (see \cite{Gol} for references to the original works). This result is also valid under the seemingly more general conditions:
\begin{itemize}
\item[(ii')] The range of \((I - \lambda B)\) contains \(\overline{D(B)}\) for small enough \(\lambda > 0\) with \(\lambda \omega < 1\).
\item[(ii'')] \((I - \lambda B)^{-1}x\) is single-valued for \(x \in \overline{D(B)}\), and \(\|u\| \leq M(1 - \lambda \omega)^{-n}\|x\|\) holds for all \(\lambda > 0\) with \(\lambda \omega < 1\), \(x \in \overline{D(B)}\), and \(u = (I - \lambda B)^{-n}x\), \(n = 1, 2, 3, \dots\)
\end{itemize}
For generating nonlinear operator semigroups, let \(A : D(A) \subset X \to X\) be a nonlinear multi-valued operator, which satisfies:
\begin{itemize}
\item[(iii)] The range of \((I - \lambda A)\) contains \(\overline{D(A)}\), for small enough \(\lambda > 0\) with \(\lambda \omega < 1\).
\item[(v)] (Dissipativity): \(\|u - v\| \leq \|u - v - \lambda(x - y)\|\) holds for all \(\lambda > 0\), \(u, v \in D(A)\), \(x \in (A - \omega)u\), and \(y \in (A - \omega)v\).
\end{itemize}
Crandall-Liggett \cite{Cra} proved that \(A\) generates a nonlinear operator semigroup \(T(t)\), \(t \geq 0\). When applied to \(u_{0} \in D(A)\), \(T(t)u_{0}\) gives a unique generalized solution to the Cauchy problem
\begin{equation} \label{E:B}
\frac{d}{dt}u \in Au, \quad t > 0, \quad u(0) = u_{0},
\end{equation}
the notion of solution being due to Benilan \cite{Be}. The generalized solution is a strong one if \(X\) is reflexive \cite{Cra}. A different condition on \(A\), or on \(A(t)\) with \(t\)-dependence, for the existence of a strong solution is given in \cite{Lin,Lin1}, where applications to parabolic partial differential equations are given. This condition is called embeddedly quasi-demi-closed and is weaker than continuity or demi-closedness \cite{Mi}. Generalizing the Crandall-Liggett theorem, Kobayashi \cite{Kob} (see Miyadera \cite[pp. 131-132, 141-142, 146]{Mi}, and Takahashi \cite{Ta}) assumed (v) and the weaker (vi) (actually, only (viii) was used):
\begin{itemize}
\item[(vi)] \(\liminf_{\lambda \to 0} d(\mathop{\rm Ran}(I - \lambda A), x)/\lambda = 0\) uniformly for all \(x \in \overline{D(A)}\).
\end{itemize}
Here \(\mathop{\rm Ran}(I - \lambda A)\) denotes the range of \((I - \lambda A)\), and \(d(\mathop{\rm Ran}(I - \lambda A), x)\) denotes the distance between the set \(\mathop{\rm Ran}(I - \lambda A)\) and the point \(x\). For a family of functions \(\{f_{\lambda}\}_{\lambda > 0} : \overline{D(A)} \subset X \to \mathbb{R}\), the statement that \(\liminf_{\lambda \to 0}f_{\lambda}(x) = f_{0}(x)\) holds uniformly for all \(x \in \overline{D(A)}\) means:
\begin{itemize}
\item[(vii)] For every \(\epsilon > 0\), there is a \(\lambda_{1} = \lambda_{1}(\epsilon) > 0\), which is independent of \(x\), such that \(f_{0}(x) - \epsilon < f_{\lambda}(x)\) for all \(0 < \lambda \leq \lambda_{1}\) and for all \(x \in \overline{D(A)}\).
\item[(viii)] Given \(\epsilon > 0\) and given \(\mu > 0\), there is a \(0 < \lambda_{2} = \lambda_{2}(\epsilon, \mu) \leq \mu\), which is independent of \(x\), such that \(f_{\lambda_{2}}(x) < f_{0}(x) + \epsilon\) for all \(x \in \overline{D(A)}\).
\end{itemize}
Note that the following condition (viii'), which is weaker than (viii) (see \cite[Lemma 10]{Lin3}), is easy to compare with (iii):
\begin{itemize}
\item[(viii')] For each \(x \in \overline{D(A)}\) and each \(\mu > 0\), there are \(0 < \mu_{1} = \mu_{1}(\mu) \leq \mu\), independent of \(x\), and \(\epsilon^{\mu, x} \in X\) with \(\|\epsilon^{\mu, x}\| < \mu\), such that
\[ (\mu_{1} \epsilon^{\mu, x} + x) \in \mathop{\rm Ran}(I - \mu_{1} A) \]
holds.
\end{itemize}
Kobayashi \cite{Kob} (see the book by Miyadera \cite[pp. 152-153]{Mi}, or the book by Lakshmikantham \cite[pp.
112-113]{La}) gave the following example, to which his theory applies but the Crandall-Liggett theory does not:
\begin{example} \label{ex1.1} \rm
Define an operator \(A: D(A) \subset \mathbb{R}^{2} \to \mathbb{R}^{2}\) by
\[ A \begin{pmatrix} x \\ y\end{pmatrix} \equiv \begin{pmatrix} y \\ -x \end{pmatrix} \]
for
\[ \begin{pmatrix} x \\ y \end{pmatrix} \in D(A) \equiv \big\{ \begin{pmatrix} x \\ y \end{pmatrix} \in\mathbb{R}^2 : x^2+y^2=1\big\}. \]
Here \((\mathbb{R}^{2}, \|.\|)\) is a real Hilbert space with the inner product \((u, v)\) of \(u\) and \(v\) for \(u, v \in \mathbb{R}^{2}\) and with the norm \(\|u\| = \sqrt{(u, u)}\) of \(u\) for \(u \in \mathbb{R}^{2}\). Then \(A\) satisfies (vi) but not (iii), and the equation \eqref{E:B} has a unique classical solution. For
\[ u_{0} = \begin{pmatrix} \cos(\theta_{0}) \\ \sin(\theta_{0}) \end{pmatrix} \in D(A), \]
the solution is
\[ u(t) \equiv \lim_{\lambda \to 0} \begin{pmatrix} \cos(\theta_{0} - t\frac{\arctan(\lambda)}{\lambda}) \\ \sin(\theta_{0} - t\frac{\arctan(\lambda)}{\lambda}) \end{pmatrix} = \begin{pmatrix} \cos(t) & \sin(t) \\ - \sin(t) & \cos(t) \end{pmatrix} u_{0}. \]
With
\[ A_{M} \equiv \begin{pmatrix} 0 & 1 \\ -1 & 0 \end{pmatrix}, \]
a matrix whose restriction to the unit circle is the matrix representation of \(A\), the solution also equals \(e^{t A_{M}} \equiv \sum_{n = 0}^{\infty} (tA_{M})^{n}/n!\), applied to \(u_{0}\), which is
\[ \begin{pmatrix} \cos(t) & \sin(t) \\ - \sin(t) & \cos(t) \end{pmatrix} u_{0}. \]
But this is a coincidence: for a general matrix \(S\), the operator \(e^{tS}\), which exists as an infinite series in \(tS\), does not in general leave the unit circle invariant. Note that \(A\) is not a linear operator, since \(D(A)\) is not a linear space. If \(A\) is defined on the unit sphere in \(\mathbb{R}^{3}\) with
\[ Au = \begin{pmatrix} y \\ -x \\ 0 \end{pmatrix}, \quad\mbox{for}\quad u = \begin{pmatrix} x \\ y \\ z \end{pmatrix} \in D(A), \]
the unit sphere in \(\mathbb{R}^{3}\), then for
\[ u_{0} = \begin{pmatrix} \cos(\theta_{0}) \\ \sin(\theta_{0}) \\ z \end{pmatrix} \in D(A), \]
the function
\[ \lim_{\lambda \to 0} \begin{pmatrix} \cos(\theta_{0} - t \frac{\arctan(\lambda)}{\lambda}) \\ \sin(\theta_{0} - t \frac{\arctan(\lambda)}{\lambda}) \\ z \end{pmatrix} = \begin{pmatrix} \cos(t) & \sin(t) & 0 \\ -\sin(t) & \cos(t) & 0 \\ 0 & 0 & 1 \end{pmatrix} u_{0}, \]
obtained from the Kobayashi theory, is the unique classical solution to the equation \eqref{E:B}, and
\[ \begin{pmatrix} \cos(t) & \sin(t) & 0 \\ - \sin(t) & \cos(t) & 0 \\ 0 & 0 & 1 \end{pmatrix} \]
are the special rotation matrices about the \(z\)-axis, preserving the length.
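The limits above are also easy to check numerically. The following sketch is ours and is for illustration only; the values of \(t\), \(\theta_{0}\), and the series truncation order are arbitrary choices. It compares the rotation by the angle \(t\arctan(\lambda)/\lambda\) with the rotation by \(t\), and verifies the series for \(e^{tA_{M}}\):
\begin{verbatim}
import math
import numpy as np

t, theta0 = 1.3, 0.7
u0 = np.array([math.cos(theta0), math.sin(theta0)])

def rot(a):
    # the (clockwise) rotation matrix appearing above
    return np.array([[math.cos(a), math.sin(a)],
                     [-math.sin(a), math.cos(a)]])

# rotation by t*arctan(lam)/lam converges to rotation by t
for lam in (1e-1, 1e-2, 1e-3):
    err = np.linalg.norm(rot(t * math.atan(lam) / lam) @ u0 - rot(t) @ u0)
    print(lam, err)  # err -> 0 as lam -> 0

# e^{t A_M} = sum_{n >= 0} (t A_M)^n / n!  equals the same rotation matrix
A_M = np.array([[0.0, 1.0], [-1.0, 0.0]])
expm = sum(np.linalg.matrix_power(t * A_M, n) / math.factorial(n)
           for n in range(30))
print(np.allclose(expm, rot(t)))  # True
\end{verbatim}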
\end{example}

For rotation matrices about a general axis $\hat{n} = \begin{pmatrix} n_{1} \\ n_{2} \\ n_{3} \end{pmatrix}$, a unit vector in \(\mathbb{R}^{3}\), the result also follows from the Kobayashi theory, in which the unique solution to \eqref{E:B} is given by the limit, as \(\lambda \to 0\) with \(f_{\lambda} = \arctan(\lambda)/\lambda\), of
{\scriptsize\[ \begin{pmatrix} \cos(t f_{\lambda}) + (1 - \cos(t f_{\lambda}))n_{1}^{2} & n_{3}\sin(t f_{\lambda}) + n_{1} n_{2} (1 - \cos(t f_{\lambda})) & -n_{2}\sin(t f_{\lambda}) + n_{1}n_{3}(1 - \cos(t f_{\lambda})) \\ -n_{3}\sin(t f_{\lambda}) + (1 - \cos(t f_{\lambda}))n_{1}n_{2} & \cos(t f_{\lambda}) + n_{2}^{2}(1 - \cos(t f_{\lambda})) & n_{1}\sin(t f_{\lambda}) + n_{2}n_{3}(1 - \cos(t f_{\lambda})) \\ n_{2}\sin(t f_{\lambda}) + (1 - \cos(t f_{\lambda}))n_{1}n_{3} & -n_{1}\sin(t f_{\lambda}) + n_{2}n_{3}(1 - \cos(t f_{\lambda})) & \cos(t f_{\lambda}) + n_{3}^{2}(1 - \cos(t f_{\lambda})) \end{pmatrix} u_{0}, \]}
which is equal to
{\scriptsize \[ \begin{pmatrix} \cos(t) + (1 - \cos(t))n_{1}^{2} & n_{3}\sin(t) + n_{1}n_{2}(1 - \cos(t)) & -n_{2}\sin(t) + n_{1}n_{3}(1 - \cos(t)) \\ -n_{3}\sin(t) + (1 - \cos(t))n_{1}n_{2} & \cos(t) + n_{2}^{2}(1 - \cos(t)) & n_{1}\sin(t) + n_{2}n_{3}(1 - \cos(t)) \\ n_{2}\sin(t) + (1 - \cos(t))n_{1}n_{3} & -n_{1}\sin(t) + n_{2}n_{3}(1 - \cos(t)) & \cos(t) + n_{3}^{2}(1 - \cos(t)) \end{pmatrix} u_{0}, \]}
where the associated matrices are rotations about the axis \(\hat{n}\) (see Section \ref{S:BB}). The general rotation matrices have important applications in physics (Altmann \cite{Al}, especially pages 73-75) and in the Global Positioning System (GPS) in civil engineering (Soler and Marshall \cite{So}, especially pages 30-31). Compare the derivation of the formula by the physicists \cite{Al} with ours. See Section \ref{S:BB} for details and more examples, including nonlinear, single-valued or multi-valued, finite- or infinite-dimensional, and time-nonautonomous ones, which cannot be derived by restriction as in Example \ref{ex1.1}. Those examples are interpreted as nonlinear, nonautonomous rotations, single-valued or multi-valued, of finite or infinite dimensions, evolving with time by satisfying \eqref{E:B} or \eqref{E:C} and preserving the length in a nonlinear and time-nonautonomous way. This seems a rather complete approach to rotation problems, compared with the approach of the physicists, Altmann \cite{Al}. The time-nonautonomous examples require a theory that we shall develop in this paper. An introduction to the background of the time-nonautonomous theory is presented below.

In the time-nonautonomous theory, by an evolution operator \(U(t, s)\), \(0 \leq s < t < T\), on \(C \subset X\), it is meant that \(U(s, s) = I\) (the identity operator) and \(U(t, s)U(s, r) = U(t, r)\) for \(0 \leq r \leq s \leq t \leq T\) hold, and that \(U(t, s)x\) for \(x \in C\) is continuous in the pair \((t, s)\) on the triangle \(0 \leq s \leq t \leq T\) \cite{Cran}. The time-nonautonomous operator \(A(t)\), associated with the evolution operator \(U(t, s)\), is defined as follows. Let \(T > 0\) and let \(A(t) : D(A(t)) \subset X \to X\) be a time-dependent, nonlinear, multi-valued operator that satisfies (ix), (x), and (xi) for each \(0 < t < T\) \cite{Cran,Mi}:
\begin{itemize}
\item[(ix)] \(\|u - v\| \leq \|(u - v) - \lambda (g - h)\|\) for all \(u, v \in D(A(t))\), \(g \in (A(t) - \omega)u\), \(h \in (A(t) - \omega)v\), all \(t \in [0, T]\), and all \(\lambda > 0\).
Or equivalently, \(\eta(g - h) \leq 0\) for some \(\eta \in G(u - v) \equiv \{\xi \in X^{*} : \|u - v\|^{2} = \xi(u - v) = \|\xi\|_{X^{*}}^{2}\}\), the duality map of \((u - v)\). Here \((X^{*}, \|.\|_{X^{*}})\) is the dual space of \(X\).
\item[(x)] The range condition: the range of \((I - \lambda A(t))\) contains the closure \(\overline{D(A(t))}\) of \(D(A(t))\) for small \(0 < \lambda < \lambda_{0}\) with \(\lambda_{0}\omega < 1\).
\item[(xi)] \(\overline{D(A(t))} = \overline{D}\) is independent of \(t\).
\end{itemize}
Assume further that \(A(t)\) has the \(t\)-dependence (xii) or (xiii).
\begin{itemize}
\item[(xii)] There are a continuous function \(f : [0, T] \to X\) and a monotone increasing function \(L : [0, \infty) \to [0, \infty)\), such that
\[ \|J_{\lambda}(t)x - J_{\lambda}(\tau)x\| \leq \lambda \|f(t) - f(\tau)\| L(\|x\|) \]
for \(0 < \lambda < \lambda_{0}\), \(0 \leq t, \tau \leq T\), and \(x \in \overline{D}\), where \(J_{\lambda}(t)x \equiv (I - \lambda A(t))^{-1}x\) exists for \(x \in \overline{D}\) by (ix) and (x).
\item[(xiii)] There are a continuous function \(f : [0, T] \to X\), which is of bounded variation on \([0, T]\), and a monotone increasing function \(L : [0, \infty) \to [0, \infty)\), such that
\[ \|J_{\lambda}(t)x - J_{\lambda}(\tau)x\| \leq \lambda \|f(t) - f(\tau)\| L(\|x\|) (1 + |A(\tau)x|) \]
for \(0 < \lambda < \lambda_{0}\), \(0 \leq t, \tau \leq T\), and \(x \in \overline{D}\). Here \(|A(\tau)x| \equiv \lim_{\lambda \to 0}\|\frac{(J_{\lambda}(\tau) - I)x}{\lambda}\|\) exists by \cite{Crand,We,Cran}.
\end{itemize}
Note that either (xii) or (xiii) implies (xi); see Crandall-Pazy \cite{Cran}. Define \(\hat{D}(A(t)) \equiv \{x \in \overline{D(A(t))} : |A(t)x| < \infty \}\), a generalized domain for \(D(A(t))\), introduced by Crandall \cite{Crand,Cran} and Westphal \cite{We}. Crandall and Pazy \cite{Cran} showed that \(\hat{D} \equiv \hat{D}(A(t))\) is constant in \(t\), that \(U(t, s)x \equiv \lim_{n \to \infty} \prod_{i=1}^{n} J_{\frac{t - s}{n}}(s + i \frac{t - s}{n})x\) exists for \(x \in \overline{D}\) and \(0 \leq s \leq t \leq T\) and is Lipschitz continuous in \(t\) for \(x \in \hat{D}\), and that \(U(t, s)\) is an evolution operator on \(\overline{D} = \overline{D(A(t))} = \overline{\hat{D}}\) satisfying
\[ \|U(t, s)x - U(t, s)y\| \leq e^{\omega(t - s)}\|x - y\| \]
for \(0 \leq s \leq t \leq T\) and \(x, y \in \overline{D}\). Further, it is shown in \cite{Cran} that \(U(t, s)x_{0}\) is a generalized solution to the time-dependent nonlinear equation
\begin{equation} \label{E:C}
\begin{aligned}
\frac{du}{dt} &\in A(t)u, \quad 0 \leq s < t < T, \\
u(s) &= x_{0}
\end{aligned}
\end{equation}
for \(x_{0} \in \overline{D}\), and that \(U(t, s)x_{0}\) for \(x_{0} \in \hat{D}\) is a strong solution if \(X\) is reflexive and \(A(t)\) is closed. The above result \cite{Cran} generalizes many previous results, which assume \(A(t)\) to be linear, \(t\)-independent, or single-valued, or place further restrictions on \(A(t)\) or \(X\); see \cite{Cran} for a discussion of these. This result also applies to time-dependent nonlinear parabolic boundary value problems with time-independent boundary conditions \cite{Cran}. More references on this subject can be found in \cite{Ba,Be,Cr,Cra,Cran,Crand,Go,Gol,Kat,Kob,Ko,La,Mi,Miy,Mo,Pa,Ta}. For problems with time-dependent boundary conditions, a theory was developed in \cite{Lin1,Lin4}: the results in \cite{Lin1} allow for time-dependent domains and strong solutions, and so apply to problems with time-dependent boundary conditions.
The paper \cite{Lin4} strengthened \cite{Lin1} by proving that not just a subsequence but the original sequence converges, and that the applications in \cite{Lin1} with space dimension equal to \(2\) or \(3\) are solvable numerically by the boundary element methods \cite{Che}. The condition introduced in \cite{Lin1} (see also \cite{Lin}), under which a strong solution exists, is called embeddedly quasi-demi-closedness and is weaker than continuity or demi-continuity \cite{Mi}. Its definition is as follows. Let \((Y, \|.\|_{Y})\) be a real Banach space with \((X, \|.\|)\) continuously embedded into it. The operator \(A(t)\) is embeddedly quasi-demi-closed if, whenever \(t_{n} \in [0, T] \to t\), \(x_{n} \in D(A(t_{n})) \to x\), and \(\|y_{n}\| \leq k\) for some \(y_{n} \in A(t_{n})x_{n}\), then \(x \in D(\eta \circ A(t))\) (that is, \(\eta(A(t)x)\) exists) and
\[ |\eta(y_{n_{k}}) - z| \to 0 \]
for some subsequence \(y_{n_{k}}\) of \(y_{n}\), for some \(z \in \eta(A(t)x)\), and for each \(\eta \in Y^{*} \subset X^{*}\), the real dual space of \(Y\).

The question arises: for nonlinear evolution equation problems, would it be possible to develop a theory that takes as its basis the generalized range condition (vi) of Kobayashi \cite{Kob} and the time-regulating conditions (xii) and (xiii) of Crandall-Pazy \cite{Cran}? This is what we intend to do in this paper. Examples are given in Section \ref{S:BB}. However, we should remark that our new examples here do not include applications from partial differential equations; this is because our examples require a uniform continuity of \(A(t)\) that partial differential operators do not satisfy.

In this paper, we shall use the difference equations method in \cite{Lin2,Lin3} to show that under various conditions related to (vi), (xii), and (xiii), a quantity \(V(t, s)x_{0}\) similar to
\[ U(t, s)x_{0} \equiv \lim_{n \to \infty}\prod_{i = 1}^{n} J_{\frac{t - s}{n}}(s + i \frac{t - s}{n})x_{0} \]
(in Crandall-Pazy \cite{Cran}) exists and is a so-called limit solution to the equation \eqref{E:C} for \(x_{0}\) in a generalized domain \(\hat{E}\), similar to \(\hat{D}(A(t))\). The limit solution is a strong solution if \(A(t)\) is embeddedly quasi-demi-closed. Furthermore, \(V(t, s)x_{0}\) is Lipschitz continuous in \(t\) for \(x_{0} \in \hat{E}\), and \(V(t, s)\) is an evolution operator on \(\overline{\hat{E}}\). Here \cite{Lin2,Lin3} proved the generation results in \cite{Cra,Kob,Cran} by the method of difference equations. Two remarks follow. Under a condition similar to (vi), a quantity similar to
\[ |A(t)x| \equiv \lim_{\lambda \to 0}\|\frac{(J_{\lambda}(t) - I)x}{\lambda}\| \]
in
\[ \hat{D}(A(t)) \equiv \{x \in \overline{D(A(t))}: |A(t)x| < \infty\} \]
(in Crandall-Pazy \cite{Cran}) does not necessarily exist, and so we generalize \(\hat{D}(A(t))\) to obtain \(\hat{E}\) by weakening \(\lim_{\lambda \to 0}\) to \(\limsup_{\lambda \to 0}\). A balance exists between the range condition (x) and the time-regulating condition (xii) or (xiii); this means that a range condition more general than (x), such as (R6), (R7), (R1), or (R2) (in Section \ref{S:ZA}), should be coupled with a time-regulating condition less general than (xiii), such as (T4) or (T1) (in Section \ref{S:ZA}), in developing the theory, unless an additional condition such as (R3) is assumed; and vice versa.

The rest of the paper is organized as follows. Sections \ref{S:ZA} and \ref{S:B} contain basic assumptions and some preliminaries, respectively.
Section \ref{S:ZB} contains the main results. Section \ref{S:C} contains some intermediate results. Section \ref{S:D} deals with the proof of the main results in Section \ref{S:ZB}. Finally, Section \ref{S:BB} concerns applications, which satisfy (R4), (R6), (T4), (R1), (R2), and (T1) (see Section \ref{S:ZA}) but do not satisfy (iii) in Crandall-Liggett \cite{Cra} or (x) in Crandall-Pazy \cite{Cran}.

\section{Basic assumptions} \label{S:ZA}
We make the following assumptions:
\begin{itemize}
\item[(A1)] Dissipativity, as stated in (ix) above: \(\|u - v\| \leq \|(u - v) - \lambda (g - h)\|\) for all \(u, v \in D(A(t))\), \(g \in (A(t) - \omega)u\), \(h \in (A(t) - \omega)v\), all \(t \in [0, T]\), and all \(\lambda > 0\). Or equivalently, \(\eta(g - h) \leq 0\) for some \(\eta \in G(u - v) \equiv \{\xi \in X^{*} : \|u - v\|^{2} = \xi(u - v) = \|\xi\|_{X^{*}}^{2}\}\), the duality map of \((u - v)\). Here \((X^{*}, \|.\|_{X^{*}})\) is the dual space of \(X\).
\item[(A2)] Constant domain, as stated in (xi) above: \(\overline{D(A(t))} = \overline{D}\) is independent of \(t\).
\end{itemize}

\subsection*{Generalized range conditions}
\begin{itemize}
\item[(R1)] There is a closed subset \(E\) in \(X\) such that \(E \supset \overline{D(A(t))}\) holds for all \(t\). For all \(x \in E\),
\[ \liminf_{\mu \to 0}\frac{d(\mathop{\rm Ran}(I - \mu A(t)), x)}{\mu} = 0 \]
holds uniformly in \(x\) and \(t\).
\item[(R2)] There is a closed subset \(E\) in \(X\) such that \(E \supset \overline{D(A(t))}\) holds for all \(t\). For each \(x \in E\) and each \(\mu > 0\), there is a \(0 < \mu_{1} \leq \mu\), which is independent of \(x, t\), such that
\[ (\mu_{1} \epsilon^{\mu, x, t} + x) \in \mathop{\rm Ran}(I - \mu_{1} A(t)) \]
holds for some \(\epsilon^{\mu, x, t} \in X\) with \(\|\epsilon^{\mu, x, t}\| < \mu\) for all \(x, t\).
\item[(R3)] (Additional property) The same as (R2), with the additional property
\[ \sum_{i = 1}^{n}\|\epsilon^{\mu, x_{i}, t_{i}} - \epsilon^{\mu, y_{i}, \tau_{i}}\| \leq k_{0} n \mu_{1} \]
for some \(k_{0} > 0\), and for all \(x_{i}, y_{i} \in E\) and all \(t_{i}, \tau_{i} < T\), where \(i \leq n \in \mathbb{N}\). We use \(\{x, \mu, \{\mu_{1}\}, \epsilon^{\mu, x, t}\}\) to denote the set of values satisfying (R2), with \(\mu \leq \lambda_{0}\), so that \(\mu_{1} \omega \leq \mu \omega \leq \lambda_{0} \omega < 1\).
\item[(R4)] There is a closed subset \(E\) in \(X\) such that \(E \supset \overline{D(A(t))} = \overline{D}\) for all \(0 \le t \le T\), and such that
\[ \frac{d(\mathop{\rm Ran}(I - \mu A(t)), x)}{\mu} \leq g_{0}(\mu) \]
holds uniformly for all \(x \in E\), \(0 \le t \le T\), for all \(0 < \mu \leq \mu_{0}\), for some \(\mu_{0} > 0\), and for some function \(g_{0}(\mu)\), where \(g_{0}(\mu) = c_{0}\mu\) for some constant \(c_{0} > 0\).
\item[(R5)] There is a closed subset \(E\) in \(X\) such that \(E \supset \overline{D(A(t))}\) for all \(0 \le t \le T\). For each \(x \in E\),
\[ (\mu \epsilon^{\mu, x, t} + x) \in \mathop{\rm Ran}(I - \mu A(t)) \]
holds for some \(\mu_{0} > 0\), for all \(0 < \mu \leq \mu_{0}\), \(0 \le t \le T\), and for some \(\epsilon^{\mu, x, t} \in X\), where for all \(0 < \mu \le \mu_{0}\), \(x \in E\), and \(0 \le t \le T\),
\[ \|\epsilon^{\mu, x, t}\| \le g_{1}(\mu) \]
holds with \(g_{1}(\mu) = g_{0}(\mu) = c_{0} \mu\). We use \(\{x, \mu_{0}, \mu, \epsilon^{\mu, x, t}\}\) to denote the set of variables in (R5).
Here we take \(\mu \leq \lambda_{0}\) so that \(\mu \omega \leq \lambda_{0} \omega < 1\).
\item[(R6)] There is a closed subset \(E\) in \(X\) such that \(E \supset \overline{D(A(t))}\) holds for all \(0 \le t \le T\), and
\[ \lim_{\mu \to 0}\frac{d(\mathop{\rm Ran}(I - \mu A(t)), x)}{\mu} = 0 \]
holds uniformly for all \(x \in E\) and \(0 \le t \le T\). Note that (R6) is weaker than (R4).
\item[(R7)] There is a closed subset \(E\) in \(X\) such that \(E \supset \overline{D(A(t))}\) holds for all \(0 \le t \le T\). For each \(x \in E\) and each \(\nu > 0\), there is a \(0 < \mu_{0} \le \nu\), which is independent of \(x, t\), such that for all \(0 < \mu \le \mu_{0} \le \nu\),
\[ (\mu \epsilon^{\nu, x, t} + x) \in \mathop{\rm Ran}(I - \mu A(t)) \]
holds for some \(\epsilon^{\nu, x, t} \in X\) with \(\|\epsilon^{\nu, x, t}\| \le \nu\) for all \(x, t\). We use \(\{x, \nu, \mu_{0}, \epsilon^{\nu, x, t}\}\) to denote the set of variables satisfying (R7). Here we take \(\nu \le \lambda_{0}\) so that \(\mu \omega \le \lambda_{0} \omega < 1\).
\smallskip
\end{itemize}
Lemma \ref{L:DA} below shows that (R1) implies (R2).

\subsection*{Time-regulating conditions}
\begin{itemize}
\item[(T1)] If \(x, y \in X\), \(0 \le t, \tau \le T\), and \(0 < \mu_{1} \le \mu < \lambda_{0}\) are such that \(J_{\mu_{1}}(t)x\) and \(J_{\mu_{1}}(\tau)y\) exist, then
\[ \|J_{\mu_{1}}(t)x - J_{\mu_{1}}(\tau)y\| \leq (1 - \mu_{1} \omega)^{-1}[\|x - y\| + \mu_{1}\|f(t) - f(\tau)\| L(\|y\|)] \]
or
\[ \|J_{\mu_{1}}(t)x - J_{\mu_{1}}(\tau)y\| \leq (1 - \mu_{1} \omega)^{-1}[\|x - y\| + \mu_{1}\|f(t) - f(\tau)\| L(\|J_{\mu_{1}}(\tau)y\|)] \]
holds, where \(f\) and \(L\) are as in (T4) below.
\item[(T2)] If \(x, y \in X\), \(0 \le t, \tau \le T\), and \(0 < \mu_{1} \le \mu < \lambda_{0}\) are such that \(J_{\mu_{1}}(t)x\) and \(J_{\mu_{1}}(\tau)y\) exist, then
\begin{align*}
& \|J_{\mu_{1}}(t)x - J_{\mu_{1}}(\tau)y\| \\
& \leq (1 - \mu_{1}\omega)^{-1} [\|x - y\| + \mu_{1}\|f(t) - f(\tau)\| L(\|y\|)(1 + \|\frac{J_{\mu_{1}}(\tau)y - y}{\mu_{1}}\|)]
\end{align*}
or
\begin{align*}
& \|J_{\mu_{1}}(t)x - J_{\mu_{1}}(\tau)y\|\\
&\leq (1 - \mu_{1}\omega)^{-1} [\|x - y\| + \mu_{1}\|f(t) - f(\tau)\| L(\|J_{\mu_{1}}(\tau)y\|)(1 + \|\frac{J_{\mu_{1}}(\tau)y - y}{\mu_{1}}\|)]
\end{align*}
holds, where \(f\) and \(L\) are as in (T4) below.
\item[(T3)] If \(x, y \in X\), \(0 \le t, \tau \le T\), and \(0 < \mu_{1} \le \mu < \lambda_{0}\) are such that \(J_{\mu_{1}}(t)x\) and \(J_{\mu_{1}}(\tau)y\) exist, then
\begin{align*}
\|J_{\mu_{1}}(t)x - J_{\mu_{1}}(\tau)y\| &\leq (1 - \mu_{1}\omega)^{-1} \big[\|x - y\| + \mu_{1}\|f(t) - f(\tau)\| L(\|y\|) \\
&\quad \times (1 + \limsup_{\mu \to 0} \|\frac{J_{\mu_{1}}(\tau)y - y}{\mu_{1}}\|)\big]
\end{align*}
or
\begin{align*}
\|J_{\mu_{1}}(t)x - J_{\mu_{1}}(\tau)y\| &\leq (1 - \mu_{1}\omega)^{-1} \big[\|x - y\| + \mu_{1}\|f(t) - f(\tau)\| L(\|J_{\mu_{1}}(\tau)y\|)\\
&\quad\times (1 + \limsup_{\mu \to 0} \|\frac{J_{\mu_{1}}(\tau)y - y}{\mu_{1}}\|)\big]
\end{align*}
holds, where \(f\) and \(L\) are as in (T4) below.
\item[(T4)] If \(x, y \in X\), \(0 \le t, \tau \le T\), and \(0 < \mu < \lambda_{0}\) are such that \(J_{\mu}(t)x\) and \(J_{\mu}(\tau)y\) exist, then
\[ \|J_{\mu}(t)x - J_{\mu}(\tau)y\| \leq (1 - \mu \omega)^{-1}[\|x - y\| + \mu \|f(t) - f(\tau)\|L(\|y\|)] \]
or
\[ \|J_{\mu}(t)x - J_{\mu}(\tau)y\| \leq (1 - \mu \omega)^{-1}[\|x - y\| + \mu \|f(t) - f(\tau)\|L(\|J_{\mu}(\tau)y\|)] \]
holds for some continuous function \(f : [0, T] \to X\) and for some monotone increasing function \(L : [0, \infty) \to [0, \infty)\). (So \(L(a)\) is finite for finite \(a \ge 0\).)
\item[(T5)] If \(x, y \in E\), \(0 \le t, \tau \le T\), and \(0 < \mu < \lambda_{0}\) are such that \(J_{\mu}(t)x\) and \(J_{\mu}(\tau)y\) exist, then
\begin{align*}
&\|J_{\mu}(t)x - J_{\mu}(\tau)y\| \\
&\leq (1 - \mu \omega)^{-1} [\|x - y\| + \mu \|f(t) - f(\tau)\|L(\|y\|) (1 + \|\frac{J_{\mu}(\tau)y - y}{\mu}\|)]
\end{align*}
or
\begin{align*}
&\|J_{\mu}(t)x - J_{\mu}(\tau)y\|\\
&\leq (1 - \mu \omega)^{-1} [\|x - y\| + \mu \|f(t) - f(\tau)\|L(\|J_{\mu}(\tau)y\|) (1 + \|\frac{J_{\mu}(\tau)y - y}{\mu}\|)]
\end{align*}
holds for the same \(L\) and \(f\) as in (T4), but with \(f\) also of bounded variation.
\item[(T6)] If \(x, y \in E\), \(0 \le t, \tau \le T\), and \(0 < \mu < \lambda_{0}\) are such that \(J_{\mu}(t)x\) and \(J_{\mu}(\tau)y\) exist, then
\begin{align*}
&\|J_{\mu}(t)x - J_{\mu}(\tau)y\| \\
&\leq (1 - \mu \omega)^{-1} [\|x - y\| + \mu \|f(t) - f(\tau)\| L(\|y\|)(1 + \limsup_{\mu \to 0} \|\frac{J_{\mu}(\tau)y - y}{\mu}\|)]
\end{align*}
or
\begin{align*}
&\|J_{\mu}(t)x - J_{\mu}(\tau)y\|\\
&\leq (1 - \mu \omega)^{-1} [\|x - y\| + \mu \|f(t) - f(\tau)\| L(\|J_{\mu}(\tau)y\|)(1 + \limsup_{\mu \to 0} \|\frac{J_{\mu}(\tau)y - y}{\mu}\|)]
\end{align*}
holds for the same \(L\) and \(f\) as in (T5).
\end{itemize}
Now, we state the following hypotheses:
\begin{itemize}
\item[(H1)] The dissipativity (A1), the constant domain (A2), one of the generalized range conditions (R4) or (R5), and one of the time-regulating conditions (T4), (T5), or (T6).
\item[(H2)] The dissipativity (A1), the constant domain (A2), the time-regulating condition (T1), and one of the generalized range conditions (R1) or (R2).
\item[(H3)] The dissipativity (A1), the constant domain (A2), the additional property (R3), and one of the time-regulating conditions (T2) or (T3).
\item[(H4)] The dissipativity (A1), the constant domain (A2), the time-regulating condition (T4), and one of the generalized range conditions (R4) or (R5).
\end{itemize}
Note that the union of the hypotheses (H2) and (H3) contains the hypotheses (H1) and (H4) as special cases. Note that, just as (A2) is implied by either (xii) or (xiii) in Crandall-Pazy \cite{Cran}, the condition (A2) in each of (H1), (H2), (H3), and (H4) is implied by the other conditions there (see Lemmas \ref{L:BA}, \ref{L:BB}, and \ref{L:BC} in Section \ref{S:B}). Assuming (A2) explicitly (as in Crandall-Pazy \cite{Cran}) is thus redundant, but it helps one see what the hypotheses (H1), (H2), (H3), and (H4) are. Note that (R4) and (R5) are motivated by (vi) and (viii'), respectively, and (T4) and (T5) (or (T6)) by (xii) and (xiii), respectively.
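In compact form (a reading aid only, equivalent to the statements above):
\begin{gather*}
\text{(H1)}: \ \text{(A1)}, \text{(A2)}, \text{(R4) or (R5)}, \text{(T4) or (T5) or (T6)}; \\
\text{(H2)}: \ \text{(A1)}, \text{(A2)}, \text{(T1)}, \text{(R1) or (R2)}; \\
\text{(H3)}: \ \text{(A1)}, \text{(A2)}, \text{(R3)}, \text{(T2) or (T3)}; \\
\text{(H4)}: \ \text{(A1)}, \text{(A2)}, \text{(T4)}, \text{(R4) or (R5)}.
\end{gather*}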
(R5) is weaker than (R4). For \(\overline{D(A(t))} = \overline{D}\) constant, we can take \(E = \overline{D}\) and \(\epsilon^{\mu, x, t} = 0\); this is the case under (x) and (A2).

\section{Main results} \label{S:ZB}
For \(A(t)\) satisfying (ix) and (R5), let \(x \in E\) be such that for \(\{x, \mu_{0}, \mu, \epsilon^{\mu, x, t}\}\),
\[ [[A(t)x]] \equiv \limsup_{\mu \to 0}\|\frac{J_{\mu}(t)(\mu \epsilon^{\mu, x, t} + x) - x}{\mu}\| < \infty\,. \]
Define \(\hat{E}(t)\) to be the set of all such \(x\)'s. For \(A(t)\) satisfying (H2) without (T1), or for \(A(t)\) satisfying (H3) without both (T2) and (T3), the set \(\hat{E}(t)\) is defined similarly: \(\hat{E}(t)\) is the set of all \(x \in E\) such that for \(\{x, \mu, \{\mu_{1}\}, \epsilon^{\mu, x, t}\}\),
\[ [[A(t)x]] \equiv \limsup_{\mu \to 0}\|\frac{J_{\mu_{1}}(t)(\mu_{1} \epsilon^{\mu, x, t} + x) - x}{\mu_{1}}\| < \infty\,. \]
For \(A(t)\) satisfying (H4) without (T4), a similar definition of \([[A(t)x]]\) is left to the reader. Here note that \(\limsup = \lim\) in the case of (x) in Crandall-Pazy \cite{Cran}. It is proved in Lemma \ref{L:BC} and the Remark (see Section \ref{S:B}) that \(\hat{E} \equiv \hat{E}(t)\) is constant in \(t\) and that
\[ \overline{\hat{E}} = \overline{D(A(t))} \equiv \overline{D}\,. \]

\noindent\textbf{Definition.} For \(x \in \hat{E}\) with \(A(t)\) satisfying (H1), let
\[ M(x) \equiv \sup_{0 \leq t \leq T}[[A(t)x]] \equiv \sup_{0 \leq t \leq T}\limsup_{\mu \to 0}\|\frac{J_{\mu}(t)(\mu \epsilon^{\mu, x, t} + x) - x}{\mu}\|. \]
A similar definition is given for \(A(t)\) satisfying (H2) or (H3), for which
\[ M(x) \equiv \sup_{0 \leq t \leq T}[[A(t)x]] \equiv \sup_{0 \leq t \leq T}\limsup_{\mu \to 0} \|\frac{J_{\mu_{1}}(t)(\mu_{1} \epsilon^{\mu, x, t} + x) - x}{\mu_{1}}\|. \]
The case of \(A(t)\) satisfying (H4) is similarly treated and is left to the reader. \(M(x)\) is uniformly finite for each \(x\) by Lemma \ref{L:BC}.

\begin{theorem} \label{T:B}
Under the hypothesis (H2) or (H3), \(\lim_{\lambda \to 0}u_{\lambda}(t, s; x_{0})\) exists for \(x_{0} \in \overline{\hat{E}}\) and is a so-called limit solution to the equation \eqref{E:C}. For \(x_{0} \in \hat{E}\), \(\lim_{\lambda \to 0}u_{\lambda}(t, s; x_{0})\) is a strong solution if \(A(t)\) is embeddedly quasi-demi-closed. Furthermore, \(U(t, s)\) defined by
\[ U(t, s)x_{0} \equiv \lim_{\lambda \to 0}u_{\lambda}(t, s; x_{0}) \]
is a nonlinear evolution operator on \(\overline{\hat{E}}\). Here \(u_{\lambda}(t, s; x_{0}) \equiv x_{0}\) for \(t = s\) and
\[ u_{\lambda}(t, s; x_{0}) \equiv x^{\lambda}_{m}(s; x_{0}) \]
for \(t \in (s + t^{\lambda}_{m - 1}, s + t^{\lambda}_{m}] \cap (s, T]\), where \(m = 1, 2, 3, \dots, N^{\lambda}\), and \(x^{\lambda}_{m}(s; x_{0}) = x^{\lambda}_{m}\) and \(N^{\lambda}\) come from Lemma \ref{L:AE}.
\end{theorem}

\begin{corollary} \label{C:A}
If a multi-valued nonlinear operator \(A: D(A) \subset X \to X\) is dissipative and satisfies
\[ \lim_{\lambda \to 0}\frac{d(\mathop{\rm Ran}(I - \lambda A), x)}{\lambda} = 0 \]
uniformly for \(x \in \overline{D(A)}\), then \(A\) generates a nonlinear contraction semigroup.
\end{corollary}
In some sense, Corollary \ref{C:A} is a result that lies between the theorem of Crandall-Liggett \cite{Cra} and that of Kobayashi \cite{Kob}.
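\noindent\textbf{Remark.} The operator of Example \ref{ex1.1} illustrates Corollary \ref{C:A}; the short computation below is supplied here for illustration. For \(u = \begin{pmatrix} x \\ y \end{pmatrix}\) on the unit circle, \((I - \lambda A)u = \begin{pmatrix} x - \lambda y \\ y + \lambda x \end{pmatrix}\) has norm \(\sqrt{1 + \lambda^{2}}\), so that \(\mathop{\rm Ran}(I - \lambda A)\) is the circle of radius \(\sqrt{1 + \lambda^{2}}\) and
\[ \frac{d(\mathop{\rm Ran}(I - \lambda A), u)}{\lambda} = \frac{\sqrt{1 + \lambda^{2}} - 1}{\lambda} \leq \frac{\lambda}{2} \to 0 \]
uniformly for \(u \in \overline{D(A)}\), while \(A\) is dissipative since \((Au - Av, u - v) = 0\) for \(u, v \in D(A)\). Thus Corollary \ref{C:A} applies, whereas (iii) fails because no point of the unit circle lies in \(\mathop{\rm Ran}(I - \lambda A)\).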
\section{Preliminaries} \label{S:B}
For the rest of this article, \(K\) denotes a generic constant, whose value may change from one occurrence to the next. The following proofs of Lemmas \ref{L:BA}, \ref{L:BB}, and \ref{L:BC} are given for \(A(t)\) satisfying (A1) and (R5). For \(A(t)\) satisfying the other hypotheses, the proofs are similar and left to the reader.

\begin{lemma} \label{L:BA}
\(\hat{E}(t) \subset \overline{D(A(t))}\) holds.
\end{lemma}
\begin{proof}
As in \cite[Page 435]{Crand}, for \(x \in \hat{E}(t)\), the definition of \(\hat{E}(t)\) implies
\begin{align*}
&\limsup_{\mu \to 0}\|J_{\mu}(t)(\mu \epsilon^{\mu, x, t} + x) - x\|\\
&\leq \big(\limsup_{\mu \to 0}\|\frac{J_{\mu}(t)(\mu \epsilon^{\mu, x, t} + x) - x}{\mu}\|\big) \limsup_{\mu \to 0}\mu = 0\,.
\end{align*}
Since \(J_{\mu}(t)(\mu \epsilon^{\mu, x, t} + x) \in D(A(t))\), we have \(x \in \overline{D(A(t))}\) and \(\hat{E}(t) \subset \overline{D(A(t))}\).
\end{proof}

\begin{lemma} \label{L:BB}
For \(\{x, \mu_{0}, \mu, \epsilon^{\mu, x, t}\}\) with \(x \in D(A(t))\),
\[ \|J_{\mu}(t)(\mu \epsilon^{\mu, x, t} + x) - x\| \leq (1 - \mu \omega)^{-1}(\mu g_{1}(\mu) + \mu \inf_{y \in A(t)x}\|y\|), \]
\([[A(t)x]] \leq \inf_{y \in A(t)x}\|y\|\), and \(D(A(t)) \subset \hat{E}(t)\).
\end{lemma}
\begin{proof}
For \(x \in D(A(t))\), we have
\begin{align*}
\|J_{\mu}(t)(\mu\epsilon^{\mu, x, t} + x) - x\| &= \|J_{\mu}(t)(\mu \epsilon^{\mu, x, t} + x) - J_{\mu}(t)(I - \mu A(t))x\|\\
&\leq (1 - \mu\omega)^{-1}(\mu g_{1}(\mu) + \mu \inf_{y \in A(t)x}\|y\|),
\end{align*}
which gives \(x \in \hat{E}(t)\). The proof is complete.
\end{proof}

\noindent\textbf{Remark.} For \(\{x, \mu, \{\mu_{1}\}, \epsilon^{\mu, x, t}\}\) with \(x \in D(A(t))\),
\[ \|J_{\mu_{1}}(t)(\mu_{1}\epsilon^{\mu, x, t} + x) - x\| \leq (1 - \mu_{1}\omega)^{-1}(\mu_{1}\mu + \mu_{1}\inf_{y \in A(t)x}\|y\|), \]
\([[A(t)x]] \leq \inf_{y \in A(t)x}\|y\|\), and \(D(A(t)) \subset \hat{E}(t)\). The case where \(\{x, \nu, \mu_{0}, \mu, \epsilon^{\nu, x, t}\}\) is given is similarly treated.

\begin{lemma} \label{L:BC}
The set \(\hat{E} \equiv \hat{E}(t)\) is constant in \(t\), and \(\overline{\hat{E}} = \overline{D(A(t))} = \overline{D}\) holds, if (T4), (T5), or (T6) holds.
\end{lemma}
\begin{proof}
As in \cite[Page 63]{Cran}, applying (R5) to (T4), dividing the inequality in (T4) by \(\mu\), and letting \(\mu \to 0\), we have
\begin{align*}
&\limsup_{\mu \to 0}\|\frac{J_{\mu}(t)(\mu \epsilon^{\mu, x, t} + x) - x}{\mu}\|\\
&\le \limsup_{\mu \to 0}\|\frac{J_{\mu}(\tau)(\mu \epsilon^{\mu, x, \tau} + x) - x}{\mu}\| + \|f(t) - f(\tau)\| \limsup_{\mu \to 0}L(\|\mu \epsilon^{\mu,x, \tau} + x\|)
\end{align*}
or
\begin{align*}
\limsup_{\mu \to 0}\|\frac{J_{\mu}(t)(\mu \epsilon^{\mu, x, t} + x) - x}{\mu}\| &\le\limsup_{\mu \to 0}\|\frac{J_{\mu}(\tau)(\mu \epsilon^{\mu, x, \tau} + x) - x}{\mu}\| \\
&\quad + \|f(t) - f(\tau)\| \limsup_{\mu \to 0}L(\|J_{\mu}(\tau)(\mu \epsilon^{\mu, x, \tau} + x)\|);
\end{align*}
similarly, from (R5) and (T5) or (T6), we have
\begin{align*}
&\limsup_{\mu \to 0}\|\frac{J_{\mu}(t)(\mu \epsilon^{\mu, x, t} + x) - x}{\mu}\|\\
&\leq \limsup_{\mu \to 0}\|\frac{J_{\mu}(\tau)(\mu \epsilon^{\mu, x, \tau} + x) - x}{\mu}\| + \|f(t) - f(\tau)\| \limsup_{\mu \to 0} L(\|\mu \epsilon^{\mu, x, \tau} + x\|)\\
&\quad \times (1 +\limsup_{\mu \to 0} \|\frac{J_{\mu}(\tau)(\mu \epsilon^{\mu, x, \tau} + x) - x}{\mu}\|)
\end{align*}
or
\begin{align*}
\limsup_{\mu \to 0}\|\frac{J_{\mu}(t)(\mu \epsilon^{\mu, x, t} + x) - x}{\mu}\| &\leq \limsup_{\mu \to 0}\|\frac{J_{\mu}(\tau)(\mu \epsilon^{\mu, x, \tau} + x) - x}{\mu}\| \\
&\quad +\|f(t) - f(\tau)\| \limsup_{\mu \to 0} L(\|J_{\mu}(\tau)(\mu \epsilon^{\mu, x, \tau} + x)\|)\\
&\quad \times (1 +\limsup_{\mu \to 0} \|\frac{J_{\mu}(\tau)(\mu \epsilon^{\mu, x, \tau} + x) - x}{\mu}\|).
\end{align*}
Each inequality above implies that \(\hat{E}\) is constant in \(t\). Finally, use Lemmas \ref{L:BA} and \ref{L:BB}. Here note that if
\[ \limsup_{\mu \to 0}\|\frac{J_{\mu}(\tau)(\mu \epsilon^{\mu, x, \tau} + x) - x}{\mu}\| \]
is finite, then
\[ \limsup_{\mu \to 0}\|J_{\mu}(\tau)(\mu \epsilon^{\mu, x, \tau} + x)\| = \|x\| \]
is finite.
\end{proof}

\noindent\textbf{Remark.} To prove Lemma \ref{L:BC} in the case of \(A(t)\) satisfying (H2), (H3), or (H4), we employ (T1), (T2) or (T3), or (T4), respectively.
\smallskip

\noindent\textbf{Remark.} We can take \(E = \overline{D}\) and \(\epsilon^{\mu, x, t} = 0\) in (R5) if (x) holds; this is the case in Crandall-Pazy \cite{Cran}, where note that \(\limsup = \lim\).
\smallskip

As in \cite{Lin2}, we now use the theory of difference equations \cite{Mic}. Let \(b : D(b) \subset \mathbb{R} \to \mathbb{R}\) be a real-valued function defined on \(D(b) = \mathbb{N}\cup \{0\}\). Let \(b_{n} = b(n)\) for \(n \in D(b)\), and call \(\{b_{n}\} = \{b_{n}\}_{n = 0}^{\infty}\) a sequence of real numbers. Here \(b_{n} \in \mathbb{R}\) is the \(n\)-th term or the general term of the sequence \(\{b_{n}\}_{n = 0}^{\infty}\). For such a sequence \(\{b_{n}\}\), we define \(b_{n} \equiv 0\) for negative integers \(n\), for later use. Let \(S\) be the set of all such sequences. (Thus, for \(\{a_{n}\} \in S\), \(0 = a_{-1} = a_{-2} = \dots\).) Define a right shift operator \(E : S \to S\) by
\[ E\{b_{n}\} \equiv \{d_{n}\} \]
for \(\{b_{n}\} = \{b_{n}\}_{n = 0}^{\infty} \in S\), where \(\{d_{n}\} = \{d_{n}\}_{n = 0}^{\infty} \in S\) with \(d_{n} = b_{n + 1}\); thus
\[ E\{b_{n}\} = E\{b_{n}\}_{n = 0}^{\infty} = \{d_{n}\} = \{d_{n}\}_{n = 0}^{\infty} = \{b_{n + 1}\} = \{b_{n + 1}\}_{n = 0}^{\infty}.
\]
For \(c \in \mathbb{R}\) and \(c \ne 0\), define the operator \((E - c)^{*} : S \to S\) by
\[ (E - c)^{*}\{b_{n}\}_{n = 0}^{\infty} \equiv \{a_{n}\}_{n = 0}^{\infty} \]
for \(\{b_{n}\}_{n = 0}^{\infty} \in S\), where \(a_{0} \equiv 0\) and
\[ a_{n} \equiv c^{n}\sum_{i = 0}^{n - 1} \frac{b_{i}}{c^{i + 1}} \]
for \(n = 1, 2, 3, \dots\). Here and for the rest of the paper, the index range \(n \in \{0\}\cup \mathbb{N}\) will be suppressed for convenience. Thus
\[ (E - c)^{*}\{b_{n}\} \equiv \{a_{n}\}. \]
It will be seen below that \((E - c)^{*}\) acts approximately as the inverse of \((E - c)\). We also define the left shift operator \(E^{\#}\) by
\[ E^{\#}\{b_{n}\} \equiv \{b_{n - 1}\} \]
for \(\{b_{n}\} = \{b_{n}\}_{n = 0}^{\infty} \in S\). Here note that \(0 = b_{-1} = b_{-2} = b_{-3} = \dots\). Similarly, define
\[ E^{i \#}\{b_{n}\} \equiv (E^{\#})^{i}\{b_{n}\} = \{b_{n - i}\} \]
for \(\{b_{n}\} \in S\). Note that \(b_{n - i} = 0\) for \(n < i\). The operator
\[ (E - c)^{i *} \equiv ((E - c)^{*})^{i} \]
for \(i \in \mathbb{N}\) is defined in the obvious way. For convenience, we also define
\[ (E - c)^{0}\{b_{n}\} \equiv \{b_{n}\} \]
for \(\{b_{n}\} \in S\). For later use, we collect, from \cite{Lin2, Lin3}, the following results, except for Proposition \ref{P:ZA}, which is new and will be proved below.

\begin{lemma} \label{L:D}
Let \(\{b_{n}\}_{n = 0}^{\infty}, \{d_{n}\}_{n = 0}^{\infty}\) be two sequences of real numbers, with the general terms \(b_{n}\) and \(d_{n}\), respectively. Then the following hold:
\begin{gather*}
(E - c)^{*}(E - c)\{b_{n}\} = \{b_{n} - c^{n}b_{0}\},\\
(E - c)(E - c)^{*}\{b_{n}\} = \{b_{n}\}, \\
(E - c)^{*}\{b_{n}\} \leq (E - c_{1})^{*}\{b_{n}\}
\end{gather*}
for \(0 < c \leq c_{1}\) and positive \(\{b_{n}\}\), and
\[ (E - c)^{*}\{b_{n}\} \leq (E - c)^{*}\{d_{n}\} \quad \text{for } c > 0 \quad \text{and } \{b_{n}\} \leq \{d_{n}\}. \]
\end{lemma}
\noindent\textbf{Remark.} Here \(\{b_{n}\} \leq \{d_{n}\}\) means \(b_{n} \leq d_{n}\) for \(n = 0, 1, 2, \dots\).

\begin{proposition} \label{P:A}
Let \(\xi, c \in \mathbb{R}\), \(d = 1 - c\), \(c \ne 1\), and \(c \ne 0\). Let \(\{n\}_{n = 0}^{\infty}\), \(\{c^{n}\}_{n = 0}^{\infty}\), \(\{\xi\}_{n = 0}^{\infty}\) be three sequences of real numbers, with the general terms \(n\), \(c^{n}\), and \(\xi\), respectively. Then the following equalities hold:
\begin{gather*}
(E - c)^{*}\{n\} = \{\frac{n}{d} - \frac{1}{d^{2}} + \frac{c^{n}}{d^{2}}\},\\
(E - c)^{*}\{\xi\} = \{\frac{\xi}{d} - \frac{\xi c^{n}}{d}\},\\
(E - c)^{i *}\{c^{n}\} = \{\binom{n}{i}c^{n - i}\}.
\end{gather*}
Here \(i = 0, 1, 2, \dots\), and \(\binom{n}{i} \equiv 0\) for \(n = 0\) or \(n < i\).
\end{proposition}

\begin{proposition} \label{P:B}
Let \(\xi, c \in \mathbb{R}\), \(d = 1 - c\), \(c \ne 1\), and \(c \xi \ne 0\). Let \(\{n \xi^{n}\}_{n = 0}^{\infty}\), \(\{\xi^{n}\}_{n = 0}^{\infty}\), and \(\{(c \xi)^{n}\}_{n = 0}^{\infty}\) be three sequences of real numbers, with the general terms \(n \xi^{n}\), \(\xi^{n}\), and \((c \xi)^{n}\), respectively. Then the following equalities hold:
\begin{gather*}
(E - c \xi)^{*}\{n \xi^{n}\} = \{(\frac{n\xi^{n}}{d} - \frac{\xi^{n}}{d^{2}} + \frac{c^{n}\xi^{n}}{d^{2}})\frac{1}{\xi}\}, \\
(E - c\xi)^{*}\{\xi^{n}\} = \{(\frac{\xi^{n}}{d} - \frac{c^{n}\xi^{n}}{d})\frac{1}{\xi}\}, \\
(E - c \xi)^{i *}\{(c \xi)^{n}\} = \{\binom{n}{i}(c \xi)^{n - i}\}.
\end{gather*}
Here \(i = 0, 1, 2, \dots\), and \(\binom{n}{i} \equiv 0\) for \(n = 0\) or \(n < i\).
\end{proposition}
\noindent\textbf{Remark.} In \cite{Lin2}, the last equality in Proposition \ref{P:A} or \ref{P:B} carries the restriction \(i \leq n\) on its right side. But, by the proof in \cite{Lin2}, this restriction is unnecessary if we use the convention \(\binom{n}{i} = 0\) for \(n = 0\) or \(n < i\). Thus
\[ \{a_{n}\} = \{a_{n}\}_{n = 0}^{\infty} \equiv \{\binom{n}{2}(c \xi)^{n - 2}\} = \{\binom{n}{2}(c \xi)^{n - 2}\}_{n = 0}^{\infty} \]
is a sequence with \(0 = a_{1} = a_{0} = a_{-1} = a_{-2} = \dots\).

\begin{lemma} \label{L:AB}
For \(\alpha, \beta > 0\), \(\alpha + \beta = 1\), and \(m \in \mathbb{N}\),
\[ ((E - \beta \gamma)^{*})^{m}\{n \gamma^{n}\} = \{\frac{n \gamma^{n}}{\alpha^{m}}\frac{1}{\gamma^{m}} - \frac{m \gamma^{n}}{\alpha^{m + 1}}\frac{1}{\gamma^{m}} + (\sum_{i = 0}^{m - 1} \binom{n}{i}\frac{\beta^{n - i}}{\alpha^{m + 1 - i}}(m - i)\frac{1}{\gamma^{m}}) \gamma^{n}\}. \]
\end{lemma}

\begin{lemma} \label{L:AC}
With the notation in Proposition \ref{P:B},
\begin{align*}
((E - c \xi)^{*})^{j}\{\xi^{n}\} &= \{\frac{\xi^{n}}{(d \xi)^{j}} - \sum_{i = 0}^{j - 1}\binom{n}{i}(c \xi)^{n - i}\frac{1}{(d \xi)^{j - i}}\}\\
&= \{(\frac{1}{d^{j}} - \frac{1}{d^{j}}\sum_{i = 0}^{j - 1}\binom{n}{i} c^{n - i}d^{i})\xi^{n - j}\} \\
&= \{(\frac{1}{d^{j}}\sum_{i = j}^{n}\binom{n}{i}c^{n - i}d^{i})\xi^{n - j}\}
\end{align*}
for \(j \in \mathbb{N}\), \(\xi, c \in \mathbb{R}\), \(d = 1 - c\), \(c \ne 1\), \(c \xi \ne 0\).
\end{lemma}

\begin{lemma} \label{L:AH}
For \(\alpha > 0\), \(0 < \beta = 1 - \alpha\), \(\gamma > 1\), and \(\{a_{n}\}\) a sequence in \(S\), the following hold:
\begin{align*}
((E - \gamma \beta)^{*}E)\{a_{n}\} &= (E - \gamma \beta)^{*}((E - \gamma \beta) + \gamma \beta)\{a_{n}\}\\
&= \{a_{n}\} - \{(\gamma \beta)^{n}a_{0}\} + (\gamma \beta)(E - \gamma \beta)^{*}\{a_{n}\},
\end{align*}
the sequence \((E - \gamma \beta)^{*}\{a_{n}\}\) has first term zero (corresponding to \(n = 0\)),
\begin{gather*}
((E - \gamma \beta)^{*}E)^{m}\{n \gamma^{n}\} = \sum_{i = 0}^{m}\binom{m}{i}((\gamma \beta)(E - \gamma \beta)^{*})^{i}\{n \gamma^{n}\}, \\
((E - \gamma \beta)^{*}E)^{m}\{n^{2}\gamma^{n}\} = \sum_{i = 0}^{m}\binom{m}{i}((\gamma \beta)(E - \gamma \beta)^{*})^{i}\{n^{2} \gamma^{n}\},
\end{gather*}
\begin{align*}
\sum_{i = 0}^{m - 1}((\gamma \alpha)(E - \gamma \beta)^{*}E)^{i}\{(\gamma \beta)^{n}\} &= \{\gamma^{n}\sum_{i = 0}^{m - 1}\gamma^{i}\beta^{n} \binom{n + i - 1}{i}\alpha^{i}\} \\
&\leq \{\gamma^{n} \gamma^{m}\sum_{i = 0}^{m - 1}\alpha^{i} \binom{n + i - 1}{i} \beta^{n}\},
\end{align*}
and
\begin{align*}
(\gamma \alpha)^{j - 1}((E - \gamma \beta)^{*}E)^{j}\{\gamma^{n}\} &= \{\alpha^{-1}\gamma^{j - 1}\gamma^{n}[1 - \sum_{i = 1}^{j}\binom{n + i - 2}{i - 1} \alpha^{i - 1}\beta^{n}]\} \\
& = \{\alpha^{-1}\gamma^{j - 1}\gamma^{n}(\sum_{i = j + 1}^{\infty}\binom{n + i - 2}{i - 1}\alpha^{i - 1}\beta^{n})\} \\
&\leq \{\alpha^{-1}\gamma^{j - 1}\gamma^{n}\}
\end{align*}
for \(m = 0, 1, 2, 3, \dots\), and \(j = 1, 2, 3, \dots\).
\end{lemma}
\noindent\textbf{Remark.} The fifth relation in the above lemma, although not proved in \cite{Lin3}, can be proved in the same way as the fourth.
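\noindent\textbf{Remark.} Since \((E - c)^{*}\) is given by an explicit finite sum, the identities above are easy to test numerically. The following sketch is ours and is for illustration only; the truncation length, the value of \(c\), and the test sequence are arbitrary choices. It checks the first two identities of Lemma \ref{L:D}:
\begin{verbatim}
import numpy as np

N, c = 12, 0.6                  # truncation length and any c != 0, 1
b = np.arange(N, dtype=float) ** 2 + 1.0   # a test sequence {b_n}

def star(c, b):
    # (E - c)^*: a_0 = 0, a_n = c^n * sum_{i=0}^{n-1} b_i / c^{i+1}
    a = np.zeros_like(b)
    for n in range(1, len(b)):
        a[n] = c ** n * sum(b[i] / c ** (i + 1) for i in range(n))
    return a

def E(b):
    # right shift: (E{b_n})_n = b_{n+1}; the last term is lost by truncation
    return np.append(b[1:], 0.0)

a = star(c, b)
# (E - c)(E - c)^* {b_n} = {b_n}, checked away from the truncated tail
print(np.allclose((E(a) - c * a)[:N - 1], b[:N - 1]))        # True
# (E - c)^*(E - c) {b_n} = {b_n - c^n b_0}
print(np.allclose(star(c, E(b) - c * b)[:N - 1],
                  (b - c ** np.arange(N) * b[0])[:N - 1]))   # True
\end{verbatim}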
\begin{proposition} \label{P:ZA}
The following hold, where \(\beta, \gamma > 0\), \(\alpha + \beta = 1\), and \(m \in \mathbb{N}\):
\begin{gather*}
(E - \beta)^{*}\{n^{2}\} = \{\frac{n^{2}}{\alpha} - \frac{2n}{\alpha^{2}} + \frac{1}{\alpha^{2}} - \frac{1}{\alpha^{2}}\beta^{n}\}, \\
(E - \beta \gamma)^{*}\{n^{2}\gamma^{n}\} = \gamma^{n - 1}(E - \beta)^{*}\{n^{2}\},
\end{gather*}
\begin{align*}
(E - \beta)^{m *}\{n^{2}\} &= \big\{\frac{n^{2}}{\alpha^{m}} - \frac{(2m)n}{\alpha^{m + 1}} + (\frac{m(m - 1)}{\alpha^{m + 2}} + \frac{m}{\alpha^{m + 1}})\\
&\quad - \sum_{j = 0}^{m - 1}(\frac{(m - j)(m - j - 1)}{\alpha^{m - j + 2}} + \frac{(m - j)}{\alpha^{m - j + 1}})\binom{n}{j}\beta^{n - j}\big\},
\end{align*}
and
\[ (E - \beta \gamma)^{m *}\{n^{2}\gamma^{n}\} = \gamma^{n - m}(E - \beta)^{m *}\{n^{2}\}. \]
\end{proposition}
\begin{proof}
By definition and Proposition \ref{P:A}, we have
\[ \{\beta^{n}(\frac{1}{\beta^{2}} + \frac{2}{\beta^{3}} + \dots + \frac{n - 1}{\beta^{n}})\} = (E - \beta)^{*}\{n\} = \{\frac{n}{\alpha} - \frac{1}{\alpha^{2}} + \frac{1}{\alpha^{2}}\beta^{n}\}. \]
Differentiation of the above with respect to \(\beta\) gives the first identity in the proposition. The second identity follows easily from the definition. The first and second identities give
\[ (E - \beta \gamma)^{*}\{n^{2} \gamma^{n}\} = \{(\frac{n^{2}\gamma^{n}}{\alpha} - \frac{2n \gamma^{n}}{\alpha^{2}} + \frac{1}{\alpha^{2}}\gamma^{n} - \frac{1}{\alpha^{2}}(\beta \gamma)^{n})\frac{1}{\gamma}\}. \]
Applying \((E - \beta \gamma)^{*}\) to both sides and using Proposition \ref{P:B}, we have
\begin{align*}
&(E - \beta \gamma)^{2 *}\{n^{2} \gamma^{n}\} \\
&= \{(\frac{n^{2}}{\alpha^{2}} - \frac{4n}{\alpha^{3}} + (\frac{2}{\alpha^{4}} + \frac{2}{\alpha^{3}}) - (\frac{2}{\alpha^{4}} + \frac{2}{\alpha^{3}})\beta^{n} - \frac{1}{\alpha^{2}}\binom{n}{1}\beta^{n - 1})\gamma^{n} \frac{1}{\gamma^{2}}\}.
\end{align*}
Repeating this operation gives
\begin{align*}
(E - \beta \gamma)^{3 *}\{n^{2} \gamma^{n}\} &= \{(\frac{n^{2}}{\alpha^{3}} - \frac{6n}{\alpha^{4}} + (\frac{6}{\alpha^{5}} + \frac{3}{\alpha^{4}}) - (\frac{6}{\alpha^{5}} + \frac{3}{\alpha^{4}}) \beta^{n} \\
&\quad - (\frac{2}{\alpha^{4}} + \frac{2}{\alpha^{3}})\binom{n}{1}\beta^{n - 1} - \frac{1}{\alpha^{2}}\binom{n}{2}\beta^{n - 2})\gamma^{n}\frac{1}{\gamma^{3}}\}.
\end{align*}
Continuing in this way, we are led to, for \(m \in \mathbb{N}\),
\begin{align*}
(E - \beta \gamma)^{m *}\{n^{2} \gamma^{n}\} & = \big\{(\frac{n^{2}}{\alpha^{m}} - \frac{(2m)n}{\alpha^{m + 1}} + (\frac{\chi_{m}}{\alpha^{m + 2}} + \frac{m}{\alpha^{m + 1}})\\
&\quad - \sum_{i = 1}^{m}(\frac{\chi_{i}}{\alpha^{i + 2}} + \frac{i}{\alpha^{i + 1}}) \binom{n}{m - i}\beta^{n - (m - i)})\gamma^{n}\frac{1}{\gamma^{m}}\big\},
\end{align*}
where \(\chi_{m}\) satisfies \(\chi_{0} = 0\), \(\chi_{1} = 0\), \(\chi_{2} = 2\), and
\[ \chi_{m} = \chi_{m - 1} + 2(m - 1). \]
The theory of difference equations gives \(\chi_{m} = m(m - 1)\), which, together with the substitution \(j = m - i\) in the long identity above, gives the last two identities in the proposition.
\end{proof}

\section{Intermediate results under hypothesis (H1)} \label{S:C}
For the rest of the paper, we shall assume that the function \(L\) in (T4) and its analogues ((T5), (T6), and so on) takes the form
\[ L(\|y\|). \]
The proof for the other case, where \(L(\|J_{\mu}(\tau)y\|)\) appears, is similar.
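\noindent\textbf{Remark.} The iteration constructed in Lemma \ref{L:CB} below may be illustrated in the simplest concrete setting; the following choices are ours, for illustration only. Take \(X = \mathbb{R}\) and \(A(t)u = -u + f(t)\) with \(f\) continuous and of bounded variation. Then \(A(t)\) is dissipative with \(\omega = 0\), the resolvent \(J_{\mu}(t)x = (x + \mu f(t))/(1 + \mu)\) is defined on all of \(X\), (R5) holds with \(\epsilon^{\mu, x, t} = 0\), and (T4) holds with \(L \equiv 1\) (constant) and the same \(f\); the iteration then reduces to the implicit Euler scheme. The sketch below compares the iterates with the exact solution of \(u' = -u + \sin t\), \(u(0) = 1\):
\begin{verbatim}
import math

T, u0 = 2.0, 1.0
f = math.sin                    # forcing term, chosen for illustration

def exact(t):
    # solution of u' = -u + sin t, u(0) = u0
    return u0 * math.exp(-t) + (math.sin(t) - math.cos(t) + math.exp(-t)) / 2

for n in (10, 100, 1000):
    mu = T / n                  # uniform step, t_i = i * mu
    x = u0
    for i in range(1, n + 1):
        # resolvent step x_i = J_mu(t_i)x_{i-1} = (x_{i-1} + mu*f(t_i))/(1 + mu)
        x = (x + mu * f(i * mu)) / (1.0 + mu)
    print(n, abs(x - exact(T)))  # the error decreases like mu as mu -> 0
\end{verbatim}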
\begin{lemma} \label{L:CB}
Under (A1) and (R5), for \(x_{0} \in E\) and \(0 < \mu \leq \mu_{0}\), there are \(x^{\mu}_{i} = x^{\mu}_{i}(x_{0}) \in D(A(t_{i}))\), depending on \(x_{0}\), and \(\epsilon^{\mu, x^{\mu}_{i - 1}, t_{i}} \in X\), \(i = 0, 1, 2, \dots\), such that
\[ x^{\mu}_{i} = J_{\mu}(t_{i})(\mu \epsilon^{\mu, x^{\mu}_{i - 1}, t_{i}} + x^{\mu}_{i - 1}) \]
holds for all \(0 \leq t_{i} \leq T\) with \(\|\epsilon^{\mu, x^{\mu}_{i - 1}, t_{i}}\| \le g_{1}(\mu)\). Here \(x^{\mu}_{0} \equiv x_{0}\).
\end{lemma}
\begin{proof}
Starting with \(x = x_{0}\) in (R5), we have
\[ x^{\mu}_{1} = J_{\mu}(t_{1})(\mu \epsilon^{\mu, x_{0}, t_{1}} + x_{0}) \]
for all \(0 \leq t_{1} \leq T\), for some \(x^{\mu}_{1} \in D(A(t_{1}))\) and some \(\epsilon^{\mu, x_{0}, t_{1}} \in X\) with \(\|\epsilon^{\mu, x_{0}, t_{1}}\| \le g_{1}(\mu)\). Next, with \(x = x^{\mu}_{1}\) in (R5), we have
\[ x^{\mu}_{2} = J_{\mu}(t_{2})(\mu \epsilon^{\mu, x^{\mu}_{1}, t_{2}} + x^{\mu}_{1}) \]
for all \(0 \leq t_{2} \leq T\), for some \(x^{\mu}_{2} \in D(A(t_{2}))\) and some \(\epsilon^{\mu, x^{\mu}_{1}, t_{2}} \in X\) with \(\|\epsilon^{\mu, x^{\mu}_{1}, t_{2}}\| \le g_{1}(\mu)\). Continuing in this way, we complete the proof.
\end{proof}

Recall from Section \ref{S:ZB} the definition
\[ M(x) \equiv \sup_{0 \leq t \leq T}[[A(t)x]] \]
for \(x \in \hat{E}\) with \(A(t)\) satisfying (H1), (H2), (H3), or (H4); \(M(x)\) is uniformly finite for each \(x\) by Lemma \ref{L:BC}.

\begin{lemma} \label{L:A}
For \(x_{0} \in \hat{E}\) with \(A(t)\) satisfying (H1), the \(x^{\mu}_{i}\) in Lemma \ref{L:CB} satisfies
\[ \|x^{\mu}_{i} - x_{0}\| \leq i \mu (1 - \mu \omega)^{-i}(M(x_{0}) + 2 g_{1}(\mu)). \]
Here \(M(x_{0}) \leq K\), \(i \in \mathbb{N}\), and \(\mu \omega < 1\).
\end{lemma}
\begin{proof}
After \(x^{\mu}_{i}\) is chosen, (R5) implies that, for \(x_{0} \in \hat{E}\), \(J_{\mu}(t_{i})(\mu \epsilon^{\mu, x_{0}, t_{i}} + x_{0})\) exists for some \(\epsilon^{\mu, x_{0}, t_{i}} \in X\) with
\[ \|\epsilon^{\mu, x_{0}, t_{i}}\| \le g_{1}(\mu). \]
With that, we have
\[ \|x^{\mu}_{i} - x_{0}\| \leq \|x^{\mu}_{i} - J_{\mu}(t_{i})(\mu \epsilon^{\mu, x_{0}, t_{i}} + x_{0})\| + \|J_{\mu}(t_{i})(\mu \epsilon^{\mu, x_{0}, t_{i}} + x_{0}) - x_{0}\|. \]
Note that the first and the second terms on the right side of the above inequality are less than or equal to
\[ \gamma \|x^{\mu}_{i - 1} - x_{0}\| + 2 \gamma \mu g_{1}(\mu) \quad\text{and}\quad \mu M(x_{0}), \]
respectively. Here \(\gamma \equiv (1 - \mu \omega)^{-1} > 1\). It follows that
\[ \|x^{\mu}_{i} - x_{0}\| \leq \gamma \|x^{\mu}_{i - 1} - x_{0}\| + \mu M(x_{0}) + 2 \gamma \mu g_{1}(\mu)\,. \]
This recursive inequality completes the proof.
\end{proof}

\begin{lemma} \label{L:ZA}
For \(\{x_{0}, \mu_{0}, \mu, \epsilon^{\mu, x_{0}, t}\}\) and \(\{x_{0}, \mu_{0}, \lambda, \epsilon^{\lambda, x_{0}, t}\}\), where \(x_{0} \in E\) and \(\lambda \geq \mu\),
\begin{align*}
&(1 - \lambda \omega)\|\frac{J_{\lambda}(t)(\lambda \epsilon^{\lambda, x_{0}, t} + x_{0}) - x_{0}}{\lambda}\|\\
& \leq (1 - \mu\omega)\|\frac{J_{\mu}(t)(\mu\epsilon^{\mu, x_{0}, t} + x_{0}) - x_{0}}{\mu}\| + (1 - \mu\omega)(g_{1}(\lambda) + g_{1}(\mu)).
\end{align*}
Furthermore, for \(x_{0} \in \hat{E}(t)\),
\[ \|\frac{J_{\lambda}(t)(\lambda\epsilon^{\lambda, x_{0}, t} + x_{0}) - x_{0}}{\lambda}\| \leq (1 - \lambda \omega)^{-1}(g_{1}(\lambda) + [[A(t)x_{0}]])\,. \]
\end{lemma}
\begin{proof}
As in \cite[Page 61]{Cran},
\begin{align*}
&\|\frac{J_{\lambda}(t)(\lambda\epsilon^{\lambda, x_{0}, t} + x_{0}) - x_{0}}{\lambda}\| \\
&\leq \frac{\mu}{\lambda}\|\frac{J_{\mu}(t)(\mu \epsilon^{\mu, x_{0}, t} + x_{0}) - x_{0}}{\mu}\| + \frac{1}{\lambda}\|J_{\mu}(t)(\mu\epsilon^{\mu, x_{0}, t} + x_{0}) - J_{\lambda}(t)(\lambda \epsilon^{\lambda, x_{0}, t} + x_{0})\|,
\end{align*}
where, by the nonlinear resolvent identity, the second term on the right side is less than or equal to
\[ (1 - \mu \omega)^{-1}[\frac{\lambda - \mu}{\lambda}\|\frac{J_{\lambda}(t)(\lambda \epsilon^{\lambda, x_{0}, t} + x_{0}) - x_{0}}{\lambda}\| + \mu(g_{1}(\mu) + g_{1}(\lambda))]. \]
Regrouping the terms and letting \(\mu \to 0\), we complete the proof.
\end{proof}

\noindent\textbf{Remark.} For \(\{x_{0}, \mu, \{\mu_{1}\}, \epsilon^{\mu, x_{0}, t}\}\) and \(\{x_{0}, \lambda, \{\lambda_{1}\}, \epsilon^{\lambda, x_{0}, t}\}\) given, where \(x_{0} \in \hat{E}(t)\) and \(\mu_{1} \leq \mu < \lambda_{1} \leq \lambda\), the above proof goes through and gives
\begin{align*}
&(1 - \lambda_{1}\omega)\|\frac{J_{\lambda_{1}}(t)(\lambda_{1} \epsilon^{\lambda, x_{0}, t} + x_{0}) - x_{0}}{\lambda_{1}}\|\\
& \leq (1 - \mu_{1} \omega)\|\frac{J_{\mu_{1}}(t)(\mu_{1} \epsilon^{\mu, x_{0}, t} + x_{0}) - x_{0}}{\mu_{1}}\| + (1 - \mu_{1} \omega)(\mu + \lambda).
\end{align*}
Furthermore, for \(x_{0} \in \hat{E}(t)\),
\[ \|\frac{J_{\lambda_{1}}(t)(\lambda_{1} \epsilon^{\lambda, x_{0}, t} + x_{0}) - x_{0}}{\lambda_{1}}\| \leq (1 - \lambda_{1} \omega)^{-1}(\lambda + [[A(t)x_{0}]])\,. \]
The case where \(\{x_{0}, \nu, \mu_{0}, \mu, \epsilon^{\nu, x_{0}, t}\}\) and \(\{x_{0}, \nu', \mu_{0}', \mu', \epsilon^{\nu', x_{0}, t}\}\) are given is treated similarly.

\begin{lemma} \label{L:ZB}
For \(\{x_{n}, \mu_{0}, \mu, \epsilon^{\mu, x_{n}, t}\}\) and \(\{x_{0}, \mu_{0}, \mu, \epsilon^{\mu, x_{0}, t}\}\), where \(x_{n} \in \hat{E}(t)\), \(x_{0} \in \overline{\hat{E}(t)}\), and \(\|x_{n} - x_{0}\| \to 0\), we have
\[ [[A(t)x_{0}]] \leq \liminf_{n \to \infty}[[A(t)x_{n}]]\,. \]
\end{lemma}
\begin{proof}
As in \cite[Page 61]{Cran}, by Lemma \ref{L:ZA}, we have
\begin{align*}
&\|\frac{J_{\mu}(t)(\mu \epsilon^{\mu, x_{0}, t} + x_{0}) - x_{0}}{\mu}\|\\
&\leq \|\frac{J_{\mu}(t)(\mu \epsilon^{\mu, x_{0}, t} + x_{0}) - x_{0}}{\mu} - \frac{J_{\mu}(t)(\mu \epsilon^{\mu, x_{n}, t} + x_{n}) - x_{n}}{\mu}\| \\
&\quad + \|\frac{J_{\mu}(t)(\mu \epsilon^{\mu, x_{n}, t} + x_{n}) - x_{n}}{\mu}\|\\
&\leq \frac{1}{\mu}(1 - \mu \omega)^{-1}(\mu(g_{1}(\mu) + g_{1}(\mu)) + \|x_{n} - x_{0}\|) + \frac{1}{\mu}\|x_{n} - x_{0}\|\\
&\quad + (1 - \mu \omega)^{-1}(g_{1}(\mu) + [[A(t)x_{n}]]).
\end{align*}
Letting \(n \to \infty\) and then \(\mu \to 0\), we complete the proof.
\end{proof}
\noindent\textbf{Remark.} In the case where \(\{x_{n}, \mu, \{\mu_{1}\}, \epsilon^{\mu, x_{n}, t}\}\) and \(\{x_{0}, \mu, \{\mu_{1}\}, \epsilon^{\mu, x_{0}, t}\}\) are given, the above proof goes through and gives the same result. The case associated with (R7) is also similarly treated.

\begin{lemma} \label{L:CC}
For \(x_{0} \in \hat{E}\) with \(A(t)\) satisfying (A1), (A2), and (R5), the functions \(x^{\mu}_{i} = x^{\mu}_{i}(s; x_{0})\) in Lemma \ref{L:CB}, depending on \(s, x_{0}\), for \(0 \leq s \leq T\) and \(t_{i} = s + i \mu \leq T\), satisfy
\begin{gather*}
\frac{\|x^{\mu}_{i} - x^{\mu}_{i - 1}\|}{\mu} \leq K\,,\\
M(x^{\mu}_{i}) \equiv \sup_{0 \leq t \leq T}[[A(t)x^{\mu}_{i}]] \leq K
\end{gather*}
if (T5) holds. Here \(\mu \omega < 1\). Furthermore, \(M(x^{\mu}_{i}) \leq K\) if (T6) holds.
\end{lemma}
\begin{proof}
In the case of (T5), we have
\begin{align*}
\frac{\|x^{\mu}_{i} - x^{\mu}_{i - 1}\|}{\mu} &= \frac{\|J_{\mu}(t_{i})(\mu \epsilon^{\mu, x^{\mu}_{i - 1}, t_{i}} + x^{\mu}_{i - 1}) - J_{\mu}(t_{i - 1})(\mu \epsilon^{\mu, x^{\mu}_{i - 2}, t_{i - 1}} + x^{\mu}_{i - 2})\|}{\mu} \\
& \leq (1 - \mu \omega)^{-1}[(2 g_{1}(\mu) + \frac{\|x^{\mu}_{i - 1} - x^{\mu}_{i - 2}\|}{\mu}) \\
&\quad + \|f(t_{i}) - f(t_{i - 1})\|L(\|\mu \epsilon^{\mu, x^{\mu}_{i - 2}, t_{i - 1}} + x^{\mu}_{i - 2}\|)(1 + \frac{\|x^{\mu}_{i - 1} - x^{\mu}_{i - 2}\|}{\mu})].
\end{align*}
It follows by Lemma \ref{L:A} that
\[ a_{i} \leq c_{i}a_{i - 1} + b_{i}\,, \]
where \(a_{i} = \frac{\|x^{\mu}_{i} - x^{\mu}_{i - 1}\|}{\mu}\),
\[ d_{i} = \|f(t_{i}) - f(t_{i - 1})\| L(\mu g_{1}(\mu) + KT(M(x_{0}) + 2 g_{1}(\mu) + \|x_{0}\|)), \]
\(c_{i} = (1 - \mu \omega)^{-1}(1 + d_{i})\), and \(b_{i} = (1 - \mu \omega)^{-1}(2 g_{1}(\mu) + d_{i})\). This recursive inequality gives
\[ \|\frac{x^{\mu}_{i} - x^{\mu}_{i - 1}}{\mu}\| \leq K\,. \]
Note that, by \cite[Page 65]{Cran},
\begin{gather*}
a_{i} \leq (\prod_{j = 1}^{i}c_{j})a_{0} + \sum_{k = 1}^{i}(\prod_{j = k + 1}^{i}c_{j})b_{k}, \\
\prod_{j = 1}^{i}(1 + d_{j}) \leq \exp(\sum_{j = 1}^{i}d_{j}),
\end{gather*}
and that \(f\) is of bounded variation over \([0, T]\). Since \(\frac{x^{\mu}_{i} - (\mu \epsilon^{\mu, x^{\mu}_{i - 1}, t_{i}} + x^{\mu}_{i - 1})}{\mu} \in A(t_{i})x^{\mu}_{i}\), we have
\[ [[A(t_{i})x^{\mu}_{i}]] \leq \|\frac{x^{\mu}_{i} - (\mu \epsilon^{\mu, x^{\mu}_{i - 1}, t_{i}} + x^{\mu}_{i - 1})}{\mu}\| \leq g_{1}(\mu) + \|\frac{x^{\mu}_{i} - x^{\mu}_{i - 1}}{\mu}\| \leq g_{1}(\mu) + K \]
by Lemma \ref{L:BB}. As in the proof of Lemma \ref{L:BC} (that \(\hat{E}(t)\) is constant in \(t\)), we have
\[ M(x^{\mu}_{i}) \equiv \sup_{0 \leq t \leq T}[[A(t)x^{\mu}_{i}]] \leq K. \]
In the case of (T6), we follow \cite[Pages 65-66]{Cran}. As above, by Lemma \ref{L:BB},
\[ [[A(t_{i})x^{\mu}_{i}]] \leq g_{1}(\mu) + \|\frac{x^{\mu}_{i} - x^{\mu}_{i - 1}}{\mu}\|, \]
which, by Lemma \ref{L:ZA}, is less than or equal to
\[ g_{1}(\mu) + ([[A(t_{i})x^{\mu}_{i - 1}]] + g_{1}(\mu))(1 - \mu \omega)^{-1}, \]
where, in turn, by the proof of Lemma \ref{L:BC} (that \(\hat{E}(t)\) is constant in \(t\)), \([[A(t_{i})x^{\mu}_{i - 1}]]\) is less than or equal to
\[ [[A(t_{i - 1})x^{\mu}_{i - 1}]] + \|f(t_{i}) - f(t_{i - 1})\| L(\|\mu \epsilon^{\mu, x^{\mu}_{i - 1}, t_{i - 1}} + x^{\mu}_{i - 1}\|)(1 + [[A(t_{i - 1})x^{\mu}_{i - 1}]]). \]
The rest of the proof is the same as that for the case of (T5).
\end{proof}

\section{The proof of the main results} \label{S:D}
We use the difference-equation notation introduced in Section \ref{S:B}.
Let \(E_{1}\) be a right shift operator acting on the first index of a doubly indexed sequence of real numbers; that is, let
\[
E_{1}\{\rho_{m, n} \} = E_{1}\{\rho_{m, n}\}_{m, n = 0}^{\infty} \equiv \{\rho_{m + 1, n} \} = \{\rho_{m + 1, n}\}_{m, n = 0}^{\infty}
\]
for \(\{\rho_{m, n} \} = \{\rho_{m, n} \}_{m, n = 0}^{\infty} \in \overline{S}\). Here \(\overline{S}\) is the set of all doubly indexed sequences of real numbers \(\{\rho_{m, n}\}_{m, n = 0}^{\infty}\) with
\[
\rho_{m, n} = 0
\]
for negative integers \(m\) or \(n\). Similarly, define \(E_{2}\) by
\[
E_{2}\{\rho_{m, n} \} \equiv \{\rho_{m, n + 1} \}
\]
for \(\{\rho_{m, n} \} \in \overline{S}\); thus \(E_{2}\) acts on the second index. It is easy to see that for a doubly indexed sequence \(\{\rho_{m, n}\} = \{\rho_{m, n}\}_{m, n = 0}^{\infty} \in \overline{S}\), \(E_{1}E_{2} \{\rho_{m, n}\} = E_{2}E_{1} \{\rho_{m, n}\}\) holds.

\begin{lemma} \label{L:DA}
(R1) implies (R2).
\end{lemma}
\begin{proof}
This basically follows from \cite[Page 142]{Mi}. Let \(\lambda > 0\). For \(x \in E\), we have by (R1) that there is a \(\lambda_{1} = \lambda_{1}(\lambda) > 0\), which is independent of \(x\) and \(t\), with \(0 < \lambda_{1} \leq \lambda\), such that
\[
d(\mathop{\rm Ran}(I - \lambda_{1} A), x) < \frac{\lambda_{1} \lambda}{2}
\]
holds for all \(x \in E\) and \(t \in [0, T]\). For this \(\frac{\lambda_{1} \lambda}{2} > 0\), we have by the definition of distance in (vi) that there are \(x^{\lambda, t} = x^{\lambda_{1}, \lambda, t} \in D(A(t))\) and \(y^{\lambda, t} = y^{\lambda_{1}, \lambda, t} \in A(t)x^{\lambda, t}\), such that
\[
\|x^{\lambda, t} - x - \lambda_{1}y^{\lambda, t}\| < \frac{\lambda_{1} \lambda}{2} + \frac{\lambda_{1} \lambda}{2} = \lambda_{1} \lambda\,.
\]
By letting \(\epsilon^{\lambda, x, t} = \frac{x^{\lambda, t} - x - \lambda_{1} y^{\lambda, t}}{\lambda_{1}}\), the proof is complete.
\end{proof}

\begin{lemma} \label{L:AD}
Let \(0 \leq s \leq T\), \(x_{0} \in E\), and let (R2) hold. Then for each \(\lambda > 0\), there exists \(\lambda_{1} = \lambda_{1}(\lambda) > 0\), which is independent of \(x \in E\) and \(t\), with \(0 < \lambda_{1} \leq \lambda\), and there exist \(t^{\lambda}_{k} = k \lambda_{1}\), \(x^{\lambda}_{k} = x^{\lambda_{1}, \lambda, t}_{k}\), and \(y^{\lambda}_{k} = y^{\lambda_{1}, \lambda, t}_{k}\), \(k = 1, 2, \dots\), such that \(x^{\lambda}_{k} \in D(A(t))\), \(y^{\lambda}_{k} \in A(t)x^{\lambda}_{k}\), and
\begin{gather*}
0 = t^{\lambda}_{0} < t^{\lambda}_{1} < \dots < t^{\lambda}_{k} < t^{\lambda}_{k + 1} < \dots, \quad \lim_{k \to \infty}t^{\lambda}_{k} = \infty, \\
t^{\lambda}_{k} - t^{\lambda}_{k - 1} = \lambda_{1} \leq \lambda, \\
\|x^{\lambda}_{k} - x^{\lambda}_{k - 1} - (t^{\lambda}_{k} - t^{\lambda}_{k - 1})y^{\lambda}_{k}\| < (t^{\lambda}_{k} - t^{\lambda}_{k - 1})\lambda.
\end{gather*}
\end{lemma}
\begin{proof}
Compare with \cite[Page 142]{Mi}. Starting with \(x = x_{0}\), \(\mu = \lambda > 0\), and \(\mu_{1} = \lambda_{1}\) in (R2), we have \(x^{\lambda}_{1} \in D(A(t))\) and \(y^{\lambda}_{1} \in A(t)x^{\lambda}_{1}\), such that
\[
x^{\lambda}_{1} - \lambda_{1}y^{\lambda}_{1} = x_{0} + \lambda_{1} \epsilon^{\lambda, x_{0}, t}
\]
holds with \(\|\epsilon^{\lambda, x_{0}, t}\| < \lambda\).
Next, with \(x = x^{\lambda}_{1} \in D(A(t))\), \(\mu = \lambda\), and \(\mu_{1} = \lambda_{1}\) in (R2), we have \(x^{\lambda}_{2} \in D(A(t))\) and \(y^{\lambda}_{2} \in A(t)x^{\lambda}_{2}\), such that
\[
x^{\lambda}_{2} - \lambda_{1}y^{\lambda}_{2} = x^{\lambda}_{1} + \lambda_{1} \epsilon^{\lambda, x^{\lambda}_{1}, t}
\]
with \(\|\epsilon^{\lambda, x^{\lambda}_{1}, t}\| < \lambda\). Continuing this way, we have \(x^{\lambda}_{k} \in D(A(t))\), \(y^{\lambda}_{k} \in A(t)x^{\lambda}_{k}\), \(k = 1, 2, 3, \dots\), satisfying
\[
x^{\lambda}_{k} - \lambda_{1} y^{\lambda}_{k} = x^{\lambda}_{k - 1} + \lambda_{1}\epsilon^{\lambda, x^{\lambda}_{k - 1}, t}
\]
with \(\|\epsilon^{\lambda, x^{\lambda}_{k - 1}, t}\| < \lambda\). Define \(t^{\lambda}_{k} = k \lambda_{1}\), and the proof is complete.
\end{proof}

\begin{lemma} \label{L:AE}
Let \(T > s \geq 0\), \(x_{0} \in E\), and \(0 < \lambda < \lambda_{0}\). Under the assumptions of Lemma \ref{L:AD}, there exist \(0 < \lambda_{1} = \lambda_{1}(\lambda) \leq \lambda\), \(N^{\lambda} \in \mathbb{N}\), \(t^{\lambda}_{k} = k \lambda_{1} \geq 0\), \(x^{\lambda}_{k} = x^{\lambda_{1}, \lambda, s + t^{\lambda}_{k}}_{k} \in D(A(s + t^{\lambda}_{k}))\), and \(\epsilon^{\lambda, x^{\lambda}_{k - 1}, s + t^{\lambda}_{k}} \in X\), \(k = 0, 1, 2, \dots, N^{\lambda}\), such that
\[
\frac{x^{\lambda}_{k} - x^{\lambda}_{k - 1}}{t^{\lambda}_{k} - t^{\lambda}_{k - 1}} - \epsilon^{\lambda, x^{\lambda}_{k -1}, s + t^{\lambda}_{k}} \in A(s + t^{\lambda}_{k})x^{\lambda}_{k}
\]
holds with \(\|\epsilon^{\lambda, x^{\lambda}_{k - 1}, s + t^{\lambda}_{k}}\| < \lambda\), \(t^{\lambda}_{k} - t^{\lambda}_{k - 1} = \lambda_{1} \leq \lambda\), and
\[
0 = t^{\lambda}_{0} < t^{\lambda}_{1} < t^{\lambda}_{2} < \dots < t^{\lambda}_{N^{\lambda} - 1} < T \leq t^{\lambda}_{N^{\lambda}}.
\]
\end{lemma}
\begin{proof}
As in \cite[Page 144]{Mi}, let \(t = s + t^{\lambda}_{k}\) and \(\epsilon^{\lambda, x^{\lambda}_{k - 1}, s + t^{\lambda}_{k}} = \frac{x^{\lambda}_{k} - x^{\lambda}_{k - 1}}{t^{\lambda}_{k} - t^{\lambda}_{k - 1}} - y^{\lambda}_{k}\) in Lemma \ref{L:AD}, and the proof is complete.
\end{proof}
For convenience, let \(\{T, s, x_{0}, \lambda, \lambda_{1}, x^{\lambda}_{k}, t^{\lambda}_{k}, N^{\lambda}, \epsilon^{\lambda, x^{\lambda}_{k - 1}, s + t^{\lambda}_{k}}\}\) denote the contents of Lemma \ref{L:AE}.

\begin{lemma} \label{L:AF}
Let \(0 < \lambda, \mu < \lambda_{0}\). For \(\{T, s, x_{0}, \lambda, \lambda_{1}, x^{\lambda}_{m}, t^{\lambda}_{m}, N^{\lambda}, \epsilon^{\lambda, x^{\lambda}_{m - 1}, s + t^{\lambda}_{m}} \}\) and \(\{T, s, x_{0}, \mu, \mu_{1}, x^{\mu}_{n}, t^{\mu}_{n}, N^{\mu}, \epsilon^{\mu, x^{\mu}_{n - 1}, s + t^{\mu}_{n}}\}\) given under the conditions of Lemma \ref{L:AE} and (A1), the inequality
\[
a_{m, n} \leq \gamma \alpha a_{m - 1, n} + \gamma \beta a_{m, n - 1} + b_{m, n}
\]
holds.
Here \(a_{m, n} = \|x^{\lambda}_{m} - x^{\mu}_{n}\|\), \(\gamma = (1 - \frac{\lambda_{1} \mu_{1}}{\lambda_{1} + \mu_{1}} \omega)^{-1}\), \(0 < \mu_{1} \leq \mu < \lambda_{0}\), \(\alpha = \frac{\mu_{1}}{\lambda_{1} + \mu_{1}}\), \(\beta = 1 - \alpha = \frac{\lambda_{1}}{\lambda_{1} + \mu_{1}}\), and
\begin{align*}
b_{m, n} &\leq \gamma \frac{\lambda_{1} \mu_{1}}{\lambda_{1} + \mu_{1}} (\|\epsilon^{\lambda, x^{\lambda}_{m - 1}, s + t^{\lambda}_{m}}\| + \|\epsilon^{\mu, x^{\mu}_{n - 1}, s + t^{\mu}_{n}}\|) + k_{m, n} \\
& \leq \gamma \frac{\lambda_{1} \mu_{1}}{\lambda_{1} + \mu_{1}} (\lambda + \mu) + k_{m, n},
\end{align*}
and \(0 < \lambda_{0} \omega < 1\), where
\begin{align*}
k_{m, n} & \leq \gamma \sigma \|f(s + m \lambda_{1}) - f(s + n \mu_{1})\| L(\|\beta(\mu_{1} \epsilon^{\mu, x^{\mu}_{n - 1}, s + t^{\mu}_{n}} + x^{\mu}_{n - 1}) + \alpha x^{\mu}_{n}\|) \big(1 \\
&\quad + \text{either 0 or } \frac{\|x^{\mu}_{n} - (\mu_{1} \epsilon^{\mu, x^{\mu}_{n - 1}, s + t^{\mu}_{n}} + x^{\mu}_{n - 1})\|}{\mu_{1}} \\
&\quad \text{or } \limsup_{\mu \to 0}\|\frac{x^{\mu}_{n} - (\mu_{1} \epsilon^{\mu, x^{\mu}_{n - 1}, s + t^{\mu}_{n}} + x^{\mu}_{n - 1})}{\mu_{1}} \|\big).
\end{align*}
Here \(\sigma = \lambda_{1}\mu_{1}/(\lambda_{1} + \mu_{1})\).
\end{lemma}

\noindent\textbf{Remark.} The recursive inequality in Crandall-Liggett \cite[Page 270]{Cra} is different from that in Lemma \ref{L:AF} and cannot be used here, since we do not know whether \(\lambda_{1}\) or \(\mu_{1}\) is bigger.

\begin{proof}
From Lemma \ref{L:AE}, we have
\begin{gather*}
x^{\lambda}_{m} = J_{\lambda_{1}}(s + m \lambda_{1}) (\lambda_{1} \epsilon^{\lambda, x^{\lambda}_{m - 1}, s + t^{\lambda}_{m}} + x^{\lambda}_{m - 1})\,,\\
x^{\mu}_{n} = J_{\mu_{1}} (s + n \mu_{1})(\mu_{1} \epsilon^{\mu, x^{\mu}_{n - 1}, s + t^{\mu}_{n}} + x^{\mu}_{n - 1})\,.
\end{gather*}
Setting \(\sigma = \frac{\lambda_{1} \mu_{1}}{\lambda_{1} + \mu_{1}}\) and using the nonlinear resolvent identity \cite[Page 268]{Cra}, namely \(J_{\lambda_{1}}z = J_{\sigma}(\frac{\sigma}{\lambda_{1}}z + (1 - \frac{\sigma}{\lambda_{1}})J_{\lambda_{1}}z)\), in which \(\frac{\sigma}{\lambda_{1}} = \alpha\) and \(\frac{\sigma}{\mu_{1}} = \beta\), we have, as in \cite[Page 86]{La},
\begin{gather*}
x^{\lambda}_{m} = J_{\sigma}(s + m \lambda_{1})(\alpha(\lambda_{1} \epsilon^{\lambda, x^{\lambda}_{m - 1}, s + t^{\lambda}_{m}} + x^{\lambda}_{m - 1}) + \beta x^{\lambda} _{m})\,,\\
x^{\mu}_{n} = J_{\sigma}(s + n \mu_{1})( \beta (\mu_{1} \epsilon^{\mu, x^{\mu}_{n - 1}, s + t^{\mu}_{n}} + x^{\mu}_{n - 1}) + \alpha x^{\mu}_{n}),
\end{gather*}
where \(\alpha = \frac{\mu_{1}}{\lambda_{1} + \mu_{1}}\) and \(\beta = 1 - \alpha\). It follows as in \cite[Page 86]{La}, using (4.3), that
\[
a_{m, n} \equiv \|x^{\lambda}_{m} - x^{\mu}_{n}\| \leq \gamma (\alpha a_{m - 1, n} + \beta a_{m, n - 1} + \sigma (\lambda + \mu)) + k_{m, n},
\]
where, by (4.3),
\begin{align*}
k_{m, n} &\leq \gamma \sigma \|f(s + m \lambda_{1}) - f(s + n \mu_{1})\| L(\|\beta(\mu_{1} \epsilon^{\mu, x^{\mu}_{n - 1}, s + t^{\mu}_{n}} + x^{\mu}_{n - 1}) + \alpha x^{\mu}_{n}\|) \big( 1 +\\
& \quad \text{either 0 or } \frac{\|x^{\mu}_{n} - (\beta(\mu_{1} \epsilon^{\mu, x^{\mu}_{n - 1}, s + t^{\mu}_{n}} + x^{\mu}_{n - 1}) + \alpha x^{\mu}_{n})\|}{\sigma} \\
&\quad \text{or } \limsup_{\mu \to 0}\|\frac{x^{\mu}_{n} - (\beta(\mu_{1} \epsilon^{\mu, x^{\mu}_{n - 1}, s + t^{\mu}_{n}} + x^{\mu}_{n - 1}) + \alpha x^{\mu}_{n})}{\sigma}\|\big).
\end{align*}
Here \(\gamma = (1 - \sigma \omega)^{- 1}\). This completes the proof.
\end{proof}
\begin{lemma} \label{L:AG}
Under the assumptions of Lemma \ref{L:AF},
\begin{align*}
\{a_{m, n}\}&\leq (\gamma \alpha (E_{2} - \gamma \beta)^{*}E_{2})^{m}\{a_{0, n}\} + \sum_{i = 0}^{m - 1}(\gamma \alpha(E_{2} - \gamma \beta)^{*}E_{2})^{i} \{(\gamma \beta)^{n}a_{m - i, 0}\} \\
&\quad + \sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}((E_{2} - \gamma \beta)^{*}E_{2})^{j} \{b_{m + 1 - j, n}\}.
\end{align*}
Note \(b_{m, n} \leq \gamma \frac{ \lambda_{1} \mu_{1}}{\lambda_{1} + \mu_{1}}(\lambda + \mu) + k_{m, n}\).
\end{lemma}
The proof of the above lemma can be found in \cite{Lin3}.

\begin{lemma} \label{L:AI}
Under the assumptions of Lemma \ref{L:AF} and (T1) or (T2) or (T3), the following inequality holds for \(x_{0} \in \hat{E}\):
\[
a_{m, 0} = \|x^{\lambda}_{m} - x_{0}\| \leq (1 - \lambda_{1}\omega)^{- m}(m \lambda_{1}) (2 \lambda + M(x_{0})),
\]
where \(M(x_{0}) \leq K\) and \(x^{\lambda}_{0} = x_{0} = x^{\mu}_{0}\).
\end{lemma}
The proof of the above lemma follows from the proof of Lemma \ref{L:A} and is left to the reader.

\begin{proposition} \label{P:MA}
Under the assumptions of Lemma \ref{L:AF} and (A2), for \(x^{\lambda}_{0} = x_{0} \in \hat{E}\), we have
\begin{gather*}
\frac{\|x^{\lambda}_{i} - x^{\lambda}_{i - 1}\|}{\lambda_{1}} \leq K\,,\\
M(x^{\lambda}_{i}) \equiv \sup_{0 \leq t \leq T}[[A(t)x^{\lambda}_{i}]] \leq K
\end{gather*}
for \(0 \leq s + i \lambda_{1} \leq T\) if ((R3), (T2)) holds. Furthermore, \(M(x^{\lambda}_{i}) \leq K\) if ((R3), (T3)) holds.
\end{proposition}
The proof of the above proposition uses Lemma \ref{L:AI}, the Remarks in Section \ref{S:C}, and ((R3), (T2)) or ((R3), (T3)), and follows the proof of Lemma \ref{L:CC}. It is left to the reader.

For convenience, denote the first and second terms on the right side of the inequality in Lemma \ref{L:AG} by \(\{c_{m, n}\}\) and \(\{d_{m, n}\}\), respectively, and the third term by \(\{s_{m, n}\} + \{e_{m, n}\}\).

\begin{proposition} \label{P:F}
For \(x_{0} \in \hat{E}\),
\[
\{c_{m, n}\} \leq \{\gamma_{1}^{n}((n \mu_{1} - m \lambda_{1}) + \sqrt{(n \mu_{1} - m \lambda_{1})^{2} + (m \lambda_{1})\mu_{1} + (n \mu_{1})\lambda_{1}})(2 \mu + M(x_{0}))\},
\]
where \(\gamma_{1} = (1 - \mu_{1}\omega)^{-1}\).
\end{proposition}
The proof of the above proposition can be found in \cite{Lin3}, for which Lemma \ref{L:AI} is used.

\begin{proposition} \label{P:G}
For \(x_{0} \in \hat{E}\),
\[
\{d_{m, n}\} \leq \{\gamma^{m} \gamma^{n} ((m \lambda_{1} - n \mu_{1})^{2} + (n \mu_{1})(\lambda_{1} + \mu_{1}))^{1/2}\gamma_{2}^{m}(2 \lambda + M(x_{0}))\},
\]
where \(\gamma_{2} = (1 - \lambda_{1}\omega)^{-1}\).
\end{proposition}
The proof of the above proposition can be found in \cite{Lin3}, for which Lemma \ref{L:AI} is used.

\begin{proposition} \label{P:H}
For \(x_{0} \in \hat{E}\),
\[
\{s_{m, n}\} \leq \{\gamma^{m} \gamma^{n} (m \lambda_{1})(\lambda + \mu)\}.
\]
\end{proposition}
For the proof of the above proposition, see \cite{Lin3}.

\begin{proposition} \label{P:I}
Let \(\{T, s, x_{0}, \lambda, \lambda_{1}, x^{\lambda}_{m}, t^{\lambda}_{m}, N^{\lambda}, \epsilon^{\lambda, x^{\lambda}_{m - 1}, s + t^{\lambda}_{m}}\}\) and
\[
\{T, s, y_{0}, \lambda, \lambda_{1}, y^{\lambda}_{m}, t^{\lambda}_{m}, N^{\lambda}, \epsilon^{\lambda, y^{\lambda}_{m - 1}, s + t^{\lambda}_{m}}\}
\]
be given, where \(x_{0} \in \overline{\hat{E}} = \overline{D}\), \(y_{0} \in \overline{\hat{E}}\), and \(\|\epsilon^{\lambda, x^{\lambda}_{m - 1}, s + t^{\lambda}_{m}}\|, \|\epsilon^{\lambda, y^{\lambda}_{m - 1}, s + t^{\lambda}_{m}}\| < \lambda\).
Then
\[
\|x^{\lambda}_{m} - y^{\lambda}_{m}\| \leq \gamma_{2}^{m}(\|x_{0} - y_{0}\| + (m \lambda_{1})(2 \lambda)),
\]
where \(\gamma_{2} = (1 - \lambda_{1} \omega)^{-1}\).
\end{proposition}
\begin{proof}
Note \(x^{\lambda}_{m}\) and \(y^{\lambda}_{m}\) satisfy
\begin{gather*}
\frac{x^{\lambda}_{m} - x^{\lambda}_{m - 1}}{\lambda_{1}} - \epsilon^{\lambda, x^{\lambda}_{m - 1}, s + t^{\lambda}_{m}} \in A(s + t^{\lambda}_{m})x^{\lambda}_{m},\\
\frac{y^{\lambda}_{m} - y^{\lambda}_{m - 1}}{\lambda_{1}} - \epsilon^{\lambda, y^{\lambda}_{m - 1}, s + t^{\lambda}_{m}} \in A(s + t^{\lambda}_{m})y^{\lambda}_{m},
\end{gather*}
respectively. From the condition (A1) for \(A(t)\), we have
\[
\|x^{\lambda}_{m} - y^{\lambda}_{m}\| \leq \gamma_{2}(\|x^{\lambda}_{m - 1} - y^{\lambda}_{m - 1}\| + \lambda_{1}(\|\epsilon^{\lambda, x^{\lambda}_{m - 1}, s + t^{\lambda}_{m}}\| + \|\epsilon^{\lambda, y^{\lambda}_{m - 1}, s + t^{\lambda}_{m}}\|)).
\]
This recursive relation gives the result. Here note \(x^{\lambda}_{0} = x_{0}\), \(y^{\lambda}_{0} = y_{0}\), \(\gamma_{2}^{i} \leq \gamma_{2}^{m}\) for \(i \leq m\), and \(\|\epsilon^{\lambda, x^{\lambda}_{m - 1}, s + t^{\lambda}_{m}}\|, \|\epsilon^{\lambda, y^{\lambda}_{m - 1}, s + t^{\lambda}_{m}}\| < \lambda\).
\end{proof}
We now estimate \(\{e_{m, n}\}\). From Lemmas \ref{L:AF}, \ref{L:AG}, and \ref{L:AI}, Proposition \ref{P:MA}, and the assumption on \(f\), we have
\begin{align*}
\{e_{m, n}\} &\leq \sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}((E_{2} - \gamma \beta)^{*}E_{2})^{j}\{K \gamma \sigma \rho(|n \mu_{1} - (m + 1 - j)\lambda_{1}|)\} \\
&\leq \sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}((E_{2} - \gamma \beta)^{*} E_{2})^{j}\{K \sigma \gamma^{n} \rho(|n \mu_{1} - (m + 1 - j)\lambda_{1}|)\},
\end{align*}
where \(\gamma = (1 - \sigma \omega)^{-1}\). As in Crandall-Pazy \cite[Page 68]{Cran}, let \(\delta > 0\) be given and write the right side of the above inequality as \(\{I^{(1)}_{m , n}\} + \{I^{(2)}_{m, n}\}\), where \(\{I^{(1)}_{m, n}\}\) is the sum over indices with \(|n \mu_{1} - (m + 1 - j)\lambda_{1}| < \delta\), and \(\{I^{(2)}_{m, n}\}\) is the sum over indices with \(|n \mu_{1} - (m + 1 - j)\lambda_{1}| \geq \delta\). Using Lemma \ref{L:AH}, we have
\[
\{I^{(1)}_{m, n}\} \leq \{K \sigma \rho(\delta)\sum_{j = 1}^{m}\alpha^{-1} \gamma^{j - 1}\gamma^{n}\} \leq \{K \rho(\delta)\gamma^{m} \gamma^{n}m \lambda_{1}\}.
\]
On the other hand, using Lemmas \ref{L:AB} and \ref{L:AH} and Proposition \ref{P:ZA}, we have
\begin{align*}
&\{I^{(2)}_{m, n}\} \\
&\leq K \sigma \rho(T)\sum_{j = 1}^{m} (\gamma \alpha)^{j - 1}((E_{2} - \gamma \beta)^{*}E_{2})^{j}\{\gamma^{n}\} \\
&\leq K \sigma \rho(T)\sum_{j = 1}^{m}(\gamma \alpha)^{j - 1} ((E_{2} - \gamma \beta)^{*}E_{2})^{j}\{\gamma^{n} \frac{(n \mu_{1} - (m + 1 - j)\lambda_{1})^{2}}{\delta^{2}}\} \\
&= K \sigma \rho(T) \delta^{-2}\sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}((E_{2} - \gamma \beta)^{*}E_{2})^{j}\{\gamma^{n}(n^{2}(\mu_{1})^{2} - 2n \mu_{1}( m + 1 - j)\lambda_{1}\\
&\quad + (m + 1 - j)^{2}(\lambda_{1})^{2})\} \\
&= K \sigma \rho(T)\delta^{-2} \sum_{j = 1}^{m}(\gamma \alpha)^{j - 1} \sum_{i = 0}^{j}\binom{j}{i}(\gamma \beta (E_{2} - \gamma \beta)^{*})^{i}\\
&\quad\times\big\{n^{2}\gamma^{n}(\mu_{1})^{2} - 2n \gamma^{n}\mu_{1} (m + 1 - j)\lambda_{1}\big\} \\
&\quad + \{K \sigma \rho(T)\delta^{-2}\sum_{j = 1}^{m}(1 - \sum_{i = 1}^{j}\binom{n + i - 2}{i - 1} \alpha^{i - 1}\beta^{n})(\alpha^{-1}\gamma^{j - 1}\gamma^{n})\\
&\quad\times (m + 1 - j)^{2} (\lambda_{1})^{2}\} \\
&= K \sigma \rho(T) \delta^{-2}\sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}\sum_{i = 0}^{j}\binom{j}{i} (\gamma \beta)^{i}\gamma^{n - i}[\{(\frac{n^{2}}{\alpha^{i}} - \frac{2in} {\alpha^{i + 1}} \\
&\quad + (\frac{i(i - 1)}{\alpha^{i + 2}} + \frac{i}{\alpha^{i + 1}}) - \sum_{k = 0}^{i - 1}(\frac{(i - k)(i - k - 1)}{\alpha^{i - k + 2}} + \frac{i - k}{\alpha^{i - k + 1}})\binom{n}{k}\beta^{n - k})(\mu_{1})^{2}\} \\
&\quad + \{(\frac{n}{\alpha^{i}} - \frac{i}{\alpha^{i + 1}} + \frac{1}{\alpha^{i + 1}} \sum_{k = 0}^{i - 1}\binom{n}{k}(i - k)\alpha^{k}\beta^{n - k})(- 2 \mu_{1} (m + 1 - j)\lambda_{1})\}] \\
&\quad + \{K \sigma \rho(T)\delta^{-2}\sum_{j = 1}^{m}(1 - \sum_{i = 1}^{j} \binom{n + i - 2}{i - 1}\alpha^{i - 1}\beta^{n}) (\alpha^{-1}\gamma^{j - 1}\gamma^{n})\\
&\quad \times (m + 1 - j)^{2} (\lambda_{1})^{2}\} \\
&\leq \{K \sigma \rho(T)\delta^{-2}\gamma^{m}\gamma^{n}\alpha^{-1} \sum_{j = 1}^{m}\sum_{i = 0}^{j}\binom{j}{i}(n^{2} - \frac{2in}{\alpha} + (\frac{i(i - 1)} {\alpha^{2}} + \frac{i}{\alpha}))\beta^{i}\alpha^{j - i}(\mu_{1})^{2}\} \\
&\quad + \{K \sigma \rho(T)\delta^{-2}\gamma^{m}\gamma^{n}\alpha^{-1} \sum_{j = 1}^{m}\sum_{i = 0}^{j}\binom{j}{i}(n - \frac{i}{\alpha})\beta^{i}\alpha^{j - i}(- 2 \mu_{1}(m + 1 - j) \lambda_{1})\} \\
&\quad + \{K \sigma \rho(T)\delta^{-2}\gamma^{m}\gamma^{n}\alpha^{-1} \sum_{j = 1}^{m}(m + 1 - j)^{2}(\lambda_{1})^{2}\}.
\end{align*}
(Note that \(\gamma > 1\) and that we dropped the negative terms associated with \(\sum_{k = 0}^{i - 1}\) or \(\binom{n + i - 2}{i - 1}\).) The above expression equals
\begin{align*}
&\{K \sigma \rho(T)\delta^{-2}\gamma^{m} \gamma^{n}\alpha^{-1}\sum_{j = 1}^{m} (n^{2} - \frac{2\beta j n}{\alpha} + \frac{\beta^{2}j(j - 1)+ \beta j - \beta j}{\alpha^{2}} + \frac{\beta j} {\alpha})(\mu_{1})^{2}\} \\
& + \{K \sigma \rho(T)\delta^{-2}\gamma^{m} \gamma^{n}\alpha^{-1}\sum_{j = 1}^{m} (n - \frac{\beta j}{\alpha})(- 2 \mu_{1}(m + 1)\lambda_{1} + j(2 \mu_{1}\lambda_{1}))\} \\
&+ \{K \sigma \rho(T)\delta^{-2}\gamma^{m} \gamma^{n}\alpha^{-1}\sum_{j = 1}^{m}((m + 1)^{2} - 2(m + 1)j + j^{2})(\lambda_{1})^{2}\},
\end{align*}
where the calculations in \cite[Page 271]{Cra} were used:
\[
\sum_{i = 0}^{j}\binom{j}{i}\beta^{i}\alpha^{j - i} = 1, \quad \sum_{i = 0}^{j}\binom{j}{i}i \beta^{i}\alpha^{j - i} = \beta j, \quad \sum_{i = 0}^{j}\binom{j}{i}i^{2} \beta^{i}\alpha^{j - i} = \beta^{2} j(j - 1) + \beta j,
\]
in which we have \(\sigma \alpha^{-1} = \lambda_{1}\),
\begin{gather*}
\sum_{j = 1}^{m}(n^{2}(\mu_{1})^{2} - 2n \mu_{1}(m + 1)\lambda_{1} + (m + 1)^{2}(\lambda_{1})^{2}) = m (n \mu_{1} - (m + 1) \lambda_{1})^{2}, \\
\begin{aligned}
&\sum_{j = 1}^{m}(- \frac{2 \beta j n}{\alpha}(\mu_{1})^{2} + \frac{2 \beta j \mu_{1}(m + 1)\lambda_{1}}{\alpha}) + \sum_{j = 1}^{m} (n j(2 \mu_{1}\lambda_{1}) - 2(m + 1)j (\lambda_{1})^{2}) \\
& = - \sum_{j = 1}^{m}2 \lambda_{1} (n \mu_{1} - (m + 1)\lambda_{1})j + \sum_{j = 1}^{m}2 \lambda_{1}(n \mu_{1} - (m + 1)\lambda_{1})j = 0,
\end{aligned} \\
\sum_{j = 1}^{m}(\frac{\beta^{2}j^{2}(\mu_{1})^{2}}{\alpha^{2}} - \frac{2\beta j^{2}\mu_{1} \lambda_{1}}{\alpha} + j^{2}(\lambda_{1})^{2}) = \sum_{j = 1}^{m} j^{2}(\frac{\beta \mu_{1}}{\alpha} - \lambda_{1})^{2} = 0, \\
\sum_{j = 1}^{m}(- \frac{\beta^{2}j}{\alpha^{2}} + \frac{\beta j}{\alpha})(\mu_{1})^{2} = \sum_{j = 1}^{m}\lambda_{1}(\mu_{1} - \lambda_{1})j \leq \lambda_{1} (\mu_{1} + \lambda_{1})\frac{m(m + 1)}{2}.
\end{gather*}
Putting things together, we have thus proved the following statement.

\begin{proposition} \label{P:J}
\begin{align*}
&\{e_{m, n}\} \\
&\leq \{K \rho(\delta)\gamma^{m}\gamma^{n}(m \lambda_{1})\} \\
&\quad + \{K \rho(T)\gamma^{m}\gamma^{n}\delta^{-2}[(m \lambda_{1})(n \mu_{1} - (m + 1)\lambda_{1})^{2} + (\frac{m(m + 1)}{2}(\lambda_{1})^{2})(\mu_{1} + \lambda_{1})]\}.
\end{align*}
\end{proposition}

\begin{proof}[Proof of Theorem \ref{T:B}]
Let
\begin{gather*}
\{T, s, x_{0}, \lambda, \lambda_{1}, x^{\lambda}_{m}, t^{\lambda}_{m}, N^{\lambda}, \epsilon^{\lambda, x^{\lambda}_{m - 1}, s + t^{\lambda}_{m}}\},\\
\{T, s, y_{p}, \lambda, \lambda_{1}, y^{\lambda}_{m}, t^{\lambda}_{m}, N^{\lambda}, \epsilon^{\lambda, y^{\lambda}_{m - 1}, s + t^{\lambda}_{m}}\}, \\
\{T, s, x_{0}, \mu, \mu_{1}, x^{\mu}_{n}, t^{\mu}_{n}, N^{\mu}, \epsilon^{\mu, x^{\mu}_{n - 1}, s + t^{\mu}_{n}}\},\\
\{T, s, y_{p}, \mu, \mu_{1}, y^{\mu}_{n}, t^{\mu}_{n}, N^{\mu}, \epsilon^{\mu, y^{\mu}_{n - 1}, s + t^{\mu}_{n}}\}
\end{gather*}
be given, where \(x_{0} \in \overline{\hat{E}} = \overline{D}\), \(y_{p} \in \hat{E}\), and \(\lim_{p \to \infty}y_{p} = x_{0}\). We write \(x^{\lambda}_{m} = x^{\lambda}_{m}(s; x_{0})\) to indicate the dependence on \(x_{0}\) and \(s\). As in \cite{Kob} (see \cite[Page 131]{Mi}), let \(u_{\lambda}(t, s; x_{0}) = x_{0}\) for \(t = s\) and
\[
u_{\lambda}(t, s; x_{0}) = x^{\lambda}_{m}(s; x_{0})
\]
for \(t \in (s + t^{\lambda}_{m - 1}, s + t^{\lambda}_{m}] \cap (s, T]\), where \(m = 1, 2, 3, \dots, N^{\lambda}\). Let \(t, \tau \in [s, T]\). Thus \(u_{\lambda}(t, s; x_{0}) = x_{0} = x^{\lambda}_{0}\) for \(t = s\), and for \(t > s\) we choose \(1 \leq m \leq N^{\lambda}\) such that \(t \in (s + t^{\lambda}_{m - 1}, s + t^{\lambda}_{m}]\), for which \(u_{\lambda}(t, s; x_{0}) = x^{\lambda}_{m}\). Similarly, let \(u_{\mu}(\tau, s; x_{0}) = x_{0} = x^{\mu}_{0}\) for \(\tau = s\), and for \(\tau > s\) choose \(1 \leq n \leq N^{\mu}\) such that \(\tau \in (s + t^{\mu}_{n - 1}, s + t^{\mu}_{n}]\), for which \(u_{\mu}(\tau, s; x_{0}) = x^{\mu}_{n}(s; x_{0})\). Note
\begin{align*}
\|u_{\lambda}(t, s; x_{0}) - u_{\mu}(\tau, s; x_{0})\| &= \|x^{\lambda}_{m}(s; x_{0}) - x^{\mu}_{n}(s; x_{0})\|\\
&\leq \|x^{\lambda}_{m} - y^{\lambda}_{m}\| + \|y^{\lambda}_{m} - y^{\mu}_{n}\| + \|y^{\mu}_{n} - x^{\mu}_{n}\|.
\end{align*}
Also note \(m \lambda_{1} = t^{\lambda}_{m} \leq (t - s) + \lambda_{1} \leq (t - s) + \lambda\), \(n \mu_{1} = t^{\mu}_{n} \leq (\tau - s) + \mu_{1} \leq (\tau - s) + \mu\),
\begin{align*}
|m \lambda_{1} - n \mu_{1}| & = |t^{\lambda}_{m} - t^{\mu}_{n}| \leq |t^{\lambda}_{m} - (t - s)| + |(t - s) - (\tau - s)| + |(\tau - s) - t^{\mu}_{n}| \\
& \leq \lambda_{1} + |t - \tau| + \mu_{1} \leq \lambda + |t - \tau| + \mu,
\end{align*}
and
\[
(n \mu_{1} - (m + 1)\lambda_{1})^{2} \leq (|n \mu_{1} - m \lambda_{1}| + \lambda_{1})^{2} \leq (2(\mu_{1} + \lambda_{1}) + |t - \tau|)^{2}.
\]
Also note \(\gamma \leq \gamma_{1}\) and \(\gamma \leq \gamma_{2}\). Letting \(\delta^{2} = \sqrt{\mu_{1} + \lambda_{1}}\) and applying Lemma \ref{L:AG} and Propositions \ref{P:J}, \ref{P:I}, \ref{P:F}, \ref{P:G}, and \ref{P:H}, we have
\[
\limsup_{\lambda, \mu \to 0}[\sup_{s \leq t \leq T}\|u_{\lambda}(t, s; x_{0}) - u_{\mu}(t, s; x_{0})\|] \leq 2 e^{T \omega}\|x_{0} - y_{p}\|.
\]
Letting \(p \to \infty\), we conclude that
\[
U(t, s) x_{0} \equiv \lim_{\lambda \to 0}u_{\lambda}(t, s; x_{0})
\]
exists uniformly for \(t \in [s, T]\), and then \eqref{**} holds by Proposition \ref{P:I}:
\begin{equation} \label{**}
\|U(t, s)x_{0} - U(t, s)y_{0}\| \leq K\|x_{0} - y_{0}\|
\end{equation}
for \(x_{0}, y_{0} \in \overline{\hat{E}}\). Here \(U(t, s)x_{0} \equiv \lim_{\lambda \to 0}u_{\lambda}(t) \equiv \lim_{\lambda \to 0}u_{\lambda}(t, s; x_{0})\) is the so-called limit solution to equation \eqref{E:C}.

Next, we show that \(U(t, s)x_{0} \equiv u(t, s; x_{0}) \equiv \lim_{\lambda \to 0}u_{\lambda}(t) \equiv \lim_{\lambda \to 0} u_{\lambda}(t, s; x_{0})\) for \(x_{0} \in \hat{E}\) is a strong solution to \eqref{E:C} if \(A(t)\) is embeddedly quasi-demi-closed. From Lemma \ref{L:AE}, we have
\[
x^{\lambda}_{k} - \lambda_{1} A(s + t^{\lambda}_{k}) x^{\lambda}_{k} \ni \lambda_{1} \epsilon^{\lambda, x^{\lambda}_{k - 1}, s + t^{\lambda}_{k}} + x^{\lambda}_{k - 1}.
\]
For \(0 \le s \le t \le T\), we construct the Rothe functions \cite{Ka,Ro}. Let \(C^{\lambda}(s) = A(s)\) and \(C^{\lambda}(t) = A(s + t^{\lambda}_{k})\) for \(t \in (s + t^{\lambda}_{k-1}, s + t^{\lambda}_{k}]\), and let \(\chi^{\lambda}(s) = \chi^{\lambda}(s, s; x_{0}) = x_{0}\) and
\[
\chi^{\lambda}(t) = \chi^{\lambda}(t, s; x_{0}) = x^{\lambda}_{k-1} + (x^{\lambda}_{k} - x^{\lambda}_{k-1}) \frac{t - (s + t^{\lambda}_{k-1})}{\lambda_{1}}
\]
for \(t \in (s + t^{\lambda}_{k-1}, s + t^{\lambda }_{k}] \subset [s, T]\). Note \(\|\frac{x^{\lambda}_{k} - x^{\lambda}_{k-1}}{\lambda_{1}}\| \leq K\) for \(x_{0} \in \hat{E}\) by Proposition \ref{P:MA}.
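This bound is the source of the Lipschitz estimate \eqref{E:HA} below: since \(\chi^{\lambda}\) interpolates the \(x^{\lambda}_{k}\) linearly, for \(t, \tau\) in the same subinterval \((s + t^{\lambda}_{k-1}, s + t^{\lambda}_{k}]\),
\[
\|\chi^{\lambda}(t) - \chi^{\lambda}(\tau)\| = \frac{|t - \tau|}{\lambda_{1}}\,\|x^{\lambda}_{k} - x^{\lambda}_{k-1}\| \le K|t - \tau|.
\]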
From \cite[Pages 261-263]{Lin2} and \cite[Pages 8-9]{Lin4}, we have the following: for \(x_{0} \in \hat{E}\) and \(k = [\frac{t - s}{\lambda_{1}}]\) or \([\frac{t - s}{\lambda_{1}}] + 1\) (without loss of generality, assume the former),
\[
\lim_{\lambda \to 0}\sup_{t \in [s, T]}\|\chi^{\lambda}(t) - u_{\lambda}(t)\| = 0,
\]
\begin{equation} \label{E:HA}
\|\chi^{\lambda}(t) - \chi^{\lambda}(\tau)\| \leq K|t - \tau|
\end{equation}
for \(t, \tau \in ( s + t^{\lambda}_{k-1}, s + t^{\lambda}_{k}]\), and
\begin{equation} \label{E:HB}
\frac{d\chi^{\lambda}(t)}{dt} \in C^{\lambda}(t)u_{\lambda}(t) + \epsilon^{\lambda, x^{\lambda}_{k - 1}, s + t^{\lambda}_{k}}, \quad \chi^{\lambda}(s) = x_{0}
\end{equation}
for \(t \in (s + t^{\lambda}_{k-1}, s + t^{\lambda}_{k})\), where the last equation has values in \(B([s, T]; X)\), the real Banach space of all bounded functions from \([s, T]\) to \(X\), and
\begin{align*}
U(t, s)x_{0} &= u(t, s; x_{0}) \equiv \lim_{\lambda \to 0}u_{\lambda}(t) = \lim_{\lambda \to 0}\chi^{\lambda}(t)\\
&= \lim_{\lambda \to 0}x^{\lambda}_{[\frac{t - s}{\lambda_{1}}]} (s; x_{0}) = \lim_{k \to \infty}x^{\frac{t - s}{k}}_{k}(s; x_{0})
\end{align*}
uniformly for finite \(0 \le s \le t \le T\) and for \(x_{0} \in \hat{E}\). For \(x_{0} \in \hat{E}\) and for \(v^{\lambda}(t) \in C^{\lambda}(t)u_{\lambda}(t)\) for which \eqref{E:HB} gives
\[
\frac{d}{dt}\chi^{\lambda}(t) = v^{\lambda}(t) + \epsilon^{\lambda, x^{\lambda}_{k - 1}, s + t^{\lambda}_{k}},
\]
integration yields, for each \(\eta \in Y^{*} \subset X^{*}\),
\[
\eta(\chi^{\lambda}(t) - x_{0}) = \int_{s}^{t} \eta(v^{\lambda}(\tau) + \epsilon^{\lambda, x^{\lambda}_{k - 1}, s + t^{\lambda}_{k}}) \, d \tau,
\]
where \(\|v^{\lambda}(t)\| \leq K\) and \(\|\epsilon^{\lambda, x^{\lambda}_{k - 1}, s + t^{\lambda}_{k}}\| \le \lambda\). As \(k = [\frac{t - s}{\lambda_{1}}] \to \infty\) or \(\lambda \to 0\), the following hold:
\[
\eta(v^{\lambda}(t)) \to \eta(v(t, s; x_{0}))
\]
for some \(v(t, s; x_{0}) \in A(t)u(t, s; x_{0})\),
\begin{gather*}
\eta(u(t, s; x_{0}) - x_{0}) = \int_{s}^{t} \eta(v(\tau, s; x_{0})) \, d \tau = \eta\big(\int_{s}^{t} v(\tau, s; x_{0}) \, d \tau\big), \\
u(t, s; x_{0}) - x_{0} = \int_{s}^{t} v(\tau, s; x_{0}) \, d \tau
\end{gather*}
in \(Y\),
\[
\frac{d}{dt}u(t, s; x_{0}) = v(t, s; x_{0})
\]
in \(Y\) for almost every \(t \in (s, T)\), and
\[
\frac{d}{dt}u(t, s; x_{0}) \in A(t)u(t, s; x_{0})
\]
in \(Y\) for almost every \(t \in (s, T)\), with \(u(s, s; x_{0}) = x_{0}\). Thus \(U(t, s)x_{0} = u(t) = u(t, s; x_{0})\) is a strong solution to \eqref{E:C}.

Now, we show that \(U(t, s)\) is an evolution operator.

\noindent Step 1. Letting \(\lambda, \mu \to 0\), applying Lemma \ref{L:AG} and Propositions \ref{P:J}, \ref{P:I}, \ref{P:F}, \ref{P:G}, and \ref{P:H}, and then letting \(\delta = \sqrt{|t - \tau|}\), we have
\begin{align*}
&\lim_{\lambda, \mu \to 0}\|u_{\lambda}(t, s; x_{0}) - u_{\mu}(\tau, s; x_{0})\|\\
&= \|U(t, s) x_{0} - U(\tau, s) x_{0}\| \\
&\leq e^{3 T \omega} (2 \|x_{0} - y_{p}\| + 3|t - \tau| M(x_{0})) + K(\rho(\sqrt{|t - \tau|}) + |t - \tau|)\\
&\leq K(|t - \tau| + \rho(\sqrt{|t - \tau|}))
\end{align*}
for \(x_{0} = y_{p} \in \hat{E}\), \(0 \leq s \leq t, \tau \leq T\), and \(0 < \lambda_{1} \leq \lambda < \lambda_{0}\). Using Proposition \ref{P:MA} and letting \(\lambda \to 0\) completes the proof.
\end{proof}

\section{Applications} \label{S:BB}
In this section, we state some examples first and present their proofs next.
We include linear or nonlinear, single-valued or multi-valued, finite- or infinite-dimensional, and time-nonautonomous examples. Example \ref{ex1''} below deals with linear rotations about a general axis \(\hat{n}\). These general rotations have important applications in Physics \cite[Pages 73-75]{Al} and in the Global Positioning System (GPS) in Civil Engineering; see Soler and Marshall \cite[Pages 30-31]{So}. Compare the derivation of the formula by the physicist Altmann \cite{Al} with ours. As stated in Section \ref{S:A}, the examples here are interpreted as linear or nonlinear, time-autonomous or non-autonomous, single-valued or multi-valued rotations, of finite or infinite dimensions, evolving with time by satisfying \eqref{E:B} or \eqref{E:C} and preserving length in a linear or nonlinear, time-autonomous or non-autonomous way. This seems a complete approach to the rotation problems, compared to the approach by Altmann \cite{Al}. However, we should remark that our new examples here do not include applications from partial differential equations. This is because our examples require uniform continuity of \(A(t)\), which partial differential operators do not satisfy.

\begin{example}[{Kobayashi \cite[Pages 152-153]{Mi}}] \label{ex1} \rm
Define an operator \(A: D(A) \subset \mathbb{R}^{2} \to \mathbb{R}^{2}\) by
\[
A \begin{pmatrix} x \\ y \end{pmatrix} \equiv \begin{pmatrix} y \\ -x \end{pmatrix} \quad \mbox{for}\quad \begin{pmatrix} x \\ y \end{pmatrix} \in D(A) \equiv \{ \begin{pmatrix} x \\ y \end{pmatrix} \in \mathbb{R}^{2}: x^{2} + y^{2} = 1 \}.
\]
Here \((\mathbb{R}^{2}, \|\cdot\|)\) is a real Hilbert space with the inner product \((u, v)\) of \(u\) and \(v\) in \(\mathbb{R}^{2}\) and with the norm \(\|u\| = \sqrt{(u, u)}\). Note that \(A\) is not linear, since \(D(A)\) is not a linear space. Note also that \(A\) is uniformly continuous, dissipative, and satisfies (R4), (R6), (R1), and (R2) (for which uniform continuity of \(A\) is not as necessary as in Kobayashi; see Miyadera \cite[Pages 152-153]{Mi}), but does not satisfy (iii). The equation \eqref{E:B} has a strong (in fact, a classical) solution,
\[
u(t) \equiv \lim_{\lambda \to 0} \begin{pmatrix} \cos(\theta_{0} - t\frac{\arctan(\lambda)}{\lambda}) \\ \sin(\theta_{0} - t\frac{\arctan(\lambda)}{\lambda}) \end{pmatrix} = \begin{pmatrix} \cos(t) & \sin(t) \\ - \sin(t) & \cos(t) \end{pmatrix} u_{0}
\]
for \(u_{0} = \begin{pmatrix} \cos(\theta_{0}) \\ \sin(\theta_{0}) \end{pmatrix} \in D(A)\). With \(A_{M} \equiv \begin{pmatrix} 0 & 1 \\ -1 & 0 \end{pmatrix}\), a matrix whose restriction to the unit sphere is the matrix representation of \(A\), the solution also equals \(e^{t A_{M}} \equiv \sum_{n = 0}^{\infty}\frac{(tA_{M})^{n}}{n!}\), applied to \(u_{0}\), which is
\[
\begin{pmatrix} \cos(t) & \sin(t) \\ - \sin(t) & \cos(t) \end{pmatrix} u_{0}.
\]
But this is a coincidence, since, for a general matrix \(S\), \(e^{t S}\), existing as an infinite series in \(t S\), does not leave the unit sphere invariant in general. The examples below, nonlinear or nonautonomous or multivalued or of infinite dimensions, cannot be derived by restriction as in this example.
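As a direct check, not needed for the theory, the limit displayed above does solve \eqref{E:B} classically: writing \(u(t) = (u_{1}(t), u_{2}(t))^{T} = \begin{pmatrix} \cos(t) & \sin(t) \\ -\sin(t) & \cos(t) \end{pmatrix} u_{0}\), a componentwise differentiation gives
\[
\frac{d}{dt}u(t) = \begin{pmatrix} -\sin(t) & \cos(t) \\ -\cos(t) & -\sin(t) \end{pmatrix} u_{0} = \begin{pmatrix} u_{2}(t) \\ -u_{1}(t) \end{pmatrix} = Au(t),
\]
and \(u(t)\) stays on the unit circle, so that \(u(t) \in D(A)\) for all \(t \geq 0\).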
\end{example}

\begin{example} \label{ex1'}\rm
In Example \ref{ex1}, replace \(\mathbb{R}^{2}\) by \(\mathbb{R}^{n}\), where \(n \in \mathbb{N}\), \(n \ge 2\), and replace the operator \(A\) by
\[
A \begin{pmatrix} x_{1} \\ x_{2} \\ \vdots \\ x_{n} \end{pmatrix} = \begin{pmatrix} y_{1} \\ y_{2} \\ \vdots \\ y_{n} \end{pmatrix} \quad \text{for} \quad \begin{pmatrix} x_{1} \\ x_{2} \\ \vdots \\ x_{n} \end{pmatrix} \in D(A),
\]
the unit sphere in \(\mathbb{R}^{n}\), where \(\sum_{i = 1}^{n}y_{i}x_{i} = 0\), \(y_{i} \in \mathbb{R}\), and the \(y_{i}\) are linear functions of \(x_{1}, x_{2}, \dots, x_{n}\); or define \(A(\cdot)\) as one element from, or two elements from, \dots, or all elements from the set
\[
P \equiv \{ \begin{pmatrix} y_{1} \\ y_{2} \\ \vdots \\ y_{n} \end{pmatrix} \in \mathbb{R}^{n}: \sum_{i = 1}^{n}y_{i}x_{i} = 0, \quad \text{where the $y_{i}$ are linear functions of \(x_{1}, x_{2}, \dots, x_{n}\)}\}.
\]
Then the results in Example \ref{ex1} hold, as in the proof of Example \ref{ex1}. The details of the proof are left to the reader. Here note that \(P \ne \emptyset\); e.g.,
\[
\begin{pmatrix} y_{1} \\ y_{2} \\ y_{3} \end{pmatrix} \equiv \begin{pmatrix} n_{3} x_{2} - n_{2}x_{3} \\ -n_{3}x_{1} + n_{1}x_{3} \\ n_{2}x_{1} - n_{1}x_{2} \end{pmatrix},
\]
where \(n_{1}, n_{2}, n_{3}\) are constants as in Example \ref{ex1''} below, lies in \(P\). Note also that each element in \(P\) determines uniquely an axis \(\hat{n}\), a vector in \(\mathbb{R}^{n}\), about which the associated matrices rotate. The axis \(\hat{n}\) in Example \ref{ex1''} is \(\begin{pmatrix} n_{1} \\ n_{2} \\ n_{3} \end{pmatrix}\).
\end{example}

\noindent\textbf{Remark.} The unit sphere, centered at the origin, can be replaced by the general sphere of radius \(r\), for some \(r > 0\), centered at the origin, or by the set
\[
\{u \in \mathbb{R}^{n}: 0 < r_{0} \le \|u\| \le r_{1}\}
\]
for some \(r_{1} > r_{0} > 0\). This also applies to the other examples.

The next example is a special case of Example \ref{ex1'}, and its solution will be computed explicitly.

\begin{example} \label{ex1''} \rm
In Example \ref{ex1'}, let \(n = 3\), let
\(\hat{n} = \begin{pmatrix} n_{1} \\ n_{2} \\ n_{3} \end{pmatrix}\)
be a unit vector in \(\mathbb{R}^{3}\), and let
\[
\begin{pmatrix} y_{1} \\ y_{2} \\ y_{3} \end{pmatrix} = \begin{pmatrix} n_{3} x_{2} - n_{2} x_{3} \\ - n_{3} x_{1} + n_{1} x_{3} \\ n_{2} x_{1} - n_{1} x_{2} \end{pmatrix}.
\]
The results in Example \ref{ex1'} hold, and the solution to \eqref{E:B} is given by the limit, as \(\lambda \to 0\) with \(f_{\lambda} = \arctan(\lambda)/\lambda\), of
{\scriptsize
\[
\begin{pmatrix}
\cos t f_{\lambda} + (1 - \cos t f_{\lambda})n_{1}^{2} & n_{3}\sin t f_{\lambda} + n_{1} n_{2} (1 - \cos t f_{\lambda}) & -n_{2}\sin t f_{\lambda} + n_{1}n_{3}(1 - \cos t f_{\lambda}) \\
-n_{3}\sin t f_{\lambda} + (1 - \cos t f_{\lambda})n_{1}n_{2} & \cos t f_{\lambda} + n_{2}^{2}(1 - \cos t f_{\lambda}) & n_{1}\sin t f_{\lambda} + n_{2}n_{3}(1 - \cos t f_{\lambda}) \\
n_{2}\sin t f_{\lambda} + (1 - \cos t f_{\lambda})n_{1}n_{3} & -n_{1}\sin t f_{\lambda} + n_{2}n_{3}(1 - \cos t f_{\lambda}) & \cos t f_{\lambda} + n_{3}^{2}(1 - \cos t f_{\lambda})
\end{pmatrix} u_{0},
\]}
which is
{\scriptsize
\[
\begin{pmatrix}
\cos(t) + (1 - \cos(t))n_{1}^{2} & n_{3}\sin(t) + n_{1}n_{2}(1 - \cos(t)) & -n_{2}\sin(t) + n_{1}n_{3}(1 - \cos(t)) \\
-n_{3}\sin(t) + (1 - \cos(t))n_{1}n_{2} & \cos(t) + n_{2}^{2}(1 - \cos(t)) & n_{1}\sin(t) + n_{2}n_{3}(1 - \cos(t)) \\
n_{2}\sin(t) + (1 - \cos(t))n_{1}n_{3} & -n_{1}\sin(t) + n_{2}n_{3}(1 - \cos(t)) & \cos(t) + n_{3}^{2}(1 - \cos(t))
\end{pmatrix} u_{0},
\]}
where the associated matrices are rotations about the axis \(\hat{n}\). These general rotations have applications to Physics (Altmann \cite[Pages 73-75]{Al}) and to the Global Positioning System (GPS) in Civil Engineering (Soler and Marshall \cite[Pages 30-31]{So}). Compare the derivation of the formula in \cite{Al} with ours.
\end{example}

\begin{example} \label{ex2} \rm
Replace the operator \(A\) in Example \ref{ex1} by the time-dependent operator
\[
A(t) \begin{pmatrix} x \\ y \end{pmatrix} = a(t) \begin{pmatrix} y \\ -x \end{pmatrix},
\]
where \(a(t)\) is a continuous function on \([0, T]\) with \(a(t) \ge \delta_{0} > 0\) for some \(\delta_{0}\). Using the proof of Example \ref{ex1}, we readily show that \(A(t)\) is uniformly continuous and dissipative, uniformly for all \(t \in [0, T]\), and satisfies (R4), (R6), (T4), (R1), (R2), and (T1) (for which uniform continuity of \(A(t)\) is not as necessary as in Kobayashi; see Miyadera \cite[Pages 152-153]{Mi}), but does not satisfy (iii) or (x). Here note \(E = \overline{\hat{E}} = \overline{D(A(0))} = D(A(0))\). So the equation \eqref{E:C} has a strong (in fact, a classical) solution. The details of the proof are left to the reader.
\end{example}

\noindent\textbf{Remark.} Extensions of Example \ref{ex2} to \(n\) dimensions with multi-valued \(A(t)\) are performed as in Examples \ref{ex1'} and \ref{ex1''}. The details are left to the reader.

\noindent\textbf{Remark.} In infinite dimensions, uniform continuity of \(A\) is needed for (R4), (R6), (R1), and (R2), as in Kobayashi (see Miyadera \cite[Pages 152-153]{Mi}); the following examples indicate how it is needed. Uniform continuity of \(A\) is defined as follows: for each \(\epsilon > 0\), there is a \(\delta > 0\), such that for \(x, y \in D(A)\) with \(\|x - y\| < \delta\),
\[
\|u - v\| < \epsilon
\]
holds for some \(u \in Ax\) and some \(v \in Ay\), where \(\delta\) does not depend on \(x, y\).

\begin{example} \label{ex3} \rm
Let \(H\) be a real Hilbert space with the inner product \((u, v)\) and norm \(\|u\| = \sqrt{(u, u)}\).
Define a \(t\)-dependent nonlinear operator \(A'(t): D(A'(t)) = H \to H\) by \(A'(t)u = a(t)Bu\) for \(u \in H\), where
\[
B: D(B) = H \to H
\]
is a nonlinear operator such that \(\|Bu\| = 1\) (or both \(\|Bu\| \ge \delta_{00} > 0\) and \(\|Bu\| \le \delta_{000}\) for some \(\delta_{00}, \delta_{000} > 0\), as the following proof shows) for \(u \in H\) with \(\|u\| = 1\) (or, in the multi-valued case, \(\|v\| = 1\), or both \(\|v\| \ge \delta_{00} > 0\) and \(\|v\| \le \delta_{000}\), for all \(v \in Bu\) and for each \(u \in H\) with \(\|u\| = 1\)), and such that one of the following three conditions holds:
\begin{itemize}
\item If \(B\) is single-valued, then \((Bu, v) + (u, Bv) = 0\) for \(u, v \in H\) and \(B\) is uniformly continuous.
\item If \(B\) is single-valued, then \((Bu, u) = 0\), \((B - \omega_{0})\) is dissipative for some \(\omega_{0} \in \mathbb{R}\), and \(B\) is uniformly continuous.
\item If \(B\) is multi-valued, then \((v, u) = 0\) for all \(v \in Bu\), \((B - \omega_{0})\) is dissipative for some \(\omega_{0} \in \mathbb{R}\), and \(B\) is uniformly continuous (as defined in the Remark above this example).
\end{itemize}
The first condition in Example \ref{ex3} is equivalent to \((Bu, u) = 0\) for \(u \in H\) and \((Bu - Bv, u - v) = 0\) for \(u, v \in H\); in that case,
\[
(A'(t)u, v) + (u, A'(t)v) = 0,\quad (A'(t)u, u) = 0, \quad A'(t)0 = 0\,.
\]
Here \(a(t)\) is as in Example \ref{ex2}. Let \(A(t)\) be the restriction of \(A'(t)\) to
\[
D(A(t)) \equiv \{u \in H: \|u\| = 1\}.
\]
Then the conclusions in Example \ref{ex2} hold for \(A(t)\).
\end{example}

\begin{example} \label{ex4} \rm
This example follows from Examples \ref{ex3} and \ref{ex1''}. Let \(X = \mathbb{R}^{4}\), a real Hilbert space. Let
\[
B = \begin{pmatrix} 0 & 1 & 0 & 0 \\ -1 & 0 & 0 & 0 \\ 0 & 0 & 0 & -1 \\ 0 & 0 & 1 & 0 \end{pmatrix}
\]
or
\[
B = \begin{pmatrix} 0 & 1 & 0 & -1 \\ -1 & 0 & 1 & 0 \\ 0 & -1 & 0 & -1 \\ 1 & 0 & 1 & 0 \end{pmatrix}\,.
\]
Let \(A'(t) = a(t)B\) and let \(A(t)\) be the restriction of \(A'(t)\) to the unit sphere in \(\mathbb{R}^{4}\). Here \(a(t)\) is as in Example \ref{ex2}. Then the conclusions in Example \ref{ex3} hold for \(A(t)\). In general, for \(H = \mathbb{R}^{2n}\), \(n \in \mathbb{N}\), a real Hilbert space, and for \(A'(t) = a(t)B\), where \(a(t)\) is as in Example \ref{ex2} and \(B = (b_{ij})\) is a real anti-symmetric \(2n \times 2n\) matrix (that is, \(B + B^{T} = 0\), or \(b_{ii} = 0\) and \(b_{ij} + b_{ji} = 0\) for \(i, j = 1, 2, \dots, 2n\)), the conclusions in Example \ref{ex3} hold for \(A(t)\) after suitably choosing the \(b_{ij}\). Here \(A(t)\) is the restriction of \(A'(t)\) to the unit sphere in \(\mathbb{R}^{2n}\). The details are left to the reader.
\end{example}

\noindent\textbf{Remark.} Example \ref{ex4} extends to the case of infinite dimensions (see Example \ref{ex5o} below), by using the abstract results in Example \ref{ex3}.

\begin{example} \label{ex5} \rm
Let \(H = L^{2}(-\pi, \pi)\) be a real Hilbert space with the inner product \((u, v) = \int_{-\pi}^{\pi}u(x)v(x) \, dx\) and norm \(\|u\| = \sqrt{(u, u)}\).
Let \(S \equiv \{\phi_{0}, \phi_{1}, \phi_{2}, \dots \}\) be an orthonormal system in \(L^{2}(-\pi, \pi)\), where
\[
\phi_{0}(x) = \frac{1}{\sqrt{2\pi}},\quad \phi_{2n - 1}(x) = \frac{\cos(nx)}{\sqrt{\pi}}, \quad \phi_{2n}(x) = \frac{\sin(nx)}{\sqrt{\pi}}, \quad n = 1, 2, \dots
\]
For \(f \in L^{2}(-\pi, \pi)\), by Coddington and Levinson \cite[Page 199]{Co}, we have \(f = \sum_{n = 0}^{\infty}\alpha_{n}\phi_{n}\); this holds in \(L^{2}(-\pi, \pi)\), with \(\|f - \sum_{i = 0}^{n}\alpha_{i}\phi_{i}\| \to 0\) as \(n \to \infty\), where \(\alpha_{n} = \int_{-\pi}^{\pi}f(x)\phi_{n}(x) \, dx\) is uniquely determined. Define a linear operator \(B: D(B) = H \to H\) by
\[
Bf = \sum_{n = 0}^{\infty}\beta_{n}\phi_{n},
\]
where \(\beta_{2n} = \alpha_{2n + 1}\), \(\beta_{2n + 1} = -\alpha_{2n}\), \(n = 0, 1, 2, \dots\). For convenience, the operator \(B\) can be thought of as the infinite matrix
\[
B_{M} = \begin{pmatrix} 0 & 1 & & & \\ -1 & 0 & & & \\ & & 0 & 1 & \\ & & -1 & 0 & \\ & & & & \ddots \end{pmatrix},
\]
for which
\[
\begin{pmatrix} \beta_{0} \\ \beta_{1} \\ \vdots \end{pmatrix} = B_{M} \begin{pmatrix} \alpha_{0} \\ \alpha_{1} \\ \vdots \end{pmatrix}.
\]
Note that the \(2 \times 2\) matrix \(B_{0} = \begin{pmatrix} 0 & 1 \\ -1 & 0 \end{pmatrix}\) in the upper left corner is repeated down along the diagonal in the infinite matrix \(B_{M}\), and that the sequences \(\{\alpha_{i}\}, \{\beta_{i}\}\) are in \(l^{2}\), which is defined to consist of all sequences \(\xi = \{\xi_{i}\}\) such that \(\sum_{i = 0}^{\infty}|\xi_{i}|^{2} < \infty\). The norm in \(l^{2}\) is
\[
\|\xi\| = \big(\sum_{i = 0}^{\infty}|\xi_{i}|^{2}\big)^{1/2}.
\]
Let \(A'(t) = a(t)B\) and let \(A(t)\) be the restriction of \(A'(t)\) to the unit sphere in \(H\). Here \(a(t)\) is as in Example \ref{ex2}. Then the conclusions in Example \ref{ex3} hold for \(A(t)\). The convenience of this symbolic representation will be seen in the next example.
\end{example}

\begin{example} \label{ex5o} \rm
The operator \(B\) in Example \ref{ex5} can be replaced by an operator \(B\) whose symbolic matrix representation \(B_{M} = (b_{ij})\) is real anti-symmetric in an obvious sense (Kato \cite[Pages 143, 269-270]{Kato}; Taylor and Lay \cite[Pages 57, 215]{Tay}):
\[
B_{M} + B_{M}^{T} = 0 \quad \text{or} \quad b_{ij} + b_{ji} = 0, \quad i, j \in \{0\}\cup \mathbb{N}.
\]
Here it is assumed that for each \(\{\alpha_{i}\} \in l^{2}\), defined in Example \ref{ex5},
\[
\beta_{i} = \sum_{j = 0}^{\infty}b_{ij}\alpha_{j}
\]
converges for each \(i\), and that the sequence \(\{\beta_{i}\}\) lies in \(l^{2}\). Under this assumption, it follows (Taylor and Lay \cite[Pages 215-216]{Tay}) that \(B_{M}: l^{2} \to l^{2}\) is a continuous mapping. But we need uniform continuity of \(B_{M}\), and so further restrictions on \((b_{ij})\) are required. For this purpose, \(B_{M}\) can be, e.g.,
\[
B_{M} = \begin{pmatrix} 0 & 2 & 0 & -3 & \\ -2 & 0 & 4 & 0 & \\ 0 & -4 & 0 & -5 & \\ 3 & 0 & 5 & 0 & \\ & & & & \ddots \end{pmatrix}.
\]
Here the \(4 \times 4\) real anti-symmetric matrix \(B_{0}\) in the upper left corner of the infinite matrix \(B_{M}\) is repeated down along the diagonal. Note that the upper left corner matrix \(B_{0}\) is real anti-symmetric but can be of any finite size. Thus, this example provides a large class of examples in infinite dimensions, which might be of interest.
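For such a block-diagonal choice, the required properties can be checked directly: since the same finite anti-symmetric block \(B_{0}\) is repeated along the diagonal, \(B_{M}\) acts blockwise, so that
\[
\|B_{M}\xi\|^{2} = \sum_{k}\|B_{0}\xi^{(k)}\|^{2} \le \|B_{0}\|^{2}\sum_{k}\|\xi^{(k)}\|^{2} = \|B_{0}\|^{2}\|\xi\|^{2},
\]
where \(\xi^{(k)}\) denotes the \(k\)-th block of \(\xi \in l^{2}\). Hence \(B_{M}\) is bounded and linear, thus Lipschitz and in particular uniformly continuous, while anti-symmetry gives \((B_{M}\xi, \xi) = 0\).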
This large class of examples can be used in the set \(P\) in Example \ref{ex5'} below.
\end{example}
The proof of the above example follows that of Example \ref{ex5}, which used the abstract results in Example \ref{ex3}. The details are left to the reader.

\begin{example} \label{ex5'} \rm
In Example \ref{ex5}, replace \(Bf = \sum_{n = 0}^{\infty}\beta_{n}\phi_{n}\), where \(\beta_{2n} = \alpha_{2n + 1}\), \(\beta_{2n + 1} = - \alpha_{2n}\), by \(Bf\) given by one element, or two elements, or \dots, or all elements from \(P\), where
\begin{align*}
P \equiv& \big\{g = \sum_{n = 0}^{\infty} \beta_{n}\phi_{n} \in L^{2}(-\pi, \pi): 0 = (g, f) = \sum_{n = 0}^{\infty}\beta_{n}\alpha_{n},\\
&\text{with \(\beta_{n}\) linear functions of \(\alpha_{0}, \alpha_{1}, \dots, \alpha_{m},\) for some \(m\)} \big\},
\end{align*}
as in Example \ref{ex1''}. Then the results proved in Example \ref{ex5} hold. The details of the proof are left to the reader. Here note that \(P \ne \emptyset\) (e.g. Example \ref{ex5}), and that each element in \(P\) determines uniquely an axis \(\hat{n}\), a vector in \(L^{2}(-\pi, \pi)\), about which the associated semigroup of operators rotates. For example, the axis is
\[
\hat{n} = \sum_{i = 0}^{2}n_{i}\phi_{i},
\]
where \(\sum_{i = 0}^{2}n_{i}^{2} = 1\), if, for \(f = \sum_{i = 0}^{2}\alpha_{i}\phi_{i} \in D(B)\) with \(\sum_{i = 0}^{2}\alpha_{i}^{2} = 1\),
\[
Bf \equiv (n_{2}\alpha_{1} - n_{1}\alpha_{2})\phi_{0} + (-n_{2}\alpha_{0} + n_{0}\alpha_{2})\phi_{1} + (n_{1}\alpha_{0} - n_{0}\alpha_{1})\phi_{2}.
\]
\end{example}

\begin{example} \label{ex6} \rm
Replace the operator \(A(\cdot)\) in Example \ref{ex2} by
\[
A(t) \begin{pmatrix} x \\ y \end{pmatrix} = a(t) B \begin{pmatrix} x \\ y \end{pmatrix} \equiv a(t)\sqrt{1+ y^{2}} \begin{pmatrix} y \\ -x \end{pmatrix}.
\]
Then the conclusions in Example \ref{ex2} hold for \(A(t)\).
\end{example}

\noindent\textbf{Remark.} The nonlinear operator \(A(t)\) in Example \ref{ex6} can be obtained by adjusting \(A(t) \begin{pmatrix} x \\ y \end{pmatrix}\) to, e.g.,
\[
a(t)\sqrt{1 + x^{2}} \begin{pmatrix} y \\ -x \end{pmatrix}, \quad a(t)\sqrt{1 + y^{4}} \begin{pmatrix} y \\ -x \end{pmatrix}, \dots, \quad \text{or} \quad a(t)h(x, y) \begin{pmatrix} y \\ -x \end{pmatrix},
\]
where the \(h(x, y)\) are differentiable functions of \(x, y\). Extensions to \(n\) dimensions with multi-valued \(A(t)\) are performed as in Example \ref{ex1''}. The details are left to the reader.

\begin{example} \label{ex7} \rm
Replace \(Bf = \sum_{i = 0}^{\infty}\beta_{i}\phi_{i}\) in Example \ref{ex5} by
\[
Bf = \sqrt{\alpha_{1}^{2} + 1} \sum_{i = 0}^{\infty}\beta_{i}\phi_{i}.
\]
Then the conclusions in Example \ref{ex5} hold for \(A(t)\).
\end{example}

\noindent\textbf{Remark.} The nonlinear operator \(A(t)\) in Example \ref{ex7} can be obtained by adjusting \(Bf\) to
\[
\sqrt{1 + \alpha_{2}^{2}}\sum_{i = 0}^{\infty}\beta_{i}\phi_{i}, \quad \sqrt{1 + \alpha_{3}^{2}}\sum_{i = 0}^{\infty} \beta_{i}\phi_{i}, \dots, \quad \text{or} \quad h(\alpha_{0}, \alpha_{1}, \dots, \alpha_{n}) \sum_{i = 0}^{\infty}\beta_{i}\phi_{i},
\]
where the \(h(\alpha_{0}, \alpha_{1}, \dots, \alpha_{n})\) are differentiable functions of \(\alpha_{0}, \alpha_{1}, \dots, \alpha_{n}\) for each \(n = 0, 1, 2, \dots\). These nonlinearities also apply to Example \ref{ex5o} and Example \ref{ex5'}. The details are left to the reader.
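Before turning to the proofs, we record an elementary estimate used repeatedly below (for instance in Step 4 of the proof of Example \ref{ex1}): for \(\lambda > 0\),
\[
\frac{\sqrt{1 + \lambda^{2}} - 1}{\lambda} = \frac{\lambda}{\sqrt{1 + \lambda^{2}} + 1} \le \frac{\lambda}{2} \to 0 \quad \text{as } \lambda \to 0,
\]
which is the quantitative form of the range condition (vi) in these rotation examples.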
\begin{proof}[Proof of Example \ref{ex1}]
The proof given here, with parts different from Miyadera \cite[Pages 152-153]{Mi}, is intended to indicate the extent to which uniform continuity of \(A\) is not necessary.

\noindent Step 1. It is easy to see that \(A\) is uniformly continuous and dissipative.

\noindent Step 2. Let \(u \in D(A)\) and \(w = u - \lambda Au\), where \(\lambda > 0\). Simple calculations show that
\[
\|w\|^{2} = 1 + \lambda^{2}.
\]
Note \((Au, u) = 0\) and \((Au, Au) = 1\). So
\[
\mathop{\rm Ran}(I - \lambda A) \cap D(A) = \emptyset
\]
and
\[
\mathop{\rm Ran}(I - \lambda A) \subset S \equiv \{u \in \mathbb{R}^{2}: \|u\| = \sqrt{1 + \lambda^{2}}\}.
\]

\noindent Step 3. Claim: \(\mathop{\rm Ran}(I - \lambda A) = S\). Let \(h = \sqrt{1 + \lambda^{2}}v\), where
\[
v = \begin{pmatrix} \cos(\theta) \\ \sin(\theta) \end{pmatrix}
\]
is an element in the unit circle in \(\mathbb{R}^{2}\). Here \(0 \le \theta \le 2 \pi\). Suppose
\[
u = \begin{pmatrix} \cos(\phi) \\ \sin(\phi) \end{pmatrix} \in D(A)
\]
is such that \(u - \lambda Au = h\), where \(0 \le \phi \le 2 \pi\). Then
\begin{gather*}
\sqrt{1 + \lambda^{2}} \cos(\phi + \psi) = \cos(\phi) - \lambda \sin(\phi) = \sqrt{1 + \lambda^{2}}\cos(\theta)\,, \\
\sqrt{1 + \lambda^{2}}\sin(\phi + \psi) = \sin(\phi) + \lambda \cos(\phi) = \sqrt{1 + \lambda^{2}}\sin(\theta),
\end{gather*}
where \(\lambda = \tan(\psi)\). So such a \(u\) exists.

\noindent Step 4. Since \(D(A) = \overline{D(A)}\) is the unit circle and \(S\) is a circle concentric with \(D(A)\), of radius \(\sqrt{1 + \lambda^{2}}\), it follows that
\[
d(\mathop{\rm Ran}(I - \lambda A), u) = \sqrt{1 + \lambda^{2}} - 1
\]
holds for \(u \in D(A) = \overline{D(A)}\). So
\[
\frac{d(\mathop{\rm Ran}(I - \lambda A), u)}{\lambda} = \frac{\sqrt{1 + \lambda^{2}} - 1}{\lambda} \to 0
\]
as \(\lambda \to 0\), uniformly for all \(u \in D(A) = \overline{D(A)}\).

\noindent Step 5. Since \(A\) is uniformly continuous (and so \(A\) is embeddedly quasi-demi-closed), equation \eqref{E:C} (or \eqref{E:B}) has a strong solution \(u(t, 0; x_{0})\).
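Quantitatively, each resolvent step rotates by the angle \(\psi = \arctan(\lambda)\) (Step 3), so after \([t/\lambda]\) steps the accumulated angle is
\[
\Big[\frac{t}{\lambda}\Big]\arctan(\lambda) = t\,\frac{\arctan(\lambda)}{\lambda} + O(\lambda) \to t \quad \text{as } \lambda \to 0;
\]
this is the factor \(\arctan(\lambda)/\lambda\) appearing in the statement of the example.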
In fact, the strong solution is a classical solution, as the following shows: let \(0 < \lambda < \lambda_{0}\), small enough, and
\[
u = \begin{pmatrix} u_{1} \\ u_{2} \end{pmatrix} \in D(A), \quad v = u - \lambda Au = \begin{pmatrix} u_{1} - \lambda u_{2} \\ u_{2} + \lambda u_{1} \end{pmatrix};
\]
then \(\|v\| = \sqrt{1 + \lambda^{2}}\),
\[
w \equiv \frac{v}{\|v\|} = \frac{1}{\sqrt{1 + \lambda^{2}}} \begin{pmatrix} 1 & - \lambda \\ \lambda & 1 \end{pmatrix} u \in D(A),
\]
and \(\tan(\theta) = \lambda\), where \(\theta\) is the angle between the vector \(v\) and the vector \(u\);
\[
u = \frac{1}{\sqrt{1 + \lambda^{2}}} \begin{pmatrix} 1 & \lambda \\ - \lambda & 1 \end{pmatrix} w = \begin{pmatrix} \cos(\theta) & \sin(\theta) \\ - \sin(\theta) & \cos(\theta) \end{pmatrix} w;
\]
from the proof of Theorem \ref{T:B},
\[
x_{1}^{\lambda}= \begin{pmatrix} \cos(\theta) & \sin(\theta) \\ -\sin(\theta) & \cos(\theta) \end{pmatrix} x_{0}, \dots, x_{n}^{\lambda} = \begin{pmatrix} \cos(n \theta) & \sin(n \theta) \\ - \sin(n \theta) & \cos(n \theta) \end{pmatrix} x_{0};
\]
\begin{align*}
u(t, 0; x_{0}) &\equiv \lim_{\lambda \to 0}x_{[\frac{t}{\lambda}]}^{\lambda} = \lim_{n \to \infty}x_{n}^{\frac{t}{n}} \\
&= \lim_{\theta \to 0} \begin{pmatrix} \cos(t \frac{\theta}{\tan(\theta)}) & \sin(t \frac{\theta}{\tan(\theta)}) \\ -\sin(t\frac{\theta}{\tan(\theta)}) & \cos(t \frac{\theta}{\tan(\theta)}) \end{pmatrix} x_{0} \\
&=\begin{pmatrix} \cos(t) & \sin(t) \\ -\sin(t) & \cos(t) \end{pmatrix} x_{0} \in D(A)\,.
\end{align*}
From the proof of Theorem \ref{T:B} about a strong solution, we have
\[
u(t, 0; x_{0}) - x_{0} = \int_{0}^{t}Au(\tau, 0; x_{0}) \, d\tau,
\]
which is differentiable, since \(Au(\tau, 0; x_{0})\) is continuous in \(\tau\); so \(u(t, 0; x_{0})\) is a classical solution. Uniqueness follows from dissipativity, as is standard.
\end{proof}

\begin{proof}[Proof of Example \ref{ex1''}]
Let \(0 < \lambda < \lambda_{0}\), small enough, and let
\[
u = \begin{pmatrix} u_{1} \\ u_{2} \\ u_{3} \end{pmatrix} \in D(A),\quad v = \begin{pmatrix} v_{1} \\ v_{2} \\ v_{3} \end{pmatrix} \equiv u - \lambda Au = \begin{pmatrix} u_{1} - \lambda n_{3} u_{2} + \lambda n_{2}u_{3} \\ u_{2} + \lambda n_{3} u_{1} - \lambda n_{1} u_{3} \\ u_{3} - \lambda n_{2}u_{1} + \lambda n_{1} u_{2} \end{pmatrix},
\]
where \(\sum_{i = 1}^{3} u_{i}^{2} = 1\) and
\[
\hat{n} = \begin{pmatrix} n_{1} \\ n_{2} \\ n_{3} \end{pmatrix}
\]
with \(\sum_{i = 1}^{3}n_{i}^{2} = 1\). Then the terminal point of \(u\) and that of \(v\) are on the same plane, which is perpendicular to the axis \(\hat{n}\), since \(u_{\parallel} = v_{\parallel}\), where \(u_{\parallel}\), the component of \(u\) on the axis \(\hat{n}\), is given by
\[
u_{\parallel} = \hat{n}(u, \hat{n}) = \hat{n}\sum_{i = 1}^{3}u_{i}n_{i},
\]
and \(v_{\parallel}\), the component of \(v\) on the axis \(\hat{n}\), is given by
\[
v_{\parallel} = \hat{n}(v, \hat{n}) = \hat{n}\sum_{i = 1}^{3}v_{i}n_{i} = \hat{n}\sum_{i = 1}^{3}u_{i}n_{i} = u_{\parallel}.
\]
Here \((u, \hat{n}) = \sum_{i = 1}^{3}u_{i}n_{i}\) is the inner product; indeed,
\[
\hat{n}(v, \hat{n}) = \hat{n}[n_{1}u_{1} - \lambda n_{1}n_{3}u_{2} + \lambda n_{1}n_{2}u_{3} + n_{2}u_{2} + \lambda n_{2}n_{3}u_{1} - \lambda n_{1}n_{2} u_{3} + n_{3}u_{3} - \lambda n_{2}n_{3}u_{1} + \lambda n_{1}n_{3}u_{2}] = \hat{n}(u, \hat{n}),
\]
the terms involving \(\lambda\) cancelling in pairs.
The component \(v_{\perp}\) of \(v\), that is perpendicular to the axis \(\hat{n}\), is given by
\begin{align*}
v_{\perp} &= v - v_{\parallel} = v - u_{\parallel} \\
&= \begin{pmatrix}
(1 - n_{1}^{2})u_{1} + (- \lambda n_{3} - n_{1}n_{2})u_{2} + (\lambda n_{2} - n_{1}n_{3})u_{3} \\
(\lambda n_{3} - n_{1}n_{2})u_{1} + (1 - n_{2}^{2})u_{2} + (-\lambda n_{1} - n_{2}n_{3})u_{3} \\
(-\lambda n_{2} - n_{1}n_{3})u_{1} + (\lambda n_{1} - n_{2}n_{3})u_{2} + (1 - n_{3}^{2})u_{3}
\end{pmatrix}.
\end{align*}
The component \(u_{\perp}\) of \(u\), that is perpendicular to the axis \(\hat{n}\), is given by
\[
u_{\perp} = u - u_{\parallel} = u - \hat{n}(u, \hat{n}) = \begin{pmatrix} u_{1} - n_{1}(u, \hat{n}) \\ u_{2} - n_{2}(u, \hat{n}) \\ u_{3} - n_{3}(u, \hat{n}) \end{pmatrix}.
\]
Note the following calculations:
\[
\|u_{\perp}\|^{2} = \|u\|^{2} - \|u_{\parallel}\|^{2} = 1 - (u, \hat{n})^{2};
\]
\begin{align*}
\|v\|^{2}&= u_{1}^{2} + (\lambda n_{3}u_{2})^{2} + (\lambda n_{2}u_{3})^{2} + 2(-\lambda n_{3}u_{1}u_{2} + \lambda n_{2}u_{1}u_{3} - \lambda^{2}n_{2}n_{3}u_{2}u_{3}) \\
&\quad + u_{2}^{2} + (\lambda n_{3}u_{1})^{2} + (\lambda n_{1}u_{3})^{2} + 2(\lambda n_{3}u_{1}u_{2} - \lambda n_{1}u_{2}u_{3} - \lambda^{2}n_{1}n_{3}u_{1}u_{3})\\
&\quad + u_{3}^{2} + (\lambda n_{2}u_{1})^{2} + (\lambda n_{1}u_{2})^{2} + 2(-\lambda n_{2}u_{1}u_{3} + \lambda n_{1}u_{2}u_{3} - \lambda^{2}n_{1}n_{2}u_{1}u_{2})\\
&= 1 + \lambda^{2}n_{1}^{2}(1 - u_{1}^{2}) + \lambda^{2}n_{2}^{2}(1 - u_{2}^{2}) + \lambda^{2}n_{3}^{2}(1 - u_{3}^{2}) \\
&\quad - 2\lambda^{2}(n_{2}n_{3}u_{2}u_{3} + n_{1}n_{3}u_{1}u_{3} + n_{1}n_{2}u_{1}u_{2});
\end{align*}
\begin{align*}
\|v_{\parallel}\|^{2} &= \|u_{\parallel}\|^{2} = (u, \hat{n})^{2} \\
&= (n_{1}u_{1})^{2} + (n_{2}u_{2})^{2} + (n_{3}u_{3})^{2} + 2(n_{1}n_{2}u_{1}u_{2} + n_{1}n_{3}u_{1}u_{3} + n_{2}n_{3}u_{2}u_{3});
\end{align*}
\begin{align*}
\|v_{\perp}\|^{2} &= \|v\|^{2} - \|v_{\parallel}\|^{2} \\
&= 1 + \lambda^{2} - (1 + \lambda^{2})[(n_{1}u_{1})^{2} + (n_{2}u_{2})^{2} + (n_{3}u_{3})^{2}]\\
&\quad - 2(1 + \lambda^{2})(n_{1}n_{2}u_{1}u_{2} + n_{1}n_{3}u_{1}u_{3} + n_{2}n_{3}u_{2}u_{3}) \\
& = (1 + \lambda^{2}) - (1 + \lambda^{2})(u, \hat{n})^{2}\\
&= (1 + \lambda^{2})[1 - (u, \hat{n})^{2}];
\end{align*}
\begin{align*}
(v, u) &= u_{1}^{2} - \lambda n_{3}u_{2}u_{1} + \lambda n_{2}u_{3}u_{1} + u_{2}^{2} + \lambda n_{3}u_{1}u_{2} - \lambda n_{1}u_{3}u_{2} \\
&\quad + u_{3}^{2} - \lambda n_{2}u_{1}u_{3} + \lambda n_{1}u_{2}u_{3} = 1;
\end{align*}
\begin{gather*}
(v, \hat{n})(u, \hat{n}) = (u, \hat{n})^{2} ;\\
(v_{\perp}, u_{\perp}) = (v, u) - (v, \hat{n})(u, \hat{n}) - (u, \hat{n})(u, \hat{n}) + (u, \hat{n})^{2} = 1 - (u, \hat{n})^{2} ; \\
\cos(\theta) = \frac{(v_{\perp}, u_{\perp})}{\|v_{\perp}\| \|u_{\perp}\|} = \frac{1 - (u, \hat{n})^{2}}{\sqrt{(1 + \lambda^{2})(1 - (u, \hat{n})^{2})} \sqrt{1 - (u, \hat{n})^{2}}} = \frac{1}{\sqrt{1 + \lambda^{2}}},
\end{gather*}
for which \(\tan(\theta) = \lambda\); here \(\theta\) is the angle between \(v_{\perp}\) and \(u_{\perp}\). Let \(w_{\perp} \equiv \frac{v_{\perp}}{\|v_{\perp}\|}\|u_{\perp}\| = \frac{1}{\sqrt{1 + \lambda^{2}}}v_{\perp}\) and \(w_{\parallel} \equiv v_{\parallel} = u_{\parallel}\), and set \(w \equiv w_{\perp} + w_{\parallel}\), which is a unit vector.
Then
\[
w = M_{\theta}u \equiv \begin{pmatrix} \frac{1 - n_{1}^{2}}{\sqrt{1 + \lambda^{2}}} + n_{1}^{2} & \frac{-\lambda n_{3} - n_{1}n_{2}}{\sqrt{1 + \lambda^{2}}} + n_{1}n_{2} & \frac{\lambda n_{2} - n_{1}n_{3}}{\sqrt{1 + \lambda^{2}}} + n_{1}n_{3} \\ \frac{\lambda n_{3} - n_{1}n_{2}}{\sqrt{1 + \lambda^{2}}} + n_{1}n_{2} & \frac{1 - n_{2}^{2}}{\sqrt{1 + \lambda^{2}}} + n_{2}^{2} & \frac{-\lambda n_{1} - n_{2}n_{3}}{\sqrt{1 + \lambda^{2}}} + n_{2}n_{3} \\ \frac{-\lambda n_{2} - n_{1}n_{3}}{\sqrt{1 + \lambda^{2}}} + n_{1}n_{3} & \frac{\lambda n_{1} - n_{2}n_{3}}{\sqrt{1 + \lambda^{2}}} + n_{2}n_{3} & \frac{1 - n_{3}^{2}}{\sqrt{1 + \lambda^{2}}} + n_{3}^{2} \end{pmatrix} u,
\]
which is equal to
{\scriptsize
\[
\begin{pmatrix} \cos(\theta) + ( 1 - \cos(\theta))n_{1}^{2} & -n_{3}\sin(\theta) + n_{1}n_{2}(1 - \cos(\theta)) & n_{2}\sin(\theta) + n_{1}n_{3}(1 - \cos(\theta)) \\ n_{3}\sin(\theta) + (1 - \cos(\theta))n_{1}n_{2} & \cos(\theta) + n_{2}^{2}(1 - \cos(\theta)) & -n_{1}\sin(\theta) + n_{2}n_{3}(1 - \cos(\theta)) \\ -n_{2}\sin(\theta) + (1 - \cos(\theta))n_{1}n_{3} & n_{1}\sin(\theta) + n_{2}n_{3}(1 - \cos(\theta)) & \cos(\theta) + n_{3}^{2}(1 - \cos(\theta)) \end{pmatrix} u,
\]}
and this is in \(D(A)\). Tedious calculations, left to the reader, show that \(M_{\theta}M_{\phi} = M_{\theta + \phi}\). Since \(M_{0} = I\), the identity matrix, the inverse of \(M_{\theta}\) equals \(M_{-\theta}\), and
\[
u = M_{-\theta}w \in D(A),
\]
where \(M_{-\theta}\) is the rotation about the axis \(\hat{n}\), rotating \(w\) to \(u\) through an angle \(-\theta\), since \(u_{\perp}\) and \(w_{\perp}\) are on the same plane, perpendicular to the axis \(\hat{n}\), \(\|u_{\perp}\| = \|w_{\perp}\|\), \(\|u_{\parallel}\| = \|w_{\parallel}\|\), and \(w_{\perp}\) is \(\theta\) ahead of \(u_{\perp}\) in the counterclockwise sense. From the proof of Theorem \ref{T:B}, it follows that
\[
x_{1}^{\lambda} = M_{-\theta}x_{0}, \quad x_{2}^{\lambda} = M_{-2\theta}x_{0}, \quad \dots, \quad x_{n}^{\lambda} = M_{-n\theta}x_{0},
\]
and that
\[
u(t, 0; x_{0}) \equiv \lim_{\lambda \to 0}x_{[\frac{t}{\lambda}]}^{\lambda} = \lim_{n \to \infty}x_{n}^{\frac{t}{n}} = \lim_{\theta \to 0}M_{-t \frac{\theta}{\tan(\theta)}}x_{0} = M_{-t}x_{0},
\]
which is equal to
{\scriptsize
\[
\begin{pmatrix} \cos(t) + (1 - \cos(t))n_{1}^{2} & n_{3}\sin(t) + n_{1}n_{2}(1 - \cos(t)) & -n_{2}\sin(t) + n_{1}n_{3}(1 - \cos(t)) \\ -n_{3}\sin(t) + (1 - \cos(t))n_{1}n_{2} & \cos(t) + n_{2}^{2}(1 - \cos(t)) & n_{1}\sin(t) + n_{2}n_{3}(1 - \cos(t)) \\ n_{2}\sin(t) + (1 - \cos(t))n_{1}n_{3} & -n_{1}\sin(t) + n_{2}n_{3}(1 - \cos(t)) & \cos(t) + n_{3}^{2}(1 - \cos(t)) \end{pmatrix} x_{0}
\]}
in \(D(A)\). From the proof of Theorem \ref{T:B} about a strong solution, we have
\[
u(t, 0; x_{0}) - x_{0} = \int_{0}^{t}Au(\tau, 0; x_{0}) \, d\tau,
\]
which is differentiable, since \(Au(\tau, 0; x_{0})\) is continuous in \(\tau\); so \(u(t, 0; x_{0})\) is a classical solution. Uniqueness follows from dissipativity, as is standard. The rest of the proof is left to the reader.
\end{proof}

\begin{proof}[Proof of Example \ref{ex3}]
The following proof assumes single-valued \(B\). The proof for multi-valued \(B\) is similar and is left to the reader.

\noindent Step 1. Claim: \(A'(t)\) (and then \(A(t)\)) is dissipative, uniformly for all \(t \in [0, T]\). It suffices to consider the first condition in Example \ref{ex3}. Let \(u, v \in D(A'(t)) = \overline{D(A'(t))}\).
\begin{proof}[Proof of Example \ref{ex3}]
The following proof assumes that \(B\) is single-valued; the proof for multi-valued \(B\) is similar and is left to the reader.

\noindent Step 1. We claim that \(A'(t)\) (and hence \(A(t)\)) is dissipative, uniformly for all \(t \in [0, T]\). It suffices to consider the first condition in Example \ref{ex3}. Let \(u, v \in D(A'(t)) = \overline{D(A'(t))}\). Since \((w, A'(t)w) = 0\) for \(w \in H\) (applied to \(u\), \(v\), and \(u + v\)),
\[
(u - v, A'(t)u - A'(t)v) = (u, A'(t)u) - (u, A'(t)v) - (v, A'(t)u) + (v, A'(t)v) = 0.
\]
So \(A'(t)\) is dissipative, uniformly for all \(t \in [0, T]\). Note that \(A'(t)\) is weakly continuous under the condition
\[
(Bu, v) + (u, Bv) = 0
\]
for \(u, v \in H\) satisfying the first condition in Example \ref{ex3}. Indeed, let \(u_{n} \in D(A'(t))\) with \(u_{n} \to u\) as \(n \to \infty\). Then
\[
(A'(t)u_{n}, v) = - (u_{n}, A'(t)v) \to - (u, A'(t)v) = (A'(t)u, v)
\]
holds for \(v \in H\), uniformly for all \(t \in [0, T]\). So \(A'(t)\) is weakly continuous, uniformly for all \(t \in [0, T]\).

\smallskip
\noindent Step 2. Let \(u \in D(A(t))\). For \(\lambda > 0\),
\begin{align*}
\|u - \lambda A(t)u\|^{2} &= (u - \lambda A(t)u, u - \lambda A(t)u) = \|u\|^{2} - 2(u, \lambda A(t)u) + \lambda^{2}\|A(t)u\|^{2} \\
&= 1 + \lambda^{2} a(t)^{2} \ge 1 + \lambda^{2}\delta_{0}^{2} \quad (\text{or } \geq 1 + \lambda^{2} \delta_{0}^{2}\delta_{00}^{2})\,.
\end{align*}
So $\mathop{\rm Ran}(I - \lambda A(t)) \cap D(A(t)) = \emptyset$, and
\[
\mathop{\rm Ran}(I - \lambda A(t)) \subset S \equiv \{u \in H : \|u\| = \sqrt{1 + \lambda^{2}a(t)^{2}} \ge \sqrt{1 + \lambda^{2}\delta_{0}^{2}} \}
\]
if \(\|Bu\| = 1\).

\smallskip
\noindent Step 3. Let \(u \in D(A(t))\) and \(\lambda > 0\), and let \(v = u + \lambda A(t)u\). As in Step 2, $\|v\| = \sqrt{1 + \lambda^{2}a(t)^{2}}$. Let
\[
w = \frac{v}{\sqrt{1 + \lambda^{2}a(t)^{2}}}.
\]
It follows that \(\|w\| = 1\), \(w \in D(A(t))\), and $\|w - v\| = \sqrt{1 + \lambda^{2}a(t)^{2}} - 1$. So
\[
d(\overline{D(A(t))}, u + \lambda A(t)u) \le \sqrt{1 + \lambda^{2}a(t)^{2}} - 1
\]
or
\[
d(\overline{D(A(t))}, u + \lambda A(t)u) \le \big(\sqrt{1 + \lambda^{2} \max_{t \in [0, T]} |a(t)|\,\delta_{000}^{2}} - 1\big),
\]
and
\[
\lim_{\lambda \to 0}\frac{d( \overline{D(A(t))}, u + \lambda A(t)u)}{\lambda} = 0
\]
uniformly for all \(0 \le t \le T\) and \(u \in \overline{D(A(t))}\).
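Steps 2 and 3 can be illustrated numerically. In the following sketch (an illustration only, assuming NumPy), \(B\) is replaced by an illustrative skew-symmetric matrix on \(\mathbb{R}^{2}\), so that \((Bu, u) = 0\) and \(\|Bu\| = \|u\|\), and a constant stands in for \(a(t)\); the distance from \(u + \lambda A(t)u\) back to the unit sphere is then \(\sqrt{1 + \lambda^{2}a^{2}} - 1 = O(\lambda^{2})\), so the ratio in Step 3 tends to zero.
\begin{verbatim}
import numpy as np

B = np.array([[0.0, -1.0], [1.0, 0.0]])   # skew: (Bu, u) = 0, ||Bu|| = ||u||
a = 2.0                                   # constant standing in for a(t)
u = np.array([0.6, 0.8])                  # ||u|| = 1
for lam in (1e-1, 1e-2, 1e-3, 1e-4):
    v = u + lam * a * (B @ u)             # v = u + lambda A(t)u
    d = np.linalg.norm(v) - 1.0           # distance from v to the unit sphere
    print(lam, d / lam)                   # ratio d/lambda -> 0 as lambda -> 0
\end{verbatim}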
\smallskip
\noindent Step 4. Following \cite[Page 151]{Mi}, let \(u \in D(A(t)) = \overline{D(A(t))}\). For \(\epsilon > 0\), there is a \(\lambda' = \lambda'(\epsilon) > 0\), independent of \(u\), such that for all \(0 < \lambda \le \lambda' \le \epsilon\),
\[
\frac{d(D(A(t)), u + \lambda A(t)u)}{\lambda} < \epsilon\,.
\]
It follows from the definition of distance that there is \(u_{\epsilon} \in D(A(t))\) such that
\[
\|u_{\epsilon} - (u + \lambda A(t)u)\| < \lambda \epsilon + d(D(A(t)), u + \lambda A(t)u) \le 2\lambda \epsilon\,.
\]
So, as \(\epsilon \to 0\), we have \(\lambda \to 0\) and
\[
\|u_{\epsilon} - u\| \le \lambda \|A(t)u\| + 2\lambda \epsilon \to 0.
\]
Let \(\nu > 0\). Since \(A(t)\) is uniformly continuous, there is a \(\delta = \delta(\nu) < \nu\) such that
\[
\|A(t)u' - A(t)u''\| < \nu
\]
if \(\|u' - u''\| < \delta\) for \(u', u'' \in \overline{D(A(t))}\). Here \(\delta\) is independent of \(u', u''\). Since \(\|u_{\epsilon} - u\| \to 0\) as \(\epsilon \to 0\), there is a \(0 < \epsilon_{0} = \epsilon_{0}(\delta) < \delta < \nu\) such that
\[
\|u_{\epsilon} - u\| < \delta < \nu,
\]
and then
\[
\|A(t)u_{\epsilon} - A(t)u\| < \nu
\]
holds for all $0 < \lambda \le \lambda' \le \epsilon \le \epsilon_{0} < \delta < \nu$, uniformly for all \(t \in [0, T]\). Here \(\lambda' = \lambda'(\nu)\) and \(\epsilon_{0} = \epsilon_{0}(\nu)\) are independent of \(u\). It follows that
\begin{align*}
\frac{d(\mathop{\rm Ran}(I - \lambda A(t)), u)}{\lambda} &\le \lambda^{-1}\|u_{\epsilon} - \lambda A(t)u_{\epsilon} - u\| \\
&\le \lambda^{-1}[\|u_{\epsilon} - (u + \lambda A(t)u)\| + \lambda \|A(t)u_{\epsilon} - A(t)u\|] \\
&\le 2\epsilon + \nu < 3\nu
\end{align*}
for all $0 < \lambda \le \lambda' \le \epsilon \le \epsilon_{0} < \delta < \nu$, uniformly for \(t \in [0, T]\). So
\[
\lim_{\lambda \to 0}\frac{d(\mathop{\rm Ran}(I - \lambda A(t)), u)}{\lambda} = 0
\]
uniformly for all \(u \in D(A(t)) = \overline{D(A(t))}\) and \(t \in [0, T]\).

\smallskip
\noindent Step 5. Suppose \(u = (I - \lambda A(t))^{-1}f\) and \(v = (I - \lambda A(\tau))^{-1}g\) exist, where \(f, g \in H\) and \(\lambda > 0\). We have \(u - \lambda A(t)u = f\) and \(v - \lambda A(\tau)v = g\). It follows from the dissipativity of \(A(t)\) that
\[
\|u - v\| \le \lambda |a(t) - a(\tau)|\|v\| + \|f - g\|\,.
\]
So \(A(t)\) satisfies (T4) and (T1). (A numerical illustration of this estimate appears after the proof of Example \ref{ex7} below.)
\end{proof}

\begin{proof}[Proof of Example \ref{ex5}]
Step 1. Note that for \(f = \sum_{i = 0}^{\infty}\alpha_{i}\phi_{i}\) and \(g = \sum_{i = 0}^{\infty}\alpha_{i}' \phi_{i}\),
\[
(f, g) = \lim_{m \to \infty} \sum_{i = 0}^{m}\alpha_{i}\alpha_{i}' = \sum_{i = 0}^{\infty}\alpha_{i}\alpha_{i}'\,.
\]

\noindent Step 2. It is easy to see that \(A(t)\) is uniformly continuous, uniformly for all \(t \in [0, T]\).

\noindent Step 3. Proving the dissipativity of \(A(t)\) is left to the reader. Here note that for \(f = \sum_{i = 0}^{\infty}\alpha_{i} \phi_{i} \in L^{2}(-\pi, \pi)\), we have \(\lim_{i \to \infty}\alpha_{i} = 0\) by Parseval's formula \(\sum_{i = 0}^{\infty}\alpha_{i}^{2} = \|f\|^{2}\). The rest follows from the proof of Example \ref{ex3}.
\end{proof}

\begin{proof}[Proof of Example \ref{ex6}]
Note that for \( \begin{pmatrix} x \\ y \end{pmatrix} \in D(A(t))\),
\[
\|B \begin{pmatrix} x \\ y \end{pmatrix} \| = \sqrt{x^{2} + 2 y^{2}} = \sqrt{1 + y^{2}} \ge 1 \quad \text{and} \quad \|B \begin{pmatrix} x \\ y \end{pmatrix} \| \le \sqrt{2}.
\]
Also note that \((A(t)u, u) = 0\) for \(u \in D(A(t))\), and
\[
\|A(t)u\| = \|a(t)Bu\| \le \max_{t \in [0, T]}|a(t)|\sqrt{2}
\]
for \(u \in D(A(t))\). The rest is left to the reader.
\end{proof}

\begin{proof}[Proof of Example \ref{ex7}]
Step 1. To prove that \(A(t)\) is uniformly continuous, uniformly for all \(t \in [0, T]\), use the mean value theorem and the fact that for all \(a, b \in \mathbb{R}\),
\[
(a + b)^{2} \le 2(a^{2} + b^{2})\,.
\]

\noindent Step 2. Use the proof of Example \ref{ex5} to prove the dissipativity condition. The rest follows from Example \ref{ex3}. The details are left to the reader.
\end{proof}
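As promised in Step 5 of the proof of Example \ref{ex3}, the resolvent estimate there can be checked numerically. The sketch below (an illustration only, assuming NumPy) takes \(A(t) = a(t)B\), consistent with the norms used in Steps 2 and 5, with an illustrative skew-symmetric \(B\) on \(\mathbb{R}^{2}\) (so that \(A(t)\) is dissipative and \(\|Bv\| = \|v\|\)); the function \(a\) and the data \(f\), \(g\), \(\lambda\), \(t\), \(\tau\) are arbitrary test choices.
\begin{verbatim}
import numpy as np

B = np.array([[0.0, -1.0], [1.0, 0.0]])  # skew-symmetric: (Bu, u) = 0
a = lambda s: 1.0 + s**2                 # stands in for a(t)
lam, t, tau = 0.2, 0.3, 0.7
f = np.array([1.0, 2.0])
g = np.array([0.5, -1.0])
I2 = np.eye(2)
u = np.linalg.solve(I2 - lam * a(t) * B, f)    # u = (I - lam A(t))^{-1} f
v = np.linalg.solve(I2 - lam * a(tau) * B, g)  # v = (I - lam A(tau))^{-1} g
lhs = np.linalg.norm(u - v)
rhs = (lam * abs(a(t) - a(tau)) * np.linalg.norm(v)
       + np.linalg.norm(f - g))
assert lhs <= rhs + 1e-12   # ||u - v|| <= lam|a(t)-a(tau)| ||v|| + ||f - g||
\end{verbatim}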
\subsection*{Acknowledgements}
The author wishes to thank Professor Jerome A. Goldstein at the University of Memphis for his teaching and training, which have helped the author in many ways. The author also wishes to thank Professor Julio G. Dix at Texas State University-San Marcos for taking pains to proofread the manuscript, which has made the article more readable.

\begin{thebibliography}{99}

\bibitem{Al} S. Altmann, {\em Rotations, Quaternions, and Double Groups}, Oxford University Press, New York, 1986.
\bibitem{Ba} V. Barbu, {\em Nonlinear Semigroups and Differential Equations in Banach Spaces}, Noordhoff, Leyden, 1976.
\bibitem{Be} Ph. Benilan, {\em Equations d'Evolution dans un Espace de Banach Quelconque et Applications}, Ph.D. Thesis, Univ. Paris-Orsay, 1972.
\bibitem{Che} G. Chen and J. Zhou, {\em Boundary Element Methods}, Academic Press Inc., San Diego, U.S.A., 1992.
\bibitem{Co} E. A. Coddington and N. Levinson, {\em Theory of Ordinary Differential Equations}, Tata McGraw-Hill Publishing Co. Ltd., New Delhi, 1987.
\bibitem{Cr} M. G. Crandall, {\em An introduction to evolution equations governed by accretive operators}, in {\em Dynamical Systems}, Vol. {\bf 1}, ed. by L. Cesari, J. K. Hale, and J. P. LaSalle, Academic Press, New York, 1976, 131-165.
\bibitem{Cra} M. G. Crandall and T. M. Liggett, {\em Generation of semigroups of nonlinear transformations on general Banach spaces}, Amer. J. Math., {\bf 93} (1971), 256-298.
\bibitem{Cran} M. G. Crandall and A. Pazy, {\em Nonlinear evolution equations in Banach spaces}, Israel J. Math., {\bf 11} (1972), 57-94.
\bibitem{Crand} M. G. Crandall, {\em A generalized domain for semigroup generators}, Proc. Amer. Math. Soc., {\bf 37} (1973), 434-440.
\bibitem{Go} J. A. Goldstein, {\em Semigroups of Nonlinear Operators and Applications}, to appear.
\bibitem{Gol} J. A. Goldstein, {\em Semigroups of Linear Operators and Applications}, Oxford University Press, New York, 1985.
\bibitem{Ka} J. Kacur, {\em Method of Rothe in Evolution Equations}, Teubner-Texte zur Mathematik, Band {\bf 80}, BSB B. G. Teubner Verlagsgesellschaft, Leipzig, 1985.
\bibitem{Kat} T. Kato, {\em Nonlinear semigroups and evolution equations}, J. Math. Soc. Japan, {\bf 19} (1967), 508-520.
\bibitem{Kato} T. Kato, {\em Perturbation Theory for Linear Operators}, Springer, 1972.
\bibitem{Kob} Y. Kobayashi, {\em Difference approximation of Cauchy problems for quasi-dissipative operators and generation of nonlinear semigroups}, J. Math. Soc. Japan, {\bf 27} (1975), 640-665.
\bibitem{Ko} Y. Komura, {\em Nonlinear semigroups in Hilbert space}, J. Math. Soc. Japan, {\bf 19} (1967), 493-507.
\bibitem{La} V. Lakshmikantham and S. Leela, {\em Nonlinear Differential Equations in Abstract Spaces}, Pergamon Press, Oxford, 1981.
\bibitem{Lin} C.-Y. Lin, {\em Cauchy problems and applications}, Topological Methods in Nonlinear Analysis, {\bf 15} (2000), 359-368.
\bibitem{Lin1} C.-Y. Lin, {\em Time-dependent nonlinear evolution equations}, Differential and Integral Equations, {\bf 15} (2002), 257-270.
\bibitem{Lin2} C.-Y. Lin, {\em On generation of \(C_{0}\) semigroups and nonlinear operator semigroups}, Semigroup Forum, {\bf 66} (2003), 110-120.
\bibitem{Lin3} C.-Y. Lin, {\em On generation of nonlinear operator semigroups and nonlinear evolution operators}, Semigroup Forum, {\bf 67} (2003), 226-246.
\bibitem{Lin4} C.-Y. Lin, {\em Nonlinear evolution equations with time-dependent domain}, submitted.
\bibitem{Liu} C. L. Liu, {\em Introduction to Combinatorial Mathematics}, McGraw-Hill, New York, 1968.
\bibitem{Mic} R. E. Mickens, {\em Difference Equations, Theory and Applications}, Second Edition, Van Nostrand Reinhold, New York, 1990.
\bibitem{Mi} I. Miyadera, {\em Nonlinear Semigroups}, Translations of Mathematical Monographs, {\bf 109}, American Mathematical Society, 1992.
\bibitem{Miy} I. Miyadera, {\em Some remarks on semigroups of nonlinear operators}, Tohoku Math. J., {\bf 23} (1971), 245-258.
\bibitem{Mo} G. Morosanu, {\em Nonlinear Evolution Equations and Applications}, D. Reidel Publishing Company, 1988.
\bibitem{Pa} A. Pazy, {\em Semigroups of Linear Operators and Applications to Partial Differential Equations}, Springer-Verlag, New York, 1983.
\bibitem{Ro} E. Rothe, {\em Zweidimensionale parabolische Randwertaufgaben als Grenzfall eindimensionaler Randwertaufgaben}, Math. Ann., {\bf 102} (1930), 650-670.
\bibitem{So} T. Soler and J. Marshall, {\em A note on frame transformations with applications to geodetic datums}, GPS Solutions, {\bf 7} (2003), 23-32.
\bibitem{Ta} T. Takahashi, {\em Convergence of difference approximation of nonlinear evolution equations and generation of semigroups}, J. Math. Soc. Japan, {\bf 28} (1976), 96-113.
\bibitem{Tay} A. E. Taylor and D. C. Lay, {\em Introduction to Functional Analysis}, Wiley, 1980.
\bibitem{We} U. Westphal, {\em Sur la saturation pour des semi-groupes non lin\'eaires}, C. R. Acad. Sci. Paris, {\bf 274} (1972), 1351-1353.

\end{thebibliography}

\end{document}