\documentclass[reqno]{amsart} \pdfoutput=1\relax\pdfpagewidth=8.26in\pdfpageheight=11.69in\pdfcompresslevel=9 \usepackage{hyperref} \AtBeginDocument{{\noindent\small {\em Electronic Journal of Differential Equations}, Vol. 2004(2004), No. 96, pp. 1--48.\newline ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu \newline ftp ejde.math.txstate.edu (login: ftp)} \thanks{\copyright 2004 Texas State University - San Marcos.} \vspace{9mm}} \begin{document} \title[\hfilneg EJDE-2004/96\hfil A class of nonlinear differential equations] {A class of nonlinear differential equations on the space of symmetric matrices} \author[V. Dragan, Gerhard Freiling, A. Hochhaus, T. Morozan\hfil EJDE-2004/96\hfilneg] {Vasile Dragan, Gerhard Freiling, Andreas Hochhaus, Toader Morozan} % in alphabetical order
\address{Vasile Dragan \hfill\break Institute of Mathematics of the Romanian Academy, P.O. Box 1-764, RO-70700 Bucharest, Romania} \email{vdragan@stoilow.imar.ro} \address{Gerhard Freiling \hfill\break Institut f\"ur Mathematik, Universit\"at Duisburg-Essen, D-47048 Duisburg, Germany} \email{freiling@math.uni-duisburg.de} \address{Andreas Hochhaus \hfill\break Institut f\"ur Mathematik, Universit\"at Duisburg-Essen, D-47048 Duisburg, Germany} \email{hochhaus@math.uni-duisburg.de} \address{Toader Morozan \hfill\break Institute of Mathematics of the Romanian Academy, P.O. Box 1-764, RO-70700 Bucharest, Romania} \email{tmorozan@stoilow.imar.ro} \date{} \thanks{Submitted March 18, 2003.
Published August 6, 2004.} \subjclass[2000]{34A34, 34C11, 34C25, 93E03, 93E20} \keywords{Rational matrix differential equations; generalized Riccati \hfill\break\indent differential equations; generalized stabilizability and detectability; comparison theorem; \hfill\break\indent existence and convergence results} \begin{abstract} In the first part of this paper we analyze the properties of the evolution operators of linear differential equations generating a positive evolution and provide a set of conditions which characterize the exponential stability of the zero solution, which extend the classical theory of Lyapunov. In the main part of this work we prove a monotonicity and a comparison theorem for the solutions of a class of time-varying rational matrix differential equations arising from stochastic control and derive existence and (in the periodic case) convergence results for the solutions. The results obtained are similar to those known for matrix Riccati differential equations. Moreover we provide necessary and sufficient conditions which guarantee the existence of some special solutions for the considered nonlinear differential equations as: maximal solution, stabilizing solution, minimal positive semi-definite solution. In particular it turns out that under the assumption that the underlying system satisfies adequate generalized stabilizability, detectability and definiteness conditions there exists a unique stabilizing solution. \end{abstract} \maketitle \numberwithin{equation}{section} \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{prop}[theorem]{Proposition} \newtheorem{definition}[theorem]{Definition} \newtheorem{example}[theorem]{Example} \newtheorem{remark}[theorem]{Remark} \section{Introduction} The extension of deterministic linear-quadratic control to the stochastic case has been a notable and active research area and has many applications. 
Let us consider a linear-quadratic stochastic optimal control problem of the form \begin{equation} \label{eq:6.7.1} \begin{aligned} dx(t) & = \big[ A(t) x(t) + B(t) u(t) \big] \, dt + \big[ C(t) x(t) + D(t) u(t) \big] \, dw(t), \\ x(t_0) & = x_0, \end{aligned} \end{equation} \begin{equation} \label{eq:6.7.2} J(u) := E \Big\{ x(t_f)^T Q_f x(t_f) + \int_{t_0}^{t_f} \begin{pmatrix} x(t) \\ u(t) \end{pmatrix}^T \begin{pmatrix} M(t) & L(t) \\ L^T(t) & R(t) \end{pmatrix} \begin{pmatrix} x(t) \\ u(t) \end{pmatrix} dt \Big\} \end{equation} where the state $x(t)$ and the control $u(t)$ are stochastic processes, and where $w(t)$ is a standard Wiener process (Brownian motion) on some probability space $(\Omega, \mathcal{F}, P)$. Moreover, assume here that $u(t)$ is non-anticipating with respect to $w(t)$, $x_0$ is independent of $w(t)$ and that $A$, $B$, $C$, $D$, $M$, $L$ and $R$ are sufficiently smooth functions defined on a right unbounded interval $\mathcal{I} \subseteq \mathbb{R}$ with values in $\mathbb{R}^{n\times n}$, $\mathbb{R}^{n\times m}$, $\mathbb{R}^{m \times n}$, $\mathbb{R}^{m \times m}$, $\mathbb{R}^{n \times n}$, $\mathbb{R}^{n \times m}$ and $\mathbb{R}^{m \times m}$, respectively. For the basic definitions from stochastic control theory see \cite{YoZh99} and -- in the infinite dimensional case -- \cite{DPZa92}. It can be shown (see \cite{YoZh99}) that the optimal control for (\ref {eq:6.7.1}), (\ref{eq:6.7.2}) is determined by the solution of the following terminal value problem for a generalized Riccati matrix differential equation: \begin{equation} \label{eq:6.7.3} \begin{aligned} \lefteqn{\frac{d}{dt} X(t) + A^T(t)X(t) + X(t)A(t) + M(t) + C^T(t) X(t) C(t)} \\ & \quad {} - \big\{ X(t)B(t) + C^T(t) X(t) D(t) + L(t) \big\} \big\{ R(t) + D^T(t) X(t) D(t) \big\}^{-1} \\ & \qquad \times \big\{ X(t)B(t) + C^T(t) X(t) D(t) + L(t) \big\}^T = 0, \quad X(t_f) = Q_f.
\end{aligned} \end{equation} The main goal of this paper is to show that several of the nice properties of standard symmetric matrix Riccati differential equations (which have been summarized in Chapter 4 of \cite{AFIJ03}) remain valid for the solutions of rational matrix equations of the form \begin{equation} \label{eq:6.7.4} \begin{aligned} \lefteqn{\frac{d}{dt} X(t) + A^T(t)X(t) + X(t)A(t) + M(t) + \Pi_1(t)[X(t)]} \\ & \quad {} - \big\{ X(t)B(t) + \Pi_{12}(t)[X(t)] + L(t) \big\} \big\{ R(t) + \Pi_2(t)[X(t)] \big\}^{-1} \\ & \qquad \times \big\{ X(t)B(t) + \Pi_{12}(t)[X(t)] + L(t) \big\}^T = 0 \end{aligned} \end{equation} and also of a slightly more general form. Here and below we assume that \[ \renewcommand{\arraystretch}{1.25} \Pi(t)[X] := \begin{pmatrix} \Pi_1(t)[X] & \Pi_{12}(t)[X] \\ \big( \Pi_{12}(t)[X] \big)^T & \Pi_2(t)[X] \end{pmatrix}, \quad X \in \mathcal{S}^n, \] is a bounded and continuous operator valued function, which is in addition positive, i.e. $X \geq 0$ implies $\Pi(t)[X] \geq 0$. Note that (\ref{eq:6.7.4}) contains, as particular cases, (\ref{eq:6.7.3}) and also many other matrix Riccati differential equations arising in connection with various types of control problems associated to linear control systems both in deterministic and stochastic framework. In particular in (\ref{eq:6.7.3}) $\Pi(t)$ takes the form \[ \Pi(t)[X] = \begin{pmatrix} C(t) & D(t) \end{pmatrix}^T X \begin{pmatrix} C(t) & D(t) \end{pmatrix}. \] Equations of the form (\ref{eq:6.7.4}), the corresponding algebraic equations (in the time-invariant case) and certain special cases have been studied recently (see \cite{ChLZ98}, \cite{DaHi01}, \cite{FrHo6}, \cite{FrHo5}, \cite{HiPr98}, \cite {YoZh99}) and can on account of their properties be considered as generalized Riccati-type equations. 
In the case where $R $ is invertible, $\Pi_1$ is linear and positive, $\Pi_2 \equiv 0$ and $\Pi_{12} \equiv 0$ (\ref{eq:6.7.4}) coincides with \begin{equation} \label{eq:6.7.5} \begin{aligned} \lefteqn{\frac{d}{dt} X(t) + A^T(t)X(t) + X(t)A(t) + M(t) + \Pi_1(t)[X(t)]} \\ & \quad {} - \big\{ X(t)B(t) + L(t) \big\} R^{-1}(t) \big\{ X(t)B(t) + L(t) \big\}^T = 0. \end{aligned} \end{equation} The latter class of perturbed Riccati differential equations appears among others in control problems with stochastically jumping parameters (see \cite {FCdS98} and \cite{AFIJ03}, Section 6.9); the corresponding algebraic equations and inequalities play also an important role in the application of the Lyapunov-Krasovskii method to linear time-delay systems. First steps concerning the theory of the rational matrix differential equation (\ref{eq:6.7.4}) have been performed by Hinrichsen and Pritchard \cite{HiPr98}, Ait Rami et al.\ \cite{ARCMZ01,ARZh00} and Chen et al.\ \cite {ChLZ98,ChYo01,ChZh00}, who obtained under additional assumptions sufficient conditions for the existence of the solutions of (\ref{eq:6.7.4}) on a given interval for certain initial values. The algebraic equation (\ref{eq:6.7.5}) has been studied in detail by Damm and Hinrichsen \cite{DaHi01} (see also \cite{DaHi03}). Dragan and Morozan (see \cite{preprint00} and the papers cited therein) considered in the case of time-varying coefficients coupled systems of differential equations which can be transformed to the form (\ref{eq:6.7.4}); they investigate properties of stabilizing and bounded solutions of these differential equations and provide a theorem on the existence of the maximal solution - the case of periodic coefficients is studied as well. Moreover \cite{preprint99} contains a rigorous analysis of linearly perturbed matrix Riccati differential equations of the form (\ref{eq:6.7.3}). For a discussion of the infinite-dimensional version of (\ref{eq:6.7.5}) see \cite{DPZa92}.
The work by Freiling and Hochhaus (see \cite{FrHo4}, \cite {FrHo6}, \cite{FrHo5} or, alternatively, Chapters 6.7 and 6.8 of \cite{AFIJ03}) contains, in particular in the time-invariant case, a unified treatment of the class of equations (\ref{eq:6.7.4}) and of the discrete-time version of these equations. The main goal of the authors of this paper is to investigate several important problems concerning the solutions of time-varying nonlinear differential equations of the form (\ref{eq:6.7.4}) by combining the methods developed in their above-mentioned work (see also \cite{FCdS98}, \cite {FrJa96} and \cite{FJAK96}). Although the differential equations (\ref {eq:6.7.4}) cannot - in contrast to standard Riccati differential equations - be transformed by a nonlinear transformation to a linear system of differential equations, it turns out that it is possible to show that their solutions behave in various aspects similar to the solutions of matrix Riccati equations. In the first part of the paper we shall investigate the properties of the linear evolution operators associated to a class of linear differential equations on the space of symmetric matrices, namely, linear differential equations generating a positive evolution. For such a class of differential equations we provide a set of conditions which characterizes the exponential stability of the zero solution. The stability results presented in Section 2, which are of independent interest, can be considered as extensions of the classical stability results of Lyapunov and their generalizations by Schneider \cite{Schn65} and serve as a basis for the proof of the existence results presented in the second part of this work. Sections 3--6 contain the main results concerning differential equations of the form (\ref{eq:6.7.4}). 
We provide necessary and sufficient conditions which guarantee the existence of some special solutions for the considered nonlinear differential equations as: maximal solution, stabilizing solution, minimal positive semi-definite solution. In particular it turns out that under the assumption that the system underlying (\ref{eq:6.7.4}) satisfies adequate generalized stabilizability, detectability and definiteness conditions there exists a unique stabilizing solution $X_s$ of (\ref {eq:6.7.4}); moreover, in the case of periodic coefficients, \[ \lim_{t \to - \infty} \big[ X(t) - X_s(t) \big] = 0 \] for any solution $X$ of (\ref{eq:6.7.4}) with $X(t_f) \geq 0$. In Section 7 we indicate that the results derived explicitly in this paper remain valid for more general classes of nonlinear differential equations. \section{Linear differential equations generating positive evolutions} \subsection{Preliminaries} Let $\mathbb{R}^{m\times n}$ be the linear space of real $m \times n $ matrices. On $\mathbb{R}^{m\times n}$ we consider the usual inner product \begin{equation} \langle A, B \rangle := \mathop{\rm{Tr}} B^T A = \mathop{\rm{trace}} B^T A \label{eqno:2.1} \end{equation} for all $A$ and $B\in \mathbb{R}^{m\times n}$; throughout the paper the superscript $T$ stands for the transpose of a matrix or a vector. The norm induced by the inner product \eqref{eqno:2.1} is \begin{equation} \| A \| = \langle A, A \rangle^{1/2}, \label{eqno:2.2} \end{equation} which is known as the Frobenius norm of a matrix. Together with the inner product \eqref{eqno:2.1} and the norm \eqref{eqno:2.2} on $\mathbb{R}^{m\times n}$ we consider also the Euclidean norm (or spectral norm) of the matrix $A$, that is \begin{equation} \| A \|_2 = \sqrt{\lambda_{\max}(A^TA)}. \label{eqno:2.3} \end{equation} Since $\mathbb{R}^{m\times n}$ is a finite dimensional linear space it follows that the norms \eqref{eqno:2.2} and \eqref{eqno:2.3} are equivalent.
Let $\mathcal{S}^n \subset \mathbb{R}^{n \times n}$ be the linear subspace of symmetric $n \times n$-matrices. It is obvious that the inner product \eqref{eqno:2.1} induces on $\mathcal{S}^n$ the structure of a Hilbert space while the norm \eqref{eqno:2.3} induces on $\mathcal{S}^n$ the structure of a Banach space. Let $\mathcal{S}^n_{+} := \{ S \in \mathcal{S}^n \mid S \geq 0 \}$ be the convex cone of positive semi-definite matrices. Then $\mathcal{S}^n_{+}$ induces an order on $\mathcal{S}^n$: $X \geq Y$ if and only if $X-Y\in \mathcal{S}^n_{+}$. An operator $T \colon \mathcal{S}^n \to \mathcal{S}^m$ is a positive operator if $T(\mathcal{S}^n_{+}) \subset \mathcal{S}^m_{+}$; in this case we shall write $T \geq 0$. The following properties of the positive linear operators will be used often in the paper. \begin{lemma} \label{lm2.1} If $T \colon \mathcal{S}^n \to \mathcal{S}^n$ is a linear operator then the following assertions hold: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item $T\geq 0$ if and only if $T^* \geq 0$, $T^*$ being the adjoint operator of $T$ with respect to the inner product \eqref{eqno:2.1}. \item If \begin{equation} \| T \| := \sup \{ \| TX \|_2: X \in \mathcal{S}^n, \|X\|_2 \leq 1 \} \label{eqno:2.4} \end{equation} then $T \geq 0$ implies $\| T \| = \|T I_n \|_2$ where $I_n$ is the identity $n \times n$-matrix. \end{enumerate} \end{lemma} \subsection{Linear differential equations generating positive evolutions} We note that the continuity assumptions made on the coefficients of the differential equations appearing in this paper are not essential -- most results remain valid for locally integrable and bounded coefficients if we consider solutions of \eqref{eq:6.7.4} in the sense of Caratheodory. 
Let $\mathcal{L} \colon \mathcal{I} \to \mathcal{B}(\mathcal{S}^n)$ be a continuous operator valued function where $\mathcal{I} \subseteq \mathbb{R}$ is an interval and $\mathcal{B}(\mathcal{S}^n)$ denotes the space of linear operators defined on $\mathcal{S}^n$ with values in $\mathcal{S}^n$. On $\mathcal{S}^n$ we consider the linear differential equation \begin{equation} \frac{d}{dt} S(t) = \mathcal{L}(t)[S(t)]. \label{eqno:2.5} \end{equation} For each $t_0\in \mathcal{I}$ and $H \in \mathcal{S}^n$ we denote by $S(\cdot,t_0,H)$ the solution of \eqref{eqno:2.5} with the initial value $S(t_0,t_0,H) = H$. Based on known uniqueness arguments we obtain that the function $H \to S(\cdot,t_0,H)$ is linear. For each $(t,t_0)$ the linear operator $T(t,t_0) \colon \mathcal{S}^n \to \mathcal{S}^n$, $T(t,t_0)[H] := S(t,t_0,H)$ is well-defined. $T(\cdot,\cdot)$ will be termed the \textbf{linear evolution operator} on $\mathcal{S}^n$ defined by the linear differential equation \eqref{eqno:2.5}. The next proposition summarizes well known properties of linear evolution operators: \begin{prop} \label{prop2.2} We have: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item $T(t,s) T(s,\tau) = T(t,\tau)$ for all $t,s,\tau \in \mathcal{I}$. \item $T(t,t) = I_{\mathcal{S}^n}$ (the identity operator on $\mathcal{S}^n$). \item $t \mapsto T(t,s)$ satisfies $\frac{d}{dt} T(t,s) = \mathcal{L}(t) T(t,s)$. \item $s \mapsto T^*(t,s)$ satisfies $ \frac{d}{ds} T^*(t,s) + \mathcal{L}^*(s) T^*(t,s) = 0$. \item If $\mathcal{L}(t) \equiv \mathcal{L}$ then $$ T(t,s) = e^{\mathcal{L}(t-s)} := \sum_{k=0}^{\infty} \frac{(t-s)^k}{k!} \mathcal{L}^k, $$ where $\mathcal{L}^k$ is the $k$-th iteration of the operator $\mathcal{L}$. \item If there exists $\theta > 0$ such that $\mathcal{L}(t+\theta) = \mathcal{L}(t)$ for all $t \in \mathbb{R}$, then $T(t+\theta, s+\theta) = T(t,s)$ for all $t, s\in \mathbb{R}$. 
\end{enumerate} \end{prop} Let $A \colon \mathcal{I} \to \mathbb{R}^{n\times n}$ be a continuous function. Set \begin{equation} \mathcal{L}_A(t)[S] := A(t) S + S A^T(t) \quad \mbox{for all} \quad S \in \mathcal{S}^n. \label{eqno:2.6} \end{equation} By direct computation we may check that the linear evolution operator defined by the linear differential equation \eqref{eqno:2.5} in the particular case of \eqref{eqno:2.6} is given by \begin{equation} T_A(t,t_0)[H] = \Phi_A(t,t_0) H \Phi_A^T(t,t_0), \label{eqno:2.7} \end{equation} where $\Phi_A(t,t_0)$ is the transition matrix of the linear differential equation \[ \dot x(t) = A(t)x(t). \] The corresponding adjoint operators $\mathcal{L}_A^*(t)$ and $T_A^*(t,t_0)$ are given here by \[ \mathcal{L}_A^*(t)S = A^T(t) S + S A(t) \] and \begin{equation} T_A^*(t,t_0)[H] = \Phi_A^T(t,t_0) H \Phi_A(t,t_0). \label{eqno:2.8} \end{equation} \begin{remark} \label{rmk2.3} \rm From \eqref{eqno:2.7} and \eqref{eqno:2.8} it is seen that both $T_A(t,t_0)$ and $T_A^*(t,t_0)$ are positive operators. \end{remark} Motivated by the above remark we define \begin{definition} \label{def2.4} \rm The linear differential equation \eqref{eqno:2.5} (or equivalently, the linear operator valued function $\mathcal{L}$) \textbf{generates a positive evolution} if $$ T(t,t_0) \geq 0 \quad \mbox{for all} \quad t,t_0 \in \mathcal{I}, \; t \geq t_0. $$ \end{definition} The next result shows that the property of generating a positive evolution is preserved under some perturbations: \begin{prop} \label{prop2.5} Let $\mathcal{L}(t) := \mathcal{L}_0(t) + \Pi_1(t)$ be such that $\mathcal{L}_0(t) $ generates a positive evolution and $\Pi_1(t) \colon \mathcal{S}^n \to \mathcal{S}^n$ is a positive linear operator. Assume additionally that $t \mapsto \mathcal{L}_0(t)$ and $t \mapsto \Pi_1(t)$ are continuous operator valued functions. Under these assumptions the operator valued function $\mathcal{L}$ generates a positive evolution.
\end{prop} \begin{proof} Let $T_0(t,t_0)$ be the linear evolution operator defined on $\mathcal{S}^n$ by the linear differential equation $$ \frac{d}{dt} S(t) = \mathcal{L}_0(t)[S(t)]. $$ By assumption it follows that $T_0(t,t_0) \geq 0$ for all $t \geq t_0$, $t,t_0 \in \mathcal{I}$. Let $S(t) = S(t,t_0,H)$ be the solution of the initial value problem $$ \frac{d}{dt} S(t) = \mathcal{L}(t)[S(t)], \quad S(t_0,t_0,H) = H, \quad H \geq 0. $$ We have to show that $S(t) \geq 0$ for $t \geq t_0$, $t \in \mathcal{I}$. The solution $S(t)$ admits the representation $$ S(t) = T_0(t,t_0) H + \int_{t_0}^t T_0(t,s) \Pi_1(s)[S(s)] \, ds. $$ Let $\{ S_k(t) \}_{k \geq 0}$ be the sequence of Volterra approximations defined as \begin{gather*} S_0(t) := T_0(t,t_0) H, \quad t \geq t_0, \\ S_k(t) := T_0(t,t_0) H + \int_{t_0}^t T_0(t,s) \Pi_1(s)[S_{k-1}(s)] \, ds, \quad t \geq t_0, \quad k \geq 1. \end{gather*} It is known that \begin{equation} S(t) = \lim_{k \to \infty} S_k(t), \quad t \geq t_0. \label{eqno:2.9} \end{equation} On the other hand we obtain inductively that $S_k(t) \geq 0$, hence, from \eqref{eqno:2.9} we get $S(t) \geq 0$. Since $T(t,t_0)H = S(t,t_0,H)$ it follows that $T(t,t_0) \geq 0$ and thus the proof is complete. \end{proof} From Proposition \ref{prop2.5} and Remark \ref{rmk2.3} we obtain the following result. \begin{corollary} \label{cor2.6} Let $\mathcal{L}_{A, \Pi_1}(t) \colon \mathcal{S}^n \to \mathcal{S}^n$ be defined by \begin{equation} \mathcal{L}_{A, \Pi_1}(t)[S] := A(t) S + S A^T(t) + \Pi_1(t)[S], \label{eqno:2.10} \end{equation} where $A \colon \mathcal{I} \to \mathbb{R}^{n\times n}$, $\Pi_1 \colon \mathcal{I} \to \mathcal{B}(\mathcal{S}^n)$ are continuous functions and $\Pi_1(t) \geq 0$ for all $t \in \mathcal{I}$. Then $\mathcal{L}$ generates a positive evolution. 
\end{corollary} \begin{remark} \label{rmk2.7} \rm The adjoint operator of $\mathcal{L}_{A, \Pi_1}(t)$ defined by \eqref{eqno:2.10} is given by \begin{equation} \mathcal{L}_{A, \Pi_1}^*(t)[S] = A^T(t) S + S A(t) + \Pi_1^*(t)[S] \quad \mbox{for all} \quad S \in \mathcal{S}^n. \label{eqno:2.11} \end{equation} \end{remark} The operators \eqref{eqno:2.10} and \eqref{eqno:2.11} contain as particular cases the Lyapunov-type operators studied starting with pioneering works of Wonham \cite{Wonh68,Wonh70} in connection with the problem of exponential stability of the zero solution of a linear It\^{o} differential equation (see also \cite[Section 4.6]{Reid72}). For $A$ and $\Pi_1$ not depending on $t$ the operators \eqref{eqno:2.10} and \eqref{eqno:2.11} were also considered by Damm and Hinrichsen \cite{DaHi01}, Freiling and Hochhaus \cite{FrHo5} (see also \cite{AFJ94}, \cite{DPZa92}, \cite{FCdS98}, \cite{FrJa96}, \cite{FJAK96}). The next result can be proved following step by step the proof of \cite[Proposition 4.4]{preprint99}. \begin{prop} \label{prop2.8} Let $\mathcal{L}$ be an operator valued function which generates a positive evolution on $\mathcal{S}^n$. If $t \mapsto \| \mathcal{L}(t) \|$ is bounded then there exist $\alpha, \beta > 0$ such that \[ T(t,t_0) I_n \geq \beta e^{-\alpha(t-t_0)} I_n \qquad \mbox{and} \qquad T^*(t,t_0)I_n \geq \beta e^{-\alpha(t-t_0)}I_n \] for all $t \geq t_0$, $t,t_0 \in \mathcal{I}$. \end{prop} \subsection{Exponential stability} In this subsection $\mathcal{I} \subset \mathbb{R}$ is a right unbounded interval and $\mathcal{L} \colon \mathcal{I} \to \mathcal{B}(\mathcal{S}^n)$ is a continuous operator valued function. \begin{definition} \label{def2.9} \rm We say that $\mathcal{L}$ \textbf{generates an exponentially stable evolution} if there exist $\alpha, \beta > 0$ such that $$ \| T(t,t_0) \| \leq \beta e^{-\alpha(t-t_0)} \quad \mbox{for} \quad t \geq t_0, \; t, t_0 \in \mathcal{I}. 
$$ \end{definition} Our goal is to provide some necessary and sufficient conditions for exponential stability in the case when $\mathcal{L}$ generates a positive evolution. \begin{theorem} \label{thm2.10} Let $\mathcal{L} \colon \mathbb{R}_+ \to \mathcal{B}(\mathcal{S}^n)$ be a bounded and continuous operator valued function such that $\mathcal{L}$ generates a positive evolution. Then the following are equivalent: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item $\mathcal{L}$ defines an exponentially stable evolution. \item There exists some $\delta > 0$ such that $$ \int_{t_0}^t \| T(t,s) \| \, ds \leq \delta \quad \mbox{for all} \quad t \geq t_0, \; t_0 \in \mathbb{R}_+. $$ \item There exists some $\delta > 0$ being independent of $t$ and $t_0$ with \begin{equation} \int_{t_0}^t T(t,s) I_n \, ds\leq \delta I_n \quad \mbox{for all} \quad t \geq t_0, \; t_0 \in \mathbb{R}_+. \label{eqno:2.12} \end{equation} \end{enumerate} \end{theorem} \begin{proof} (i) $\Rightarrow$ (ii) obvious (from definition). (ii) $\Rightarrow$ (iii) follows from $0 \leq T(t,s) I_n \leq \| T(t,s) \| I_n$. It remains to prove (iii) $\Rightarrow$ (i). Therefore let $H \colon \mathbb{R}_+ \to \mathcal{S}^n$ be a bounded and continuous function. This means that there exist $\gamma_1, \gamma_2 \in \mathbb{R}$ such that $\gamma_1 I_n \leq H(s) \leq \gamma_2 I_n$ for $s\geq 0$. Since $T(t,s)$ is a positive operator we obtain $$ \gamma_1 T(t,s) I_n \leq T(t,s) H(s) \leq \gamma_2 T(t,s) I_n, $$ hence $$ \gamma_1 \int_0^t T(t,s) I_n \, ds \leq \int_0^t T(t,s)H(s) \, ds \leq \gamma_2 \int_0^t T(t,s) I_n \, ds. $$ Using Proposition \ref{prop2.8} and inequalities \eqref{eqno:2.12} we get $$ \tilde{\gamma}_1 I_n \leq \int_0^t T(t,s)H(s) \, ds \leq \tilde{\gamma}_2 I_n \quad \mbox{for} \quad t\geq 0 $$ with some constants $\tilde{\gamma}_1, \tilde{\gamma}_2 \in \mathbb{R}$. 
So we have obtained that $t \to \int_0^t T(t,s)H(s) \, ds$ is bounded for arbitrary bounded and continuous functions $H(s)$. Applying Perron's theorem (see \cite{Hala66}) we conclude that there exist $\alpha, \beta > 0$ such that $$ \| T(t,t_0) \| \leq \beta e^{-\alpha(t-t_0)} \quad \mbox{for all} \quad t \geq t_0 \geq 0, $$ and the proof is complete. \end{proof} Throughout the paper a function $H \colon \mathcal{I} \to \mathcal{S}^n$ is termed \textbf{uniformly positive} and we write $H(t) \gg 0$ if there is a constant $c > 0$ such that $H(t) \geq c I_n > 0$ for all $t \in \mathcal{I}$. The next theorem is a time-varying version of results that have been summarized e.g.\ in \cite{DaHi01} and \cite{FCdS98} and generalizes classical results from Lyapunov theory which are in the time-invariant case essentially due to Schneider \cite{Schn65}. \begin{theorem} \label{thm2.11} Let $\mathcal{L} \colon \mathcal{I} \to \mathcal{B}(\mathcal{S}^n)$ be a continuous operator valued function such that $\mathcal{L}$ generates a positive evolution and $t \mapsto \| \mathcal{L}(t) \|$ is bounded. Then the following are equivalent: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item $\mathcal{L}$ defines an exponentially stable evolution. \item There exist $\alpha, \beta_1 > 0$ such that $$ \| T^*(t,t_0) \| \leq \beta_1 e^{-\alpha(t-t_0)} \quad \mbox{for all} \quad t \geq t_0, \; t, t_0 \in \mathcal{I}. $$ \item There exists $\delta > 0$ being independent of $t$ with $$ \int_t^{\infty} T^*(s,t) I_n \, ds \leq \delta I_n \quad \mbox{for all} \quad t \in \mathcal{I}. $$ \item The differential equation \begin{equation} \frac{d}{dt} K(t) + \mathcal{L}^*(t)[K(t)] + I_n = 0 \label{eqno:2.13} \end{equation} has a bounded and uniformly positive solution $K \colon \mathcal{I} \to \mathcal{S}^n$. 
\item For every continuous and bounded function $H \colon \mathcal{I} \to \mathcal{S}^n$ with $H(t)\gg 0$ the differential equation \begin{equation} \frac{d}{dt} K(t) + \mathcal{L}^*(t)[K(t)] + H(t) = 0 \label{eqno:2.14} \end{equation} has a bounded and uniformly positive solution. \item There is a continuous and bounded function $H \colon \mathcal{I} \to \mathcal{S}^n$, $H(t)\gg 0$, such that the corresponding differential equation \eqref{eqno:2.14} has a bounded solution $\hat K(t)\geq 0$, $t \in \mathcal{I}$. \item There exists a bounded $C^1$-function $K \colon \mathcal{I} \to \mathcal{S}^n$ with bounded derivative and $K(t) \gg 0$ which fulfills the linear differential inequality \begin{equation} \frac{d}{dt} K(t) + \mathcal{L}^*(t)[K(t)] \ll 0. \label{eqno:2.15} \end{equation} \end{enumerate} \end{theorem} \begin{proof} First we prove (i) $\Leftrightarrow$ (ii). Let $\|| T \||$ be the norm of the operator $T$ induced by \eqref{eqno:2.2} and $\| T \|$ be the norm defined by \eqref{eqno:2.4}. Since the norms defined by \eqref{eqno:2.2} and \eqref{eqno:2.3} are equivalent it follows that there exist positive constants $c_1, c_2$ such that $$ c_1 \| T(t,t_0) \| \leq \|| T(t,t_0) \|| \leq c_2 \| T(t,t_0) \| $$ and $$ c_1 \| T^*(t,t_0) \| \leq \|| T^*(t,t_0) \|| \leq c_2 \| T^*(t,t_0)\|. $$ Taking into account that $\|| T(t,t_0) \|| = \|| T^*(t,t_0) \||$ one obtains that $$ \frac{c_1}{c_2} \, \| T(t,t_0) \| \leq \| T^*(t,t_0)\| \leq \frac{c_2}{c_1}\, \| T(t,t_0) \| $$ which shows that (i) $\Leftrightarrow$ (ii). The implication (ii) $\Rightarrow$ (iii) is obvious. We prove now (iii) $\Rightarrow$ (iv). Let $K_0(t) := \int_t^{\infty} T^*(s,t) I_n \, ds$. From (iii) it follows that $K_0$ is well defined and bounded. Applying Proposition \ref{prop2.8} we deduce that there exists $\delta_1 > 0$ such that $K_0(t) \geq \delta_1 I_n$. By direct computation, based on Proposition \ref{prop2.2}, we get that $K_0$ is differentiable and it solves equation \eqref{eqno:2.13}.
We prove now (iv) $\Rightarrow$ (iii). Let $K \colon \mathcal{I} \to \mathcal{S}^n$ be a bounded and uniformly positive solution of equation \eqref{eqno:2.13}. For all $t \leq \tau$, $t,\tau \in \mathcal{I}$ we can write the representation formula $$ K(t) = T^*(\tau,t) K(\tau) + \int_t^{\tau} T^*(s,t)I_n \, ds. $$ Since $T^*(\tau,t)$ is a positive operator and $K$ is a positive and bounded function, we conclude that $$ 0 \leq \int_t^{\tau} T^*(s,t) I_n \, ds \leq K(t) \leq \delta I_n $$ for all $t \leq \tau$, $t, \tau \in \mathcal{I}$, where $\delta > 0$ does not depend on $t$ and $\tau$. Taking the limit for $\tau \to \infty$ we obtain that (iii) holds. We prove now (iii) $\Rightarrow$ (v). Let $H \colon \mathcal{I} \to \mathcal{S}^n$ be a continuous function with the property $0 < \nu_1 I_n \leq H(s) \leq \nu_2 I_n$ for all $s \in \mathcal{I}$ with $\nu_1, \nu_2$ being positive constants. Since $T^*(s,t)$ is a positive operator we obtain $$ \nu_1 T^*(s,t) I_n \leq T^*(s,t) H(s)\leq \nu_2 T^*(s,t) I_n, $$ which leads to $$ \nu_1 \int_t^{\tau} T^*(s,t) I_n \, ds \leq \int_t^{\tau} T^*(s,t) H(s) \, ds \leq \nu_2 \int_t^{\tau} T^*(s,t) I_n \, ds\,. $$ Taking the limit for $\tau \to \infty$ and invoking (iii), we deduce that for $t$ in $\mathcal{I}$, $$ \nu_1 \int_t^{\infty} T^*(s,t) I_n \, ds \leq \int_t^{\infty} T^*(s,t) H(s) \, ds \leq \nu_2 \int_t^{\infty} T^*(s,t) I_n \, ds \leq \nu_2 \delta I_n\,. $$ We define $\tilde K(t) := \int_t^{\infty} T^*(s,t) H(s) \, ds$. Applying Proposition \ref{prop2.8} we get $$ \nu_1 \frac{\beta}{\alpha} I_n \leq \tilde{K}(t) \leq \nu_2 \delta I_n \quad \mbox{for all} \quad t \in \mathcal{I}. $$ Using Proposition \ref{prop2.2} we obtain $$ \tilde{K}(t) = T^*(\tau,t) \tilde{K}(\tau) + \int_t^{\tau} T^*(s,t) H(s) \, ds, $$ which shows that $\tilde{K}$ is a solution of \eqref{eqno:2.14} and thus (v) is fulfilled. (v) $\Rightarrow$ (vi) is obvious. We prove now (vi) $\Rightarrow$ (ii).
Let $\hat{K} \colon \mathcal{I} \to \mathcal{S}^n$ be a bounded solution of equation \eqref{eqno:2.14} such that $\hat{K}(t) \geq 0$, $t \in \mathcal{I}$. For each $t < \tau$, $t, \tau \in \mathcal{I}$, we write $$ \hat K(t) = T^*(\tau,t) \hat{K}(\tau) + \int_t^{\tau} T^*(s,t) H(s) \, ds. $$ Since $T^*(\tau, t)\geq 0$ and $0 \leq \hat K(t)\leq \rho I_n$ for $\rho > 0$ not depending on $t$, we deduce that $$ 0 \leq \int_t^{\tau} T^*(s,t) H(s) \, ds \leq \hat{K}(t) \leq \rho I_n, $$ which leads to $0 \leq \int_t^{\infty} T^*(s,t) H(s) \, ds \leq \rho I_n$ for $t \in \mathcal{I}$. Based on Proposition \ref{prop2.8} we deduce that there exists $\rho_0 > 0$ such that \begin{equation} \rho_0 I_n \leq \int_t^{\infty} T^*(s,t) H(s) \, ds =: \tilde{K}(t) \leq \rho I_n. \label{eqno:2.16} \end{equation} Let $t_0 \in \mathcal{I}$ be fixed and let us define $G(t) := T^*(t,t_0) \tilde{K}(t)$ for all $t \geq t_0$. It follows that $G(t) = \int_t^{\infty} T^*(s,t_0) H(s) \, ds$. We have directly \begin{equation} \frac{d}{dt} G(t) = - T^*(t,t_0) H(t) \quad \mbox{for all} \quad t \geq t_0. \label{eqno:2.17} \end{equation} On the other hand there exist positive constants $\nu_i$, $i = 1, 2$, such that $$ \nu_1 I_n \leq H(t)\leq \nu_2 I_n, $$ which leads to \begin{equation} \nu_1 T^*(t,t_0) I_n \leq T^*(t,t_0) H(t) \leq \nu_2 T^*(t,t_0) I_n. \label{eqno:2.18} \end{equation} From \eqref{eqno:2.17} and \eqref{eqno:2.18} we infer $$ \frac{d}{dt} G(t) \leq - \nu_1 T^*(t,t_0) I_n \quad \mbox{for all} \quad t \geq t_0. $$ On the other hand from \eqref{eqno:2.16} we get \begin{equation} \rho_0 T^*(t,t_0) I_n \leq G(t) \leq \rho T^*(t,t_0) I_n, \label{eqno:2.19} \end{equation} which leads to $\frac{d}{dt} G(t) \leq - \alpha G(t)$ where $\alpha = \frac{\nu_1}{\rho}$. By a standard argument we get $G(t) \leq e^{-\alpha(t-t_0)} G(t_0)$ for all $t \geq t_0$. Using again \eqref{eqno:2.19} we deduce that $$ T^*(t,t_0) I_n \leq \frac{\rho}{\rho_0} e^{-\alpha(t-t_0)} I_n. 
$$ Hence $\|T^*(t,t_0) I_n\|_2 \leq \frac{\rho}{\rho_0} e^{-\alpha(t-t_0)}$, therefore (ii) follows from Lemma \ref{lm2.1}, (ii). (iv) $\Rightarrow$ (vii) is obvious since any bounded and positive solution of \eqref{eqno:2.13} is also a solution of \eqref{eqno:2.15}. (vii) $\Rightarrow$ (vi). If $K \colon \mathcal{I} \to \mathcal{S}^n$, $K(t) \gg 0$, is a bounded $C^1$-function with bounded derivative which solves \eqref{eqno:2.15}, we define $H(t) := - \frac{d}{dt} K(t) - \mathcal{L}^*(t)[K(t)]$. Therefore $K$ is a bounded and positive semi-definite solution of \eqref{eqno:2.14} corresponding to this particular choice of $H$ and the proof ends. \end{proof} \begin{corollary} \label{coro2.12} Let $\mathcal{L} := \mathcal{L}_{A, \Pi_1}$ be defined as in \eqref{eqno:2.10} with $\Pi_1(t) \geq 0$. If $\mathcal{L}$ defines an exponentially stable evolution then the zero state equilibrium of the linear differential equation $$ \dot x(t) = A(t)x(t) $$ is exponentially stable. \end{corollary} \begin{proof} Using the equivalence (i) $\Leftrightarrow$ (vii) of Theorem \ref{thm2.11} we deduce that there exists a bounded $C^1$-function $X \colon \mathcal{I} \to \mathcal{S}^n$ with bounded derivative and $ X(t)\gg 0$ which satisfies \eqref{eqno:2.15}. Then it follows that $$ \frac{d}{dt} X(t) + A^T(t) X(t) +X(t) A(t) \ll 0, $$ which guarantees the exponential stability of the zero solution of the linear differential equation defined by $A(t)$ and the proof ends. \end{proof} The next result extends to the case of operators generating positive evolutions, a well known result concerning the uniqueness of bounded solutions of Lyapunov equations. \begin{theorem} \label{thm2.13} Let $\mathcal{L} \colon \mathcal{I} \to \mathcal{B}(\mathcal{S}^n)$ be a continuous and bounded operator valued function which generates a positive and exponentially stable evolution. 
Then: \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item For any bounded and continuous function $H \colon \mathcal{I} \to \mathcal{S}^n$ the differential equation \eqref{eqno:2.14} has a unique solution bounded on $\mathcal{I}$. This solution has the representation \begin{equation} \tilde{K}(t) = \int_t^{\infty} T^*(s,t) H(s) \, ds \quad \mbox{for} \quad t \in \mathcal{I}, \label{eqno:2.20} \end{equation} $T(s,t)$ being the linear evolution operator defined by the linear differential equation \eqref{eqno:2.5}. \item If $H(s) \geq 0$ then $\tilde{K}(t) \geq 0$, and if $H(s) \gg 0$ then $\tilde{K}(t) \gg 0$. \item If $\mathcal{L}$, $H$ are periodic functions with period $\theta$, then $\tilde{K}$ is a $\theta$-periodic function, and if $\mathcal{L}(t) \equiv \mathcal{L}$ and $H(t) \equiv H$, then $\tilde{K}(t) \equiv \tilde{K}$ is constant with $$ \tilde K = - (\mathcal{L}^*)^{-1}[H] = \int_0^{\infty} e^{\mathcal{L}^*s} H \, ds\,. $$ \end{enumerate} \end{theorem} \begin{proof} (a) Since $\mathcal{L}(t)$ defines a stable evolution $\int_t^{\infty} T^*(s,t) H(s) \, ds $ is convergent for each $t \in \mathcal{I}$. Hence $\tilde{K}(t) = \int_t^{\infty} T^*(s,t)H(s) \, ds$ is well defined. For each $\tau \geq t$, $\tau, t\in \mathcal{I}$ we write $$ \tilde{K}(t) = T^*(\tau, t) \tilde{K}(\tau) + \int_t^{\tau} T^*(s,t) H(s) \, ds. $$ Based on Proposition \ref{prop2.2} we conclude that $\tilde{K}$ is differentiable and it solves \eqref{eqno:2.14}. Let now $\hat{K}$ be another bounded solution of \eqref{eqno:2.14}. We may write \begin{equation} \hat{K}(t) = T^*(\tau, t) \hat{K}(\tau) + \int_t^{\tau} T^*(s,t) H(s) \, ds. \label{eqno:2.21} \end{equation} Based on exponential stability and boundedness of $\hat{K}$ we deduce $$ \lim_{\tau \to \infty} T^*(\tau, t) \hat{K}(\tau) = 0. 
$$ Taking the limit for $\tau \to \infty$ in \eqref{eqno:2.21} we get $$ \hat{K}(t) = \int_t^{\infty} T^*(s,t) H(s) \, ds = \tilde{K}(t), $$ and thus the uniqueness of the bounded solution is proved. (b) If $H(s) \geq 0$, then $T^*(s,t) H(s) \geq 0$, which implies $\tilde{K}(t) \geq 0$. If $H(s) \geq \mu I_n$, then, applying Proposition \ref{prop2.8}, we obtain that $\tilde{K}(t) \geq \tilde \mu I_n$ for all $t \in \mathcal{I}$. (c) Let us suppose that there exists $\theta > 0$ such that $\mathcal{L}(t+\theta) = \mathcal{L}(t)$ and $H(t+\theta) = H(t)$ for all $t \in \mathcal{I}$. We have \begin{align*} \tilde{K}(t+\theta) & = \int_{t+\theta}^{\infty} T^*(s,t+\theta) H(s) \, ds = \int_t^{\infty} T^*(\sigma+\theta, t+\theta) H(\sigma+\theta) \, d\sigma \\ & = \int_t^{\infty} T^*(\sigma, t) H(\sigma) \, d\sigma = \tilde{K}(t), \end{align*} hence $\tilde{K}$ is a $\theta$-periodic function. Let now $\mathcal{L}(t) \equiv \mathcal{L}$, $H(t) \equiv H$, $t \in \mathcal{I}$. In this case the representation formula becomes $$ \tilde{K}(t) = \int_t^{\infty} e^{\mathcal{L}^*(s-t)} H \, ds = \int_0^{\infty} e^{\mathcal{L}^*s} H \, ds = \tilde{K}(0) \quad \mbox{for all} \quad t \in \mathcal{I}, $$ which shows that $\tilde{K}$ is constant. On the other hand the fact that $\mathcal{L}$ is a linear operator defined on a finite dimensional Hilbert space together with exponential stability implies that the eigenvalues of the operators $\mathcal{L}$ and $\mathcal{L}^*$ are located in the left half-plane. Hence $\mathcal{L}^*$ is invertible. This shows that $\check{K} := -(\mathcal{L}^*)^{-1}[H]$ is a constant solution of \eqref{eqno:2.14} but from the uniqueness of the bounded solution of \eqref{eqno:2.14} it follows that $\check{K} = \tilde{K}$, thus the proof is complete. \end{proof} Let $H \colon \mathcal{I} \to \mathcal{S}^n$ be a fixed continuous function. 
If $\mathcal{L}(t) \colon \mathcal{S}^n \to \mathcal{S}^n$ is a linear operator and $t \mapsto \mathcal{L}(t)$ is a continuous function then we associate with $\mathcal{L}$ two linear differential equations on $\mathcal{S}^n$: \begin{equation} \frac{d}{dt} X(t) = \mathcal{L}(t)[X(t)] + H(t) \label{eqno:2.22} \end{equation} and \begin{equation} \frac{d}{dt} Y(t) + \mathcal{L}^*(t)[Y(t)] + H(t) = 0. \label{eqno:2.23} \end{equation} Any solution of equation \eqref{eqno:2.22} has the representation \begin{equation} X(t) = T(t,t_0) X(t_0) + \int_{t_0}^{t} T(t,s) H(s) \, ds \quad \mbox{for} \quad t \geq t_0, \; t, t_0 \in \mathcal{I} \label{eqno:2.24} \end{equation} while any solution of equation \eqref{eqno:2.23} admits the representation \begin{equation} Y(t) = T^*(t_1,t) Y(t_1) + \int_t^{t_1} T^*(s,t) H(s) \, ds \quad \mbox{for} \quad t \leq t_1, \; t \in \mathcal{I}. \label{eqno:2.25} \end{equation} \begin{remark} \label{rmk2.14} \rm \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item The representation formulae \eqref{eqno:2.24} and \eqref{eqno:2.25} suggest to term equation \eqref{eqno:2.22} as the \textbf{forward equation} defined by $\mathcal{L}$ and equation \eqref{eqno:2.23} as the \textbf{backward equation} defined by $\mathcal{L}$. \item From \eqref{eqno:2.24} it is seen that if $H$ is bounded and $\mathcal{L}$ generates an exponentially stable evolution then all solutions of the forward equation \eqref{eqno:2.22} are bounded on any subinterval $[t_0, \infty) \subset \mathcal{I}$. Under the same assumptions the backward equation \eqref{eqno:2.23} has a unique solution which is bounded on all subintervals $[t_0, \infty) \subset \mathcal{I}$, namely the solution given by \eqref{eqno:2.20}. 
\item From \eqref{eqno:2.24}, \eqref{eqno:2.25} it follows that if $\mathcal{L}$ generates a positive evolution and $H(s) \geq 0$ for $s \in \mathcal{I}$ and if there exists $\tau \in \mathcal{I}$ such that $X(\tau) \geq 0$, $Y(\tau) \geq 0$ respectively, then $X(t) \geq 0$ for all $t \geq \tau$ and $Y(t) \geq 0$ for all $t \in (-\infty, \tau] \cap \mathcal{I}$, respectively. \end{enumerate} \end{remark} The next result shows that, in the case when $\mathcal{I} = \mathbb{R}$, a version of Theorem \ref{thm2.13} for the forward differential equation \eqref{eqno:2.22} holds. \begin{prop} \label{prop2.15} Let $\mathcal{L} \colon \mathbb{R} \to \mathcal{B}(\mathcal{S}^n)$ be a continuous and bounded function. Assume that $\mathcal{L}$ generates an exponentially stable evolution. Then: \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item For each bounded and continuous function $H \colon \mathbb{R} \to \mathcal{S}^n$ the forward differential equation \eqref{eqno:2.22} has a unique bounded solution on $\mathbb{R}$, $$ \tilde X(t) = \int_{-\infty}^t T(t,s) H(s) \, ds. $$ \item If $\mathcal{L}$ generates a positive evolution and $H(t) \geq 0$, then $\tilde{X}(t) \geq 0$ for all $t\in \mathbb{R}$, and if $H(t) \gg 0$, then $\tilde{X}(t) \gg 0$. \item If $\mathcal{L}$ and $H$ are $\theta$-periodic functions, then $\tilde{X}$ is a $\theta$-periodic function too. If $\mathcal{L}(t) \equiv \mathcal{L}$ and $H(t) \equiv H$ for all $t \in \mathbb{R}$, then $\tilde{X}$ is constant, and it is given by $$ \tilde{X} = -\mathcal{L}^{-1}[H]. $$ \end{enumerate} \end{prop} \begin{proof} The proof is similar to the one of Theorem \ref{thm2.13} and it is based on the representation formula \eqref{eqno:2.24}. \end{proof} Associated with $\mathcal{L} \colon \mathbb{R} \to \mathcal{B}(\mathcal{S}^n)$ we define $\mathcal{L}^{\sharp} \colon \mathbb{R} \to \mathcal{B}(\mathcal{S}^n)$ by $\mathcal{L}^{\sharp}(t) := \mathcal{L}^*(-t)$. 
If $t \mapsto \mathcal{L}(t)$ is continuous then $t \mapsto \mathcal{L}^{\sharp}(t)$ is also continuous. Let $T^{\sharp}(t,t_0)$ be the linear evolution operator on $\mathcal{S}^n$ defined by the differential equation \[ \frac{d}{dt} S(t) = \mathcal{L}^{\sharp}(t)[S(t)]. \] \begin{prop} \label{prop2.16} The following assertions hold: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item $T^{\sharp}(t,t_0) = T^*(-t_0,-t)$ for all $t, t_0 \in \mathbb{R}$, $T(\cdot, \cdot)$ being the linear evolution operator defined by \eqref{eqno:2.5}. \item $\mathcal{L}$ generates a positive evolution if and only if $\mathcal{L}^{\sharp}$ generates a positive evolution. \item $\mathcal{L}$ generates an exponentially stable evolution if and only if $\mathcal{L}^{\sharp}$ generates an exponentially stable evolution. \item $X \colon \mathbb{R} \to \mathcal{S}^n$ is a solution of the forward equation \eqref{eqno:2.22} if and only if $Y(t) = X(-t)$ defines a solution of the backward equation $$ \frac{d}{dt} Y(t) + [\mathcal{L}^{\sharp}(t)]^*(Y(t)) + H(-t) = 0. $$ \end{enumerate} \end{prop} \begin{proof} (i) follows directly from the uniqueness of the solution of a linear initial value problem. (ii) and (iii) follow from (i), while (iv) is obtained by direct calculation. \end{proof} \begin{remark} \label{rmk2.17} \rm If $\mathcal{L}(t) \equiv \mathcal{L}$ for all $t \in \mathbb{R}$, then $\mathcal{L}^{\sharp} = \mathcal{L}^*$. In this case Proposition \ref{prop2.16} recovers the well-known fact from stationary framework that the operator $\mathcal{L}$ generates an exponentially stable evolution if and only if its adjoint operator $\mathcal{L}^*$ generates an exponentially stable evolution. In the time-varying case the fact that the operator $\mathcal{L}$ generates an exponentially stable evolution does not guarantee that $\mathcal{L}^*$ generates an exponentially stable evolution. 
\end{remark} Combining the results from Theorem \ref{thm2.11} and Proposition \ref{prop2.16} one obtains necessary and sufficient conditions for exponential stability expressed in terms of the forward differential equation \eqref{eqno:2.22} in the case $\mathcal{I} = \mathbb{R}$. \begin{theorem} \label{thm2.18} Let $\mathcal{L} \colon \mathbb{R} \to \mathcal{B}(\mathcal{S}^n)$ be a continuous and bounded operator valued function which generates a positive evolution. Then the following are equivalent: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item $\mathcal{L}$ defines an exponentially stable evolution. \item There exists some $\delta > 0$ such that $$ \int_{-\infty}^t T(t,s) I_n \, ds \leq \delta I_n \quad \mbox{for all} \quad t \in \mathbb{R}. $$ \item The forward differential equation $$ \frac{d}{dt} K(t) = \mathcal{L}(t)[K(t)] + I_n $$ has a bounded and uniformly positive solution. \item For any bounded and continuous function $H \colon \mathbb{R} \to \mathcal{S}^n$ with $H(t) \gg 0$ the differential equation \eqref{eqno:2.22} has a bounded and uniformly positive solution on $\mathbb{R}$. \item There is a bounded and continuous function $H \colon \mathbb{R} \to \mathcal{S}^n$ with $H(t) \gg 0$ such that the corresponding forward differential equation \eqref{eqno:2.22} has a bounded solution $\tilde{X} \colon \mathbb{R} \to \mathcal{S}^n$ with $\tilde{X}(t) \geq 0$. \item There exists a bounded $C^1$-function $K \colon \mathbb{R} \to \mathcal{S}^n$ with bounded derivative and $K(t) \gg 0$ which satisfies $$ \frac{d}{dt} K(t) - \mathcal{L}(t)[K(t)] \gg 0 \quad \mbox{for all} \quad t \in \mathbb{R}. $$ \end{enumerate} \end{theorem} \begin{remark} \label{rmk2.19} \rm \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item When $\mathcal{L}$ is a periodic function with period $\theta$, then the function $K$ in (vi) may be chosen as a $\theta$-periodic one. 
\item If $\mathcal{L}(t) \equiv \mathcal{L}$ then the results stated in Theorems \ref{thm2.11} and \ref{thm2.18} reduce to the known results from the time-invariant case \cite{DaHi01,FrHo5}. \end{enumerate} \end{remark} At the end of this section we shall prove a time-varying version of the result in \cite[Lemma 3.7]{FrHo5}. First we introduce the following concept of detectability which extends the classical definition of the detectability of a linear system to this general framework. \begin{definition} \label{def2.20} \rm If $\mathcal{L}_{A, \Pi_1}$ is defined as in \eqref{eqno:2.10} and if $C \colon \mathcal{I} \to \mathbb{R}^{p \times n}$ is a bounded and continuous function then the pair $(C, \mathcal{L}_{A, \Pi_1})$ -- or equivalently the triple $(C, A, \Pi_1)$ -- is called \textbf{detectable} if there exists a bounded and continuous function $K \colon \mathcal{I} \to \mathbb{R}^{n\times p}$ such that the operator $\mathcal{L}_{A+KC, \Pi_1}$ generates an exponentially stable evolution. A function $K$ having the above-mentioned properties is called a \textbf{stabilizing injection}. \end{definition} Applying the implication (vii) $\Rightarrow$ (i) in Theorem \ref{thm2.11} and taking into account the form of the adjoint operator of $\mathcal{L}_{A+KC, \Pi_1}(t)$ we obtain immediately: \begin{corollary} \label{coro2.21} Let $A \colon \mathcal{I} \to \mathbb{R}^{n \times n}$, $C \colon \mathcal{I} \to \mathbb{R}^{p \times n}$ and $\Pi_1 \colon \mathcal{I} \to \mathcal{B}(\mathcal{S}^n)$ be continuous functions with $\Pi_1(t) \geq 0$ for all $t \in \mathcal{I}$. Then the following are equivalent: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item $(C, A, \Pi_1)$ is detectable. 
\item There exist a bounded $C^1$-function $X \colon \mathcal{I} \to \mathcal{S}^n$ with bounded derivative and $X(t) \gg 0$ and a bounded and continuous function $D \colon \mathcal{I} \to \mathbb{R}^{n\times p}$ which solve the linear differential inequality \begin{align} \frac{d}{dt} X(t) + \mathcal{L}_{A, \Pi_1}^*(t)[X(t)] + D(t) C(t) + C^T(t) D^T(t) \ll 0. \label{eqno:2.26} \end{align} \end{enumerate} Moreover if $(X, D)$ is a solution of \eqref{eqno:2.26} with $X(t) \gg 0$ then $K(t) := X^{-1}(t) D(t)$ is a stabilizing injection. \end{corollary} Definition \ref{def2.20} of the detectability can be extended in a natural way to the case of operators which generate a positive evolution: \begin{definition} \label{def2.22} \rm If $\mathcal{L} \colon \mathcal{I} \to \mathcal{B}(\mathcal{S}^n)$ is a bounded and continuous operator valued function such that $\mathcal{L}$ generates a positive evolution and if $C \colon \mathcal{I} \to \mathbb{R}^{p\times n}$ is a bounded and continuous function then the pair $(C,\mathcal{L})$ is called \textbf{detectable} if there exists a bounded and continuous function $K \colon \mathcal{I} \to \mathbb{R}^{n\times p}$ such that the operator $\mathcal{L}^K(t) \colon \mathcal{S}^n \to \mathcal{S}^n$ with \begin{equation} \mathcal{L}^K(t)[S] := \mathcal{L}(t)[S] + K(t) C(t) S + S C^T(t) K^T(t) \label{eqno:2.27} \end{equation} generates a positive and exponentially stable evolution. \end{definition} Now we prove the following result. \begin{theorem} \label{thm2.23} Let $\mathcal{L} \colon \mathcal{I} \to \mathcal{B}(\mathcal{S}^n)$ be a bounded and continuous operator valued function, and let $C \colon \mathcal{I} \to \mathbb{R}^{p \times n}$ be a bounded and continuous function. 
Assume: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item $\mathcal{L}$ generates a positive evolution; \item $(C,\mathcal{L})$ is detectable; \item The backward differential equation \begin{equation} \frac{d}{dt} P(t) + \mathcal{L}^*(t)[P(t)] + C^T(t) C(t) = 0 \label{eqno:2.28} \end{equation} has a bounded and positive semi-definite solution $P \colon \mathcal{I} \to \mathcal{S}^n$. \end{enumerate} Under these assumptions the operator $\mathcal{L}$ generates an exponentially stable evolution. \end{theorem} \begin{proof} Our proof extends the ideas of \cite[Lemma 3.2]{FCdS98} to the time-varying case. Let $(t_0, H) \in \mathcal{I} \times \mathcal{S}^n_+$ be fixed and let $X(t) = X(t,t_0, H)$ be the solution of the linear differential equation \begin{equation} \frac{d}{dt} X(t) = \mathcal{L}(t)[X(t)], \quad X(t_0) = H. \label{eqno:2.29} \end{equation} Since $\mathcal{L}$ generates a positive evolution we have $X(t) \geq 0$ for all $t \geq t_0$. If $K$ is a stabilizing injection then equation \eqref{eqno:2.29} may be rewritten as \begin{equation} \frac{d}{dt} X(t) = \mathcal{L}^K(t)[X(t)] - K(t)C(t)X(t) - X(t)(K(t)C(t))^T, \label{eqno:2.30} \end{equation} where $\mathcal{L}^K$ is defined in \eqref{eqno:2.27}. Moreover if $T^K(t,s)$ is the linear evolution operator defined by $\mathcal{L}^K$ on $\mathcal{S}^n$, then there exist $\alpha, \beta > 0$ such that $$ \| T^K(t,s) \| \leq \beta e^{-2\alpha(t-s)} \quad \mbox{for} \quad t \geq s, \; t, s \in \mathcal{I}. $$ Consider the perturbed operator $\mathcal{L}_{\varepsilon}$ defined by $\mathcal{L}_{\varepsilon}(t)[X] := \mathcal{L}^K(t)[X] + \varepsilon^2 X$, $t \in \mathcal{I}$, $X \in \mathcal{S}^n$, and let $T_{\varepsilon}(t,s)$ be the linear evolution operator defined by $\mathcal{L}_{\varepsilon}$ on $\mathcal{S}^n$. 
By a standard argument, based on the Gronwall-Bellman Lemma, one obtains that there exists $\varepsilon_0>0$ such that for arbitrary $\varepsilon \in [0, \varepsilon_0 ]$ \begin{equation} \| T_{\varepsilon}(t,s) \| \leq \beta e^{-\alpha(t-s)} \quad \mbox{for} \quad t \geq s, \; t, s \in \mathcal{I}. \label{eqno:2.31} \end{equation} Let $\varepsilon > 0$ be fixed such that \eqref{eqno:2.31} is fulfilled, and let $Y(t) = Y(t,t_0, H)$ be the solution of the initial value problem \begin{equation} \frac{d}{dt} Y(t) = \mathcal{L}_{\varepsilon}(t)[Y(t)] + \frac{1}{\varepsilon^2} K(t) C(t)X(t) C^T(t) K^T(t), \quad Y(t_0) = H. \label{eqno:2.32} \end{equation} Since $X \mapsto \varepsilon^2 X$ is a positive linear operator and $\mathcal{L}^K$ generates a positive evolution, by Proposition \ref{prop2.5} one obtains that $\mathcal{L}_{\varepsilon}$ generates a positive evolution. Thus we may conclude that the solution of the forward differential equation \eqref{eqno:2.32} satisfies $Y(t) \geq 0$ for all $t \geq t_0$. Set $Z(t) := Y(t) - X(t)$. Subtracting \eqref{eqno:2.30} from \eqref{eqno:2.32} we obtain $$ \frac{d}{dt} Z(t) = \mathcal{L}_{\varepsilon}(t)[Z(t)] + G(t), \quad Z(t_0) = 0, $$ where $G(t) := (\varepsilon I_n + \frac{1}{\varepsilon} K(t) C(t)) X(t)(\varepsilon I_n + \frac{1}{\varepsilon} K(t)C(t))^T \geq 0$ for $t \geq t_0$. Invoking again the fact that $\mathcal{L}_{\varepsilon}$ generates a positive evolution, one obtains that $Z(t)\geq 0$, i.e.\ $0 \leq X(t) \leq Y(t)$, $t \geq t_0$, which leads to \begin{equation} \|X(t)\|_2 \leq \|Y(t)\|_2 \quad \mbox{for} \quad t \geq t_0. \label{eqno:2.33} \end{equation} Further we write $$ Y(t) = T_{\varepsilon}(t,t_0)H + \frac{1}{\varepsilon^2} \int_{t_0}^t T_{\varepsilon}(t,s) K(s) C(s) X(s) C^T(s) K^T(s) \, ds. 
$$ With \eqref{eqno:2.31} we get \begin{equation} \|Y(t)\|_2 \leq \beta e^{-\alpha(t-t_0)} \|H\|_2 + \frac{\beta}{\varepsilon^2} \rho \int_{t_0}^t e^{-\alpha(t-s)} \|C(s)X(s)C^T(s)\|_2 \, ds, \; t \geq t_0, \label{eqno:2.34} \end{equation} where $\rho := \sup_{s \in \mathcal{I}} \|K(s)\|_2^2$. On the other hand \begin{align*} \|C(s)X(s)C^T(s)\|_2 &= \lambda_{\max} [C(s)X(s)C^T(s)] \\ & \leq \mathop{\rm Tr} [C(s)X(s)C^T(s)] = \langle C^T(s)C(s), X(s)\rangle. \end{align*} Using \eqref{eqno:2.28} we infer \begin{align*} \langle C^T(s)C(s), X(s) \rangle &= - \langle \frac{d}{ds} P(s), X(s) \rangle - \langle \mathcal{L}^*(s)[P(s)], X(s) \rangle \\ & = - \langle \frac{d}{ds}P(s), X(s) \rangle - \langle P(s), \frac{d}{ds} X(s) \rangle = -\frac{d}{ds} \langle P(s), X(s) \rangle. \end{align*} So we have $$ \int_{t_0}^t \|C(s)X(s)C^T(s)\|_2 \, ds \leq \langle P(t_0), X(t_0) \rangle - \langle P(t), X(t) \rangle \quad \mbox{for} \quad t \geq t_0. $$ Since $\langle P(t), X(t) \rangle \geq 0$ and $P$ is bounded, we deduce that there exists $\tilde{\rho} > 0$ such that $$ \int_{t_0}^t \|C(s)X(s)C^T(s)\|_2 \, ds \leq \tilde{\rho} \|H\|_2 \quad \mbox{for} \quad t \geq t_0. $$ Hence, we get for $\tau > t_0$ \begin{equation} \label{eqno:2.35} \begin{aligned} \lefteqn{\int_{t_0}^{\tau} \int_{t_0}^t e^{-\alpha(t-s)} \|C(s)X(s)C^T(s)\|_2 \, ds \, dt} \\ & \quad = \int_{t_0}^{\tau} \int_s^{\tau} e^{-\alpha(t-s)} \, dt \|C(s)X(s)C^T(s)\|_2 \, ds \\ & \quad \leq \frac{1}{\alpha} \int_{t_0}^{\tau} \|C(s)X(s)C^T(s)\|_2 \, ds \leq \frac{\tilde{\rho}}{\alpha}\|H\|_2. \end{aligned} \end{equation} Combining \eqref{eqno:2.34} with \eqref{eqno:2.35} we obtain \begin{equation} \int_{t_0}^{\tau} \|Y(t)\|_2 \, dt \leq \delta \|H\|_2 \quad \mbox{for} \quad \tau \geq t_0, \label{eqno:2.36} \end{equation} where $\delta := \frac{\beta}{\alpha}(1 + \frac{\rho \tilde{\rho}}{\varepsilon^2})$ is independent of $(t_0, H)$. 
$$ Taking the limit for $\tau \to \infty$ in \eqref{eqno:2.36} and using \eqref{eqno:2.33} we obtain that \begin{equation} \int_{t_0}^{\infty} \|X(t)\|_2 \, dt \leq \delta \|H\|_2 \quad \mbox{for} \quad t_0 \in \mathcal{I}, \; H \in \mathcal{S}^n_+. \label{eqno:2.37} \end{equation} For every $H \in \mathcal{S}^n$ there exist $H_i \in \mathcal{S}^n_+$, $i = 1, 2$, such that $H = H_1-H_2$ and $\|H\|_2 = \max \{ \|H_1\|_2, \|H_2\|_2 \}$. Since $X(t,t_0,H) = X(t,t_0,H_1) - X(t,t_0,H_2)$ we may infer that \eqref{eqno:2.37} holds for arbitrary $t_0 \in \mathcal{I}$ and $H \in \mathcal{S}^n$. The conclusion in the statement follows now from Datko-type criteria for exponential stability and the proof is complete. \end{proof} Finally we remark that the result proved in Theorem \ref{thm2.23} is an alternative to the implication (vi) $\Rightarrow$ (i) in Theorem \ref{thm2.11} for the case when $H(t)$ is not uniformly positive but only positive semi-definite. Here the loss of the uniform positivity of the forcing term of equation \eqref{eqno:2.14} is compensated by detectability. 
\section{A general class of Riccati-type differential equations} \subsection{Notation and preliminary remarks} In this section we deal with nonlinear differential equations of the form \begin{equation} \label{eqno:3.1} \begin{aligned} \lefteqn{\frac{d}{dt} X(t) + A^T(t)X(t) + X(t)A(t) + M(t) + \Pi_1(t)[X(t)]} \\ & \quad {} - \big\{ X(t)B(t) +\Pi_{12}(t)[X(t)] + L(t) \big\} \big\{ R(t) + \Pi_2(t)[X(t)]\big\}^{-1} \\ & \qquad \times \big\{ X(t)B(t) + \Pi_{12}(t)[X(t)] + L(t) \big\}^T = 0, \end{aligned} \end{equation} where $A \colon \mathcal{I} \to \mathbb{R}^{n\times n}$, $B \colon \mathcal{I} \to \mathbb{R}^{n \times m}$, $L \colon \mathcal{I} \to \mathbb{R}^{n \times m}$, $M \colon \mathcal{I} \to \mathcal{S}^n$ and $R \colon \mathcal{I} \to \mathcal{S}^m$ are bounded and continuous functions on some right unbounded interval $\mathcal{I} \subset \mathbb{R}$, and $\Pi \colon \mathcal{I} \to C(\mathcal{S}^n, \mathcal{S}^{n+m})$, $C(\mathcal{S}^n, \mathcal{S}^{n+m})$ being the space of continuous operators defined on $\mathcal{S}^n$ with values in $\mathcal{S}^{n+m}$, with \begin{equation} \Pi(t)[X] = \begin{pmatrix} \Pi_1(t)[X] & \Pi_{12}(t)[X] \\ \big( \Pi_{12}(t)[X] \big)^T & \Pi_2(t)[X] \end{pmatrix}, \quad X \in \mathcal{S}^n, \label{eqno:3.2} \end{equation} is a bounded and continuous operator valued function. Throughout this section we assume that $X \to \Pi(t)[X]$ satisfies the assumptions: \begin{itemize} \item[($\Pi_1$)] $X \to \Pi(t)[X]$ is uniformly globally Lipschitz \item[($\Pi_2$)] $\Pi(t)[X_1] \leq \Pi(t)[X_2]$ for $t \in \mathcal{I}$ and $X_1, X_2 \in \mathcal{S}^n$ with $X_1 < X_2$ \item[($\Pi_3$)] $\Pi(t)[0] = 0$. \end{itemize} \begin{remark} \label{rmk3.1} \rm \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item Condition $\mathbf{(\Pi_3)}$ is satisfied without loss of generality since if $\Pi(t)[0] \neq 0$ we can modify the values of $M(t)$, $L(t)$, $R(t)$ to achieve $\Pi(t)[0] = 0$. 
\item If assumptions $\mathbf{(\Pi_2)}$ and $\mathbf{(\Pi_3)}$ are fulfilled then for each $t$, $\Pi(t)$ is a positive operator. \end{enumerate} \end{remark} In some particular cases of the operator $\Pi$ equation \eqref{eqno:3.1} was investigated in connection with certain control problems for linear control systems. Thus, if $\Pi \equiv 0$, \eqref{eqno:3.1} reduces to the well-known Riccati differential equation intensively studied in particular since the pioneering work of Kalman \cite{Kalm60}; we shall show that the solutions of \eqref{eqno:3.1} have many of the nice properties of the solutions of symmetric matrix Riccati differential equations. In the special case \begin{equation} \Pi(t)[X] = \begin{pmatrix} C(t) & D(t) \end{pmatrix}^T X \begin{pmatrix} C(t) & D(t) \end{pmatrix} \label{eqno:3.3} \end{equation} the corresponding equation \eqref{eqno:3.1} was considered in several papers, in connection with the linear-quadratic optimization problem with indefinite sign for linear stochastic systems with multiplicative white noise (see \cite{ARCMZ01,ARZh00,ChLZ98}). Systems of coupled rational differential equations of type \eqref{eqno:3.1} with $\Pi(t)$ as in \eqref{eqno:3.3} were studied in \cite{preprint00}. Equation \eqref{eqno:3.1} was investigated in detail in \cite{FrHo6} and \cite{FrHo5}, where it was partially assumed that $A$, $B$, $L$, $M$, $R$ and $\Pi(X)$ are constant. In this paper we present various results which extend \cite{preprint00,FrHo5} to the general case of equations of type \eqref{eqno:3.1}. We mention that it is also possible to derive discrete-time versions of the results obtained in this section; in the time-invariant case they can be found in \cite{FrHo4} and \cite[Chapter 6.8]{AFIJ03}. 
\begin{definition} \label{def3.2} \rm A $C^1$-function $X \colon \mathcal{I}_1 \subset \mathcal{I} \to \mathcal{S}^n$ is a solution of equation \eqref{eqno:3.1} if $$ \det \big\{ R(t) + \Pi_2(t)[X(t)] \big\} \neq 0 \quad \mbox{for all} \quad t \in \mathcal{I}_1 $$ and if $X$ verifies the relation \eqref{eqno:3.1} on $\mathcal{I}_1$. \end{definition} We introduce the notation $$ \mathcal{D}(\mathcal{R}) := \left\{ (t,X) \in \mathcal{I} \times \mathcal{S}^n \colon \det \big\{ R(t) + \Pi_2(t)[X] \big\} \neq 0\right\}$$ and $\mathcal{R} \colon \mathcal{D}(\mathcal{R}) \to \mathcal{S}^n$ by \begin{equation} \label{eqno:3.4} \begin{aligned} \mathcal{R}(t,X) & = A^T(t)X + XA(t) + \Pi_1(t)[X] + M(t) \\ & \qquad {} - \big\{ X B(t) + \Pi_{12}(t)[X] + L(t) \big\} \big\{R(t)+ \Pi_2(t)[X] \big\}^{-1} \\ & \qquad {} \times \big\{ XB(t) + \Pi_{12}(t)[X] + L(t) \big\}^T. \end{aligned} \end{equation} So equation \eqref{eqno:3.1} can be written in a compact form as \begin{equation} \frac{d}{dt} X(t) + \mathcal{R}(t,X(t)) = 0. \label{eqno:3.5} \end{equation} As we can see the operator $\mathcal{R}$ and consequently equation \eqref{eqno:3.5} are associated to the quadruple $\Sigma = (A, B, \Pi, \mathcal{Q})$ where $(A, B, \Pi)$ are as before and $\mathcal{Q}$ is defined by $$ \mathcal{Q}(t) := \begin{pmatrix} M(t) & L(t) \\ L(t)^T & R(t) \end{pmatrix}. $$ We introduce the so called generalized dissipation matrix $\lambda^{\Sigma} \colon C^1(\mathcal{I}, \mathcal{S}^n) \to \mathcal{S}^{n+m}$ associated to the quadruple $\Sigma$ by $$ \lambda^{\Sigma}(t,X) = \left( \begin{array}{cc} \lambda_1(t) & X B(t) + \Pi_{12}(t)[X] + L(t) \\ \big\{ X B(t) + \Pi_{12}(t)[X] + L(t) \big\}^T & R(t) + \Pi_2(t)[X] \end{array} \right), $$ where $$ \lambda_1(t) = \frac{d}{dt} X(t) + A^T(t)X(t) + X(t)A(t) + \Pi_1(t)[X(t)] + M(t) $$ and $C^1(\mathcal{I}, \mathcal{S}^n)$ is the space of $C^1$-functions defined on the interval $\mathcal{I}$ taking values in $\mathcal{S}^n$. 
Notice that \[ \frac{d}{dt} X(t) + \mathcal{R}(t,X(t)) \] is the Schur complement of $R(t) + \Pi_2(t)[X(t)]$ in $\lambda^\Sigma(t,X(t))$. The following two subsets of $$ C^1_b(\mathcal{I}, \mathcal{S}^n) = \big\{ X \in C^1(\mathcal{I}, \mathcal{S}^n) \colon X, \frac{d}{dt} X \mbox{ are bounded} \big\}$$ will play an important role in the next developments: \begin{gather*} \Gamma^{\Sigma} = \left\{ X \in C^1_b(\mathcal{I},\mathcal{S}^n) \colon \lambda^{\Sigma}[X(t)] \geq 0, \; R(t) +\Pi_2(t)[X(t)] \gg 0, \; t \in \mathcal{I} \right\}, \\ \tilde \Gamma^{\Sigma} = \left\{ X \in C^1_b(\mathcal{I},\mathcal{S}^n) \colon \lambda^{\Sigma}[X(t)] \gg 0, \; t \in \mathcal{I} \right\}. \end{gather*} \begin{remark} \label{rmk3.3} \rm \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item In the case when $\Pi_2(t)$ is the zero operator then in the definition of the set $\Gamma^{\Sigma}$ we ask $R(t) \gg 0$ which is the usual condition used in the case of Riccati differential equations of deterministic and stochastic control. If $\Pi_2(t)$ is not the zero operator it is not necessary to make any assumptions concerning the sign of $R(t)$. \item We shall see later that if $A$, $B$, $\Pi$, $\mathcal{Q}$ are $\theta$-periodic functions and if $\Gamma^{\Sigma}$ is not empty ($\tilde{\Gamma}^{\Sigma}$ is not empty, respectively) then $\Gamma^{\Sigma}$ contains also a $\theta$-periodic function ($\tilde{\Gamma}^{\Sigma}$ contains also a $\theta$-periodic function). Moreover, we shall show that if $A(t) = A$, $B(t) = B$, $\Pi(t) = \Pi$, $\mathcal{Q}(t) = \mathcal{Q}$ for all $t \in \mathbb{R}$ and if $\Gamma^{\Sigma}$ is not empty, ($\tilde{\Gamma}^{\Sigma}$ is not empty, respectively) then there exists a constant symmetric matrix $X \in \Gamma^{\Sigma}$ ($X \in \tilde{\Gamma}^{\Sigma}$, respectively). 
\item Based on the Schur complement one obtains that $\Gamma^{\Sigma}$ contains in particular all bounded solutions of equation \eqref{eqno:3.1} verifying the additional condition $R(t) + \Pi_2(t)[X(t)] \gg 0$. \end{enumerate} \end{remark} The following result will be used frequently in the proofs of the remainder of the paper; it follows easily by direct calculation (see \cite[Lemma 4.2]{FrHo5}). \begin{lemma} \label{lm3.4} If $W \colon \mathcal{I} \to \mathbb{R}^{m \times n}$ is a continuous function, then for all $(t,X) \in\mathcal{D}(\mathcal{R})$ \begin{align*} \mathcal{R}(t,X) & = [A(t)+B(t)W(t)]^T X + X[A(t)+B(t)W(t)] \\ & \qquad {} - \big[ W(t)-F^X(t) \big]^T \big\{ R(t) + \Pi_2(t)[X] \big\} \big[ W(t)-F^X(t) \big] \\ & \qquad {} + \begin{pmatrix} I_n \\ W(t)\end{pmatrix}^T \big[ \mathcal{Q}(t) + \Pi(t)[X] \big] \begin{pmatrix} I_n \\ W(t) \end{pmatrix} \end{align*} where here and below $$ F^X(t) := - \big\{ R(t) + \Pi_2(t)[X] \big\}^{-1} \big\{ XB(t) + \Pi_{12}(t)[X] + L(t) \big\}^T $$ is the \textbf{feedback matrix} defined by $X$. \end{lemma} In the following we will use operators $\mathcal{L}_{A+BW}(t) \colon \mathcal{S}^n \to \mathcal{S}^n$, where $$\mathcal{L}_{A+BW}(t)[X]=[A(t)+B(t)W(t)]X+X[A(t)+B(t)W(t)]^T$$ and $\Pi_W(t) \colon \mathcal{S}^n \to \mathcal{S}^n$ defined by \begin{equation} \Pi_W(t)[X] = \begin{pmatrix} I_n \\ W(t) \end{pmatrix}^T \Pi(t)[X] \begin{pmatrix} I_n \\ W(t) \end{pmatrix}. \label{eqno:3.6} \end{equation} It is clear that $\Pi_W(t)$ is a monotone increasing operator for all $t$. With these notations $\mathcal{R}(t,X)$ can be rewritten as \begin{equation} \label{eqno:3.7} \begin{aligned} \mathcal{R}(t,X) & = \mathcal{L}_{A+BW}^*(t)[X] + \Pi_W(t)[X] + M_W(t) \\ & \qquad {} - \big[ W(t)-F^X(t) \big]^T \big\{R(t) + \Pi_2(t)[X] \big\} \big[ W(t) - F^X(t) \big] \end{aligned} \end{equation} where $$ M_W(t) = \begin{pmatrix} I_n \\ W(t) \end{pmatrix}^T \mathcal{Q}(t) \begin{pmatrix} I_n \\ W(t) \end{pmatrix}. 
$$ \subsection{A comparison theorem and its consequences} First we present a result which extends the conclusions of Remark \ref{rmk2.14}, (c), to a nonlinear framework. The proof of this result is based on the techniques of Volterra approximations (as in Proposition \ref{prop2.5}) and it is omitted for brevity. \begin{prop} \label{prop3.5} Let $\mathcal{L} \colon \mathcal{I} \to \mathcal{B}(\mathcal{S}^n)$, $\hat{\Pi} \colon \mathcal{I} \to C(\mathcal{S}^n, \mathcal{S}^n)$ be bounded and continuous operator valued functions. Assume that $\mathcal{L}$ generates a positive evolution and $\hat{\Pi}$ satisfies the assumptions $(\mathbf{\Pi_1})$ -- $(\mathbf{\Pi_3})$. Let $H \colon \mathcal{I} \to \mathcal{S}^n_+$ be a continuous and bounded function. Under these conditions the following hold: \begin{enumerate} \item[(i)] If $X \colon \mathcal{I} \to \mathcal{S}^n$ is a global solution of the nonlinear differential equation $$\frac{d}{dt} X(t) = \mathcal{L}(t)[X(t)] + \hat{\Pi}(t)[X(t)] + H(t)$$ such that $X(\tau) \geq 0$ for some $\tau \in \mathcal{I}$, then $X(t) \geq 0$ for all $t \geq \tau$. \item[(ii)] If $Y \colon \mathcal{I} \to \mathcal{S}^n$ is a global solution of the nonlinear differential equation $$\frac{d}{dt} Y(t) + \mathcal{L}^*(t)[Y(t)] + \hat{\Pi}(t)[Y(t)] + H(t) = 0$$ such that $Y(\tau) \geq 0$ for some $\tau \in \mathcal{I}$, then $Y(t) \geq 0$ for all $t \in (-\infty, \tau] \cap \mathcal{I}$. \end{enumerate} \end{prop} As a consequence of the above Proposition one obtains the following important result concerning the monotonic dependence of the solutions of equation \eqref{eqno:3.1} with respect to the data (which was already proved in \cite{FrHo5} under the additional assumption that $X \mapsto \Pi(t)[X]$ is linear; for an elementary special case see \cite[Corollary 3.4]{ARCMZ01}). 
\begin{theorem}[Comparison Theorem] \label{thm3.6} Let $\hat{\mathcal{R}}$ be the operator \eqref{eqno:3.4} associated to the quadruple $\hat \Sigma = (A, B, \Pi, \hat{\mathcal{Q}})$ and $\tilde{\mathcal{R}}$ be the operator of type \eqref{eqno:3.4} associated to the quadruple $\tilde{\Sigma} = (A, B, \Pi, \tilde{\mathcal{Q}})$ where $A$, $B$, $\Pi$ are as before and $\hat{\mathcal{Q}}(t) = \left( \begin{smallmatrix} \hat{M}(t) & \hat{L}(t) \\ \hat{L}(t)^T & \hat{R}(t) \end{smallmatrix} \right)$, $\tilde{\mathcal{Q}}(t) = \left( \begin{smallmatrix} \tilde{M}(t) & \tilde{L}(t) \\ \tilde{L}(t)^T & \tilde{R}(t) \end{smallmatrix} \right)$ with $\hat{L}(t), \tilde{L}(t) \in \mathbb{R}^{n \times m}$, $\hat{M}(t), \tilde{M}(t) \in \mathcal{S}^n$ and $\hat{R}(t), \tilde{R}(t) \in \mathcal{S}^m$. Let $X_i \colon \mathcal{I}_1 \subset \mathcal{I} \to\mathcal{S}^n$, $i = 1,2$, be solutions of $$ \frac{d}{dt} X_1(t) + \hat{\mathcal{R}}(t, X_1(t)) = 0, \qquad \frac{d}{dt} X_2(t) + \tilde{\mathcal{R}}(t, X_2(t)) = 0. $$ Assume that \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item $\hat{\mathcal{Q}}(t) \geq \tilde{\mathcal{Q}}(t)$ for all $t \in \mathcal{I}$; \item $\tilde{R}(t) + \Pi_2(t)[X_2(t)] > 0$ for $t \in \mathcal{I}_1$; \item there exists $\tau \in \mathcal{I}_1$ such that $X_1(\tau) \geq X_2(\tau)$. \end{enumerate} Under these conditions we have $X_1(t)\geq X_2(t)$ for all $t \in(-\infty, \tau] \cap \mathcal{I}_1$. \end{theorem} \begin{proof} Let $$F_1(t) := - \big\{ \hat{R}(t) + \Pi_2(t)[X_1(t)] \big\}^{-1} \big\{ X_1(t)B(t) + \Pi_{12}(t)[X_1(t)] + \hat{L}(t) \big\}^T $$ and $$ F_2(t) := - \big\{ \tilde{R}(t) + \Pi_2(t)[X_2(t)] \big\}^{-1} \big\{ X_2(t)B(t) + \Pi_{12}(t)[X_2(t)] + \tilde{L}(t) \big\}^T. 
$$ Applying Lemma \ref{lm3.4} for $W(t)=F_1(t)$ one obtains $$ \frac{d}{dt} X_1(t) + \mathcal{L}_{A+BF_1}^*(t)[X_1(t)] + \Pi_{F_1}(t)[X_1(t)] + \hat{M}_{F_1}(t) = 0 $$ and \begin{align*} \lefteqn{\frac{d}{dt} X_2(t) + \mathcal{L}_{A+BF_1}^*(t)[X_2(t)] + \Pi_{F_1}(t)[X_2(t)] + \tilde{M}_{F_1}(t)} \\ & \quad {} - \big[ F_1(t) - F_2(t) \big]^T \big\{ \tilde{R}(t) + \Pi_2(t)[X_2(t)] \big\} \big[ F_1(t)-F_2(t) \big] = 0. \end{align*} This leads to $$ \frac{d}{dt} \big[ X_1(t) - X_2(t) \big] + \mathcal{L}_{A+BF_1}^*(t) \big[ X_1(t) - X_2(t) \big] + \hat{\Pi}(t) \big[ X_1(t) - X_2(t) \big] + H(t) = 0, $$ where $$\hat{\Pi}(t)[Y] := \Pi_{F_1}(t)[Y + X_2(t)] - \Pi_{F_1}(t)[X_2(t)]$$ and \[ H(t) := \big[ F_1(t) - F_2(t) \big]^T \big\{ \tilde{R}(t) + \Pi_2(t)[X_2(t)] \big\} \big[ F_1(t) - F_2(t) \big] +\hat{M}_{F_1}(t) - \tilde{M}_{F_1}(t). \] Since $$ \hat{M}_{F_1}(t) - \tilde{M}_{F_1}(t) = \begin{pmatrix} I_n \\ F_1(t) \end{pmatrix}^T \big[ \hat{\mathcal{Q}}(t) - \tilde{\mathcal{Q}}(t)\big] \begin{pmatrix} I_n \\ F_1(t) \end{pmatrix} \geq 0 $$ it follows that $H(t)\geq 0$ for all $t \in \mathcal{I}_1$. On the other hand it is not difficult to see that the above operator $\hat{\Pi}(t)$ verifies the hypotheses ($\mathbf{\Pi_1}$) -- ($\mathbf{\Pi_3}$). Applying Proposition \ref{prop3.5}, (ii), to the backward equation verified by $X_1 - X_2$ one gets that $X_1(t) - X_2(t) \geq 0$ for all $t \in (-\infty, \tau] \cap \mathcal{I}_1$ and thus the proof is complete. \end{proof} Using the above theorem we prove the following result concerning the maximal interval of definition of a solution of \eqref{eqno:3.1} with given terminal conditions. \begin{theorem} \label{thm3.7} Assume that $\Sigma = (A, B, \Pi, \mathcal{Q})$ satisfies $\Gamma^{\Sigma} \neq \emptyset$. 
Let $$ \tilde{\mathcal{D}}(\mathcal{R}) := \left\{ (\tau, X) \in \mathcal{D}(\mathcal{R}) \colon \exists \hat{X} \in \Gamma^{\Sigma} \mbox{ such that } X \geq \hat{X}(\tau) \right\} $$ and let $X(\cdot, \tau, X_0)$ be the solution of equation \eqref{eqno:3.1} with $X(\tau, \tau, X_0) = X_0$. If $(\tau, X_0)\in \tilde{\mathcal{D}}(\mathcal{R})$ then the solution $X(\cdot, \tau, X_0)$ is well defined on $(-\infty, \tau]\cap \mathcal{I}$. \end{theorem} \begin{proof} Let $\mathcal{I}_{\tau, X_0} \subset (-\infty, \tau]$ be the maximal interval on which $X(\cdot, \tau, X_0)$ is defined and let $\hat{X} \in \Gamma^{\Sigma}$ be such that $X_0 \geq \hat{X}(\tau)$. Obviously there exists a bounded and continuous function $M_{\ell} \colon \mathcal{I} \to \mathcal{S}^n$ such that $M_{\ell}(t) \leq 0$ and $$\frac{d}{dt} \hat{X}(t) + \mathcal{R}(t, \hat{X}(t)) + M_{\ell}(t) = 0. $$ Applying Theorem \ref{thm3.6} for the quadruples $\hat{\Sigma} = \Sigma$ and $\tilde{\Sigma} = (A, B, \Pi, \tilde{\mathcal{Q}})$ with $$ \tilde{\mathcal{Q}}(t) := \begin{pmatrix} M(t) + M_{\ell}(t) & L(t) \\ L^T(t) & R(t) \end{pmatrix} = \mathcal{Q}(t) + \begin{pmatrix} M_{\ell}(t) & 0 \\ 0 & 0 \end{pmatrix} $$ we conclude that \begin{equation} X(t, \tau, X_0) \geq \hat{X}(t) \quad \mbox{for all} \quad t \in \mathcal{I}_{\tau, X_0}. \label{eqno:3.8} \end{equation} Let $Y$ be the solution of the terminal value problem \begin{equation} \frac{d}{dt} Y(t) + A^T(t) Y(t) + Y(t) A(t) + \Pi_1(t)[Y(t)] + M(t) = 0, \quad Y(\tau) = X_0. \label{eqno:3.9} \end{equation} Since $Y \to \Pi_1(t)[Y]$ is uniformly globally Lipschitz it follows that $Y(t)$ is well defined for all $t \in \mathcal{I}$. By direct calculation we obtain that \begin{equation} \frac{d}{dt} \big[ Y(t) - X(t) \big] + \mathcal{L}_A^*(t) \big[ Y(t) - X(t) \big] + \hat{\Pi}(t) \big[ Y(t) - X(t) \big] + \hat{H}(t) = 0 \label{eqno:3.10} \end{equation} for $t \in \mathcal{I}_{\tau, X_0}$ where $X(t) = X(t, \tau, X_0)$. 
Here $$ \hat{H}(t) := F^T(t) \big\{ R + \Pi_2(t)[X(t)] \big\} F(t) $$ with $F(t) := F^X(t)$, and the map $Y \to \hat{\Pi}(t)[Y]$ is defined by $$ \hat{\Pi}(t)[Y] = \Pi_1(t)[Y + X(t)] - \Pi_1(t)[X(t)]. $$ >From \eqref{eqno:3.8} and assumption ($\mathbf{\Pi_2}$) we deduce that $$ R(t) + \Pi_2(t)[X(t)] \geq R(t) + \Pi_2(t)[\hat{X}(t)] \gg 0. $$ So $\hat{H}(t) \geq 0$ for $t \in \mathcal{I}_{\tau, X_0}$. Proposition \ref{prop3.5}, (ii), applied in the case of equation \eqref{eqno:3.10}, gives \begin{equation} X(t) \leq Y(t) \label{eqno:3.11} \end{equation} for all $t \in \mathcal{I}_{\tau, X_0}$. From \eqref{eqno:3.8}, \eqref{eqno:3.11} and $\hat{X} \in \Gamma^\Sigma$ it follows easily that $X(t,\tau,X_0)$ is defined for all $t \in (-\infty, \tau] \cap \mathcal{I}$ and thus the proof ends. \end{proof} The proof of Theorem \ref{thm3.7} shows that -- as a consequence of the Comparison Theorem -- each element $\hat{X} \in \Gamma^\Sigma$ is providing a lower bound for the solution $X(\cdot, \tau, X_0)$ of \eqref{eqno:3.1} (see \eqref{eqno:3.8}), whereas the solution $Y$ of \eqref{eqno:3.9} gives an upper bound. \begin{corollary} \label{coro3.8} Assume that $0 \in \Gamma^{\Sigma}$. Then for all $(\tau, X_0) \in \mathcal{I} \times \mathcal{S}^n_+$ the solution $X(\cdot, \tau, X_0)$ of equation \eqref{eqno:3.1} is defined on the whole interval $(- \infty, \tau] \cap \mathcal{I}$ and fulfills there the inequality $$ 0 \leq X(t, \tau, X_0) \leq Y(t), $$ where $Y$ is the solution of \eqref{eqno:3.9}. Moreover if $X_0 > 0$ then $X(t, \tau, X_0) > 0$ for all $t \in \mathcal{I}$ with $t \leq \tau$. \end{corollary} \begin{proof} Since $0 \in \Gamma^{\Sigma}$ it follows that $\mathcal{I} \times \mathcal{S}_+^n \subset \tilde{\mathcal{D}}(\mathcal{R})$. So from the above theorem one obtains that $X(t, \tau, X_0)$ is well defined for all $t \in (-\infty, \tau] \cap \mathcal{I}$ for arbitrary $(\tau, X_0) \in \mathcal{I} \times \mathcal{S}_+^n$. 
The inequality $X(t, \tau, X_0) \geq 0$ is just \eqref{eqno:3.8} for $\hat{X}(t) \equiv 0$. On account of \eqref{eqno:3.11} it remains to prove that $X(t, \tau, X_0) > 0$ if $X_0 > 0$. To this end we set $X(t) = X(t, \tau, X_0)$ and $F(t) = F^X(t)$ for $t \in \mathcal{I}$ with $t \leq \tau$. Applying Lemma \ref{lm3.4} for $W(t) = F(t)$ one obtains $$ \frac{d}{dt} X(t) + \mathcal{L}_{A+BF}^*(t)[X(t)] + \Pi_F(t)[X(t)] + M_F(t) = 0. $$ Further we write the representation formula $$ X(t) = \Phi_{A+BF}^T(\tau, t) X_0 \Phi_{A+BF}(\tau, t) + \int_t^\tau \Phi_{A+BF}^T (s,t) H(s) \Phi_{A+BF}(s,t) \, ds, $$ where $\Phi_{A+BF}(s,t)$ is the fundamental matrix solution defined by $$ \frac{d}{ds} \Phi_{A+BF}(s,t) = \big[ A(s) + B(s) F(s) \big] \Phi_{A+BF}(s,t), \quad \Phi_{A+BF}(t,t) = I $$ and where $$ H(s) = \Pi_F(s)[X(s)] + M_F(s) \quad \mbox{for} \quad s \in \mathcal{I} \quad \mbox{with} \quad s \leq \tau. $$ The assumption $0 \in \Gamma^{\Sigma}$ is equivalent to \begin{equation} R(t) \gg 0 \quad \mbox{and} \quad \begin{pmatrix} M(t) & L(t) \\ L^T(t) & R(t) \end{pmatrix} \geq 0. \label{eqno:3.13} \end{equation} From \eqref{eqno:3.13} and the monotonicity of $\Pi(s)[\cdot]$ we conclude that $H(s) \geq 0$ for all $s \in \mathcal{I}$ with $s \leq \tau$. From the representation formula we obtain that $$X(t) \geq \Phi_{A+BF}^T(\tau,t) X_0 \Phi_{A+BF}(\tau,t) > 0. $$ For the last inequality we have taken into account that $X_0 > 0$ and $\Phi_{A+BF}(\tau, t)$ is invertible, thus the proof is complete. \end{proof} The next result provides a set of sufficient conditions which guarantee the existence of the minimal positive semi-definite solution $\tilde{\tilde{X}}$ of equation \eqref{eqno:3.1}. 
\begin{theorem} \label{thm3.9} Assume that: \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item $0 \in \Gamma^{\Sigma}$; \item The nonlinear differential equation \eqref{eqno:3.9} has a bounded solution $\tilde{X} \colon \mathcal{I} \to \mathcal{S}^n$ such that $\tilde{X}(t) \geq 0$. \end{enumerate} Under these conditions the differential equation \eqref{eqno:3.1} has a solution $\tilde{\tilde{X}} \colon \mathcal{I} \to \mathcal{S}^n$ with the additional property $0 \leq \tilde{\tilde{X}}(t) \leq \bar{X}(t)$ for all $t \in \mathcal{I}$, for any bounded and positive semi-definite solution $\bar{X} \colon \mathcal{I} \to \mathcal{S}^n$ of equation \eqref{eqno:3.1}. Additionally if $A$, $B$, $\Pi$, $\mathcal{Q}$ are periodic functions with period $\theta > 0$, then $\tilde{\tilde{X}}$ is a periodic function with the same period $\theta$. Moreover if $A(t) \equiv A$, $B(t) \equiv B$, $\Pi(t) \equiv \Pi$, $\mathcal{Q}(t) \equiv \mathcal{Q}$ for all $t \in \mathcal{I}$, then $\tilde{\tilde{X}}$ is constant and it solves the nonlinear algebraic equation \begin{equation} \label{eqno:3.14} \begin{aligned} \lefteqn{A^T X + X A + \Pi_1(X) + M - [X B + \Pi_{12}(X) + L]} \\ & \quad \times [R + \Pi_2(X)]^{-1} [X B + \Pi_{12}(X) + L]^T = 0. \end{aligned} \end{equation} \end{theorem} \begin{proof} For each $\tau \in \mathcal{I}$ let $X_{\tau}(t) = X(t,\tau,0)$ be the solution of (3.1) such that $X_{\tau}(\tau)=0$. From Corollary \ref{coro3.8} we have that $X_{\tau}$ is well defined on $(-\infty, \tau] \cap \mathcal{I}$ and $X_{\tau}(t) \geq 0$ for all $t \in \mathcal{I}$ with $t \leq \tau$. We show that: \begin{enumerate} \item[($\alpha)$] $\quad X_{\tau_1}(t)\leq X_{\tau_2}(t)$ for all $t \in \mathcal{I}$ with $t \leq \tau_1 < \tau_2$. \item[($\beta)$] $\quad X_{\tau}(t) \leq \tilde{Y}(t)$ for all $t \in \mathcal{I}$ with $t \leq \tau$, where $\tilde{Y}$ is a bounded and positive semi-definite solution of equation \eqref{eqno:3.9}. 
\end{enumerate} If $\tau_1 < \tau_2 \in \mathcal{I}$, then $X_{\tau_2}(\tau_1) \geq 0 = X_{\tau_1}(\tau_1)$. Then, based on Theorem \ref{thm3.6} we obtain that $X_{\tau_2}(t) \geq X_{\tau_1}(t)$ for $t \in (-\infty, \tau_1] \cap \mathcal{I}$ and thus item $\alpha)$ is valid. On the other hand if $\tilde{Y} \colon \mathcal{I} \to \mathcal{S}^n$ is a bounded and positive semi-definite solution of equation \eqref{eqno:3.9} then for arbitrary $\tau \in \mathcal{I}$ we can write $$\frac{d}{dt} \left[ \tilde{Y}(t) - X_{\tau}(t) \right] + \mathcal{L}_A^*(t) \left[ \tilde{Y}(t) - X_{\tau}(t) \right] + \hat{\Pi}(t) \left[ \tilde{Y}(t) - X_{\tau}(t) \right] + H_{\tau}(t) =0 $$ where $$ \hat{\Pi}(t)[Z] = \Pi_1(t)[Z + X_{\tau}(t)] - \Pi_1(t)[X_{\tau}(t)] $$ and $$ H_{\tau}(t) = F_\tau^T(t) \big\{ R(t) + \Pi_2(t)[X_{\tau}(t)] \big\} F_\tau(t) $$ where $F_\tau(t) := F^{X_\tau}(t)$. It is easy to verify that $Z \to \hat{\Pi}(t)[Z]$ satisfies the assumptions ($\mathbf{\Pi_1}$) -- ($\mathbf{\Pi_3}$) and that $H_\tau(t) \geq 0$. Taking into account that $\tilde{Y}(\tau) - X_{\tau}(\tau) = \tilde{Y}(\tau) \geq 0$ we obtain via Proposition \ref{prop3.5}, (ii), that $\tilde{Y}(t) - X_{\tau}(t) \geq 0$ for all $t \in \mathcal{I}$ with $t \leq \tau$, and thus item $\beta)$ holds. From $(\alpha)$ and $(\beta)$ we deduce that $\tilde{\tilde{X}} \colon \mathcal{I} \to \mathcal{S}^n$ is well defined by \begin{equation} \tilde{\tilde X}(t) := \lim_{\tau \to \infty} X_{\tau}(t) \quad \mbox{for} \quad t \in \mathcal{I}. \label{eqno:3.15} \end{equation} Obviously $0 \leq \tilde{X}(t) \leq \tilde{Y}(t)$. In a standard way we can show that $\tilde{\tilde{X}}(t)$ is a solution of equation \eqref{eqno:3.1}. If $\bar{X} \colon \mathcal{I} \to \mathcal{S}^n$ is a bounded and positive semi-definite solution of \eqref{eqno:3.1}, then, using Theorem \ref{thm3.6}, we obtain that $X_{\tau}(t) \leq \bar{X}(t)$ for all $t \in \mathcal{I}$ with $t \leq \tau$. 
So, invoking \eqref{eqno:3.15} we conclude that $0 \leq \tilde{\tilde{X}}(t) \leq \bar{X}(t)$ for all $t \in \mathcal{I}$, which shows that $\tilde{\tilde{X}}$ is the minimal solution in the class of bounded and positive semi-definite solutions of the differential equation \eqref{eqno:3.1}. Let us assume that $A$, $B$, $\Pi$, $\mathcal{Q}$ are periodic functions with period $\theta > 0$. We have to show that the minimal solution $\tilde{\tilde{X}}$ is also a periodic function with period $\theta$. Let $\hat{X}_{\tau}(t) = X_{\tau + \theta}(t + \theta)$. It is easy to see that $\hat{X}_{\tau}$ is a solution of \eqref{eqno:3.1} which satisfies $\hat{X}_{\tau}(\tau) = 0$. From the uniqueness of the solution of this terminal value problem it follows that $\hat{X}_{\tau}(t) = X_{\tau}(t)$ for all $t \in (-\infty, \tau] \cap \mathcal{I}$. We have $$ \tilde{\tilde{X}}(t) = \lim_{\tau \to \infty} X_{\tau}(t) = \lim_{\tau \to \infty} \hat{X}_{\tau}(t) = \lim_{\tau \to \infty} X_{\tau + \theta}(t + \theta) = \tilde{\tilde{X}}(t + \theta) $$ for all $t \in \mathcal{I}$ which shows that $\tilde{\tilde{X}}$ is a periodic function with period $\theta$. Finally, if $A(t) \equiv A$, $B(t) \equiv B$, $\Pi(t) \equiv \Pi$, $\mathcal{Q}(t) \equiv \mathcal{Q}$ then $\tilde{\tilde{X}}$ is a periodic function of arbitrary period. Therefore it is a constant function and thus the proof ends. \end{proof} \begin{lemma} \label{lm3.10} Assume that $A$, $B$, $\mathcal{Q}$ and $\Pi$ are periodic functions with period $\theta > 0$. 
If the symmetric solution $X_1 = X_1(\cdot, t_0, X_0)$ of \eqref{eqno:3.1} exists on an interval $\mathcal{ I}_1$ containing the interval $[t_0 -2 \theta, t_0]$ such that $R(t)+\Pi_2(t)[X_1(t)]>0$ for all $t\in \mathcal{I}_1$ and if \[ X_1(t_0 - \theta) \; \Box \; X_1(t_0) \quad \mbox{for some} \quad \Box \in \{ <, \leq, =, \geq, > \}, \] then $X_1(t - \theta) \; \Box \;X_1(t)$ holds on the maximal common interval $\mathcal{ I}_1\subset \mathcal{I} \cap (- \infty, t_0]$ of existence of $X$ and $X(\cdot - \theta)$; in this case $X_1$ is called {\bf cyclomonotonic} on $\mathcal{ I}_1$. If in addition $\mathcal{I}_1 = (- \infty, t_0]$ and $X_1$ is bounded on $\mathcal{ I}_1$ with $R(t) + \Pi_2(t)[X_1(t)] \gg 0$, then \begin{eqnarray*} X_\infty \colon \mathbb{R} %set \to \mathcal{S}^n \quad \mbox{with} \quad X_\infty(t) := \lim_{k \to \infty} X_1(t - k \theta), \quad t_0 - \theta < t \leq t_0, \quad(3.15) \label{eqno:3.16} \end{eqnarray*} exists and defines a $\theta$-periodic solution of \eqref{eqno:3.1}. \end{lemma} \begin{proof} Let $\Box \, = \, \geq$ (the other cases are treated similarly). As in the proof of Theorem \ref{thm3.6} it follows that $\Delta(t) := X_1(t - \theta) - X_1(t)$ satisfies (on its maximal interval of existence $\mathcal{I}_1 \subset (- \infty, t_0]$) \[ \frac{d}{dt} \Delta(t) + \mathcal{L}_{A+BF_1}^*(t)[\Delta(t)]+ \hat{\Pi}(t)[\Delta(t)]+ H(t) = 0, \quad \Delta(t_0) \geq 0, \] where $H(t) \geq 0$ for $t \in \mathcal{I}_1$. Therefore Proposition \ref{prop3.5}, (ii), yields that $\Delta(t) \geq 0$ on $\mathcal{I}_1$ -- this proves the first assertion of the lemma. If in addition $\mathcal{I}_1 = (- \infty, t_0]$ and $X_1$ is bounded on $\mathcal{I}_1$ then the limits in \eqref{eqno:3.16} exist since the sequences $\big( X_1(t - k \theta) \big)_{k \in \mathbb{N}}$ are monotonic and bounded. 
Therefore the $\theta$-periodic function $X_\infty$ defined by \eqref{eqno:3.16} is obviously a solution of \eqref{eqno:3.1} with $R(t) + \Pi_2(t)[X_\infty(t)] \gg 0$. \end{proof} \begin{theorem} \label{thm3.11} The following statements are equivalent: \begin{enumerate} \item[(i)] Equation \eqref{eqno:3.1} has a $\theta$-periodic solution $\hat X$ with the additional property $R(t)+\Pi_2(t)[\hat X(t)]>0$ for all $t\in\mathbb{R}$. \item[(ii)] There exist two $\theta$-periodic functions $X_\ell, X_u$ and $t_0 \in \mathbb{R}$ with \begin{enumerate} \renewcommand{\labelenumii}{\alph{enumii})} \renewcommand{\itemsep}{5pt} \item[(a)] $X_\ell(t) \leq X_u(t)$ for $t \in \mathbb{R}$, \item[(b)] $\frac{d}{dt} X_{\ell}(t) + \mathcal{R}(t, X_{\ell}(t)) \geq 0$ and $R(t)+\Pi_2(t)[X_{\ell}(t)]>0$ for all $t \leq t_0$, \item[(c)] $\frac{d}{dt} X_u(t)+ \mathcal{R}(t, X_u(t)) \leq 0$ for $t \leq t_0$. \end{enumerate} \item[(iii)] There exist symmetric matrices $X_0, X_1$ with: \begin{enumerate} \item[($\alpha$)] $X_0 \leq X_1$. \item[($\beta$)] The solution $X(\cdot, t_0,X_0)$ of \eqref{eqno:3.1} is defined on an interval $\mathcal{I}_1$ containing $[t_0-2\theta, t_0]$ such that $R(t)+\Pi_2(t)[X(t,t_0,X_0)]>0$ for all $t\in \mathcal{I}_1$ and $X_0 \leq X(t_0-\theta, t_0, X_0)$. \item[($\gamma$)] The solution $X(\cdot, t_0,X_1)$ of \eqref{eqno:3.1} is defined on an interval $\mathcal{I}_1$ containing $[t_0-2\theta, t_0]$ such that $R(t)+\Pi_2(t)[X(t,t_0,X_1)]>0 $ for all $t\in \mathcal{ I}_1$ and $X_1 \geq X(t_0 - \theta, t_0, X_1)$. \end{enumerate} \end{enumerate} \end{theorem} \begin{proof} (i) $\Longrightarrow$ (ii) is trivial since a), b), c) are fulfilled with $X_\ell = X_u = \hat{X}$ for any $\theta$-periodic solution $\hat{X}$ of \eqref{eqno:3.1}. 
(ii) implies that there are functions $Q_\ell, Q_u$ with $Q_\ell(t) \leq 0 \leq Q_u(t)$ and \begin{gather*} \frac{d}{dt} X_\ell(t) + \mathcal{R}(t, X_\ell(t)) + Q_\ell(t) = 0, \\ \frac{d}{dt} X_u(t) + \mathcal{R}(t, X_u(t)) + Q_u(t) = 0 \end{gather*} for $t \leq t_0$. From (ii), a) and the Comparison Theorem we infer that for $t \leq t_0$ \[ X_\ell(t) \leq X(t, t_0, X_\ell(t_0)) \leq X (t, t_0, X_u(t_0)) \leq X_u(t). \] Since $X_\ell$ and $X_u$ are periodic, the last inequalities imply in particular that \[ X_\ell(t_0 - \theta) = X_\ell(t_0) =: X_0 \leq X(t_0 - \theta, t_0 , X_0) \] and \[ X_u(t_0 - \theta) = X_u(t_0) =: X_1 \geq X(t_0 - \theta, t_0, X_1). \] Hence (ii) implies (iii). Assume that (iii) is valid. Then it follows from Lemma \ref{lm3.10} and Theorem \ref{thm3.6} that $X(\cdot, t_0, X_0)$ and $X(\cdot, t_0, X_1)$ are cyclomonotonic with \begin{align*} X(t, t_0, X_0) & \leq X(t - \theta, t_0, X_0) \\ & \leq X(t - \theta, t_0, X_1) \leq X(t, t_0, X_1) \quad \mbox{for} \quad t \leq t_0. \end{align*} Hence the limits \begin{align*} X_{j \infty}(t) & = \lim_{k \to \infty} X (t - k \theta, t_0, X_j) \\ & = \lim_{k \to \infty} X(t - (k \mp 1) \theta), t_0, X_j) = X_{j \infty}(t \pm \theta) \end{align*} exist for $j = 0, 1$ and $t \leq t_0$. Consequently (i) holds, since $X_{0 \infty}$ and $X_{1 \infty}$ are obviously both periodic solutions of \eqref{eqno:3.1} (which may coincide). \end{proof} \begin{remark} \label{rmk3.12}\rm Although it is in general difficult to prove that \eqref{eqno:3.1} has a $\theta$-periodic solution we can use criterion (iii) of Theorem \ref{thm3.11} (in connection with Lemma \ref{lm3.10} and the Comparison Theorem) to test if such an equilibrium exists. 
Notice that it follows from Corollary \ref{coro5.11} (below) and Corollary \ref{coro3.8} that the conditions of Theorem \ref{thm3.11}, (iii), are fulfilled with $X_0(t) \equiv 0$ and the (stabilizing) function $X_1$ constructed in the first step of the proof of Theorem \ref{thm4.7} (below) if \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item $(A,B,\Pi)$ is stabilizable; \item $0\in \Gamma^{\Sigma}$. \end{enumerate} Here b) ensures the existence of $X_0$ and a) the existence of $X_1 \geq X_0$. \end{remark} \section{Maximal solutions} Throughout this section we assume that $X\to \Pi(t)[X]$ is a linear operator. \begin{definition} \label{def4.1} \rm A solution $\tilde{X} \colon \mathcal{I} \to \mathcal{S}^n$ of equation \eqref{eqno:3.1} is said to be the \textbf{maximal solution with respect to $\Gamma^{\Sigma}$} (or \textbf{maximal solution} for shortness) if $\tilde{X}(t) \geq \hat{X}(t)$ for arbitrary $\hat{X} \in \Gamma^{\Sigma}$. \end{definition} In this section we prove a result concerning the existence of the maximal solution with respect to $\Gamma^{\Sigma}$ of equation \eqref{eqno:3.1}. First, we give a definition which will play a crucial role in the next developments. That is the concept of stabilizability for the triple $(A, B, \Pi)$. \begin{definition} \label{def4.2} \rm We say that the triple $(A, B, \Pi)$ is \textbf{stabilizable} if there exists a bounded and continuous function $F \colon \mathcal{I} \to \mathbb{R}^{m \times n}$ such that the operator $\mathcal{L}_{A+BF, \Pi_F^*}$ generates an exponentially stable evolution. The function $F$ will be termed a \textbf{stabilizing feedback gain}. \end{definition} We shall show later (Corollary \ref{coro5.11}) that if $A$, $B$, $\Pi$ are periodic functions with period $\theta$ and if the triple $(A, B, \Pi)$ is stabilizable then there exists a stabilizing feedback gain which is a periodic function with period $\theta$. 
Moreover if $A(t) \equiv A$, $B(t) \equiv B$, $\Pi(t) \equiv \Pi$ for $t \in \mathbb{R}$, and if the triple $(A,B, \Pi)$ is stabilizable, then there exists a stabilizing feedback gain which is constant. In the particular case when $\Pi(t)$ is of the form \eqref{eqno:3.3} then the above definition of stabilizability (see also Section 7.3) reduces to the standard definition of stabilizability for stochastic systems (mean-square stabilizability -- see \cite{FCdS98}). Applying Theorem \ref{thm2.11} we have the following result. \begin{corollary} \label{coro4.3} The triple $(A, B, \Pi)$ is stabilizable if and only if there exists some $X \in C^1_b(\mathcal{I}, \mathcal{S}^n)$ with $X(t) \gg 0$ and a bounded and continuous function $F \colon \mathcal{I} \to \mathbb{R}^{m \times n}$ such that $$ \frac{d}{dt} X(t) + \mathcal{L}_{A+BF, \Pi_F^*}^*(t)[X(t)] \ll 0 \quad \mbox{for all} \quad t \in \mathcal{I}. $$ \end{corollary} In the case $\mathcal{I} = \mathbb{R}$, using Theorem \ref{thm2.18}, we get the following corollary. \begin{corollary} \label{coro4.4} For $\mathcal{I} = \mathbb{R}$ the following are equivalent: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item The triple $(A,B,\Pi)$ is stabilizable. \item There exists a bounded $C^1$-function $X \colon \mathbb{R} \to \mathcal{S}^n$ with bounded derivative, $X(t) \gg 0$ and a bounded and continuous function $F \colon \mathbb{R} \to \mathbb{R}^{m\times n}$ which satisfies \begin{equation} \frac{d}{dt} X(t) - \mathcal{L}_{A+BF, \Pi_F^*}(t)[X(t)] \gg 0. \label{eqno:4.1} \end{equation} \end{enumerate} \end{corollary} \begin{remark} \label{rmk4.5} \rm In the particular case when the coefficients do not depend on $t$, \eqref{eqno:4.1} can be converted in an LMI which can be solved using an LMI solver (see \cite{ARZh00}). 
\end{remark} Now we state an auxiliary result which together with Lemma \ref{lm3.4} plays a crucial role in the proof of the main result of this section and follows directly from \eqref{eqno:3.7}. \begin{lemma} \label{lm4.6} If $W \colon \mathcal{I} \to \mathbb{R}^{m \times n}$ is a continuous function and if $X$ is a solution of the differential equation $$ \frac{d}{dt} X(t) + \mathcal{L}_{A+BW, \Pi_W^*}^*(t)[X(t)] + M_W(t) = 0 $$ and if $\det \big\{ R(t) + \Pi_2(t)[X(t)] \big\} \neq 0$ then $X$ satisfies also the differential equation \begin{align*} \lefteqn{\frac{d}{dt} X(t) + \mathcal{L}_{A+BF, \Pi_F^*}^*(t)[X(t)] + M_F(t)} \\ & \quad {} + \big[ F(t) - W(t)\big]^T \Theta(t,X(t)) \big[ F(t) - W(t) \big] = 0 \end{align*} where $F(t) := F^{X}(t)$ and $\Theta(t,X(t)) := R(t)+\Pi_2(t)[X(t)]$. \end{lemma} The main result of this section is as follows: \begin{theorem} \label{thm4.7} Assume that $(A, B, \Pi)$ is stabilizable. Then the following are equivalent: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item $\Gamma^{\Sigma} \neq \emptyset$. \item Equation \eqref{eqno:3.1} has a maximal and bounded solution $\tilde{X} \colon \mathcal{I} \to \mathcal{S}^n$ with $R(t) + \Pi_2(t)[\tilde{X}(t)] \gg 0$. \end{enumerate} If $A$, $B$, $\Pi$, $\mathcal{Q}$ are $\theta$-periodic functions, then $\tilde{X}$ is also a $\theta$- periodic function. Moreover, if $A(t) \equiv A$, $B(t) \equiv B$, $\Pi(t) \equiv \Pi$, $\mathcal{Q}(t) \equiv \mathcal{Q}$, then the maximal solution of equation \eqref{eqno:3.1} is constant and it solves the nonlinear algebraic equation \eqref{eqno:3.14}. \end{theorem} \begin{proof} (ii) $\Rightarrow$ (i) is obvious, since $\tilde{X} \in \Gamma^{\Sigma}$. It remains to prove the implication (i) $\Rightarrow$ (ii). 
Since $(A, B, \Pi)$ is stabilizable it follows that there exists a bounded and continuous function $F_0 \colon \mathcal{I} \to \mathbb{R}^{m \times n}$ such that the operator $\mathcal{L}_{A+BF_0, \Pi_{F_0}^*}$ generates an exponentially stable evolution. Let $\varepsilon > 0$ be fixed. Using Theorem \ref{thm2.13} one obtains that equation \begin{equation} \frac{d}{dt} X(t) + \mathcal{L}_{A+BF_0,\Pi_{F_0}^*}^*(t) [X(t)] + M_{F_0}(t) + \varepsilon I_n = 0 \label{eqno:4.2} \end{equation} has a unique bounded solution $X_1 \colon \mathcal{I} \to\mathcal{S}^n$. We shall show that $X_1(t) \gg \hat X(t)$ for arbitrary $\hat{X} \in \Gamma^{\Sigma}$. If $\hat{X} \in \Gamma^{\Sigma}$ then we obtain immediately that $\hat{X}$ fulfills $$ \frac{d}{dt} \hat{X}(t) + \mathcal{R}(t,\hat X(t)) \geq 0 \quad \mbox{for} \quad t \in \mathcal{I}; $$ consequently $\hat{X}$ solves the equation \begin{equation} \frac{d}{dt} \hat{X}(t) + \mathcal{R}(t,\hat{X}(t)) - \hat{M}(t) = 0, \label{eqno:4.3} \end{equation} where $\hat{M}(t)= \frac{d}{dt} \hat{X}(t) + \mathcal{R}(t,\hat{X}(t)) \geq 0$. Applying Lemma \ref{lm3.4}, \eqref{eqno:4.3} may be written as \begin{equation} \label{eqno:4.4} \begin{aligned} \lefteqn{\frac{d}{dt} \hat{X}(t) + \mathcal{L}_{A+BF_0, \Pi_{F_0}^*}^*(t)[\hat{X}(t)] + M_{F_0}(t)} \\ & \quad {} -\big[ F_0(t) - \hat{F}(t) \big]^T \Theta(t,\hat{X}(t)) \big[F_0(t) - \hat{F}(t) \big] - \hat{M}(t) = 0, \end{aligned} \end{equation} where $\hat{F}(t) := F^{\hat{X}}(t)$. From \eqref{eqno:4.2} and \eqref{eqno:4.4} we deduce that $t \mapsto X_1(t) - \hat{X}(t)$ is a bounded solution of the differential equation $$ \frac{d}{dt} Y(t) + \mathcal{L}_{A+BF_0, \Pi_{F_0}^*}^*(t)[Y(t)] + H_1(t) = 0 $$ with $$ H_1(t) = \varepsilon I_n + [F_0(t) - \hat{F}(t)]^T \Theta(t,\hat{X}(t)) [F_0(t) - \hat{F}(t)] + \hat{M}(t). $$ Clearly $H_1(t) \geq \varepsilon I_n> 0$. Hence Theorem \ref{thm2.13} implies $X_1(t) - \hat{X}(t) \gg 0$. 
Therefore $R(t)+ \Pi_2(t)[X_1(t)] \geq R(t) + \Pi_2(t)[\hat{X}(t)] \gg 0$ for $t\in \mathcal{I}$. Thus we obtain that $F_1(t) := F^{X_1}(t)$ is well defined. We show that $F_1$ is a stabilizing feedback gain for the triple $(A, B, \Pi)$. To this end, based on Lemma \ref{lm4.6}, we rewrite equation \eqref{eqno:4.2} as \begin{equation} \label{eqno:4.5} \begin{aligned} \lefteqn{\frac{d}{dt} X_1(t) + \mathcal{L}_{A+BF_1, \Pi_{F_1}^*}^*(t)[X_1(t)] + M_{F_1}(t) + \varepsilon I_n} \\ & \quad {} + \big[ F_1(t) - F_0(t) \big]^T \Theta(t,X_1(t)) \big[ F_1(t) - F_0(t) \big] = 0. \end{aligned} \end{equation} On the other hand, based on Lemma \ref{lm3.4}, equation \eqref{eqno:4.3} can be rewritten as \begin{align*} \lefteqn{\frac{d}{dt} \hat{X}(t) + \mathcal{L}_{A+BF_1, \Pi_{F_1}^*}^*(t)[\hat{X}(t)] + M_{F_1}(t) - \hat{M}(t)} \nonumber \\ & \quad {} - \big[ F_1(t) - \hat{F}(t) \big]^T \Theta(t,\hat{X}(t)) \big[ F_1(t) - \hat{F}(t) \big] = 0. \end{align*} Subtracting the last equation from \eqref{eqno:4.5} we obtain $$ \frac{d}{dt} \big[ X_1(t) - \hat{X}(t) \big] + \mathcal{L}_{A+BF_1, \Pi_{F_1}^*}^*(t) \big[ X_1(t) - \hat{X}(t) \big] + \tilde{H}(t)=0 $$ where \begin{align*} \tilde{H}(t) & = \varepsilon I_n + \big[ F_1(t) - F_0(t) \big]^T \Theta(t, X_1(t)) \big[ F_1(t) - F_0(t) \big] \\ & \qquad {} + \big[ F_1(t) - \hat F(t) \big]^T \Theta(t,\hat X(t)) \big[ F_1(t) - \hat{F}(t) \big] + \hat{M}(t) \gg 0. \end{align*} Applying the implication (vi) $\Rightarrow$ (i) in Theorem \ref{thm2.11} we infer that $\mathcal{L}_{A+BF_1, \Pi_{F_1}^*}$ generates an exponentially stable evolution. This means that $F_1 = F^{X_1}$ \textit{is a stabilizing feedback gain}; notice that, as a consequence of Theorem \ref{thm2.13}, $F_1$ is constant (or periodic) if the coefficients of \eqref{eqno:3.1} are constant (or periodic, respectively). 
Taking $X_1$, $F_1$ as a first step we construct two sequences $\{ X_k \}_{k \geq 1} $ and $\{ F_k \}_{k \geq 1}$, where $X_k$ is the unique bounded solution of the differential equation \begin{equation} \frac{d}{dt} X_k(t) + \mathcal{L}_{A+BF_{k-1}, \Pi_{F_{k-1}}^*}^*(t)[X_k(t)] + M_{F_{k-1}}(t) + \frac{\varepsilon} {k} I_n = 0 \label{eqno:4.6} \end{equation} and $F_k(t) := F^{X_k}(t)$. We show inductively that the following items hold: \begin{enumerate} \item[$(a_k)$] $X_k(t) - \hat{X}(t) > \mu_k I_n$ for arbitrary $\hat{X} \in \Gamma^{\Sigma}$, $\mu_k > 0$ independent of $\hat{X}$. \item[$(b_k)$] $F_k$ is a stabilizing feedback gain for the triple $(A, B, \Pi)$. \item[$(c_k)$] $X_k(t) \geq X_{k+1}(t)$ for $t \in \mathcal{I}$. \end{enumerate} For $k=1$, items $(a_1)$, $(b_1)$ were proved before. To prove $(c_1)$ we subtract \eqref{eqno:4.6}, written for $k=2$, from \eqref{eqno:4.5} and get $$ \frac{d}{dt} \big[ X_1(t) - X_2(t) \big] + \mathcal{L}_{A+BF_1, \Pi_{F_1}^*}^*(t) \big[ X_1(t) - X_2(t) \big] + \Delta_1(t) = 0,$$ where $$ \Delta_1(t) := \frac{\varepsilon}{2} I_n + \big[ F_1(t) - F_0(t) \big]^T \Theta(t,X_1(t)) \big[ F_1(t) - F_0(t) \big] \gg 0. $$ Invoking Theorem \ref{thm2.13}, (b), one obtains that $X_1(t) - X_2(t) \gg 0$ and thus $(c_1)$ is fulfilled. Let us assume that $(a_i)$ , $(b_i)$, $(c_i)$ are fulfilled for $i \leq k-1$ and let us prove them for $i=k$. Based on $(b_{k-1})$ and Theorem \ref{thm2.13} we deduce that equation \eqref{eqno:4.6} has a unique solution $X_k \colon \mathcal{I} \to \mathcal{S}^n$. Applying Lemma \ref{lm3.4} with $W(t) :=F_{k-1}(t)$ one obtains that \eqref{eqno:4.3} may be rewritten as \begin{align*} \lefteqn{\frac{d}{dt} \hat{X}(t) + \mathcal{L}_{A+BF_{k-1}, \Pi_{F_{k-1}}^*}^*(t)[\hat{X}(t)] + M_{F_{k-1}}(t)} \\ & \quad {} - \big[ F_{k-1}(t) - \hat{F}(t) \big]^T \Theta(t,\hat{X}(t)) \big[ F_{k-1}(t) - \hat{F}(t) \big] - \hat{M}(t) = 0. 
\end{align*} Subtracting this equation from \eqref{eqno:4.6} one obtains that $t \mapsto X_k(t) - \hat X(t)$ is a bounded solution of the equation $$ \frac{d}{dt} X(t) + \mathcal{L}_{A+BF_{k-1}, \Pi_{F_{k-1}}^*}^*(t)[X(t)] + H_k(t) = 0, $$ where $$ H_k(t) = \frac{\varepsilon}{k} I_n + [F_{k-1}(t) - \hat{F}(t)]^T \Theta(t,\hat{X}(t)) [F_{k-1}(t) - \hat{F}(t)] + \hat{M}(t) \gg 0.$$ Since $\mathcal{L}_{A+BF_{k-1}, \Pi_{F_{k-1}}^*}$ generates an exponentially stable evolution, we obtain from Theorem \ref{thm2.13}, (b), that there exist \begin{equation} \mu_k > 0 \quad \mbox{such that} \quad X_k(t) - \hat{X}(t) \geq \mu_k I_n \quad \mbox{for} \quad t \in \mathcal{I},\label{eqno:4.7} \end{equation} thus $(a_k)$ is fulfilled. Let us show that $(b_k)$ is fulfilled. Firstly from \eqref{eqno:4.7} we have $$ R(t) + \Pi_2(t)[X_k(t)] \gg 0, $$ therefore $F_k$ is well defined. Applying Lemma \ref{lm4.6} to equation \eqref{eqno:4.6}, one obtains that $X_k$ solves the equation \begin{equation} \label{eqno:4.8} \begin{aligned} \lefteqn{\frac{d}{dt} X_k(t) + \mathcal{L}_{A+BF_k, \Pi_{F_k}^*}^*(t)[X_k(t)] + M_{F_k}(t) + \frac{\varepsilon}{k} I_n} \\ & \quad {} + \big[ F_k(t) - F_{k-1}(t) \big]^T \Theta(t,X_k(t)) \big[ F_k(t) - F_{k-1}(t) \big] = 0. \end{aligned} \end{equation} On the other hand, Lemma \ref{lm3.4} applied to equation \eqref{eqno:4.3} gives \begin{align*} \lefteqn{\frac{d}{dt} \hat{X}(t) + \mathcal{L}_{A+BF_k, \Pi_{F_k}^*}^*(t)[\hat{X}(t)] + M_{F_k}(t)} \nonumber \\ & \quad {} - \big[ F_k(t) - \hat{F}(t) \big]^T \Theta(t,\hat{X}(t)) \big[ F_k(t) - \hat{F}(t) \big] - \hat{M}(t) = 0. 
\end{align*} From the last two equations one obtains $$ \frac{d}{dt} \big[ X_k(t) - \hat{X}(t) \big] + \mathcal{L}_{A+BF_k, \Pi_{F_k}^*}^*(t) \big[ X_k(t) - \hat{X}(t) \big] + \tilde{H}_k(t)= 0$$ with \begin{align*} \tilde{H}_k(t) & = \frac{\varepsilon}{k} I_n + \big[ F_k(t) - F_{k-1}(t) \big]^T \Theta(t,X_k(t)) \big[ F_k(t) - F_{k-1}(t) \big] \\ & \qquad {} + \big[ F_k(t) - \hat F(t) \big]^T \Theta(t,\hat X(t)) \big[ F_k(t) - \hat{F}(t) \big] + \hat{M}(t) \geq \frac{\varepsilon}{k} I_n. \end{align*} Implication (vi) $\Rightarrow$ (i) of Theorem \ref{thm2.11} allows us to conclude that $\mathcal{L}_{A+BF_k, \Pi_{F_k}^*}$ generates an exponentially stable evolution which shows that $(b_k)$ is fulfilled. It remains to prove that $(c_k)$ holds. To this end we subtract equation \eqref{eqno:4.6}, written for $k+1$ instead of $k$, from equation \eqref{eqno:4.8} and get \begin{align*} \lefteqn{\frac{d}{dt} \big[ X_k(t) - X_{k+1}(t) \big] + \mathcal{L}_{A+BF_k, \Pi_{F_k}^*}^*(t) \big[ X_k(t) - X_{k+1}(t) \big] + \frac{\varepsilon}{k(k+1)} I_n} \\ & \quad {} + \big[ F_k(t) - F_{k-1}(t) \big]^T \Theta(t,X_k(t)) \big[ F_k(t) - F_{k-1}(t) \big] = 0. \end{align*} Since $\mathcal{L}_{A+BF_k,\Pi_{F_k}^*}$ generates an exponentially stable evolution one obtains, via Theorem \ref{thm2.13}, (b), that equation \eqref{eqno:4.8} has a unique bounded solution which additionally is uniformly positive. Therefore $X_k(t) - X_{k+1}(t) \gg 0$ and thus $(c_k)$ holds. Now from $(a_k)$ and $(c_k)$ we deduce that the sequence $\{ X_k \}_{k \geq 1}$ is monotonically decreasing and bounded, therefore it is convergent. Set $\tilde{X}(t) := \lim\limits_{k \to \infty} X_k(t)$. In a standard way one obtains that $\tilde{X}$ is a solution of equation \eqref{eqno:3.1}. Invoking again $(a_k)$ one obtains that $\tilde{X}(t) \geq \hat{X}(t)$ for arbitrary $\hat{X} \in \Gamma^{\Sigma}$. Hence $\tilde{X}$ is just the maximal solution of equation \eqref{eqno:3.1} and thus (i) $\Rightarrow$ (ii) is proved. 
To complete the proof, let us remark that if $A$, $B$, $\Pi$, $\mathcal{Q}$ are periodic functions with the same period $\theta$ then via Corollary \ref{coro5.11}, (i), it follows that there exists a stabilizing feedback gain which is a $\theta$-periodic function. Applying Theorem \ref{thm2.13}, (c), one obtains that $X_k$, $F_k$ are $\theta$-periodic functions for all $k$ and thus $\tilde{X}$ will be a $\theta$-periodic function. Also if $A(t) \equiv A$, $B(t) \equiv B$, $\Pi(t) \equiv \Pi$, $\mathcal{Q}(t) \equiv \mathcal{Q}$ and $(A, B, \Pi)$ is stabilizable, then from Corollary \ref{coro5.11}, (ii), we obtain that there exists a stabilizing feedback gain which is constant. Applying again Theorem \ref{thm2.13}, (c), one obtains that $X_k$ and $F_k$ are constant functions for all $k \geq 1$ and therefore $\tilde{X}$ is constant. \end{proof} \section{Stabilizing solutions} In this section we deal with stabilizing solutions of equation \eqref{eqno:3.1} in the case where $X \to \Pi(t)[X]$ is a linear operator. We shall prove the uniqueness of a bounded and stabilizing solution and we shall provide a necessary and sufficient condition for the existence of a bounded and stabilizing solution of equation \eqref{eqno:3.1}. \begin{definition}\label{def5.1} \rm Let $X_s \colon \mathcal{I} \to \mathcal{S}^n$ be a solution of equation \eqref{eqno:3.1} and denote by $F_s(t) := F^{X_s}(t)$ the corresponding feedback matrix. Then $X_s$ is called a \textbf{stabilizing solution} if the operator $\mathcal{L}_{A+BF_s, \Pi_{F_s}^*}$ generates an exponentially stable evolution where $\Pi_{F_s}$ is defined as in \eqref{eqno:3.6} for $W(t) = F_s(t)$. \end{definition} \begin{theorem} \label{thm5.2} Let $\Sigma = (A, B, \Pi, \mathcal{Q})$ be such that $\Gamma^{\Sigma} \neq \emptyset$. 
If $X_s \colon \mathcal{I} \to \mathcal{S}^n$ is a bounded and stabilizing solution of equation \eqref{eqno:3.1} then $X_s$ coincides with the maximal solution with respect to $\Gamma^{\Sigma}$ of equation \eqref{eqno:3.1}. \end{theorem} \begin{proof} Applying Lemma \ref{lm3.4} we deduce that $X_s$ verifies the equation \begin{equation} \frac{d}{dt} X_s(t) + \mathcal{L}_{A+BF_s, \Pi_{F_s}^*}^*(t)[X_s(t)] + M_{F_s}(t) = 0 \label{eqno:5.1} \end{equation} where $F_s:= F^{X_s}$. Let $\hat{X}$ be arbitrary in $\Gamma^{\Sigma}$. As in the proof of Theorem \ref{thm4.7} one obtains that there exists $\hat{M}(t) \geq 0$ such that $\hat{X}$ verifies a differential equation of the form \eqref{eqno:4.3}. Applying Lemma \ref{lm3.4} to equation \eqref{eqno:4.3} we get \begin{align*} \lefteqn{\frac{d}{dt} \hat{X}(t) + \mathcal{L}_{A+BF_s, \Pi_{F_s}^*}^*(t)[\hat{X}(t)] + M_{F_s}(t) - \hat{M}(t)} \\ & \quad {} - \big[ F_s(t) - \hat{F}(t) \big]^T \Theta(t,\hat{X}(t)) \big[ F_s(t) - \hat{F}(t) \big] = 0. \end{align*} Subtracting the last two equations we obtain that $t\mapsto X_s(t) - \hat{X}(t)$ is a bounded solution of the backward differential equation $$ \frac{d}{dt} X(t) + \mathcal{L}_{A+BF_s, \Pi_{F_s}^*}^*(t)[X(t)] + H_s(t) = 0 $$ where $$ H_s(t) = [F_s(t) - \hat{F}(t)]^T \Theta(t,\hat{X}(t)) [F_s(t) - \hat{F}(t)] + \hat{M}(t) \geq 0. $$ Since $\mathcal{L}_{A+BF_s,\Pi_{F_s}^*}$ generates an exponentially stable evolution one obtains, using Theorem \ref{thm2.13}, that $X_s(t) - \hat{X}(t) \geq 0$ and thus the proof is complete. \end{proof} \begin{remark} \label{rmk5.3} \rm From Theorem \ref{thm5.2} it follows that if $\Gamma^{\Sigma}$ is not empty then a bounded and stabilizing solution of equation \eqref{eqno:3.1} (if it exists) will satisfy the condition $$ R(t) + \Pi_2(t)[X_s(t)] \gg 0. $$ \end{remark} \begin{corollary} \label{coro5.4} If $\Gamma^{\Sigma}$ is not empty then equation \eqref{eqno:3.1} has at most one bounded and stabilizing solution. 
\end{corollary} \begin{proof} Let us assume that equation \eqref{eqno:3.1} has two bounded and stabilizing solutions $X_i$, $i = 1,2$. From the above remark we get $R(t) + \Pi_2(t)[X_i(t)] \gg 0$. Arguing as in the proof of Theorem \ref{thm5.2}, we obtain both $X_1(t) \geq X_2(t)$ and $X_2(t) \geq X_1(t)$ hence $X_1(t) = X_2(t)$ and the proof ends. \end{proof} In the particular case, where $\Pi(t)$ is of the form \eqref{eqno:3.3}, in \cite{preprint00} the uniqueness of the bounded and stabilizing solution of equation \eqref{eqno:3.1} was proved without any assumption concerning $\Gamma^{\Sigma}$. In that case $R(t) + \Pi_2(t)[X_s(t)]$ does not have a definite sign. \begin{theorem} \label{thm5.5} Assume that $A$, $B$, $\Pi$, $\mathcal{Q}$ are periodic functions with period $\theta$ and that $\Gamma^{\Sigma}$ is not empty. Then the bounded and stabilizing solution of equation \eqref{eqno:3.1} (if it exists) is $\theta$-periodic. \end{theorem} \begin{proof} Let $X_s \colon \mathcal{I} \to \mathcal{S}^n$ be a bounded and stabilizing solution of \eqref{eqno:3.1}. We define $\tilde{X}(t) := X_s(t + \theta)$. By direct computation we obtain that $\tilde{X}$ is also a solution of equation \eqref{eqno:3.1}. We shall prove that $\tilde{X}$ is also a stabilizing solution of equation \eqref{eqno:3.1}. Set $\tilde{F}(t) := F^{\tilde{X}}(t)$. We show that $\mathcal{L}_{A+B\tilde{F}, \Pi_{\tilde{F}}^*}$ generates an exponentially stable evolution. Let $\tilde{T}(t,t_0)$ be the linear evolution operator defined by the linear differential equation \begin{equation} \frac{d}{dt} S(t) = \mathcal{L}_{A+B\tilde{F}, \Pi^*_{\tilde{F}}}(t)[S(t)]. \label{eqno:5.2} \end{equation} Because of the periodicity we obtain that $$ \mathcal{L}_{A+B\tilde{F}, \Pi_{\tilde{F}}^*}(t) = \mathcal{L}_{A+BF_s, \Pi_{F_s}^*}(t+\theta) \quad \mbox{for} \quad t \in \mathcal{I}. 
$$ If $\tilde{S}(t,t_0, H)$ is the solution of \eqref{eqno:5.2} with $\tilde{S}(t_0,t_0,H) = H$, then we have $$ \frac{d}{dt} \tilde{S}(t,t_0,H) = \mathcal{L}_{A+BF_s, \Pi_{F_s}^*}(t+\theta) \tilde{S}(t,t_0,H). $$ From the uniqueness of the solution of this initial value problem we infer that $$ \tilde{S}(t,t_0,H) = S(t+\theta, t_0+\theta, H), $$ where $t \mapsto S(t,\tau, H)$ is the solution of \begin{equation} \frac{d}{dt} S(t) = \mathcal{L}_{A+BF_s, \Pi_{F_s}^*}(t)[S(t)], \quad S(\tau, \tau, H) = H.\label{eqno:5.3} \end{equation} Thus we get $\tilde{T}(t,t_0) = T_{F_s}(t+\theta, t_0+\theta)$ where $T_{F_s}(t,t_0)$ is the linear evolution operator defined by \eqref{eqno:5.3}. The last equality leads to $$ \|\tilde{T}(t,t_0)\| = \|T_{F_s}(t+\theta, t_0+\theta)\| \leq \beta e^{-\alpha(t-t_0)} \quad \mbox{for} \quad t \geq t_0 $$ with $\alpha, \beta> 0$, which shows that $\tilde{X}$ is also a bounded and stabilizing solution of equation \eqref{eqno:3.1}. Applying Corollary \ref{coro5.4} one obtains that $\tilde{X}(t) = X_s(t)$ for $t \in \mathcal{I}$, that means $X_s(t+\theta) = X_s(t)$ for all $t$, which shows that $X_s$ is a $\theta$-periodic function and thus the proof ends. \end{proof} \begin{corollary} \label{coro5.6} If $\Gamma^{\Sigma} \neq \emptyset$ and $A(t) \equiv A$, $B(t) \equiv B$, $\Pi(t) \equiv \Pi$, $\mathcal{Q}(t) \equiv \mathcal{Q}$, $t \in \mathbb{R}$, then the stabilizing solution of equation \eqref{eqno:3.1} (if it exists) is constant and solves the algebraic equation \eqref{eqno:3.14}. \end{corollary} \begin{proof} Since the matrix coefficients of equation \eqref{eqno:3.1} are constant functions they may be viewed as periodic functions with arbitrary period. Applying Theorem \ref{thm5.5} it follows that the bounded and stabilizing solution of equation \eqref{eqno:3.1} is a periodic function with arbitrary period. Therefore it is a constant function and thus the proof ends. 
\end{proof} The following lemma which is a generalization of the invariance under feedback transformations of standard Riccati differential equations will be useful in the next developments. Since \begin{align*} \lefteqn{\big[ W(t) - F^X(t) \big]^T \big\{ R(t) + \Pi_2(t)[X(t)] \big\}} \\ & \quad = X(t)B(t) + \Pi_{12}(t)[X(t)] +W^T(t) \Pi_2(t)[X(t)] + L(t) + W^T(t) R(t) \end{align*} the conclusion of this lemma follows immediately from Lemma \ref{lm3.4}. \begin{lemma} \label{lm5.7} Let $W \colon \mathcal{I} \to \mathbb{R}^{m \times n}$ be a bounded and continuous function. Then $X \colon \mathcal{I}_1 \subset \mathcal{I} \to \mathcal{S}^n$ is a solution of equation \eqref{eqno:3.1} associated to the quadruple $\Sigma = (A, B, \Pi, \mathcal{Q})$ if and only if $X$ is a solution of the equation of type \eqref{eqno:3.1} associated to the quadruple $\Sigma^W = (A+BW, B, \Pi^W, \mathcal{Q}^W)$, where $\Pi^W(t) \colon \mathcal{S}^n \to \mathcal{S}^{n+m}$ is given by \begin{align*} \Pi^W(t)[X]& = \begin{pmatrix} I_n & 0 \\ W(t) & I_m \end{pmatrix}^T \begin{pmatrix} \Pi_1(t)[X] & \Pi_{12}(t)[X] \\ \big\{ \Pi_{12}(t)[X] \big\}^T & \Pi_2(t)[X] \end{pmatrix} \begin{pmatrix} I_n & 0\\ W(t) & I_m \end{pmatrix} \\ & = \begin{pmatrix} \Pi_W(t)[X] &\Pi_{12}(t)[X] + W^T(t) \Pi_2(t)[X] \\ \big\{ \Pi_{12}(t)[X] +W^T(t) \Pi_2(t)[X] \big\}^T & \Pi_2(t)[X] \end{pmatrix} \end{align*} and \begin{align*} \mathcal{Q}^W(t) & = \begin{pmatrix} I_n & 0 \\ W(t) & I_m \end{pmatrix}^T \begin{pmatrix} M(t) & L(t) \\ L^T(t) & R(t) \end{pmatrix} \begin{pmatrix} I_n & 0 \\ W(t) & I_m \end{pmatrix} \\ & = \begin{pmatrix} M_W(t) & L(t) + W^T(t) R(t) \\ L^T(t) + R(t) W(t)& R(t) \end{pmatrix}. 
\end{align*} \end{lemma} \begin{theorem} \label{thm5.8} Under the considered assumptions the following assertions are equivalent: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item $(A,B,\Pi)$ is stabilizable and the set $\tilde{\Gamma}^{\Sigma}$ is not empty; \item \eqref{eqno:3.1} has a stabilizing and bounded solution $X_s \colon \mathcal{I} \to \mathcal{S}^n$ satisfying $$ R(t) + \Pi_2(t)[X_s(t)] \gg 0. $$ \end{enumerate} \end{theorem} \begin{proof} (i) $\Rightarrow$ (ii). If (i) holds then Theorem \ref{thm4.7} yields that equation \eqref{eqno:3.1} has a bounded maximal solution $\tilde{X} \colon \mathcal{I} \to \mathcal{S}^n$. We show that $\tilde{X}$ is just the stabilizing solution. If $\tilde{F}$ is the feedback matrix associated with $\tilde{X}$ then \eqref{eqno:4.3} may be written as \begin{align*} \lefteqn{\frac{d}{dt} \hat{X}(t) + \mathcal{L}_{A+B\tilde{F}, \Pi_{\tilde{F}}^*}^*(t)[\hat{X}(t)] + M_{\tilde{F}}(t) - \hat{M}(t)} \\ & \quad {} - \big[ \tilde{F}(t) - \hat{F}(t) \big]^T \Theta(t, \hat{X}(t)) \big[ \tilde{F}(t) - \hat{F}(t) \big] = 0. \end{align*} Since $\hat{X} \in \tilde{\Gamma}^{\Sigma}$ it is a solution of an equation of type \eqref{eqno:4.3} with $\hat{M}(t) \gg 0$. Using again Lemma \ref{lm3.4} one obtains that $t \mapsto \tilde{X}(t) - \hat{X}(t)$ is a bounded and positive semi-definite solution of the backward differential equation $$ \frac{d}{dt} X(t) + \mathcal{L}_{A+B\tilde{F}, \Pi_{\tilde{F}}^*}^*(t)[X(t)] + H(t) = 0, $$ where $$ H(t) := \hat{M}(t) + \big[ \tilde{F}(t) - \hat{F}(t) \big]^T \Theta(t, \hat{X}(t)) \big[ \tilde{F}(t) - \hat{F}(t) \big]. $$ Since $\hat{M}(t) \gg 0$ it follows that $H(t) \gg 0$. Applying implication (vi) $\Rightarrow$ (i) of Theorem \ref{thm2.11} one gets that $\mathcal{L}_{A+B\tilde{F}, \Pi_{\tilde{F}}^*}$ generates an exponentially stable evolution which shows that $\tilde{X}$ is a stabilizing solution of equation \eqref{eqno:3.1}. We prove now (ii) $\Rightarrow$ (i). 
If equation \eqref{eqno:3.1} has a bounded and stabilizing solution $X_s \colon \mathcal{I} \to \mathcal{S}^n$ then $F_s := F^{X_s}$ is a stabilizing feedback gain and therefore $(A, B, \Pi)$ is stabilizable. Applying Lemma \ref{lm5.7} with $W(t) = F_s(t)$ we rewrite equation \eqref{eqno:3.1} as \begin{align*} \lefteqn{\frac{d}{dt} X(t) + \mathcal{L}_{A+BF_s, \Pi_{F_s}^*}^*(t)[X(t)] + M_{F_s}(t)} \\ & \quad {} - \mathcal{P}_{F_s}^T(t,X(t)) \Theta(t,X(t))^{-1} \mathcal{P}_{F_s}(t,X(t)) = 0, \end{align*} where $X \mapsto \mathcal{P}_{F_s}(t,X) \colon \mathcal{S}^n \to \mathbb{R}^{m \times n}$ is given by $$ \mathcal{P}_{F_s}(t,X) = \big\{ X B(t) + \Pi_{12}(t)[X] + F_s^T(t) \Pi_2(t)[X] + L(t) + F_s^T(t) R(t) \big\}^T $$ and $\Theta(t,X)$ being as in Lemma \ref{lm4.6}. Let $T_{F_s}(t,t_0)$ be the linear evolution operator defined by $$ \frac{d}{dt} S(t) = \mathcal{L}_{A+BF_s, \Pi_{F_s}^*}(t)[S(t)]. $$ Since $F_s$ is a stabilizing feedback gain it follows that there exist $\alpha, \beta > 0$ such that $\|T_{F_s}(t,t_0)\| \leq \beta e^{-\alpha(t-t_0)}$. Let $C_b(\mathcal{I}, \mathcal{S}^n)$ be the Banach space of bounded and continuous functions $X \colon \mathcal{I} \to \mathcal{S}^n$. Since $\Theta(t,X_s(t)) \gg 0$ for $t \in \mathcal{I}$, it follows that there exists an open set $\mathcal{U} \subset C_b(\mathcal{I}, \mathcal{S}^n)$ such that $X_s \in \mathcal{U}$ and $\Theta(t,X(t)) \gg 0$ for all $X \in \mathcal{U}$. Let $\Psi \colon \mathcal{U} \times \mathbb{R} \to C_b$ be defined by \begin{align*} \Psi(X,\delta)(t) & = \int_t^{\infty} T^*_{F_s}(\sigma,t) \big[ M_{F_s}(\sigma) + \delta I_n \\ & \qquad {} - \mathcal{P}^T_{F_s}(\sigma, X(\sigma)) \Theta^{-1}(\sigma, X(\sigma)) \mathcal{P}_{F_s}(\sigma, X(\sigma)) \big] \, d\sigma - X(t). 
\end{align*} We apply the implicit function theorem to the equation \begin{equation} \Psi(X, \delta) = 0 \label{eqno:5.4} \end{equation} in order to obtain that there exists a function $X_{\delta} \in \mathcal{U}$ such that \begin{align*} & X_{\delta}(t) \\ & = \int_t^{\infty}T^*_{F_s}(\sigma,t) \big[ M_{F_s}(\sigma) + \delta I_n -\mathcal{P}^T_{F_s}(\sigma, X_{\delta}(\sigma)) \Theta^{-1}(\sigma, X_{\delta}(\sigma)) \mathcal{P}_{F_s}(\sigma, X_{\delta}(\sigma)) \big] \, d\sigma \end{align*} for $|\delta|$ small enough. It is clear that $(X_s,0)$ is a solution of \eqref{eqno:5.4}. We show now that $$ d_1 \Psi(X_s(\cdot),0) \colon C_b(\mathcal{I}, \mathcal{S}^n) \to C_b(\mathcal{I}, \mathcal{S}^n) $$ is an isomorphism, $d_1 \Psi$ being the derivative of $\Psi$ with respect to its first argument. Since $$ d_1 \Psi(X_s,0) Y = \lim_{\varepsilon \to 0} \frac{1}{\varepsilon} \big[ \Psi(X_s + \varepsilon Y, 0) - \Psi(X_s, 0) \big] $$ and $\mathcal{P}_{F_s}(\sigma, X_s(\sigma)) \equiv 0$ we obtain that $d_1 \Psi(X_s, 0) Y = -Y$ for all $Y \in C_b(\mathcal{I},\mathcal{S}^n)$. Therefore $d_1 \Psi(X_s,0) = - I_{C_b}$, where $I_{C_b}$ is the identity operator of $C_b(\mathcal{I}, \mathcal{S}^n)$ which is an isomorphism. Also we see that $d_1\Psi(X,\delta)$ is continuous at $(X,\delta)=(X_s,0)$. Applying the implicit function theorem we deduce that there exist $\tilde{\delta} > 0$ and a smooth function $X_{\delta}(\cdot) \colon(-\tilde{\delta}, \tilde{\delta}) \to \mathcal{U}$ which satisfies $\Psi(X_{\delta}(\cdot), \delta) =0$ for all $\delta \in (-\tilde{\delta}, \tilde{\delta})$. It is easy to see that if $\delta \in (-\tilde{\delta}, 0)$ then $X_{\delta}(\cdot) \in \tilde{\Gamma}^{\Sigma}$ and the proof is complete. \end{proof} \begin{corollary} \label{coro5.9} Assume that $A$, $B$, $\Pi$, $\mathcal{Q}$ are periodic functions with period $\theta > 0$. 
Under these conditions the following are equivalent: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item $(A, B, \Pi)$ is stabilizable and $\tilde{\Gamma}^{\Sigma}$ is not empty. \item Equation \eqref{eqno:3.1} has a bounded, $\theta$-periodic and stabilizing solution $X_s \colon \mathcal{I} \to \mathcal{S}^n$ which verifies $R(t) + \Pi_2(t)[X_s(t)] \gg 0$. \item $(A, B, \Pi)$ is stabilizable and $\tilde{\Gamma}^{\Sigma}$ contains at least a $\theta$-periodic function $\check{X}$. \end{enumerate} \end{corollary} \begin{proof} (i) $\Leftrightarrow$ (ii) follows from Theorem \ref{thm5.8} and Theorem \ref{thm5.5}. (iii) $\Rightarrow$ (i) is obvious. It remains to prove (ii) $\Rightarrow$ (iii). In the proof of the implication (ii) $\Rightarrow$ (i) in Theorem \ref{thm5.8} we have shown that there exist $\tilde{\delta} > 0$ and a smooth function $\delta \mapsto X_{\delta}(\cdot) \colon (-\tilde{\delta}, \tilde{\delta}) \to C_b(\mathcal{I}, \mathcal{S}^n)$ which satisfies $$ \frac{d}{dt} X_{\delta}(t) + \mathcal{R}(t, X_{\delta}(t)) + \delta I_n = 0. $$ Let $\delta_1 \in (-\tilde{\delta}, 0)$ and set $\Sigma_1 := (A, B, \Pi, \mathcal{Q}_1)$ with $$ \mathcal{Q}_1(t) := \begin{pmatrix} M(t) + \delta_1 I_n & L(t) \\ L^T(t) & R(t) \end{pmatrix}. $$ It is easy to see that if $\delta \in (-\tilde{\delta}, \delta_1)$ then $X_{\delta}(\cdot) \in \tilde{\Gamma}^{\Sigma_1}$. Applying implication (i) $\Rightarrow$ (ii) of Theorem \ref{thm5.8} one obtains that the equation $$ \frac{d}{dt} X(t) + \mathcal{R}(t,X(t)) + \delta_1 I_n = 0 $$ has a bounded and stabilizing solution $\hat{X}_{\delta_1}$. Based on Theorem \ref{thm5.5} one obtains that $\hat{X}_{\delta_1}$ is a periodic function. The conclusion follows since $\hat{X}_{\delta_1}(\cdot) \in \tilde{\Gamma}^{\Sigma}$ and the proof is complete. 
\end{proof} With a similar proof based on Corollary \ref{coro5.6} and Theorem \ref{thm5.8} we obtain: \begin{corollary} \label{coro5.10} Assume that $A(t) \equiv A$, $B(t) \equiv B$, $\Pi(t) \equiv \Pi$ and $\mathcal{Q}(t) \equiv \mathcal{Q}$. Then the following are equivalent: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item $(A, B, \Pi)$ is stabilizable and $\tilde{\Gamma}^{\Sigma}$ is not empty; \item Equation \eqref{eqno:3.1} has a bounded and stabilizing solution $X_s$ which is constant and solves the algebraic equation \eqref{eqno:3.14}. \item $(A, B, \Pi)$ is stabilizable and there exists at least one symmetric matrix $\hat{X}$ such that $\hat{X} \in \tilde{\Gamma}^{\Sigma}$. \end{enumerate} \end{corollary} As a simple consequence of Theorem \ref{thm5.8} we have: \begin{corollary} \label{coro5.11} Assume that $(A, B, \Pi)$ is stabilizable. Then: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item If $A$, $B$, $\Pi$ are periodic functions with period $\theta$ then there exists a stabilizing feedback gain $F \colon \mathbb{R} \to \mathbb{R}^{m \times n}$ which is a periodic function with period $\theta$. \item If $A(t) \equiv A$, $B(t) \equiv B$, $\Pi(t) \equiv \Pi$ for all $t \in \mathbb{R}$ then there exists a stabilizing feedback gain $F \in \mathbb{R}^{m \times n}$. 
\end{enumerate} \end{corollary} \begin{proof} Consider the differential equation \begin{equation} \label{eqno:5.5} \begin{aligned} \lefteqn{\frac{d}{dt} X(t) + A^T(t)X(t) + X(t)A(t) + I_n + \Pi_1(t)[X(t)]} \\ & \quad {} - \big\{ X(t)B(t) +\Pi_{12}(t)[X(t)] \big\} \big\{ I_m + \Pi_2(t)[X(t)] \big\}^{-1} \\ & \qquad \times \big\{ X(t)B(t) + \Pi_{12}(t)[X(t)] \big\}^T = 0. \end{aligned} \end{equation} Equation \eqref{eqno:5.5} is an equation of the type \eqref{eqno:3.1} corresponding to the quadruple $\Sigma_0 := (A, B, \Pi, \mathcal{Q}_0)$ where $A$, $B$, $\Pi$ are as in \eqref{eqno:3.1} and $\mathcal{Q}_0 = \left( \begin{smallmatrix} I_n & 0 \\ 0 & I_m \end{smallmatrix} \right)$. It is seen that $\lambda^{\Sigma_0}(0) = \left( \begin{smallmatrix} I_n & 0 \\ 0 & I_m \end{smallmatrix} \right) \gg 0$ and hence $0 \in \tilde{\Gamma}^{\Sigma_0}$. Therefore equation \eqref{eqno:5.5} has a bounded and stabilizing solution $X_s$ with the corresponding stabilizing feedback gain $$ F_s(t) = - \big\{ I_m + \Pi_2(t)[X_s(t)] \big\}^{-1} \big\{ X_s(t)B(t) + \Pi_{12}(t)[X_s(t)] \big\}^T. $$ If the matrix coefficients of \eqref{eqno:5.5} are periodic functions with period $\theta$ then by Theorem \ref{thm5.5} we obtain that $F_s$ is a periodic function with the same period $\theta$. If the matrix coefficients of the equation \eqref{eqno:5.5} are constants then by Corollary \ref{coro5.6} one obtains that $F_s$ is constant and thus the proof is complete. \end{proof} The result of Corollary \ref{coro5.11} shows that if $A$, $B$, $\Pi$ are periodic functions then, without loss of generality, we may restrict the definition of stabilizability working only with periodic stabilizing feedback gains. Also, if $A$, $B$, $\Pi$ are constant functions, then, without loss of generality the definition of stabilizability may be restricted only to the class of stabilizing feedback gains which are constant functions. 
\section{The case $\mathbf{0 \in \Gamma^{\Sigma}}$ and $\mathbf{\Pi(t)}$ is a linear operator} % section 6 In this section we focus our attention on those equations \eqref{eqno:3.1} associated to the quadruple $\Sigma = (A, B, \Pi, \mathcal{Q})$ which have the additional property $0 \in \Gamma^{\Sigma}$ and $X\to \Pi(t)[X]$ is a linear operator. This is equivalent to conditions \eqref{eqno:3.13}. \begin{theorem} \label{thm6.1} Assume that the quadruple $\Sigma = (A, B, \Pi, \mathcal{Q})$ satisfies the following assumptions: \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item $(A, B, \Pi)$ is stabilizable; \item $0 \in \Gamma^{\Sigma}$. \end{enumerate} Then equation \eqref{eqno:3.1} has two bounded solutions $\tilde{X} \colon \mathcal{I} \to \mathcal{S}^n$, $\tilde{\tilde{X}} \colon \mathcal{I} \to \mathcal{S}^n$ with the property $\tilde{X}(t) \geq \bar{X}(t) \geq \tilde{\tilde{X}}(t) \geq 0$ for all $t \in \mathcal{I}$ for arbitrary bounded and positive semi-definite solution $\bar{X} \colon \mathcal{I} \to \mathcal{S}^n$ of equation \eqref{eqno:3.1}. Moreover if $A$, $B$, $\Pi$, $\mathcal{Q}$ are periodic functions with period $\theta > 0$ then both $\tilde{X}$ and $\tilde{\tilde{X}}$ are periodic functions with period $\theta$. If $A(t) \equiv A$, $B(t) \equiv B$, $\Pi(t) \equiv \Pi$ and $\mathcal{Q}(t) \equiv \mathcal{Q}$ then both $\tilde{X}$ and $\tilde{\tilde{X}}$ are constant and solve the algebraic equation \eqref{eqno:3.14}. \end{theorem} \begin{proof} The existence of the solution $\tilde{X}$ is guaranteed by Theorem \ref{thm4.7}. The proof of the existence of $\tilde{\tilde X}$ is essentially the same as the proof of the existence of the minimal solution provided in Theorem \ref{thm3.9}. There is only one difference which consists in proving the boundedness of the sequence $X_{\tau}, \tau \in \mathcal{I}$. 
There, in Theorem \ref{thm3.9}, the boundedness of that sequence was based on the existence of a bounded and positive semi-definite solution of equation \eqref{eqno:3.9}. Here the boundedness of that sequence is based on the stabilizability of the triple $(A,B,\Pi)$ and on the Comparison Theorem. If $(A, B, \Pi)$ is stabilizable, then there exists a bounded and continuous function $F \colon \mathcal{I} \to \mathbb{R}^{m \times n}$ such that the corresponding operator $\mathcal{L}_{A+BF, \Pi_F^*}$ generates an exponentially stable evolution. Based on Theorem \ref{thm2.13} we deduce that the equation \begin{equation} \frac{d}{dt} Y(t) + \mathcal{L}_{A+BF, \Pi_F^*}^*(t)[Y(t)] + M_F(t) = 0 \label{eqno:6.1}\end{equation} has a unique bounded solution $\tilde{Y}(t) \geq 0$ on $\mathcal{I}$. Let $X_{\tau}$ be the solution of the equation \eqref{eqno:3.1} with $X_{\tau}(\tau)=0$. Let $F_{\tau}(t)$ be the feedback gain associated to the solution $X_{\tau}$. Applying Lemma \ref{lm3.4}, equation \eqref{eqno:3.1} satisfied by $X_{\tau}$ can be rewritten as: \begin{equation} \frac{d}{dt}X_{\tau}(t)+\mathcal{L}_{A+BF_{\tau},\Pi_{F_{\tau}}^*}(t)[X_{\tau}(t)] +M_{F_{\tau}}(t)=0. \label{eqno:6.2} \end{equation} On the other hand applying Lemma \ref{lm4.6} with $W(t) = F_{\tau}$ to equation \eqref{eqno:6.1} one obtains \begin{equation} \label{eqno:6.3} \begin{aligned} \lefteqn{\frac{d}{dt} \tilde{Y}(t) + \mathcal{L}_{A+BF_\tau, \Pi_{F_\tau}^*}^*(t)[\tilde{Y}(t)] + M_{F_{\tau}}(t)} \\ & \quad {} + \big[ F_{\tau}(t) - F(t) \big]^T \big\{ R(t) + \Pi_2(t)[\tilde{Y}(t)] \big\} \big[ F_{\tau}(t) - F(t) \big] = 0 \end{aligned} \end{equation} for $t \in \mathcal{I}_{\tau}$. 
From \eqref{eqno:6.2} and \eqref{eqno:6.3} one obtains \begin{align*} \lefteqn{\frac{d}{dt} \big[ \tilde{Y}(t) - X_{\tau}(t) \big] + \mathcal{L}_{A+BF_\tau, \Pi_{F_\tau}^*}^*(t) \big[ \tilde{Y}(t) - X_{\tau}(t) \big]} \\ & \quad {} + \big[ F_{\tau}(t) - F(t) \big]^T \big\{ R(t) + \Pi_2(t)[\tilde{Y}(t)] \big\} \big[ F_{\tau}(t) - F(t) \big] = 0. \end{align*} Since $\tilde{Y}(\tau) - X_{\tau}(\tau) = \tilde{Y}(\tau) \geq 0$, invoking Remark \ref{rmk2.14}, (c), we conclude that \begin{equation} \tilde{Y}(t) - X_{\tau}(t) \geq 0 \quad \mbox{for all} \quad t \in (-\infty, \tau]\cap \mathcal{I}. \label{eqno:6.4} \end{equation} Inequality \eqref{eqno:6.4} together with ${\bf \alpha)}$ in the proof of Theorem \ref{thm3.9} shows that the sequence $\{ X_{\tau}(t) \}_{\tau \in \mathcal{I}}$ is increasing and bounded, hence it is convergent. Define $$ \tilde{\tilde{X}}(t) := \lim_{\tau \to \infty} X_{\tau}(t) \quad \mbox{for} \quad t \in \mathcal{I}. $$ In a standard way one obtains that $\tilde{\tilde{X}}$ is a solution of \eqref{eqno:3.1}. The minimality property of $\tilde{\tilde X}$ and the periodicity follows as in the proof of Theorem \ref{thm3.9}. \end{proof} \begin{lemma} \label{lm6.2} Assume that the quadruple $\Sigma = (A, B, \Pi, \mathcal{Q})$ satisfies \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item $0 \in \Gamma^{\Sigma}$. \item The triple $(C, A+BW, \Pi_W^*)$ is detectable, where $W(t) = - R^{-1}(t) L^T(t)$ and $C$ is such that $C^T(t) C(t) = M(t) - L(t) R^{-1}(t) L^T(t)$. \end{enumerate} Under these assumptions any bounded and positive semi-definite solution of equation \eqref{eqno:3.1} is a stabilizing solution. \end{lemma} \begin{proof} The proof has two stages. Firstly, the proof of the lemma is made in the particular case $L(t) \equiv 0$. Secondly we shall show that the general case may be reduced to the particular case of the first step. (i) Assume that $L(t) \equiv 0$. 
In this case $W(t) \equiv 0$ and $\Pi_W(t) = \Pi_1(t)$ for $t \in \mathcal{I}$ and the assumption b) in the statement is equivalent to the detectability of the triple $(C, A, \Pi_1^*)$ where $C$ is such that $C^T(t) C(t) = M(t)$ for $t \in \mathcal{I}$. Let $X$ be a bounded and positive semi-definite solution of equation \eqref{eqno:3.1} and $F := F^X$. We have to show that $\mathcal{L}_{A+BF, \Pi_F^*}$ generates an exponentially stable evolution. Let $(t_0, H) \in \mathcal{I} \times \mathcal{S}^n_+$ be fixed and let $S$ be the solution of the initial value problem \begin{equation} \frac{d}{dt} S(t) = \mathcal{L}_{A+BF, \Pi_F^*}(t)[S(t)], \quad S(t_0) = H. \label{eqno:6.5} \end{equation} We show that $$ \int_{t_0}^{\infty} \| S(t) \|_2 \, dt \leq \delta \| H \|_2, $$ where $\delta > 0$ is a constant independent of $t_0$ and $H$. By the detectability assumption it follows that there exists a bounded and continuous function $K$ such that the operator $\mathcal{L}_{A+KC, \Pi_1}$ generates an exponentially stable evolution, where \begin{equation} \mathcal{L}_{A+KC, \Pi_1}(t)[X] = \big[ A(t) + K(t)C(t) \big] X + X \big[ A(t) + K(t)C(t) \big]^T +\Pi_1^*(t)[X]. \label{eqno:6.6} \end{equation} Using \eqref{eqno:6.6} equation \eqref{eqno:6.5} may be written as \begin{equation} \label{eqno:6.7} \begin{aligned} \frac{d}{dt} S(t) & = \mathcal{L}_{A+KC, \Pi_1}(t)[S(t)] - K(t) C(t) S(t) - S(t) C^T(t) K^T(t) \\ & \qquad + \big[\Pi_{F}^*(t)[S(t)] - \Pi_{1}^*(t)[S(t)]\big]. \end{aligned} \end{equation} We introduce the following perturbed operator $X\to \mathcal{L}_{\varepsilon}(t)[X]$ by $$ \mathcal{L}_{\varepsilon}(t)[X] := \mathcal{L}_{A+KC, \Pi_1}(t)[X] + \varepsilon^2 X + \varepsilon^2 \Pi_1^*(t)[X]. $$ Let $T(t,s)$ be the evolution operator on $\mathcal{S}^n$ defined by $$ \frac{d}{dt} S(t) = \mathcal{L}_{A+KC, \Pi_1}(t)[S(t)]. 
$$ Since $\mathcal{L}_{A+KC, \Pi_1}$ generates an exponentially stable evolution, we have $$ \| T(t,s) \| \leq \beta e^{-2\alpha(t-s)} \quad \mbox{for} \quad t \geq s, \; t, s \in \mathcal{I} $$ for some constants $\alpha, \beta > 0$. By a standard argument based on Gronwall's Lemma one obtains that for $\varepsilon> 0$ small enough \begin{equation} \|T_{\varepsilon}(t,s)\| \leq \beta e^{-\alpha(t-s)} \quad \mbox{for} \quad t \geq s, \; t, s \in \mathcal{I}, \label{eqno:6.8} \end{equation} where $T_{\varepsilon}(t,s)$ is the linear evolution operator on $\mathcal{S}^n$ defined by the linear differential equation $$ \frac{d}{dt} Y(t) = \mathcal{L}_{\varepsilon}(t)[Y(t)]. $$ Let $\varepsilon > 0$ be such that \eqref{eqno:6.8} is fulfilled and let $Y(t)$ be the solution of the following forward differential equation \begin{equation} \frac{d}{dt} Y(t) = \mathcal{L}_{\varepsilon}(t)[Y(t)] + \frac{1}{\varepsilon^2} K(t) C(t) S(t) C^T(t) K^T(t) + \big( 1 + \frac{1}{\varepsilon^2} \big) \hat\Pi_F^*(t)[S(t)], \label{eqno:6.9} \end{equation} where $$\hat\Pi_F(t)[X]=F^T(t) \Pi_2(t)[X] F(t)$$ satisfying the initial condition $Y(t_0) = H$. Set $Z(t) := Y(t) -S(t)$. We obtain from \eqref{eqno:6.7} and \eqref{eqno:6.9} that $$ \frac{d}{dt} Z(t) = \mathcal{L}_{\varepsilon}(t)[Z(t)] + U(t), \quad Z(t_0) = 0, $$ where \[ U(t) = \Big[ \varepsilon I_n + \frac{1}{\varepsilon} K(t) C(t) \Big] S(t) \Big[ \varepsilon I_n + \frac{1}{\varepsilon} K(t) C(t)\Big]^T + \Pi_{\varepsilon,F}^*(t)[S(t)] \] with $$ \Pi_{\varepsilon,F}(t)[X] = \begin{pmatrix} \varepsilon I_n \\ - \frac{1}{\varepsilon} F(t) \end{pmatrix}^T \Pi(t)[X] \begin{pmatrix} \varepsilon I_n \\ - \frac{1}{\varepsilon} F(t) \end{pmatrix}. $$ Taking into account that $S(t_0) = H \geq 0$ it follows that $S(t) \geq 0$ for $t \geq t_0$ and hence $U(t) \geq 0$ for $t \geq t_0$. On the other hand $X \mapsto \varepsilon^2 X + \varepsilon^2 \Pi_1^*(t)[X]$ is a positive linear operator. 
Since $\mathcal{L}_{A+KC, \Pi_1}$ generates a positive evolution it follows from Proposition \ref{prop2.5} that $\mathcal{L}_{\varepsilon}$ generates a positive evolution. Based on Remark \ref{rmk2.14}, (c), we conclude that $Z(t) \geq 0$ for $t \geq t_0$, hence $0 \leq S(t) \leq Y(t)$ which leads to \begin{equation} 0 \leq \| S(t) \|_2 \leq \| Y(t) \|_2. \label{eqno:6.10} \end{equation} Applying the representation formula \eqref{eqno:2.24} to equation \eqref{eqno:6.9} we may write \begin{align} Y(t) = T_{\varepsilon}(t,t_0) H + \int_{t_0}^t T_{\varepsilon}(t,s) U_1(s) \, ds \quad \mbox{for} \quad t \geq t_0, \label{eqno:6.11} \end{align} where \[ U_1(s) = \frac{1}{\varepsilon^2} K(s) C(s) S(s) C^T(s) K^T(s) + \big( 1 + \frac{1}{\varepsilon^2} \big) \hat\Pi_F^*(s)[S(s)]. \] Taking into account the definition of the adjoint operator we obtain $$ \hat\Pi_F^*(s)[S(s)]= \Pi_2^*(s) \big[ F(s) S(s)F^T(s) \big]. $$ This allows us to write $U_1(s)$ as \begin{align*} U_1(s) & = \frac{1}{\varepsilon^2} K(s) C(s) S(s) C^T(s) K^T(s) \notag \\ & \qquad {} + \left( 1 + \frac{1}{\varepsilon^2} \right) \check{\Pi}_2(s) \big[ R^{1/2}(s) F(s) S(s) F^T(s) R^{1/2}(s) \big], \end{align*} where $Y \mapsto \check{\Pi}_2(s)[Y]$ is defined by $$ \check{\Pi}_2(s)[Y] := \Pi_2^*(s) \big[ R^{-1/2}(s) Y R^{-1/2}(s) \big]. $$ Further we have \begin{equation} \label{eqno:6.12} \begin{aligned} &\| U_1(s) \|_2 \leq \big( 1 + \frac{1}{\varepsilon^2} \big) \gamma \big[ \|C(s) S(s) C^T(s)\|_2 + \|R^{1/2}(s) F(s) S(s) F^T(s) R^{1/2}(s) \|_2 \big], \end{aligned} \end{equation} where $\gamma = \max\{\sup_{s \in\mathcal{I}} \| K(s) \|_2^2, \sup_{s \in \mathcal{I}} \|\check{\Pi}_2(s) \| \}$. From the definition of the norm $\| \cdot\|_2$ we deduce \begin{align*} \lefteqn{\| C(s) S(s) C^T(s) \|_2 + \| R^{1/2}(s) F(s) S(s) F^T(s) R^{1/2}(s) \|_2} \\ & \quad \leq \mathop{\rm Tr}[C(s) S(s) C^T(s)] + \mathop{\rm Tr}[R^{1/2}(s) F(s) S(s) F^T(s) R^{1/2}(s)]. 
\end{align*} Using the properties of the trace together with \eqref{eqno:2.1} we have \begin{equation} \label{eqno:6.13} \begin{gathered} \| C(s) S(s) C^T(s) \|_2 + \| R^{1/2}(s) F(s) S(s) F^T(s) R^{1/2}(s) \|_2 \\ \leq \langle C^T(s) C(s) + F^T(s) R(s) F(s), S(s) \rangle. \end{gathered} \end{equation} Applying Lemma \ref{lm3.4}, we may write equation \eqref{eqno:3.1}, verified by the bounded and positive semi-definite solution $X$, in the form $$ \frac{d}{ds} X(s) + \mathcal{L}_{A+BF, \Pi_F^*}^*(s)[X(s)] + C^T(s) C(s) + F^T(s) R(s) F(s) = 0. $$ Thus we obtain \begin{equation} \label{eqno:6.14} \begin{aligned} \lefteqn{\langle C^T(s) C(s) + F^T(s) R(s) F(s), S(s) \rangle} \\ & \quad = - \left\langle \frac{d}{ds} X(s), S(s) \right\rangle - \left\langle \mathcal{L}_{A+BF, \Pi_F^*}^*(s)[X(s)], S(s) \right\rangle \\ & \quad = - \left\langle \frac{d}{ds} X(s), S(s) \right\rangle - \left\langle X(s), \mathcal{L}_{A+BF, \Pi_F^*}(s)[S(s)] \right\rangle \\ & \quad = - \frac{d}{ds} \langle X(s), S(s) \rangle. \end{aligned} \end{equation} From \eqref{eqno:6.13}, \eqref{eqno:6.14} we get \begin{align*} &\int_{t_0}^t \big[ \| C(s) S(s) C^T(s) \|_2 + \| R^{1/2}(s) F(s) S(s) F^T(s) R^{1/2}(s) \|_2 \big] ds \\ & \quad \leq \langle X(t_0), S(t_0) \rangle - \langle X(t), S(t) \rangle. \end{align*} Taking into account that $\langle X(t), S(t)\rangle \geq 0$ for $t \geq t_0$ and $\| X(t) \|_2 \leq \rho$ for all $t \in \mathcal{I}$ where $\rho > 0$ is a constant not depending on $t$, we obtain \begin{equation} \int_{t_0}^t \left[ \| C(s) S(s) C^T(s) \|_2 + \| R^{1/2}(s) F(s) S(s) F^T(s) R^{1/2}(s) \|_2 \right] ds \leq \rho \| H \|_2 \label{eqno:6.15} \end{equation} for $t \geq t_0$. 
From \eqref{eqno:6.8}, \eqref{eqno:6.11} and \eqref{eqno:6.12} we have \begin{align*} \| Y(t) \|_2 & \leq \beta e^{-\alpha(t-t_0)} \| H \|_2 + \beta \gamma \left( 1 + \frac{1}{\varepsilon^2} \right) \int_{t_0}^t e^{-\alpha(t-s)} \big[ \| C(s) S(s) C^T(s) \|_2 \\ & \qquad {} + \| R^{1/2}(s) F(s) S(s) F^T(s) R^{1/2}(s) \|_2 \big] \, ds, \end{align*} which leads to \begin{align*} \int_{t_0}^\tau \| Y(t) \|_2 \, dt & \leq \frac{\beta}{\alpha} \| H \|_2 + \beta \gamma \big( 1 + \frac{1}{\varepsilon^2}\big) \int_{t_0}^{\tau} \int_{t_0}^t e^{-\alpha(t-s)} \big[ \| C(s) S(s) C^T(s) \|_2 \\ & \qquad {} + \| R^{1/2}(s) F(s) S(s) F^T(s) R^{1/2}(s) \|_2 \big] \, ds \, dt. \end{align*} Changing the order of integration and invoking \eqref{eqno:6.15} we obtain $$ \int_{t_0}^\tau \| Y(t) \|_2 \, dt \leq \frac{\beta}{\alpha} \big[ 1 + \big( 1 + \frac{1}{\varepsilon^2} \big) \gamma \rho \big] \| H \|_2 =: \delta \| H \|_2. $$ Taking the limit for $\tau \to \infty $ we deduce $$ \int_{t_0}^\infty \| Y(t) \|_2 \, dt \leq \delta \| H \|_2 \quad \mbox{for all} \quad t_0 \in \mathcal{I}, \; H \in \mathcal{S}^n_+, $$ where $\delta$ is independent of $t_0$ and $H$. From \eqref{eqno:6.10} it follows now that \begin{equation} \int_{t_0}^\infty \| S(t) \|_2 \, dt \leq \delta \| H \|_2 \quad \mbox{for all} \quad t_0 \in \mathcal{I}, \; H \in \mathcal{S}^n_+. \label{eqno:6.16} \end{equation} Since for any $H \in \mathcal{S}^n$ there are $H_i \in \mathcal{S}^n_+$, $i = 1,2$, such that $H = H_1 - H_2$ and $\| H \|_2 = \max \{\| H_1 \|_2, \| H_2 \|_2 \}$ one easily obtains that \eqref{eqno:6.16} holds for arbitrary $t_0 \in \mathcal{I}$ and $H \in \mathcal{S}^n$ and thus the proof is complete for the case $L(t) \equiv 0$. (ii) Let us consider the general case when $L(t) \not \equiv 0$. Let $X$ be a bounded and positive semi-definite solution of equation \eqref{eqno:3.1}. 
Applying Lemma \ref{lm5.7} for $W(t) = -R^{-1}(t)L^T(t)$ we obtain that $X$ is a bounded and positive semi-definite solution of the equation \begin{equation} \label{eqno:6.17} \begin{aligned} \lefteqn{\frac{d}{dt} X(t) + \mathcal{L}_{A - B R^{-1} L^T}^*(t)[X(t)] + C^T(t) C(t) + \Pi_W(t)[X(t)]} \\ & \quad {} - \big\{ X(t)B(t) + \Pi_{12W}(t)[X(t)] \big\} \big\{ R(t) + \Pi_2(t)[X(t)] \big\}^{-1} \\ & \qquad \times \big\{ X(t)B(t) + \Pi_{12W}(t)[X(t)] \big\}^T = 0, \end{aligned} \end{equation} where $\Pi_W(t)$ is defined as in \eqref{eqno:3.6}, $$ \Pi_{12W}(t) := \Pi_{12}(t) - L(t) R^{-1}(t) \Pi_2(t) $$ and $$ C^T(t) C(t) = M(t) - L(t) R^{-1}(t) L^T(t). $$ Equation \eqref{eqno:6.17} is an equation of type \eqref{eqno:3.1} with $L(t) \equiv 0$. Applying the first part of the proof we deduce that $X$ is a stabilizing solution of equation \eqref{eqno:6.17}. Let $$\hat{F}(t) := - \big\{ R(t) + \Pi_2(t)[X(t)] \big\}^{-1} \big\{ X(t)B(t) + \Pi_{12W}(t)[X(t)] \big\}^T $$ be the stabilizing feedback gain associated to the solution $X$ regarded as a solution of \eqref{eqno:6.17}. Then it is easy to see that $\hat{F}(t) - R^{-1}(t) L^T(t) = F(t)$, where $F(t) := F^X(t)$ is defined as in Lemma \ref{lm3.4}. Hence, $$ A(t) - B(t) R^{-1}(t) L^T(t) + B(t) \hat{F}(t) = A(t) + B(t) F(t), $$ and \begin{align*} \lefteqn{ \begin{pmatrix} I_n \\ \hat{F}(t) \end{pmatrix}^T \begin{pmatrix} \Pi_W(t)[X] & \Pi_{12W}(t)[X] \\ \big\{ \Pi_{12W}(t)[X] \big\}^T & \Pi_2(t)[X] \end{pmatrix} \begin{pmatrix} I_n \\ \hat{F}(t) \end{pmatrix}} \\ & \quad = \begin{pmatrix} I_n \\ \hat{F}(t) \end{pmatrix}^T \begin{pmatrix} I_n & - L(t) R^{-1}(t) \\ 0 & I_m \end{pmatrix} \Pi(t)[X] \begin{pmatrix} I_n & 0 \\ - R^{-1}(t) L^T(t) & I_m \end{pmatrix} \begin{pmatrix} I_n \\ \hat{F}(t) \end{pmatrix} \\ & \quad = \begin{pmatrix} I_n \\ F(t) \end{pmatrix}^T \Pi(t)[X] \begin{pmatrix} I_n \\ F(t) \end{pmatrix} = \Pi_F(t)[X]. 
\end{align*} These facts allow us to conclude that $X$ is a stabilizing solution of equation \eqref{eqno:3.1} and the proof ends. \end{proof} \begin{remark} \label{rmk6.3} \rm Assume that the quadruple $\Sigma = (A, B, \Pi, \mathcal{Q})$ satisfies $0 \in \tilde{\Gamma}^{\Sigma}$. Then any bounded and positive semi-definite solution $X \colon \mathcal{I} \to \mathcal{S}^n_+$ of equation \eqref{eqno:3.1} is a stabilizing solution, and we have $X(t) \gg 0$ for $t \in \mathcal{I}$. \end{remark} \begin{theorem} \label{thm6.4} Assume that the quadruple $\Sigma = (A, B, \Pi, \mathcal{Q})$ satisfies the following assumptions: \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item $0\in \Gamma^{\Sigma}$; \item $(A, B, \Pi)$ is stabilizable. \item $(C, A - B R^{-1} L^T, \Pi^*_W)$ is detectable where $\Pi_W$ is defined as in \eqref{eqno:3.6} for $W(t) = - R^{-1}(t) L^T(t)$ and $C$ is such that $$C^T(t) C(t) = M(t) - L(t) R^{-1}(t) L^T(t).$$ \end{enumerate} Then \eqref{eqno:3.1} has a unique solution $X \colon \mathcal{I} \to \mathcal{S}^n_+$ which is bounded and stabilizing. \end{theorem} \begin{proof} Based on Theorem \ref{thm6.1} we deduce that equation \eqref{eqno:3.1} has both a maximal solution $\tilde{X}$ and a minimal positive semi-definite solution $\tilde{\tilde{X}}$ such that $\tilde{X}(t) \geq \bar{X}(t) \geq \tilde{\tilde{X}}(t) \geq 0$ for all $t \in \mathcal{I}$, where $\bar{X}$ is an arbitrary bounded and positive semi-definite solution of \eqref{eqno:3.1}. Applying Lemma \ref{lm6.2} it follows that both $\tilde{X}$ and $\tilde{\tilde{X}}$ are stabilizing solutions. From the uniqueness of the stabilizing and bounded solution of equation \eqref{eqno:3.1} we conclude that $\tilde{\tilde{X}}(t) = \tilde{X}(t)$ for all $t \in \mathcal{I}$ and thus the proof is complete. 
\end{proof} \begin{remark} \label{rmk6.5}\rm As we have seen in Theorem \ref{thm6.1} equation \eqref{eqno:3.1} has two remarkable solutions, namely $\tilde{X} \colon \mathcal{I} \to \mathcal{S}^n$, which is the maximal solution, and $\tilde{\tilde{X}} \colon \mathcal{I} \to \mathcal{S}^n$, which is the minimal solution in the class of all bounded and positive semi-definite solutions of \eqref{eqno:3.1}. Theorem \ref{thm6.4} shows that under the assumption of detectability these two solutions coincide. However in the absence of detectability these two solutions may be different. If in addition to the assumptions of Theorem \ref{thm6.4} we assume that $0 \in \tilde{\Gamma}^\Sigma$ -- which is equivalent to $\mathcal{Q}(t) \gg 0$ -- then $\tilde{X} \gg 0$. This results immediately from Theorem \ref{thm2.11} and formula \eqref{eqno:3.7} with $X := \tilde{X}$ and $W := F^{\tilde{X}}$. \end{remark} We can see this in the following simple example. Consider equation \eqref{eqno:3.1} with constant coefficients and $n=2$, $m=1$, \[ A = \begin{pmatrix} 1 & 0 \\ 0 & 3 \end{pmatrix}, \quad B =\begin{pmatrix} 2 \\ 1 \end{pmatrix}, \quad M = \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix}, \] $L=0$, $R=1$, $\Pi_1(X) = X$, $\Pi_{12} = 0$, $\Pi_2 = 0$. One obtains that \[ \tilde{X} = \begin{pmatrix} 8 & -21 \\ -21 & 63 \end{pmatrix} \qquad \mbox{and} \qquad \tilde{\tilde{X}} = \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix}. \] It can be seen that $\tilde{X}$ is just the stabilizing solution. On the other hand if $X_{\tau}$ is the solution of \eqref{eqno:3.1} with $X_{\tau}(\tau) = 0$ we have \[ X_{\tau}(t) = \begin{pmatrix} x(t) & 0 \\ 0 & 0 \end{pmatrix} \quad \mbox{with} \quad x(t) = \frac{1-e^{-5(\tau-t)}}{1+4e^{-5(\tau-t)}}. \] Clearly $\lim_{\tau\to \infty}X_{\tau}(t)=\tilde{\tilde{X}}$ and therefore $\tilde{\tilde{X}}$ is the minimal positive semi-definite solution. 
\begin{theorem} \label{thm6.6} Assume that $A$, $B$, $\mathcal{Q}$ and $\Pi$ are periodic functions with period $\theta > 0$ and that all the assumptions of Theorem \ref{thm6.4} are fulfilled. Then \begin{equation} \lim_{t \to - \infty} \big( X(t, t_0, X_0) - X_s(t) \big) = 0 \quad \mbox{for all} \quad X_0 \in \mathcal{S}^n_+; \label{eqno:6.18} \end{equation} here $X_s$ is the unique stabilizing $\theta$-periodic solution of \eqref{eqno:3.1}. \end{theorem} \begin{proof} We choose a $\theta$-periodic function $\tilde{M} \colon \mathbb{R} \to \mathcal{S}^n$ such that $\tilde{M}(t) \geq M(t)$ and \[ \tilde{\mathcal{Q}}(t) := \begin{pmatrix} \tilde{M}(t) & L(t) \\ L(t)^T & R(t) \end{pmatrix} \gg 0. \] Then it follows from Corollary \ref{coro5.9} and Remark \ref{rmk6.5} that the generalized Riccati equation \begin{equation} \label{eqno:6.18a} \begin{aligned} \lefteqn{\frac{d}{dt} X(t) + A^T(t)X(t) + X(t)A(t) + \tilde{M}(t) + \Pi_1(t)[X(t)]} \\ & \quad {} - \big\{ X(t)B(t) +\Pi_{12}(t)[X(t)] + L(t) \big\} \big\{ R(t) + \Pi_2(t)[X(t)]\big\}^{-1} \\ & \qquad \times \big\{ X(t)B(t) +\Pi_{12}(t)[X(t)] + L(t) \big\}^T = 0 \end{aligned} \end{equation} has a periodic and stabilizing solution $\tilde{X}_s(t) \gg 0$. For given $X_0 \in \mathcal{S}^n_+$ we choose $\lambda > 1$ such that $X_0 \leq \lambda \tilde{X}_s(0)$ and consider the functions $X_\ell \equiv 0$ and $X_u = \lambda \tilde{X}_s$. We verify that $X_\ell$ and $X_u$ satisfy the conditions of Theorem \ref{thm3.11}, (ii). Since $0 \in \Gamma^\Sigma$ and $X_s(t) \geq 0$ it is sufficient to show that \begin{equation} \frac{d}{dt} X_u(t) + \mathcal{R}(t,X_u(t)) \leq 0 \quad \mbox{for} \quad t \leq 0. \label{eqno:6.19} \end{equation} Denote by $\mathcal{R}_\lambda(t)$ the generalized Riccati-type operator associated to the quadruple $(A, B, \Pi, \lambda \mathcal{Q})$. Since $\mathcal{Q}(t) \leq \lambda \mathcal{Q}(t)$ it follows from \cite[Lemma 4.4]{FrHo5} that $\mathcal{R}(t,X_u(t)) \leq \mathcal{R}_\lambda(t,X_u(t))$. 
Multiplying \eqref{eqno:6.18a} by $\lambda > 1$ we infer that \[ \frac{d}{dt} X_u(t) + \mathcal{R}_\lambda(t, X_u(t)) = \lambda \big( M(t) - \tilde{M}(t) \big) \leq 0. \] Therefore \eqref{eqno:6.19} is valid. As in the proof of Theorem \ref{thm3.11} it follows that $X(\cdot, 0, 0)$ and $X(\cdot, 0, X_u(0))$ are cyclomonotonically increasing (respectively decreasing) as $t$ decreases, and both converge to $\theta$-periodic solutions of \eqref{eqno:3.1}. Moreover, by Theorem \ref{thm3.6} we have \begin{equation} 0 \leq X(t, 0, 0) \leq X(t, 0, X_0) \leq X(t, 0, X_u(0)) \quad \mbox{for} \quad t \leq 0. \label{eqno:6.20} \end{equation} Since under our assumptions $X_s$ is the unique positive semi-definite $\theta$-periodic solution of \eqref{eqno:3.1}, \eqref{eqno:6.18} follows from \eqref{eqno:6.20}. \end{proof} Theorem \ref{thm6.6} shows that under its assumptions the dynamics defined by \eqref{eqno:3.1} on $\mathcal{S}^n_+$ is very special and comparable to that of symmetric matrix Riccati differential equations -- this is the reason why we call \eqref{eqno:3.1} (generalized) Riccati-type equation. \begin{remark} \label{rmk6.7} \rm The proofs of Theorems \ref{thm4.7} and \ref{thm6.1} show in connection with Theorem \ref{thm3.6} that the maximal and the minimal positive semi-definite solution (provided they exist) both depend monotonically on $\mathcal{Q}$. In the time-invariant case this has already been mentioned in \cite{DaHi01} and \cite{FrHo5}. 
\end{remark} \section{Generalizations} \subsection{Generalization of the linear part} All the results for equation \eqref{eqno:3.1} obtained in Sections 3--6 are still valid for equations of the form \begin{align*} \lefteqn{\frac{d}{dt} X(t) + \mathcal{L}_0^*(t)[X(t)] + M(t) + \Pi_1(t)[X(t)]} \\ & \quad {} - \big\{ X(t)B(t) + \Pi_{12}(t)[X(t)] + L(t) \big\} \big\{ R(t) + \Pi_2(t)[X(t)] \big\}^{-1} \\ & \qquad \times \big\{ X(t)B(t) + \Pi_{12}(t)[X(t)] + L(t) \big\}^T = 0, \end{align*} where $B, \Pi, \mathcal{Q}$ are as in \eqref{eqno:3.1} and $\mathcal{L}_0(t) \colon \mathcal{S}^n \to \mathcal{S}^n$ is a linear operator with the properties \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item $\mathcal{L}_0$ generates a positive evolution and \item the operator $X\mapsto \mathcal{L}_0(t)[X]+B(t)W(t)X+X(B(t)W(t))^T$ also generates a positive evolution for arbitrary $W \colon \mathcal{I} \to \mathbb{R}^{m \times n}$. \end{enumerate} Equation \eqref{eqno:3.1} corresponds to the particular case $\mathcal{L}_0(t)[X] = A(t)X + X A^T(t)$, and it has the above properties. \subsection{Generalized Riccati operators containing the Moore-Penrose inverse} Define $\mathcal{D}(\mathcal{R}_+)$ as the set of all $(t,X) \in \mathcal{I} \times \mathcal{S}^n$ with $R(t) + \Pi_2(t)[X] \geq 0$ and \[ \ker \big\{ R(t) + \Pi_2(t)[X] \big\} \subseteq \ker \big\{ L(t) + X B(t) + \Pi_{12}(t)[X] \big\}. \] Then we may consider $\mathcal{R}_+\colon \mathcal{D}(\mathcal{R}_+) \to \mathcal{S}^n$ where \begin{align*} \mathcal{R}_+(t,X) & = A^T(t)X + XA(t) + \Pi_1(t)[X] + M(t) \\ & \qquad {} - \big\{ X B(t) + \Pi_{12}(t)[X] + L(t) \big\} \big\{R(t) + \Pi_2(t)[X] \big\}^+ \\ & \qquad {} \times \big\{ XB(t) + \Pi_{12}(t)[X] + L(t) \big\}^T; \end{align*} here $Z^+$ denotes the Moore-Penrose inverse of a matrix $Z$. 
Combining the methods used in \cite{FrHo5} with the approach of this paper it follows that the assertion of the Comparison Theorem \ref{thm3.6} remains analogously valid for two rational differential equations $$ \frac{d}{dt} X(t) + \mathcal{R}_+(t, X(t)) = 0 \quad \mbox{and} \quad \frac{d}{dt} X(t) + \tilde{\mathcal{R}}_+(t, X(t)) = 0, $$ where $\tilde{\mathcal{R}}_+$ is (analogously to $\mathcal{R}_+$) associated to a quadruple $(A, B, \Pi, \tilde{\mathcal{Q}})$. In this case the assumption b) of Theorem \ref{thm3.6} can be replaced by the weaker assumption $(t, X_2(t)) \in \mathcal{D}(\tilde{\mathcal{R}}_+)$ for $t \in \mathcal{I}_1$. Moreover, in generalization of Theorem \ref{thm4.7} we get \begin{theorem} \label{thm7.1} Assume that $(A, B, \Pi)$ is stabilizable. Then the following are equivalent: \begin{enumerate} \renewcommand{\labelenumi}{(\roman{enumi})} \item The set $$ \hat{\Gamma}^\Sigma := \{ X \in C^1_b(\mathcal{I}, \mathcal{S}^n) : \lambda^\Sigma[X(t)] \geq 0, \; t \in \mathcal{I} \} $$ is not empty. \item The equation $$ \frac{d}{dt} X(t) + \mathcal{R}_+(t, X(t)) = 0 $$ has a maximal and bounded solution $\tilde{X} \colon \mathcal{I} \to \mathcal{S}^n$. \end{enumerate} \end{theorem} \subsection{An equivalent definition of stabilizability} Assume that the operator $X \mapsto \Pi(t)[X]$ is linear. Then as a consequence of \cite[Lemma 5.1]{FrHo5} it follows that the Fr\'{e}chet derivative of $$ f(t,X) := \frac{d}{dt} X + \mathcal{R}(t,X) $$ at $X$ is given by \[ f'_X(t,H) = \frac{d}{dt} H + \mathcal{R}'_X(t,H) = \frac{d}{dt} H + \mathcal{L}_{F^X}^*(t)[H]. \] Therefore $(A, B, \Pi)$ is stabilizable if and only if there is a function $X_0 \colon \mathcal{I} \to \mathcal{S}^n$ such that $\mathcal{R}'_X(t, X_0(t))$ (or the corresponding differential equation) generates an asymptotically stable evolution. 
It is easy to see (compare \cite{FrHo5}) that the sequence $(X_k)_{k \in \mathbb{N}}$ of functions defined in the proof of Theorem \ref{thm3.11} is generated by the Newton-Kantorovich procedure, applied to the operator equation $f(t,X) = 0$. Notice that the stabilizability condition ensures the existence of a stabilizing initial function $X_0$. The condition $\Gamma^\Sigma \neq \emptyset$ guarantees (as a consequence of the Comparison Theorem) that the decreasing sequence $(X_k)_{k \in \mathbb{N}}$ is bounded below and hence convergent to $\tilde{X}$. In the time-invariant case it has been proved recently by Damm and Hinrichsen that the Newton-Kantorovich procedure is also the adequate tool for proving results on the maximal solution of nonlinear operator equations of the form $f(X) = 0$ in real Banach spaces provided $f$ is concave on some domain $D$ and certain additional conditions are fulfilled (see \cite{DaHi03} for details). \subsection{Sums of generalized Riccati operators} For $1 \leq \kappa \leq k$ let quadruples $\Sigma_\kappa = (A_\kappa, B_\kappa, \Pi_\kappa, \mathcal{Q}_\kappa)$ and associated operators $\mathcal{R}_\kappa \colon \mathcal{D}(\mathcal{R}_\kappa) \to \mathcal{S}^n$ of type \eqref{eqno:3.4} be given. Define $$ \mathcal{R}_0 \colon \mathcal{D}(\mathcal{R}_0) := \bigcap_{\kappa=1}^k \mathcal{D}(\mathcal{R}_\kappa) \to \mathcal{S}^n, \quad (t,X) \mapsto \sum_{\kappa=1}^k \mathcal{R}_\kappa(t,X). $$ Then (under analogous assumptions) all results that were obtained in Sections 3 -- 6 can be derived analogously for the differential equation $$ \frac{d}{dt} X(t) + \mathcal{R}_0(t, X(t)) = 0. 
$$ For example the following Comparison Theorem can be proved analogously to Theorem \ref{thm3.6}: \begin{theorem} \label{thm7.2} For $1 \leq \kappa \leq k$ let quadruples $\Sigma_\kappa = (A_\kappa, B_\kappa, \Pi_\kappa, \mathcal{Q}_\kappa)$, $\tilde{\Sigma}_\kappa = (A_\kappa, B_\kappa,\Pi_\kappa, \tilde{\mathcal{Q}}_\kappa)$ and associated operators $\mathcal{R}_\kappa \colon \mathcal{D}(\mathcal{R}_\kappa) \to \mathcal{S}^n$, $\tilde{\mathcal{R}}_\kappa \colon \mathcal{D}(\tilde{\mathcal{R}}_\kappa) \to \mathcal{S}^n$ of type \eqref{eqno:3.4} be given. Let $X_i \colon \mathcal{I}_1 \subset \mathcal{I} \to \mathcal{S}^n$, $i = 1,2$, be solutions of $$ \frac{d}{dt} X_1(t) + \sum_{\kappa = 1}^k \mathcal{R}_\kappa(t, X_1(t)) = 0, \qquad \frac{d}{dt} X_2(t) + \sum_{\kappa = 1}^k \tilde{\mathcal{R}}_\kappa(t, X_2(t)) = 0. $$ Assume that \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item $\mathcal{Q}_\kappa(t) \geq \tilde{\mathcal{Q}}_\kappa(t)$ for $t \in \mathcal{I}$ and $\kappa \in \{ 1, \ldots, k \}$; \item $\tilde{R}_\kappa(t) + \Pi_{2,\kappa}(t)[X_2(t)] \gg 0$ for $t \in \mathcal{I}_1$ and $\kappa \in \{ 1, \ldots, k \}$; \item there exists $\tau \in \mathcal{I}_1$ such that $X_1(\tau) \geq X_2(\tau)$. \end{enumerate} Under these conditions we have $X_1(t)\geq X_2(t)$ for all $t \in(-\infty, \tau] \cap \mathcal{I}_1$. \end{theorem} As was pointed out in \cite[Section 4]{FrHo5} such a Comparison Theorem can be used in order to derive in an elegant way existence results for nonlinear differential equations. 
\begin{example} \label{ex7.3} \rm The main results of \cite{LiZh01} on the existence of optimal controls for certain stochastic control problems are based on the existence of the solution of the terminal value problem \begin{equation} \label{eqno:7.1} \begin{gathered} \frac{d}{dt} X(t) - A(t) X(t) - X(t) A^T(t) + B(t) R^{-1}(t) B^T(t) - X(t) Q(t) X(t) \\ {} + C(t) X(t) \big[ S(t) X(t) + I \big]^{-1} C^T(t) = 0, \quad X(t_f) = X_f, \end{gathered} \end{equation} where the coefficients $A$, $B$, $C$ and $S$ are bounded and locally integrable. Additionally it is assumed that $Q(t), S(t), X_f \geq 0$ and $R(t) \gg 0$. Since $S(t)$ has a square root $D(t) := S^{1/2}(t) \geq 0$ and \[ \big[ S(t) X(t) + I \big]^{-1} = I - D(t) \big[ I + D(t) X(t) D(t) \big]^{-1}D(t)X(t), \] we can rewrite \eqref{eqno:7.1} as \begin{equation} \label{eqno:7.2} \begin{aligned} \frac{d}{dt} Y(t) & = A(t)Y(t) + Y(t)A^T(t) + M(t) + \Pi_1(t)[Y(t)] \\ & \qquad {} - \big\{ Y(t)\tilde{B}(t) +\Pi_{12}(t)[Y(t)] + L(t) \big\} \big\{ R(t) + \Pi_2(t)[Y(t)]\big\}^{-1} \\ & \qquad \times \big\{ Y(t)\tilde{B}(t) + \Pi_{12}(t)[Y(t)] + L(t) \big\}^T, \end{aligned} \end{equation} where $\tilde{B}(t) := \begin{pmatrix} Q^{1/2}(t) & 0 \end{pmatrix}$, $$ \Pi_1(t)[X] := C(t) X C^T(t), \quad \Pi_{12}(t)[X] := \big( 0 \quad C(t) X D(t) \big) $$ and $$ \Pi_2(t)[X] := \begin{pmatrix} 0 & 0 \\ 0 & D(t) X D(t) \end{pmatrix}. $$ It is not difficult to see that $$ \Pi(t)[X] = \begin{pmatrix} \Pi_1(t)[X] & \Pi_{12}(t)[X] \\ \big( \Pi_{12}(t)[X] \big)^T & \Pi_2(t)[X] \end{pmatrix} = \begin{pmatrix} C(t) \\ 0 \\ D(t) \end{pmatrix} X \begin{pmatrix} C(t) \\ 0 \\ D(t) \end{pmatrix}^T $$ is a positive linear operator. 
Corollary \ref{coro3.8} shows now that the solution $X$ of \eqref{eqno:7.2} satisfies $$ 0 \leq X(t) \leq X_u(t) \quad \mbox{for} \quad 0 \leq t \leq t_f, $$ where $X_u$ is the solution of the linear terminal value problem \begin{align*} \lefteqn{\frac{d}{dt} X_u(t) - A(t) X_u(t) - X_u(t) A^T(t) + B(t) R^{-1}(t) B^T(t)} \\ & \quad {} + C(t) X_u(t) C^T(t) = 0, \quad X_u(t_f) = X_f. \end{align*} Under suitable assumptions we may derive the corresponding results concerning the existence of the maximal solution, stabilizing solution, minimal solution respectively of equation \eqref{eqno:7.2} or equivalently of equation \eqref{eqno:7.1}. Notice that Theorem \ref{thm7.2} could also be applied to equation \eqref{eqno:7.1}. \end{example} \subsection{Forward Riccati-type differential equations} Together with \eqref{eqno:3.1} a qua\-druple $\Sigma = (A, B, \Pi, \mathcal{Q})$ defines also a so-called forward Riccati-type differential equation \begin{equation} \label{eqno:7.3} \begin{aligned} \frac{d}{dt} Y(t) & = A(t)Y(t) + Y(t)A^T(t) + M(t) + \Pi_1(t)[Y(t)] \\ & \quad - \big\{ Y(t)B(t) +\Pi_{12}(t)[Y(t)] + L(t) \big\} \big\{ R(t) + \Pi_2(t)[Y(t)]\big\}^{-1} \\ & \quad \times \big\{ Y(t)B(t) + \Pi_{12}(t)[Y(t)] + L(t) \big\}^T. \end{aligned} \end{equation} In the particular case where $\Pi(t) \equiv 0$, $L(t) \equiv 0$ and $B^T(t)$ is the matrix coefficient of an output the equation \eqref{eqno:7.3} appears in connection with the filtering problem \cite{Wonh70}. With the same proof as of Theorem \ref{thm3.6} we may obtain the following comparison theorem for the equation \eqref{eqno:7.3}. 
\begin{theorem}[Comparison Theorem] \label{thm7.4} Let $\hat{\mathcal{R}}$ be the operator \eqref{eqno:3.4} associated to the quadruple $\hat \Sigma = (A, B, \Pi, \hat{\mathcal{Q}})$ and $\tilde{\mathcal{R}}$ be the operator of type \eqref{eqno:3.4} associated to the quadruple $\tilde{\Sigma} = (A, B, \Pi, \tilde{\mathcal{Q}})$ where $A$, $B$, $\Pi$ are as before and $\hat{\mathcal{Q}}(t) =\left(\begin{smallmatrix} \hat{M}(t) & \hat{L}(t) \\ \hat{L}(t)^T & \hat{R}(t) \end{smallmatrix} \right)$, $\tilde{\mathcal{Q}}(t) =\left(\begin{smallmatrix} \tilde{M}(t) & \tilde{L}(t) \\ \tilde{L}(t)^T & \tilde{R}(t) \end{smallmatrix} \right)$ with $\hat{L}(t),\tilde{L}(t) \in \mathbb{R}^{n \times m}$, $\hat{M}(t),\tilde{M}(t) \in \mathcal{S}^n$ and $\hat{R}(t),\tilde{R}(t) \in \mathcal{S}^m$. Let $Y_i \colon \mathcal{I}_1 \subset \mathcal{I} \to\mathcal{S}^n$, $i = 1,2$, be solutions of $$ \frac{d}{dt} Y_1(t)= \hat{\mathcal{R}}(t, Y_1(t)), \qquad \frac{d}{dt} Y_2(t)= \tilde{\mathcal{R}}(t, Y_2(t)). $$ Assume that \begin{enumerate} \renewcommand{\labelenumi}{(\alph{enumi})} \item $\hat{\mathcal{Q}}(t) \geq \tilde{\mathcal{Q}}(t)$ for all $t \in \mathcal{I}$; \item $\tilde{R}(t) + \Pi_2(t)[Y_2(t)] > 0$ for $t \in \mathcal{I}_1$; \item there exists $\tau \in \mathcal{I}_1$ such that $Y_1(\tau) \geq Y_2(\tau)$. \end{enumerate} Under these conditions we have $Y_1(t)\geq Y_2(t)$ for all $t \in[\tau,\infty)$. \end{theorem} The proof of the above theorem is based on Proposition \ref{prop3.5} (i). In order to derive results concerning the existence of the maximal solution, stabilizing solution, minimal solution respectively for equation \eqref{eqno:7.3} we need to assume that $\mathcal{I}=\mathbb{R}$ and apply the results of Theorem \ref{thm2.18} and Proposition \ref{prop2.15} instead of Theorem \ref{thm2.11} and Theorem \ref{thm2.13}. The stabilizability concept used for the backward differential equation \eqref{eqno:3.1} will be replaced by a dual concept of detectability. 
\begin{thebibliography}{99} \bibitem{AFIJ03} Abou-Kandil, H.; Freiling, G.; Ionescu, V.; Jank, G.: Matrix Riccati Equations in Control and Systems Theory, \textit{Birkh\"auser, Basel}, 2003. \bibitem{AFJ94} Abou-Kandil, H.; Freiling, G.; Jank, G.: Solution and asymptotic behavior of coupled Riccati equations in jump linear systems, \textit{IEEE Trans.\ Automat.\ Control} \textbf{39} (1994), 1631--1636. \bibitem{ARCMZ01} Ait Rami, M.; Chen, X.; Moore, J.\ B.; Zhou, X.\ Y.: Solvability and asymptotic behavior of generalized Riccati equations arising in indefinite stochastic LQ controls, \textit{IEEE Trans.\ Automat.\ Control} \textbf{46} (2001), 428--440. \bibitem{ARZh00} Ait Rami, M.; Zhou, X.\ Y.: Linear matrix inequalities, Riccati equations, and indefinite stochastic linear quadratic controls, \textit{IEEE Trans.\ Automat.\ Control} \textbf{45} (2000), 1131--1143. \bibitem{ChLZ98} Chen, S.; Li, X.; Zhou, X.\ Y.: Stochastic linear quadratic regulators with indefinite control weight costs, \textit{SIAM J.\ Control Optim.} \textbf{36} (1998), 1685--1702. \bibitem{ChYo01} Chen, S.; Yong, J.: Stochastic linear quadratic optimal control problems, \textit{Appl.\ Math.\ Optim.} \textbf{43} (2001), 21--45. \bibitem{ChZh00} Chen, S.; Zhou, X.\ Y.: Stochastic linear quadratic regulators with indefinite control weight costs II, \textit{SIAM J.\ Control Optim.} \textbf{39} (2000), 1065--1081. \bibitem{DPZa92} Da Prato, G.; Zabczyk, J.: Stochastic equations in infinite dimensions, \textit{Cambridge University Press, Cambridge}, 1992. \bibitem{DaHi01} Damm, T.; Hinrichsen, D.: Newton's method for a rational matrix equation occurring in stochastic control, \textit{Linear Algebra Appl.} \textbf{332/334} (2001), 81--109. \bibitem{DaHi03} Damm, T.; Hinrichsen, D.: Newton's method for concave operators with resolvent positive derivatives in ordered Banach spaces, \textit{Linear Algebra Appl.} \textbf{363} (2003), 43--64. 
\bibitem{preprint99} Dragan, V.; Morozan, T.: Stability and robust stabilization to linear stochastic systems described by differential equations with Markovian jumping and multiplicative white noise, \textit{Stochastic Anal.\ Appl.} \textbf{20} (2002), 33--92. \bibitem{preprint00} Dragan, V.; Morozan, T.: Systems of matrix rational differential equations arising in connection with linear stochastic systems with Markovian jumping, \textit{J.\ Differential Equations} \textbf{194} (2003), 1--38. \bibitem{preprint01} Dragan, V.; Morozan, T.: The linear quadratic optimization problems for a class of linear stochastic systems with multiplicative white noise and Markovian jumping, \textit{IEEE Trans.\ Automat.\ Control} \textbf{49} (2004), 665--675. \bibitem{FCdS98} Fragoso, M.\ D.; Costa, O.\ L.\ V.; de Souza, C.\ E.: A new approach to linearly perturbed Riccati equations arising in stochastic control, \textit{Appl.\ Math.\ Optim.} \textbf{37} (1998), 99--126. \bibitem{FrHo4} Freiling, G.; Hochhaus, A.: Properties of the solutions of rational matrix difference equations, Advances in difference equations, IV, \textit{Comput. Math. Appl.} \textbf{45} (2003), 1137--1154. \bibitem{FrHo6} Freiling, G.; Hochhaus, A.: About a generalized algebraic Riccati equation. Analysis and optimization of differential systems (Constanta, 2002), 169--178, \textit{Kluwer Acad. Publ., Boston, MA}, 2003. \bibitem{FrHo5} Freiling, G.; Hochhaus, A: On a class of rational matrix differential equations arising in stochastic control, \textit{Linear Algebra Appl.} \textbf{379} (2004), 43--68. \bibitem{FrJa96} Freiling, G.; Jank, G.: Existence and comparison theorems for algebraic Riccati equations and Riccati differential and difference equations, \textit{J.\ Dynam.\ Control Systems} \textbf{2} (1996), 529--547. \bibitem{FJAK96} Freiling, G.; Jank, G.; Abou-Kandil, H.: Generalized Riccati difference and differential equations, \textit{Linear Algebra Appl.} \textbf{241/243} (1996), 291--303. 
\bibitem{Hala66} Halanay, A.: Differential equations: Stability, oscillations, time lags, \textit{Academic Press, New York-London}, 1966. \bibitem{HiPr98} Hinrichsen, D.; Pritchard, A.\ J.: Stochastic $H\sp \infty$, \textit{SIAM J.\ Control Optim.} \textbf{36} (1998), 1504--1538. \bibitem{Kalm60} Kalman, R.\ E.: Contributions to the theory of optimal control, \textit{Bol.\ Soc.\ Mat.\ Mexicana (2)} \textbf{5} (1960), 102--119. \bibitem{LiZh01} Lim, A.\ E.\ B.; Zhou, X.\ Y.: Linear-quadratic control of backward stochastic differential equations, \textit{SIAM J.\ Control Optim.} \textbf{40} (2001), 450--474. \bibitem{Reid72} Reid, W.\ T.: Riccati differential equations, \textit{Academic Press, New York-London}, 1972. \bibitem{Schn65} H.\ Schneider, Positive operators and an inertia theorem, \textit{Numer.\ Math.} \textbf{7} 11--17 (1965). \bibitem{Wonh68} Wonham, W.\ M.: On a matrix Riccati equation of stochastic control, \textit{SIAM J.\ Control} \textbf{6} (1968), 681--697. \bibitem{Wonh70} Wonham, W.\ M.: Random differential equations in control theory, \textit{Probabilistic Methods in Applied Mathematics, Vol. 2}, 131--212, \textit{Academic Press, New York}, 1970. \bibitem{YoZh99} Yong, J.; Zhou, X.\ Y.: Stochastic controls, \textit{Springer-Verlag, New York}, 1999. \end{thebibliography} \end{document}