\documentclass[reqno]{amsart} \usepackage{amsfonts, hyperref} \AtBeginDocument{{\noindent\small {\em Electronic Journal of Differential Equations}, Vol. 2005(2005), No. 120, pp. 1--17.\newline ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu \newline ftp ejde.math.txstate.edu (login: ftp)} \thanks{\copyright 2005 Texas State University - San Marcos.} \vspace{9mm}} \begin{document} \title[\hfilneg EJDE-2005/120\hfil Sturm-Liouville operator] {Sturm-Liouville operator with general boundary conditions} \author[C. G. Gal\hfil EJDE-2005/120\hfilneg] {Ciprian G. Gal} \address{Ciprian G. Gal \hfill\break Department of Mathematical Sciences\\ The University of Memphis\\ Memphis, TN 38152, USA} \email{cgal@memphis.edu} \date{} \thanks{Submitted July 28, 2005. Published October 25, 2005.} \subjclass[2000]{34B24, 34B25, 47E05} \keywords{Sturm-Liouville operator; Wentzell boundary conditions; \hfill\break\indent nonseparated and separated boundary conditions; symmetric operators} \begin{abstract} We classify the general linear boundary conditions involving $u''$, $u'$ and $u$ on the boundary $\{a,b\}$ so that a Sturm-Liouville operator on $[a,b]$ has a unique self-adjoint extension on a suitable Hilbert space. \end{abstract} \maketitle \numberwithin{equation}{section} \newtheorem{theorem}{Theorem}[section] \newtheorem{remark}[theorem]{Remark} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{example}[theorem]{Example} \newtheorem{corollary}[theorem]{Corollary} \section{Introduction} The standard regular Sturm-Liouville operator is given by \begin{equation} Au=\frac{1}{k(x)}\big[-\big(p(x)u'\big)'+q(x)u\big] \label{e1.1} \end{equation} with boundary conditions involving \begin{equation*} R_ju=\alpha_{j1}u(a)+\alpha_{j2}u'( a)+\alpha_{j3}u(b)+\alpha_{j4}u'(b). \end{equation*} More precisely, we work in the Hilbert space $H=L^2((a;b);k(x)dx)$, where $-\infty <a<b<\infty $; here the coefficients $p,p',q,k$ are assumed to be real-valued and continuous on $[a,b]$, with $p,k>0$ on $[a,b]$. 
The domain of $A$ is \begin{equation*} D_1(A)=\{u\in C^2[a,b]:R_1u=R_2u=0 \}, \end{equation*} where $\alpha_j=(\alpha_{j1},\alpha_{j2},\alpha_{j3},\alpha _{j4})$ $(j=1,2)$ are two linearly independent vectors in $\mathbb{R}^{4}$. If we have separated boundary conditions (i.e., $\mathbf{\alpha }_1=(\alpha_{11}, \alpha_{12},0,0)$, $\mathbf{\alpha }_2=(0,0,\alpha_{23},\alpha_{24})$) so that $R_1u$ (resp. $R_2u$) depends only on the left (resp. right) hand end point, then the closure $\overline{A}$ of $A$ is selfadjoint on $H$ with a compact resolvent. The same is true in the periodic case ($\mathbf{\alpha } _1=(1,0,-1,0)$, $\mathbf{\alpha }_2=(0,1,0,-1)$) provided $p(a)=p(b)$. But often choosing $\mathbf{\alpha }_1\mathbf{,\alpha }_2$ can lead to $A$ having as its set of eigenvalues all of $\mathbb{C}$ or the empty set $\emptyset$ (see \cite{h1}). Hellwig \cite{h1} characterizes the nonseparated boundary conditions with $\mathbf{\alpha }_1\mathbf{,\alpha }_2$ so that $\overline{A} $ is selfadjoint and has compact resolvent. Brown, Binding and Watson \cite{b1, b2, b3} considered Sturm-Liouville problems with eigenparameter in the boundary conditions, that is, \begin{gather} -(pu')'+qu=\lambda ku, \label{e1.2} \\ \begin{gathered} \widetilde{\beta }_1u'(a)+(\gamma _1-\lambda )u(a)=0, \\ \widetilde{\beta }_2u'(b)+(\gamma _2-\lambda )u(b)=0. \end{gathered} \label{e1.3} \end{gather} The type of boundary-value problem \eqref{e1.2}, \eqref{e1.3} was considered in many works (see, for example, \cite{d1,e1,r1,r2} and the references therein). They make a complete study of the matrix Sturm-Liouville problem with spectral parameter $\lambda $ entering polynomially in the boundary conditions and thus its inclusion in the theory of $J$-self-adjoint operators. 
Whether the approach used is the theory of the $V$-Bezoutian (see \cite{r1,r2}) or another approach (see \cite{b3,d1,e1}), the authors obtain explicit constructions of the self-adjoint extensions and show how this problem is equivalent to an eigenvalue problem for a $J$-self-adjoint operator in a wider Pontryagin space, which is a finite dimensional extension of $L^2((a;b);k(x)dx)$. They formulate conditions in terms of the functions of $\lambda $ entering the boundary conditions. For instance, these polynomials satisfy certain symmetric and positivity assumptions in the case of Russakovskii \cite{r1}, or more generally, in Etkin \cite{e1}, they satisfy certain degree and invertibility conditions. In \cite{r1}, the compactness and a general description of the resolvent operator is also obtained. Independently, Favini, Goldstein, Goldstein and Romanelli \cite{f1} considered Sturm-Liouville operators with general Wentzell boundary conditions of the form \begin{equation*} -Au+(-1)^j\beta_ju'+\gamma_ju=0 \end{equation*} at $x=c_j,j=1,2$ when $c_1=a,c_2=b$. Here $\beta_1,\beta_2$ are positive. Their study was focused toward solving the problem $u_{t}+Au=0$, with $u(0)=f $. They show that its generator (on a suitable domain) is selfadjoint with respect to a uniquely determined inner product defined on a finite dimensional extension of $H$, and thus, the problem is governed by a strongly continuous selfadjoint semigroup. Moreover, they also observe how the coefficients $\beta_j$ need to enter as weights in the definition of their space, so that the corresponding eigenvalue problem is self-adjoint. The eigenvalue problem for the general Wentzell boundary conditions becomes \begin{gather} Au=\lambda u,\quad \text{in }[a,b], \label{e1.4} \\ -Au+(-1)^j\beta_ju'+\gamma_ju=0\quad \text{at } c_j, \label{e1.5} \end{gather} where $c_1=a$, $c_2=b$, and $A$ is defined by \eqref{e1.1}. 
Replacing $Au$ by $\lambda u$ in \eqref{e1.5} (via \eqref{e1.4}), the boundary conditions then become \begin{equation} (-1)^j\beta_ju'+(\gamma_j-\lambda )u=0. \label{e1.6} \end{equation} Then \eqref{e1.4}-\eqref{e1.5} becomes equivalent to \eqref{e1.2}- \eqref{e1.3}. This raises the question as to whether we can classify the general linear boundary conditions involving $u''$, $u'$ and $u$ on the boundary $\{a,b\}$ so that $A$ has a unique self-adjoint ($m-$accretive, respectively) extension, as obtained in the work of the mentioned authors. Thus, we formulate non-separated boundary conditions for $A$, following ideas of \cite{f1,h1}, and give necessary and sufficient conditions for its symmetry, depending only on the boundary coefficients. We also determine the inner product precisely and show how the boundary functions enter the underlying Hilbert space, using a more direct approach. This is what is investigated in this paper. We structure our paper as follows. In Section $2$ we study the question of how the self-adjointness of $A$ characterizes the general boundary conditions given. We use basic algebraic tools, using the very simple approach of Hellwig \cite{h1}. In Section $3$ we investigate those operators $A$ that may be semi-bounded and generators of $(C_{0})$ semigroups. \section{Formulation of the Problem} \quad Let us consider the Hilbert space \begin{equation} \mathcal{H}=L^2((a,b);k(x)dx)\oplus \mathbb{C}_{\delta }^2 \label{e2.1} \end{equation} with inner product \begin{equation} (u,v)_{\mathcal{H}}=\int_{a}^{b}u(x) \overline{v}(x)k(x)dx+u(a)\overline{v} (a)k(a)\delta_1+u(b)\overline{v} (b)k(b)\delta_2, \label{e2.2} \end{equation} where $\delta_{i}$ are nonnegative constants $(i=1,2)$ that depend on the boundary conditions. Strictly speaking, the $\mathbb{C} _{\delta }^2$ factor in $\mathcal{H}$ refers to the case when $\delta _1,\delta_2>0$. 
$\mathbb{C} _{\delta }^2$ should be replaced by $\mathbb{C}_{\delta }^j$ if exactly $j\in \{0,1,2\}$ of the numbers $\delta_1,\delta_2$ are positive. The general Sturm-Liouville operator $A$ in $\mathcal{H}$ is defined via \eqref{e1.1} with \begin{equation*} \mathcal{D}_2(A)=\{u\in C^2[a,b]: \mathcal{R}_1u=\mathcal{R}_2u=0\}. \end{equation*} Here the boundary operators $\mathcal{R}_1u,\mathcal{R}_2u$ are of the form \begin{equation} \mathcal{R}_ju=\alpha_{j1}u(a)+\alpha_{j2}u'(a)+\alpha_{j3}u^{\prime \prime}(a)+\alpha _{j4}u(b)+\alpha_{j5}u'(b)+\alpha _{j6}u''(b), \label{e2.3} \end{equation} $j=1,2$ and $\mathbf{\alpha }_1=(\alpha_{11},\alpha _{12},\alpha_{13},\alpha_{14},\alpha_{15},\alpha_{16})$, $\mathbf{\alpha } _2=(\alpha_{21},\alpha_{22},\alpha_{23},\alpha _{24},\alpha_{25},\alpha_{26}) $ are two linearly independent vectors in $\mathbb{R}^{6}$. We assume throughout the paper that $(\alpha_{13}, \alpha_{23})=(0,0)$ and $(\alpha_{16},\alpha_{26})=(0,0)$ when $\delta_1=\delta_2=0$. When $\delta_1,\delta_2>0$, we assume that $(\alpha_{13},\alpha_{23})\neq (0,0),(\alpha_{16},\alpha _{26})\neq (0,0)$. We have the same assumptions on $p,p',q,k$ as in the Introduction. The operators $\mathcal{R}_1u,\mathcal{R}_2u$ are very general boundary conditions. This formulation includes separated boundary conditions ( $\alpha_{14}=\alpha_{15}=\alpha_{16}=\alpha_{21}=\alpha_{22} =\alpha_{23}=0 $), periodic boundary conditions ($\mathbf{\alpha }_1=(0,1,0,0,-1,0),\mathbf{ \alpha }_2=(1,0,0,-1,0,0)$), combination of Dirichlet, Neumann and Robin boundary conditions at each end, as well as, nonseparated and separated Wentzell boundary conditions. The problem \begin{equation*} u_{t}+Au=0 \end{equation*} with $u\in \mathcal{D}_2(A)$ is governed by a strongly continuous semigroup whose norm is bounded by $e^{\omega t}$, for some real $\omega $, if $- \overline{A}$ is $m-$dissipative. 
When $(\alpha_{13},\alpha_{23})\neq (0,0),(\alpha_{16},\alpha _{26})\neq (0,0)$ (thus, $\delta_1,\delta_2>0$), $A $ is equipped with dynamical or Wentzell boundary conditions. The physical interpretation of Wentzell (and dynamical) boundary conditions is given in \cite{g1}. In the sequel, we will give sufficient conditions for the operator $A$ to be symmetric on $\mathcal{H}$. Let $u,v\in \mathcal{D}_2(A)$. Then $\overline{v} \in \mathcal{D}_2(A)$. Now we compute $(Au,v)_{\mathcal{H}}-(u,Av)_{\mathcal{ H}}$ as follows: \begin{equation} \begin{aligned} (Au,v)_{\mathcal{H}}-(u,Av)_{\mathcal{H}} &=\int_{a}^{b}Au\overline{v}k(x)dx-\int_{a}^{b}uA \overline{v}k(x)dx \\ &\quad +[Au(a)\overline{v}(a)k(a) \delta_1-u(a)A\overline{v}(a)k(a) \delta_1]\\ &\quad + [Au(b)\overline{v}(b)k(b) \delta_2-u(b)A\overline{v}(b)k(b) \delta_2]. \end{aligned} \label{e2.4} \end{equation} Let us denote $S_1=Au(a)\overline{v}(a)k(a)\delta_1-u(a)A\overline{v}(a)k( a)\delta_1$ and $S_2=Au(b)\overline{v}(b)k(b)\delta_2-u(b)A\overline{v}( b)k(b)\delta_2$. We now compute them explicitly. \begin{align*} S_1&=\delta_1\big[\big(-p'(a)u'(a)-p(a)u''(a) +q(a)u(a)\big)\overline{v}(a) \\ &\quad- \big(-p'(a)\overline{v}'(a)-p(a)\overline{v} ''( a)+q(a)\overline{v}(a)\big)u(a)\Big] \\ &=\delta_1\big[p(a)\big(u(a)\overline{v} ''(a)-u''(a) \overline{v}(a)\big)+p'(a)\big( u(a)\overline{v} '(a)-u'(a)\overline{v}(a)\big)\big]. \end{align*} Analogously, we obtain \begin{equation*} S_2=\delta_2\big[p(b)\big(u(b)\overline{v} ''(b)-u''(b) \overline{v}(b)\big)+p'(b)\big( u(b)\overline{v} '(b)-u'(b)\overline{v}(b)\big)\big]. 
\end{equation*} Then integration by parts in \eqref{e2.4} leads us to \begin{align*} &(Au,v)_{\mathcal{H}}-(u,Av)_{\mathcal{H}} \\ &=p(b)\big[u(b)\overline{v}'(b) -u'(b)\overline{v}(b)\big] -p(a)\big[u(a)\overline{v}'(a) -u'(a)\overline{v}(a)\big] +S_1+S_2, \end{align*} and re-arranging the brackets we obtain the expression \begin{align*} (Au,v)_{\mathcal{H}}-(u,Av)_{\mathcal{H}} &=\big[u(b)\overline{v} '(b)-u'(b)\overline{v}(b)\big] \big(p(b)+p'(b) \delta_2\big) \\ &\quad -\big[u(a)\overline{v}'(a) -u'(a)\overline{v}(a)\big] \big(p(a)-p'(a)\delta_1\big) \\ &\quad +\big[u(a)\overline{v}''(a) -u''(a) \overline{v}(a)\big] (p(a)\delta_1) \\ &\quad +\big[u(b)\overline{v}''(b) -u''(b) \overline{v}(b)\big] (p(b)\delta_2). \end{align*} For simplicity, we set the following notation: For all pairs $(m,n)$ with $1\leq m,n\leq 6$, \begin{equation} c_{mn}=\left\vert \begin{matrix} \alpha_{1m} & \alpha_{1n} \\ \alpha_{2m} & \alpha_{2n} \end{matrix} \right\vert . \label{e2.5} \end{equation} Also \begin{gather*} X(u,v)=\left\vert \begin{matrix} u(b) & \overline{v}(b) \\ u'(b) & \overline{v}'(b) \end{matrix} \right\vert , \quad Y(u,v)=\left\vert \begin{matrix} u(a) & \overline{v}(a) \\ u'(a) & \overline{v}'(a) \end{matrix} \right\vert , \\ Z(u,v)=\left\vert \begin{matrix} u(a) & \overline{v}(a) \\ u''(a) & \overline{v}''(a) \end{matrix} \right\vert , \quad T(u,v)=\left\vert \begin{matrix} u(b) & \overline{v}(b) \\ u''(b) & \overline{v}''(b) \end{matrix} \right\vert . \end{gather*} Using this notation, we are able to simplify \eqref{e2.4} as follows: \begin{equation} (Au,v)_{\mathcal{H}}-(u,Av)_{\mathcal{H} }=l_1X(u,v)-l_2Y(u,v)+l_{3}Z(u,v) +l_{4}T(u,v), \label{e2.6} \end{equation} where \begin{equation} l_1=p(b)+p'(b)\delta _2,l_2=p(a)-p'(a)\delta _1,l_{3}=p(a)\delta_1,l_{4}=p(b)\delta_2. \label{e2.7} \end{equation} \begin{remark} \label{rmk1} \rm Clearly if $\delta_1=\delta_2=0$, then $\mathcal{H}=H$ and $(u,v)_{\mathcal{H}}=(u,v)_{H}$. 
Moreover, \begin{equation} l_{3}=l_{4}=0;l_1=p(b);\quad l_2=p(a). \label{e2.8} \end{equation} \end{remark} Now let us consider the following conditions: \begin{itemize} \item[(C1)] $l_1c_{12}=l_2c_{45} \Leftrightarrow l_1\left\vert \begin{matrix} \alpha_{11} & \alpha_{12} \\ \alpha_{21} & \alpha_{22} \end{matrix} \right\vert =l_2\left\vert \begin{matrix} \alpha_{14} & \alpha_{15} \\ \alpha_{24} & \alpha_{25} \end{matrix} \right\vert $ \item[(C2)] $l_{3}c_{46}=-l_{4}c_{13}\Leftrightarrow l_{3}\left\vert \begin{matrix} \alpha_{14} & \alpha_{16} \\ \alpha_{24} & \alpha_{26} \end{matrix} \right\vert =-l_{4}\left\vert \begin{matrix} \alpha_{11} & \alpha_{13} \\ \alpha_{21} & \alpha_{23} \end{matrix} \right\vert $ \item[(C3)] $l_2c_{46}=l_{4}c_{12}\Leftrightarrow l_2\left\vert \begin{matrix} \alpha_{14} & \alpha_{16} \\ \alpha_{24} & \alpha_{26} \end{matrix} \right\vert =l_{4}\left\vert \begin{matrix} \alpha_{11} & \alpha_{12} \\ \alpha_{21} & \alpha_{22} \end{matrix} \right\vert $ \item[(C4)] $l_1c_{46}=l_{4}c_{45}\Leftrightarrow l_1\left\vert \begin{matrix} \alpha_{14} & \alpha_{16} \\ \alpha_{24} & \alpha_{26} \end{matrix} \right\vert =l_{4}\left\vert \begin{matrix} \alpha_{14} & \alpha_{15} \\ \alpha_{24} & \alpha_{25} \end{matrix} \right\vert $ \item[(C5)] $l_2c_{13}=-l_{3}c_{12}\Leftrightarrow l_2\left\vert \begin{matrix} \alpha_{11} & \alpha_{13} \\ \alpha_{21} & \alpha_{23} \end{matrix} \right\vert =-l_{3}\left\vert \begin{matrix} \alpha_{11} & \alpha_{12} \\ \alpha_{21} & \alpha_{22} \end{matrix} \right\vert $ \item[(C6)] $l_1c_{13}=-l_{3}c_{45}\Leftrightarrow l_1\left\vert \begin{matrix} \alpha_{11} & \alpha_{13} \\ \alpha_{21} & \alpha_{23} \end{matrix} \right\vert =-l_{3}\left\vert \begin{matrix} \alpha_{14} & \alpha_{15} \\ \alpha_{24} & \alpha_{25} \end{matrix} \right\vert $. \end{itemize} \begin{proposition} \label{prop1} Let $\delta_1,\delta_2>0$ and $c_{12},c_{13},c_{45},c_{46}\in \mathbb{R}^{\ast }=\mathbb{R}\setminus \{0\}$. 
The conditions (C1), (C2), (C3) hold if and only if (C4), (C5), (C6) hold as well. \end{proposition} \begin{proof} Suppose that (C1), (C2), (C3) hold. Substitute for $l_2$ (via (C3)) in (C1) and obtain (C4). Substitute again for $l_{4}$ (via (C2)) in (C3) and obtain (C5). In order to obtain (C6) we use a combined substitution in (C2) using (C1), (C3). The converse is similar. \end{proof} In what follows we will have a complete discussion on the weights $\delta_1,\delta_2$ that appear in the definition of \eqref{e2.2}. \begin{theorem} \label{thm2} Let us consider the case $\delta_1=\delta_2=0$ under the assumption that $\mathbf{\alpha }_1=(\alpha_{11},\alpha_{12},0,\alpha_{14},\alpha _{15},0)$ and $\mathbf{\alpha }_2=(\alpha_{21},\alpha _{22},0,\alpha_{24},\alpha_{25},0)$ are linearly independent vectors in $\mathbb{R}^{6}$. Then the operator $A$ in $D_1(A)$ is symmetric if and only if condition (C1) is satisfied. \end{theorem} \begin{proof} If $\delta_1=\delta_2=0$ we notice by Remark \ref{rmk1} that $\mathcal{H}=H$ and $(u,v)_{\mathcal{H}}=(u,v)_{H}$. Moreover, since $\alpha_{i3}= \alpha_{i6}=0$ for $i=1,2$ it follows that $\mathcal{R}_ju=R_ju$ $(j=1,2)$ so that $A$ is defined on $D_1(A)=\mathcal{D}_2(A)$ and \begin{equation*} (Au,v)_{\mathcal{H}}-(u,Av)_{\mathcal{H} }=l_1X(u,v)-l_2Y(u,v). \end{equation*} It is not hard to see that the condition (C1) is equivalent to the condition in \cite[Theorem 1]{b3}, that is \begin{equation*} p(a)\left| \begin{matrix} \alpha_{14} & \alpha_{15} \\ \alpha_{24} & \alpha_{25} \end{matrix} \right| =p(b)\left| \begin{matrix} \alpha_{11} & \alpha_{12} \\ \alpha_{21} & \alpha_{22} \end{matrix} \right| . \end{equation*} For the complete proof of this theorem, see \cite[Theorem 1, sec. 5.2]{d1}. \end{proof} \begin{remark} \label{rmk2} \rm Let us denote $J:=(a,b)$ and $\partial J:=\{a,b\}$. We identify $u\in C^{k}[a,b]$ $(k\geq 0)$ with $U=(u\mid_{J},u\mid_{\partial J})\in \mathcal{H}$. 
Then the image of $C^{k}[a,b]$ under this map is dense in $\mathcal{H}$, in the norm given by \eqref{e2.2}. Call this image $\widetilde{C}^{k}[a,b]$. Let us define the set \begin{align*} \mathcal{D}=&\big\{U=(u\big|_{J},u\big|_{\partial J})\in \widetilde{C}^{k}[a,b]: \text{$u(x)=0$ for $a\leq x\leq a_1$ and for}\\ &\text{ $b_1\leq x\leq b$ with $a_1,b_1$ depending on $u$ and $a_1>a$, $b_1<b$}\big\}. \end{align*} Let $\delta_1,\delta_2>0$ and define \begin{equation*} \widetilde{\mathcal{D}}=\{U=(u\big|_{J},u\big|_{\partial J}) \in \widetilde{C}^{k}[a,b]: \text{$u\equiv d_1$ in $[a,a_1]$ and $u\equiv d_2$ in $[b_1,b]$}\}. \end{equation*} Here $d_1,d_2\in \mathbb{C}$ and $a_1,b_1\in (a,b)$ are arbitrary. Since $\widetilde{\mathcal{D}}$ is dense in $\mathcal{H}=L^2((a,b);k(x)dx)\oplus \mathbb{C} ^2$ (see \cite{b2}) and $\widetilde{\mathcal{D}}\subseteq D_2(A)\subset \mathcal{H}$, we conclude that, for all $\mathbf{\alpha }_1\mathbf{,\alpha }_2\in \mathbb{R}^{6}$, $D_2(A)$ is dense in $\mathcal{H}$, as well. \end{remark} We also note that $A$ satisfies a range condition (as shown in \cite{h1}), that is, $R(A-\mu I)=C[a,b]$, where $\mu \in \rho (\overline{A})$, when $\delta_1=\delta_2=0$. Then, in this case, $(\overline{A}-\mu I)^{-1}$ is the integral operator given by \begin{equation*} (\overline{A}-\mu I)^{-1}h(x) =\int_{a}^{b}G_{\mu }(x,y)h(y)dy, \end{equation*} where the Green's function $G_{\mu }$ is continuous on $[a,b] ^2$. In this case, using known results, $\overline{A}-\mu I$ is a bijection from the set $\{u\in H^2(a,b):R_1u=R_2u=0\}$ to $L^2(a,b)$ and also from $D_1(A)$ to $C[a,b]$. The case when the weights $\delta_1,\delta_2>0$ is similar. The analogous Green's function calculation for the case of $\delta_1,\delta_2>0$ was done in \cite{b1,b2,b3,r1}. 
Then, in this case, for $\mu\in \rho ( \overline{A})$, $\overline{A}-\mu I$ is a bijection from the set $\{u\in H^2(a,b)\oplus \mathbb{C}^2: \mathcal{R}_1u=\mathcal{R}_2u=0\}$ to $\mathcal{H} $ and also from $\mathcal{D}_2(A)$ to $C[a,b]$ (identified with $\widetilde{C }[a,b]\subset \mathcal{H}$). Now we are ready to give sufficient conditions for the symmetry of $A$ in the case when $\delta_1,\delta_2>0$. \begin{theorem} \label{thm3} Assume that $l_1,l_2$ are nonzero. Let $\delta_1,\delta_2>0$ and $A $ be the operator defined on $\mathcal{D}_2(A)$ via \eqref{e1.1} such that $\mathbf{\alpha }_1\mathbf{,\alpha }_2$ are linearly independent, \begin{gather} \mathop{\rm rank} \begin{pmatrix} \alpha_{11} & \alpha_{12} & \alpha_{13} \\ \alpha_{21} & \alpha_{22} & \alpha_{23} \end{pmatrix}=2, \label{e2.9} \\ \mathop{\rm rank} \begin{pmatrix} \alpha_{14} & \alpha_{15} & \alpha_{16} \\ \alpha_{24} & \alpha_{25} & \alpha_{26} \end{pmatrix}=2, \label{e2.10} \\ \left| \begin{matrix} \alpha_{12} & \alpha_{13} \\ \alpha_{22} & \alpha_{23} \end{matrix}\right| =\left|\begin{matrix} \alpha_{15} & \alpha_{16} \\ \alpha_{25} & \alpha_{26} \end{matrix}\right| =0. \label{e2.11} \end{gather} If the conditions (C1), (C2), (C3) hold, then $A$ is symmetric on $\mathcal{H}$. \end{theorem} \begin{proof} Since $u,v\in \mathcal{D}_{2}(A)$ it follows that $u,\overline{v}\in \mathcal{D}_{2}(A)$ and $u,\overline{v}$ satisfy the boundary conditions $\mathcal{R}_{j}u=\mathcal{R}_{j}\overline{v}=0$ $(j=1,2)$. 
Hence \begin{align*} & \left\vert \begin{matrix} \alpha _{11}u(a)+\alpha _{12}u'(a)+\alpha _{13}u''(a) & \alpha _{11}\overline{v}(a)+\alpha _{12}\overline{v}'(a)+\alpha _{13}\overline{v}''(a) \\ \alpha _{21}u(a)+\alpha _{22}u'(a)+\alpha _{23}u''(a) & \alpha _{21}\overline{v}(a)+\alpha _{22}\overline{v}'(a)+\alpha _{23}\overline{v}''(a) \end{matrix} \right\vert \\ & =\left\vert \begin{matrix} \alpha _{14}u(b)+\alpha _{15}u'(b)+\alpha _{16}u''(b) & \alpha _{14}\overline{v}(b)+\alpha _{15}\overline{v}'(b)+\alpha _{16}\overline{v}''(b) \\ \alpha _{24}u(b)+\alpha _{25}u'(b)+\alpha _{26}u''(b) & \alpha _{24}\overline{v}(b)+\alpha _{25}\overline{v}'(b)+\alpha _{26}\overline{v}''(b) \end{matrix} \right\vert . \end{align*} Let us denote the left hand side determinant by $M_{1}$ and the right hand side determinant by $M_{2}$. Now we can expand both $M_{1}$ and $M_{2}$ as a sum of $9$ determinants, where $3$ of them vanish, so that \begin{align*} M_{1}& =\left\vert \begin{matrix} \alpha _{11}u(a) & \alpha _{12}\overline{v}'(a) \\ \alpha _{21}u(a) & \alpha _{22}\overline{v}'(a) \end{matrix} \right\vert +\left\vert \begin{matrix} \alpha _{11}u(a) & \alpha _{13}\overline{v}''(a) \\ \alpha _{21}u(a) & \alpha _{23}\overline{v}''(a) \end{matrix} \right\vert \\ & \quad +\left\vert \begin{matrix} \alpha _{12}u'(a) & \alpha _{11}\overline{v}(a) \\ \alpha _{22}u'(a) & \alpha _{21}\overline{v}(a) \end{matrix} \right\vert +\left\vert \begin{matrix} \alpha _{12}u'(a) & \alpha _{13}\overline{v}''(a) \\ \alpha _{22}u'(a) & \alpha _{23}\overline{v}''(a) \end{matrix} \right\vert \\ & \quad +\left\vert \begin{matrix} \alpha _{13}u''(a) & \alpha _{11}\overline{v}(a) \\ \alpha _{23}u''(a) & \alpha _{21}\overline{v}(a) \end{matrix} \right\vert +\left\vert \begin{matrix} \alpha _{13}u''(a) & \alpha _{12}\overline{v}'(a) \\ \alpha _{23}u''(a) & \alpha _{22}\overline{v}'(a) \end{matrix} \right\vert . 
\end{align*} Next, we rearrange the determinants such that \begin{align*} M_{1}& =\left\vert \begin{matrix} \alpha _{11} & \alpha _{12} \\ \alpha _{21} & \alpha _{22} \end{matrix} \right\vert \left\vert \begin{matrix} u(a) & \overline{v}(a) \\ u'(a) & \overline{v}'(a) \end{matrix} \right\vert +\left\vert \begin{matrix} \alpha _{11} & \alpha _{13} \\ \alpha _{21} & \alpha _{23} \end{matrix} \right\vert \left\vert \begin{matrix} u(a) & \overline{v}(a) \\ u''(a) & \overline{v}''(a) \end{matrix} \right\vert \\ & \quad +\left\vert \begin{matrix} \alpha _{12} & \alpha _{13} \\ \alpha _{22} & \alpha _{23} \end{matrix} \right\vert \left\vert \begin{matrix} u'(a) & \overline{v}'(a) \\ u''(a) & \overline{v}''(a) \end{matrix} \right\vert . \end{align*} Using \eqref{e2.5} and \eqref{e2.11} we obtain \begin{equation*} M_{1}=c_{12}Y(u,v)+c_{13}Z(u,v). \end{equation*} Similar calculation and assumption \eqref{e2.11} lead us to \begin{equation*} M_{2}=c_{45}X(u,v)+c_{46}T(u,v). \end{equation*} Hence we obtain the equation \begin{equation} -c_{12}Y(u,v)+c_{45}X(u,v)+c_{46}T(u,v)-c_{13}Z(u,v)=0. \label{e2.12} \end{equation} Using \eqref{e2.11} we can also show that \begin{equation*} c_{13}=\frac{\alpha _{23}}{\alpha _{22}}c_{12}\quad \text{and}\quad c_{46}= \frac{\alpha _{26}}{\alpha _{25}}c_{45} \end{equation*} so it follows from \eqref{e2.9}-\eqref{e2.10} that $c_{12},c_{13},c_{45},c_{46}\neq 0$. Let us recall that $(Au,v)_{\mathcal{H} }-(u,Av)_{\mathcal{H}}=l_{1}X(u,v)-l_{2}Y(u,v)+l_{3}Z(u,v)+l_{4}T(u,v)$ and (C1)-(C6) hold by Proposition \ref{prop1}. We can perform the calculation: \begin{equation} (Au,v)_{\mathcal{H}}-(u,Av)_{\mathcal{H}}=l_{1}\big(X(u,v)-\frac{l_{2}}{l_{1} }Y(u,v)\big)+l_{4}\big(\frac{l_{3}}{l_{4}}Z(u,v)+T(u,v)\big). 
\label{e2.13} \end{equation} By (C1), (C2) we notice that $\frac{l_{2}}{l_{1}}=\frac{c_{12}}{c_{45}}$, respectively $\frac{l_{3}}{l_{4}}=-\frac{c_{13}}{c_{46}}$ so that plugging in \eqref{e2.13} we obtain \begin{align*} & (Au,v)_{\mathcal{H}}-(u,Av)_{\mathcal{H}} \\ & =\frac{l_{1}}{c_{45}}(c_{45}X(u,v)-c_{12}Y(u,v))+\frac{l_{4}}{c_{46}} (-c_{13}Z(u,v)+c_{46}T(u,v)). \end{align*} Applying (C4) and \eqref{e2.12} we obtain $(Au,v)_{\mathcal{H}}-(u,Av)_{ \mathcal{H}}=0$ which completes the proof. \end{proof} Now we give two simple examples of a symmetric operator $A$ with specific coefficients $\mathbf{\alpha }_1\mathbf{,\alpha }_2$ appearing in the boundary conditions \eqref{e2.3}. \begin{example} \label{ex1} \rm Let us consider the case when $p\equiv 1,k\equiv 1,q\equiv 0$ so that $Au=-u''$ on $\mathcal{H}$. Assume that $\mathbf{\alpha }_1=(\alpha_{11},0,0,\alpha_{14},0,0)$, $\mathbf{\alpha }_2=(0,\alpha_{22},\alpha_{23},0,\alpha_{25},\alpha_{26}) \in \mathbb{R}^{6}$, such that $\alpha_{11},\alpha_{14}\neq 0,\alpha _{22}<0$ and $\alpha_{23},\alpha_{25},\alpha_{26}>0$. It is not hard to check that all the hypothesis of Theorem \ref{thm3} hold when \begin{equation*} \delta_2=\frac{\alpha_{26}}{\alpha_{25}}>0,\quad \delta_1=-\frac{\alpha_{23}}{\alpha_{22}}>0, \end{equation*} so the operator $Au=-u''$ with the boundary conditions \begin{gather*} \alpha_{11}u(a)+\alpha_{14}u(b)=0, \\ \alpha_{22}u'(a)+\alpha_{23}u''(a)+\alpha_{25}u'(b)+\alpha _{26}u''(b)=0 \end{gather*} is symmetric on $\mathcal{H}$. \end{example} \begin{example} \label{ex2} \rm Consider the case when $p\equiv 1,k\equiv 1,q\equiv 0,\delta_1=\delta_2=2$; and the vectors $\mathbf{\alpha }_1=(0,1,1,1,1,2)$; $\mathbf{\alpha }_2=(1,1,1,0,-1,-2)$. Then the boundary conditions $\mathcal{R}_ju$ become \begin{gather*} u'(a)+u''(a)+u(b)+u'(b)+2u''(b)=0, \\ u(a)+u'(a)+u''(a)-u'(b)-2u''(b)=0, \end{gather*} and the operator $Au=-u''$ is symmetric on $\mathcal{H}$. 
\end{example} In Theorem \ref{thm3}, we assumed that \eqref{e2.9}-\eqref{e2.10} hold. In the next theorem we deal with the case when both \eqref{e2.9} and \eqref{e2.10} fail to hold. We remark in this case that equation \eqref{e2.12} cannot be used in the next proof since \begin{equation} c_{12}=c_{13}=c_{45}=c_{46}=0, \label{e2.14} \end{equation} and equation \eqref{e2.12} becomes trivial. \begin{theorem} \label{thm4} Assume that $l_1=p(b)+p'(b)\delta_2,l_2=p(a)-p'(a)\delta_1$ are nonzero. Let $A$ be the operator defined on $\mathcal{D}_2(A)$ via \eqref{e1.1} such that \begin{equation} \mathop{\rm rank} \begin{pmatrix} \alpha_{11} & \alpha_{12} & \alpha_{13} & \alpha_{14} & \alpha_{15} & \alpha_{16} \\ \alpha_{21} & \alpha_{22} & \alpha_{23} & \alpha_{24} & \alpha_{25} & \alpha_{26} \end{pmatrix} =2, \label{e2.15} \end{equation} but \begin{equation} \mathop{\rm rank} \begin{pmatrix} \alpha_{11} & \alpha_{12} & \alpha_{13} \\ \alpha_{21} & \alpha_{22} & \alpha_{23} \end{pmatrix} =\mathop{\rm rank} \begin{pmatrix} \alpha_{14} & \alpha_{15} & \alpha_{16} \\ \alpha_{24} & \alpha_{25} & \alpha_{26} \end{pmatrix} =1. \label{e2.16} \end{equation} Let us define two sets of equations (see \eqref{e2.5} and \eqref{e2.7} for the notation): \begin{gather} c_{2k}l_{3}+c_{3k}l_2=0,\quad \text{for }k\in \{4,5,6\}. \label{ePk} \\ -c_{j5}l_{4}+c_{j6}l_1=0,\text{ for }j\in \{1,2,3\}. \label{ePj'} \end{gather} If both \eqref{ePk} and \eqref{ePj'} hold for all $k\in \{4,5,6\}$ and $j\in \{1,2,3\}$, then $A$ is symmetric on $\mathcal{H}$. Conversely, if $A$ is symmetric then \eqref{ePk} and \eqref{ePj'} hold (for those $k$ and $j$, where both $c_{3k}$ and $c_{j6}$ are non-zero). \end{theorem} \begin{proof} Let $u,v\in \mathcal{D}_{2}(A)$. First, we observe that $\mathcal{R}_{j}u=0$ for $j=1,2$ so that we can form the equation $\alpha _{21}\mathcal{R} _{1}u-\alpha _{11}\mathcal{R}_{2}u=0$, that reduces to the equation \begin{equation} c_{12}u'(a)+c_{13}u''(a)+c_{14}u(b)+c_{15}u'(b)+c_{16}u''(b)=0. 
\label{e2.19} \end{equation} Similarly, $\alpha _{22}\mathcal{R}_{1}u-\alpha _{12}\mathcal{R}_{2}u=0$, or equivalently, \begin{equation} c_{21}u(a)+c_{23}u''(a)+c_{24}u(b)+c_{25}u'(b)+c_{26}u''(b)=0, \label{e2.19'} \end{equation} and $\alpha _{23}\mathcal{R}_{1}u-\alpha _{13}\mathcal{R}_{2}u=0$ becomes \begin{equation} c_{31}u(a)+c_{32}u'(a)+c_{34}u(b)+c_{35}u'(b)+c_{36}u''(b)=0. \label{e2.19''} \end{equation} Using \eqref{e2.16} we notice that \eqref{e2.19}, \eqref{e2.19'} and \eqref{e2.19''} become \begin{equation} c_{14}u(b)+c_{15}u'(b)+c_{16}u''(b)=0, \label{e2.20} \end{equation} \begin{equation} c_{24}u(b)+c_{25}u'(b)+c_{26}u''(b)=0, \label{e2.20'} \end{equation} \begin{equation} c_{34}u(b)+c_{35}u'(b)+c_{36}u''(b)=0. \label{e2.20''} \end{equation} Equations \eqref{e2.20},\eqref{e2.20'} and \eqref{e2.20''} are also satisfied by $\overline{v}(b),\overline{v}'(b),\overline{v}^{\prime \prime }(b)$. Analogously, we form $\alpha _{24}\mathcal{R}_{1}u-\alpha _{14}\mathcal{R} _{2}u=0$, equivalent to \begin{equation} c_{14}u(a)+c_{24}u'(a)+c_{34}u''(a)+c_{54}u^{\prime }(b)+c_{64}u''(b)=0, \label{e2.21} \end{equation} and $\alpha _{25}\mathcal{R}_{1}u-\alpha _{15}\mathcal{R}_{2}u=0$: \begin{equation} c_{15}u(a)+c_{25}u'(a)+c_{35}u^{\prime \prime }(a)+c_{45}u(b)+c_{65}u''(b)=0, \label{e2.21'} \end{equation} The last equation $\alpha _{26}\mathcal{R}_{1}u-\alpha _{16}\mathcal{R} _{2}u=0$ becomes \begin{equation} c_{16}u(a)+c_{26}u'(a)+c_{36}u^{\prime \prime }(a)+c_{46}u(b)+c_{56}u'(b)=0. \label{e2.21''} \end{equation} Again using \eqref{e2.16} we obtain three simplified equations: \begin{equation} c_{14}u(a)+c_{24}u'(a)+c_{34}u''(a)=0, \label{e2.22} \end{equation} \begin{equation} c_{15}u(a)+c_{25}u'(a)+c_{35}u''(a)=0, \label{e2.22'} \end{equation} \begin{equation} c_{16}u(a)+c_{26}u'(a)+c_{36}u''(a)=0. 
\label{e2.22''} \end{equation} Equations \eqref{e2.22}, \eqref{e2.22'}, \eqref{e2.22''} are also satisfied by $\overline{v}(a),\overline{v}'(a),\overline{v}^{\prime \prime }(a)$. Choosing all equations from \eqref{e2.22}-\eqref{e2.22''} and using \eqref{ePk} for all $k\in \{4,5,6\}$, we have the relations: \begin{equation} c_{1k}u(a)+c_{2k}u'(a)+c_{3k}u''(a)=0 \label{e2.23} \end{equation} \begin{equation*} c_{1k}\overline{v}(a)+c_{2k}\overline{v}'(a)+c_{3k}\overline{v} ''(a)=0 \end{equation*} \begin{equation*} c_{2k}(-l_{3})+c_{3k}(-l_{2})=0, \end{equation*} which we can regard as a system of equations in $c_{1k},c_{2k},c_{3k}$ for all $k\in \{4,5,6\}$. For each fixed $k$, the system above comprises 3 equations with 3 unknowns and the matrix that gives its solution is \begin{equation*} W= \begin{pmatrix} u(a) & u'(a) & u''(a) \\ \overline{v}(a) & \overline{v}'(a) & \overline{v}''(a) \\ 0 & -l_{3} & -l_{2} \end{pmatrix} . \end{equation*} Now, for all $k\in \{4,5,6\}$, this system also has $9$ equations with $9$ unknowns. The matrix that gives the solution to such a homogeneous system is a $9\times 9$ matrix with $3\times 3$ diagonal blocks (each diagonal block equals $W$) on the main diagonal and zero entries, otherwise. According to \eqref{e2.15} at least one of the unknowns $c_{1k},c_{2k},c_{3k}$ must be different from zero, hence the determinant of the above system must vanish: \begin{equation*} (-l_{2}Y(u,v)+l_{3}Z(u,v))^{3}=W^{3}=\left\vert \begin{matrix} u(a) & u'(a) & u''(a) \\ \overline{v}(a) & \overline{v}'(a) & \overline{v}''(a) \\ 0 & -l_{3} & -l_{2} \end{matrix} \right\vert ^{3}=0. \end{equation*} Analogously, at the right-hand boundary (using a similar argument as with the system above) we obtain: \begin{equation*} (l_{1}X(u,v)+l_{4}T(u,v))^{3}=\left\vert \begin{matrix} u(b) & u'(b) & u''(b) \\ \overline{v}(b) & \overline{v}'(b) & \overline{v}''(b) \\ 0 & -l_{4} & l_{1} \end{matrix} \right\vert ^{3}=0. 
\end{equation*} It follows from the above arguments and \eqref{e2.6} that $(Au,v)_{\mathcal{H }}=(u,Av)_{\mathcal{H}}$. To prove the converse, we observe from \eqref{e2.6} that $A$ is symmetric if and only if $l_{1}X(u,v)-l_{2}Y(u,v)+l_{3}Z(u,v)+l_{4}T(u,v)=0$, or equivalently, \begin{equation} \left\vert \begin{matrix} \quad l_{1} & -l_{4} \\ T(u,v) & X(u,v) \end{matrix} \right\vert =\left\vert \begin{matrix} \quad l_{2} & \quad l_{3} \\ Z(u,v) & Y(u,v) \end{matrix} \right\vert . \label{e2.24} \end{equation} Now, we shall calculate $T(u,v)$ and $Z(u,v)$, substituting in for $u''(a),\overline{v}''(a)$ from \eqref{e2.22}-\eqref{e2.22''} and $u''(b),\overline{v}''(b)$ from \eqref{e2.20}-\eqref{e2.20''} as follows: \begin{equation*} T(u,v)=\left\vert \begin{matrix} u(b) & \overline{v}(b) \\ -\frac{c_{j4}}{c_{j6}}u(b)-\frac{c_{j5}}{c_{j6}}u'(b) & -\frac{ c_{j4}}{c_{j6}}\overline{v}(b)-\frac{c_{j5}}{c_{j6}}\overline{v}'(b) \end{matrix} \right\vert =-\frac{c_{j5}}{c_{j6}}X(u,v). \end{equation*} Analogously, \begin{equation*} Z(u,v)=-\frac{c_{2k}}{c_{3k}}Y(u,v). \end{equation*} Substituting for the functionals $T(u,v)$ and $Z(u,v)$ into \eqref{e2.24}, we obtain \begin{equation} X(u,v)(l_{1}-\frac{c_{j5}}{c_{j6}}l_{4})=Y(u,v)(l_{2}+\frac{c_{2k}}{c_{3k}} l_{3}), \label{e2.25} \end{equation} that holds for all $u,v\in \mathcal{D}_{2}(A)$. This implies that both $l_{1}-\frac{c_{j5}}{c_{j6}}l_{4}$ and $l_{2}+\frac{c_{2k}}{c_{3k}}l_{3}$ must be zero at those $k$ and $j$ with $c_{j6},c_{3k}\neq 0$. This proves the theorem. \end{proof} In the next theorem, we look at the operator $A$ equipped with separated boundary conditions (or so-called general Wentzell boundary conditions) and apply Theorem \ref{thm4}. This problem has been studied by many authors, in particular the authors in the paper \cite{b2} have discovered that $A$ becomes symmetric only when certain weights are used in the definition \eqref{e2.2}. 
\begin{theorem} \label{thm5} Assume $l_1=p(b)+p'(b)\delta_2$ and $l_2=p(a)-p'(a)\delta_1$ are nonzero. Let $A$ be the operator defined via \eqref{e1.1}, equipped with separated general Wentzell boundary conditions (GWBC) of the form \begin{gather*} \alpha_{11}u(a)+\alpha_{12}u'(a) +\alpha_{13}u''(a)=0, \\ \alpha_{24}u(b)+\alpha_{25}u'(b) +\alpha_{26}u''(b)=0. \end{gather*} Also assume $\alpha_{12}<0,\alpha_{13},\alpha_{25},\alpha_{26}>0$ and $\alpha_{11},\alpha_{24}\in \mathbb{R}$. Then $A$ is symmetric if the weights are chosen to be \begin{equation} \delta_2=\frac{\alpha_{26}}{\alpha_{25}}>0,\quad \delta_1=-\frac{\alpha_{13}}{\alpha_{12}}>0. \label{e2.26} \end{equation} The converse holds if, in addition, $\alpha_{11},\alpha_{24}\neq 0$. \end{theorem} \begin{proof} Note that in this case $\mathbf{\alpha }_{1}=(\alpha _{11},\alpha _{12},\alpha _{13},0,0,0),\mathbf{\alpha }_{2}=(0,0,0,\alpha _{24},\alpha _{25},\alpha _{26})$, so we are in the case of separated boundary conditions. We want to apply Theorem \ref{thm4}. Our assumption on $\mathbf{\alpha }_{1}$ and $\mathbf{\alpha }_{2}$ implies that \eqref{e2.15} is satisfied. It is not hard to see that \eqref{e2.16} holds for $\mathbf{\alpha }_{1},\mathbf{\alpha }_{2}$. Moreover, we assume that \eqref{ePk} and \eqref{ePj'} hold for all $k\in \{4,5,6\}$ and $j\in \{1,2,3\}$, i.e. \begin{equation} \begin{gathered} c_{2k}l_{3}+c_{3k}l_2=0 \\ -c_{j5}l_{4}+c_{j6}l_1=0, \end{gathered} \label{e2.27} \end{equation} or explicitly \begin{gather*} \alpha _{12}\alpha _{2k}\delta _{1}+\alpha _{13}\alpha _{2k}=0 \\ -\alpha _{1j}\alpha _{25}\delta _{2}+\alpha _{1j}\alpha _{26}=0. \end{gather*} We note that when $\alpha _{12}<0,\alpha _{13},\alpha _{25},\alpha _{26}>0$ and $\alpha _{11},\alpha _{24}\in \mathbb{R}$, in order for $A$ to be symmetric with respect to $\mathcal{H}$, one has to choose the weights $\delta _{1},\delta _{2}$ as in \eqref{e2.26}. 
Notice that from the assumptions on the coefficients $\mathbf{\alpha }_{1}\mathbf{,\alpha }_{2}$, we have $c_{35},c_{36},c_{26},c_{36}\neq 0$. On the other hand, $c_{34},c_{16}\neq 0$ if and only if $\alpha _{11},\alpha _{24}\neq 0$. Thus, it follows from Theorem 4 that \eqref{e2.26} is also necessary for the symmetry of $A$ to hold. \end{proof} In Theorems 3 and 4, we assumed that $l_{1}$ and $l_{2}$ are nonzero. Here, we give another theorem that treats the case when $l_{1}$ and $l_{2}$ are both zero. This follows as a consequence from the proof of Theorem 4. \begin{theorem} \label{thm6} Assume $\delta_1=\frac{p(a)}{p'(a)}>0$ and $\delta_2=-\frac{p(b)}{p'(b)}>0$. Let $A$ be the operator defined on $\mathcal{D}_2(A)$ via \eqref{e1.1} and $\mathbf{\alpha }_1$, $\mathbf{\alpha }_2$ are linearly independent such that \eqref{e2.15}-\eqref{e2.16} hold. Let us define two sets of equations: \begin{gather} c_{2k}=0,\quad \text{for }k\in \{4,5,6\}. \label{eQk} \\ c_{j5}=0,\quad \text{for }j\in \{1,2,3\}. \label{eQj'} \end{gather} If both \eqref{eQk} and \eqref{eQj'} hold for all $k\in \{4,5,6\}$ and $j\in \{1,2,3\}$, then $A$ is symmetric on $\mathcal{H}$. Conversely, if $A$ is symmetric then properties \eqref{eQk} and \eqref{eQj'} hold (for those $k$ and $j$, where both $c_{3k}$ and $c_{j6}$ are non-zero). \end{theorem} \begin{proof} First, we note that when $\delta _{1}=\frac{p(a)}{p'(a)}>0$ and $\delta _{2}=-\frac{p(b)}{p'(b)}>0$, this is equivalent to the case when $l_{1}=l_{2}=0$. In this case, \begin{equation} (Au,v)_{\mathcal{H}}-(u,Av)_{\mathcal{H}}=l_{3}Z(u,v)+l_{4}T(u,v). \label{e2.28} \end{equation} We do the same calculations as in Theorem 4. 
Again, using all equations from \eqref{e2.21}-\eqref{e2.21''} and using \eqref{eQk} for all $k\in \{4,5,6\}$, we have the relations: \begin{equation} \begin{gathered} c_{1k}u(a)+c_{3k}u''(a)=0 \\ c_{1k}\overline{v}(a)+c_{3k}\overline{v}''(a)=0, \end{gathered} \label{e2.29} \end{equation} which we can regard as a system of equations in $c_{1k},c_{3k}$ for all $k\in \{4,5,6\}$. According to \eqref{e2.15} at least one of the unknowns $c_{1k},c_{3k}$ must be different from zero, hence the determinant of the above system must vanish: \begin{equation*} (Z(u,v))^{3}=\left\vert \begin{matrix} u(a) & u''(a) \\ \overline{v}(a) & \overline{v}''(a) \end{matrix} \right\vert ^{3}=0. \end{equation*} Analogously, at the right-hand boundary (using a similar argument as in \eqref{e2.29}) we obtain: \begin{equation*} (T(u,v))^{3}=\left\vert \begin{matrix} u(b) & u''(b) \\ \overline{v}(b) & \overline{v}''(b) \end{matrix} \right\vert ^{3}=0. \end{equation*} Apply this in \eqref{e2.28} so that the first part of the proof is complete. To prove the converse, recall from \eqref{e2.25} that \begin{equation} X(u,v)(l_{1}-\frac{c_{j5}}{c_{j6}}l_{4})=Y(u,v)(l_{2}+\frac{c_{2k}}{c_{3k}} l_{3}). \label{e2.30} \end{equation} Since $l_{1}=l_{2}=0$, \eqref{e2.30} becomes \begin{equation*} X(u,v)(\frac{c_{j5}}{c_{j6}}l_{4})=Y(u,v)(\frac{c_{2k}}{c_{3k}}l_{3}), \end{equation*} that holds for all $u,v\in D_{2}(A)$. But this means that $c_{j5}$ and $c_{2k}$ must be zero (at those $k$ and $j$ where $c_{3k},c_{j6}\neq 0$), hence the conclusion. \end{proof} \begin{example} \label{ex3} \rm Let $p(x)=-x^{3}+x+1,k\equiv 1,q\equiv 0$ on the interval $[0,1]$, and $\mathbf{\alpha }_1=(\alpha_{11},0,\alpha_{13},0,0,0)$; $\mathbf{\alpha }_2=(0,0,0,\alpha_{24},0,\alpha_{26})$ in $\mathbb{R}^{6}\backslash \{0\}$. Hence the operator $Au=-((-x^{3}+x+1)u')'$ is equipped with GWBC of the form \begin{gather*} \alpha_{11}u(a)+\alpha_{13}u''(a)=0, \\ \alpha_{24}u(b)+\alpha_{26}u''(b)=0. 
\end{gather*} Our assumptions on $\mathbf{\alpha }_1$ and $\mathbf{\alpha }_2$ imply that \eqref{e2.15}, \eqref{e2.16} are satisfied. Moreover, it is easy to check that \eqref{eQk} and \eqref{eQj'} hold for all $k\in\{4,5,6\}$ and $j\in \{1,2,3\}$ for our choice of $\mathbf{\alpha }_1$; $\mathbf{\alpha }_2$. Therefore, one can have an operator $A$, equipped with pure Wentzell boundary conditions (that is, $\alpha_{11}=\alpha_{24}=0$, but $\alpha_{13},\alpha_{26}\neq 0$) that is symmetric on $\mathcal{H}$, with respect to the weights \begin{equation*} \delta_1=\frac{p(0)}{p'(0)} =1,\quad \delta_2=-\frac{p(1)}{p'(1)}= \frac{1}{2}. \end{equation*} \end{example} We close this section with a result related to a partial converse of Theorem 3. \begin{theorem} \label{thm7} Assume that $l_1,l_2$ are nonzero. Let $\delta_1,\delta_2>0$ and $A $ be the operator defined on $\mathcal{D}_2(A)$ via \eqref{e1.1} and $\mathbf{\alpha }_1$, $\mathbf{\alpha }_2$ are linearly independent such that \eqref{e2.9}-\eqref{e2.10} hold. In addition, we assume \begin{equation} c_{24}=c_{34}=c_{15}=c_{16}=0. \label{e2.31} \end{equation} We have the following two cases: \begin{itemize} \item[(i)] If $l_{3}c_{46}=-l_{4}c_{13}$, then the symmetry of $A$ implies that both (C4) and (C5) hold. \item[(ii)] If $l_{4}c_{j1}c_{3k}=l_{3}c_{4k}c_{j6}$ for some $j\in\{2,3\}$ and $k\in \{5,6\}$, where $c_{j6},c_{3k}\neq 0$, then the symmetry of $A$ implies that both \begin{equation*} l_1c_{j6}=l_{4}c_{j1} \quad\text{and}\quad l_2c_{3k}=-l_{3}c_{2k} \end{equation*} hold. \end{itemize} \end{theorem} \begin{proof} Note that \eqref{e2.31} implies $c_{23}=c_{56}=0$, as well. Also, $l_{3}c_{46}=-l_{4}c_{13}$ is the condition (C2). First, we prove $(i)$. Recall that the symmetry of $A$ is equivalent to \eqref{e2.34}. 
Since by \eqref{e2.9}-\eqref{e2.10}, \eqref{e2.16}, we have $c_{13},c_{46}\neq 0$, substituting for $u''(a)$ and $\overline{v}''(a)$ (respectively, $u''(b)$ and $\overline{v}''(b)$) from \eqref{e2.19}, (respectively, from \eqref{e2.21}) into $T(u,v)$, respectively $Z(u,v)$, we obtain \begin{equation} T(u,v)=-\frac{c_{54}}{c_{64}}X(u,v)-\frac{c_{14}}{c_{64}}\left\vert \begin{matrix} u(b) & \overline{v}(b) \\ u(a) & \overline{v}(a) \end{matrix} \right\vert \label{e2.32} \end{equation} and \begin{equation} Z(u,v)=-\frac{c_{12}}{c_{13}}Y(u,v)-\frac{c_{14}}{c_{13}}\left\vert \begin{matrix} u(a) & \overline{v}(a) \\ u(b) & \overline{v}(b) \end{matrix} \right\vert . \label{e2.33} \end{equation} Substituting \eqref{e2.32}-\eqref{e2.33} in \eqref{e2.24}, we obtain \begin{equation} \begin{aligned} &X(u,v)\big(l_1-\frac{c_{54}}{c_{64}}l_{4}\big)\\ &=Y(u,v)\big(l_2+\frac{c_{12}}{c_{13}}l_{3}\big) -\big(l_{4}\frac{c_{14}}{c_{64}}-l_{3}\frac{c_{14}}{c_{13}}\big)\left| \begin{matrix} u(b)& \overline{v}(b)\\ u(a)& \overline{v}(a) \end{matrix}\right| . \end{aligned} \label{e2.34} \end{equation} Using (C2), we get $l_{4}\frac{c_{14}}{c_{64}}-l_{3}\frac{c_{14}}{c_{13}} \equiv 0$ and \eqref{e2.34} holds for all $u,v\in \mathcal{D}_{2}(A)$. But this implies that both $l_{1}-\frac{c_{54}}{c_{64}}l_{4}$ and $l_{2}+\frac{ c_{12}}{c_{13}}l_{3}$ must be zero, hence the conclusion of the theorem. To prove $(ii)$, we use the equations (see \eqref{e2.19'}-\eqref{e2.19''}) \begin{equation} u''(b)=-\frac{c_{j1}}{c_{j6}}u(a)-\frac{c_{j4}}{c_{j6}}u(b)- \frac{c_{j5}}{c_{j6}}u'(b), \label{e2.35} \end{equation} (for $j=\overline{2,3}$) and \begin{equation} u''(a)=-\frac{c_{1k}}{c_{3k}}u(a)-\frac{c_{4k}}{c_{3k}}u(b)- \frac{c_{2k}}{c_{3k}}u'(a), \label{e2.36} \end{equation} for $k=\overline{5,6}$ (see equations \eqref{e2.21'}-\eqref{e2.21''}). The equations \eqref{e2.35}-\eqref{e2.36} hold for $\overline{v}$ as well. 
Using \eqref{e2.35} and \eqref{e2.36}, we can simplify $T(u,v)$ and $Z(u,v)$ as follows: \begin{equation} T(u,v)=-\frac{c_{j1}}{c_{j6}}\left\vert \begin{matrix} u(b) & \overline{v}(b) \\ u(a) & \overline{v}(a) \end{matrix} \right\vert -\frac{c_{j5}}{c_{j6}}X(u,v), \label{e2.37} \end{equation} and, similarly, \begin{equation} Z(u,v)=-\frac{c_{4k}}{c_{3k}}\left\vert \begin{matrix} u(a) & \overline{v}(a) \\ u(b) & \overline{v}(b) \end{matrix} \right\vert -\frac{c_{2k}}{c_{3k}}Y(u,v). \label{e2.38} \end{equation} Substitute for $Z$ and $T$, from \eqref{e2.37}-\eqref{e2.38} into \eqref{e2.24} to obtain \begin{equation} \begin{aligned} &\big(l_1-l_{4}\frac{c_{j1}}{c_{j6}}\big)X(u,v)\\ &=\big( l_2+l_{3}\frac{c_{2k}}{c_{3k}}\big)Y(u,v) +\big(l_{4} \frac{c_{j1}}{c_{j6}}-l_{3}\frac{c_{4k}}{c_{3k}}\big) \left|\begin{matrix} u(b)& \overline{v}(b)\\ u(a)& \overline{v}(a) \end{matrix} \right| , \end{aligned} \label{e2.39} \end{equation} that holds for all $u,\overline{v}\in \mathcal{D}_{2}(A)$. But $l_{4}\frac{ c_{j1}}{c_{j6}}-l_{3}\frac{c_{4k}}{c_{3k}}=0$, by assumption, therefore, we get the assertion of the theorem. \end{proof} \begin{remark} \label{rmk3} \rm Note that in Theorem \ref{thm7}, if one assumes that both the conditions (C4) and (C5) hold, then the validity of (C2) follows automatically from that of \eqref{e2.34}. Also, in the second case $(ii)$, if $l_1c_{j6}=l_{4}c_{j1}$ and $l_2c_{3k}=-l_{3}c_{2k}$ hold (for some $k,j$), then $l_{4}c_{j1}c_{3k}=l_{3}c_{4k}c_{j6}$ follows automatically from that of \eqref{e2.39}. \end{remark} \section{Quasi-accretive and semi-bounded operators} Hellwig \cite{h1} proved that the operator $A$ given by \eqref{e1.1} with domain $D_1(A)$ is not only symmetric under certain necessary and sufficient conditions (see Theorem \ref{thm2}), but also is bounded from below, that is, there exists a $\gamma \in \mathbb{R}$ such that $(Au,u)_{H}\geq \gamma \|u\|_{H}^2$. 
This has as a consequence the fact that all the eigenvalues $\lambda $ of the Sturm-Liouville eigenvalue problem are real and satisfy $\lambda \geq \gamma $. Our goal in this section is to look for those operators $A$ (on $D_2(A)$) that are semi-bounded and, more generally, quasi-accretive, that is, $(A-\gamma I)$ is accretive for some $\gamma \in \mathbb{R}$. That is, $\mathop{\rm Re}((A-\gamma I)u,u)_{\mathcal{H}}\geq 0$ for all $u\in D_2(A)$. Moreover, following the proof of \cite[Theorems 2 and 3]{h1}, one can prove the range condition, that is, $R(A-\mu I)=C[a,b]$, where $\mu \in \rho (\overline{A})$, the resolvent set of $A$. We consider the case of the Hilbert space given by \eqref{e2.1} and the inner product \eqref{e2.2}, when $\delta_1,\delta_2>0$. We start by computing the inner product $(Au,u)_{\mathcal{H}}$ as follows: \begin{equation} \begin{aligned} (Au,u)_{\mathcal{H}} &=\int_{a}^{b}Au\overline{u}k(x)dx+Au(a)\overline{u}(a)k(a) \delta_1+Au(b)\overline{u}(b)k(b) \delta_2 \\ &= p(a)u'(a)\overline{u}(a) -p(b)u'(b)\overline{u}(b)\\ &\quad +\int_{a}^{b}p(x)\vert u'( x)\vert ^2dx+\int_{a}^{b}q(x)\vert u(x)\vert ^2dx \\ &\quad +\delta_1\overline{u}(a)(-p'( a)u'(a)-p(a)u''(a)+q(a)u(a)) \\ &\quad +\delta_2\overline{u}(b)(-p'( b)u'(b)-p(b)u''(b)+q(b)u(b)). \end{aligned} \label{e3.1} \end{equation} Rearranging the factors, we obtain a much simpler expression, \begin{align*} (Au,u)_{\mathcal{H}} &=[-l_{4}u''(b)-l_1u'(b)+q(b)\delta _2u(b)]\overline{u} (b) \\ &\quad +[-l_{3}u''(a)+l_2u'(a)+q(a)\delta_1u(a)] \overline{u}(a) \\ &\quad +\int_{a}^{b}p(x)\vert u'(x)\vert ^2dx+\int_{a}^{b}q( x)\vert u(x)\vert ^2dx, \end{align*} where $l_1,l_2,l_{3},l_{4}$ depend only on the data of the problem and they are given by \eqref{e2.7}. Let us choose \begin{equation} \sigma =\underset{x\in [a,b]}{\min }\frac{q(x)}{ k(x)}. 
\label{e3.2} \end{equation} Then it is not hard to see that we can transform \eqref{e3.1} into an inequality \begin{equation} \begin{aligned} \mathop{\rm Re}(Au,u)_{\mathcal{H}} &\geq \mathop{\rm Re}([-l_{4}u''(b)-l_1u'( b)]\overline{u}(b))\\ &\quad +\mathop{\rm Re}([-l_{3}u''(a) +l_2u'(a)]\overline{u}(a) ) +\sigma \| u\|_{\mathcal{H}}^2. \end{aligned} \label{e3.3} \end{equation} \begin{theorem} \label{thm8} Let $A$ be the operator defined on $\mathcal{D}_2(A)$ via \eqref{e1.1} and $\mathbf{\alpha }_1$,$\mathbf{\alpha }_2$ are linearly independent such that \eqref{e2.15}-\eqref{e2.16} hold and $c_{3k},c_{j6}$ are nonzero. If \eqref{ePk} and \eqref{ePj'} (see Theorem \ref{thm4}) hold for some $k\in \{5,6\}$ and $j\in \{2,3\}$, then the operator $A$ is quasi-accretive. \end{theorem} \begin{proof} First, we discuss the case when $l_{1},l_{2}$ are nonzero. We perform the same calculation as in that of Theorem \ref{thm4}. We obtain the following equations: \begin{gather} c_{j4}u(b)+c_{j5}u'(b)+c_{j6}u''(b)=0, \label{e3.4} \\ c_{1k}u(a)+c_{2k}u'(a)+c_{3k}u''(a)=0, \label{e3.5} \end{gather} for $k\in \{4,5,6\}$ and $j\in \{1,2,3\}$. By assumption, $c_{3k},c_{j6}$ ($j\in \{2,3\}$) are nonzero and the boundary conditions involve second-order terms and $\delta _{i}>0$ $(i=1,2)$. We divide \eqref{e3.4} by $c_{j6}$ and \eqref{e3.5} by $c_{3k}$ and obtain equivalent equations: \begin{gather} \widehat{c_{j4}}u(b)+\widehat{c_{j5}}u'(b)=-u''(b), \label{e3.6} \\ \widehat{c_{1k}}u(a)+\widehat{c_{2k}}u'(a)=-u''(a), \label{e3.7} \end{gather} where $\widehat{c_{jk}}=\frac{c_{jk}}{c_{j6}}$ (for $k\in \{4,5,6\}$ and fixed $j$) and $\widehat{c_{jk}}=\frac{c_{jk}}{c_{3k}}$ (for $j\in \{1,2,3\}$ and fixed $k$). 
We substitute $u''(a)$, $u''(b)$ in \eqref{e3.3}, using the equations \eqref{e3.6}-\eqref{e3.7} to obtain: \begin{equation} \begin{aligned} \mathop{\rm Re}(Au,u)_{\mathcal{H}} &\geq p(b)\widehat{ c_{j4}}\left\vert u(b)\right\vert ^2\delta_2+\mathop{\rm Re} (l_{4}\widehat{c_{j5}}-l_1)u'(b) \overline{u}(b) \\ &\quad +p(a)\widehat{c_{1k}}\left\vert u(a)\right\vert ^2\delta_1+\mathop{\rm Re}(l_2+l_{3}\widehat{c_{2k}}) u'(a)\overline{u}(a)+\sigma \left\Vert u\right\Vert_{\mathcal{H}}^2. \end{aligned} \label{e3.8} \end{equation} Let us choose \begin{equation} \gamma_1=\min \{\sigma ,p(b)\widehat{c_{j4}},p( a)\widehat{c_{1k}}\}\in \mathbb{R}. \label{e3.9} \end{equation} Then, by \eqref{e3.8} we get the inequality \begin{equation} \mathop{\rm Re}(Au,u)_{\mathcal{H}}\geq \mathop{\rm Re}(l_{4}\widehat{ c_{j5} }-l_1)u'(b)\overline{u}( b)+\mathop{\rm Re}(l_2+l_{3}\widehat{c_{2k}} )u'(a)\overline{u}(a)+\gamma_1\left\Vert u\right\Vert_{\mathcal{H} }^2. \label{e3.10} \end{equation} But, \eqref{ePk} and \eqref{ePj'} (for some $k,j$) imply \begin{equation*} l_2+l_{3}\widehat{c_{2k}}=0,\quad l_{4}\widehat{c_{j5}}-l_1=0. \end{equation*} This shows that $A-\gamma_1I$ is accretive for some $\gamma_1\in \mathbb{R}$ that is given by \eqref{e3.9}. The case when $l_1,l_2$ are both zero is done similarly, observing that the condition \eqref{ePk} (respectively, \eqref{ePj'}) is equivalent to \eqref{eQk} (respectively, \eqref{eQj'}), that is, $\widehat{c_{2k}}\equiv 0$ (respectively, $\widehat{c_{j5}}\equiv 0 $). We use this in \eqref{e3.10} again to get the assertion. \end{proof} Having this result, we notice that Theorem \ref{thm4} provides us, actually, with an improved result, that is, all operators given by \eqref{e1.1}, equipped with boundary conditions for which the vectors $\mathbf{\alpha }_{1}$, $\mathbf{\alpha }_{2}$ satisfy \eqref{e2.15}-\eqref{e2.16}, are semi-bounded. We state this now. 
\begin{corollary} \label{coro9} Suppose that the assumptions of Theorem \ref{thm4} hold and, in addition, the conditions \eqref{ePk} and \eqref{ePj'} hold and $c_{3k},c_{j6}$ are nonzero. Then $A$ is bounded from below. \end{corollary} The proof of the above corollary follows easily from Theorems \ref{thm4} and \ref{thm8}. \begin{example} \label{ex4} \rm We consider the operator $A$ as in Theorem \ref{thm5}. It can be checked directly that $A$ is semi-bounded and, in fact, $-\overline{A}$ generates a $C_0$ selfadjoint semigroup on $\mathcal{H}$ (see \cite{b2}). \end{example} Now, let us denote the functional \begin{equation} E(u)=\big[-l_{4}u''(b) -l_1u'(b)\big]\overline{u}(b)+ \big[-l_{3}u''(a)+l_2u'( a)\big]\overline{u}(a). \label{e3.11} \end{equation} \begin{theorem} \label{thm10} Assume $\delta_1,\delta_2>0$. Let $A$ be the operator defined on $D_2(A)$ via \eqref{e1.1} and $\mathbf{\alpha }_1$, $\mathbf{\alpha }_2$ be linearly independent such that \begin{equation} \mathop{\rm rank} \begin{pmatrix} \alpha_{11} & \alpha_{12} & \alpha_{13} \\ \alpha_{21} & \alpha_{22} & \alpha_{23} \end{pmatrix} =\mathop{\rm rank} \begin{pmatrix} \alpha_{14} & \alpha_{15} & \alpha_{16} \\ \alpha_{24} & \alpha_{25} & \alpha_{26} \end{pmatrix} =2 \label{e3.12} \end{equation} and \begin{equation} c_{24}=c_{34}=c_{15}=c_{16}=0. \label{e3.13} \end{equation} If \begin{itemize} \item[(C4)] $l_{4}c_{45}=l_1c_{46}$, \item[(C5)] $l_{3}c_{12}=-l_2c_{13}$, \item[(C6)] $l_{4}c_{13}=l_{3}c_{46}$. \end{itemize} %\label{e3.14}
hold, then $A-\sigma I$ is accretive with $\sigma $ given by \eqref{e3.2}. \end{theorem} \begin{proof} Note that the first two conditions (C4), (C5) are the same as (C1), (C2). We recall the equation \eqref{e2.19} (respectively, \eqref{e2.21}) is \begin{equation} c_{12}u'(a)+c_{13}u''(a)+c_{14}u(b)+c_{15}u^{\prime }(b)+c_{16}u''(b)=0, \label{e3.14} \end{equation} \begin{equation} c_{14}u(a)+c_{24}u'(a)+c_{34}u''(a)+c_{54}u^{\prime }(b)+c_{64}u''(b)=0, \label{e3.15} \end{equation} respectively. 
Recall that by \eqref{e3.3} and \eqref{e3.11}, we have the inequality \begin{equation*} \mathop{\rm Re}(Au,u)_{\mathcal{H}}\geq \mathop{\rm Re}E(u)+\sigma \Vert u\Vert _{\mathcal{H}}^{2}. \end{equation*} Note that \eqref{e3.13} implies that $c_{23}=c_{56}=0$, as well. By \eqref{e3.12} and \eqref{e3.13}, it follows that all $c_{13},c_{12},c_{45},c_{46}$ are nonzero. We divide \eqref{e3.14} (respectively, \eqref{e3.15}) by $c_{13}$ (respectively, $c_{64}$). We use our assumption \eqref{e3.12} and rewriting the equations \eqref{e3.14}- \eqref{e3.15}, we obtain \begin{equation} -u''(a)=\frac{c_{12}}{c_{13}}u'(a)+\frac{c_{14}}{ c_{13}}u(b), \label{e3.16} \end{equation} \begin{equation} -u''(b)=\frac{c_{14}}{c_{64}}u(a)+\frac{c_{54}}{c_{64}} u'(b). \label{e3.17} \end{equation} We substitute \eqref{e3.16} ,\eqref{e3.17} in \eqref{e3.11} to get \begin{align*} E(u)& =[l_{4}(\frac{c_{14}}{c_{64}}u(a)+\frac{c_{54}}{c_{64}}u^{\prime }(b))-l_{1}u'(b)]\overline{u}(b) \\ & \quad +[l_{3}(\frac{c_{12}}{c_{13}}u'(a)+\frac{c_{14}}{c_{13}} u(b))+l_{2}u'(a)]\overline{u}(a) \\ & =l_{4}\frac{c_{14}}{c_{64}}u(a)\overline{u}(b)+(l_{4}\frac{c_{54}}{c_{64}} -l_{1})u'(b)\overline{u}(b) \\ & \quad +l_{3}\frac{c_{14}}{c_{13}}u(b)\overline{u}(a)+(l_{3}\frac{c_{12}}{ c_{13}}+l_{2})u'(a)\overline{u}(a). \end{align*} We observe that $l_{4}\frac{c_{54}}{c_{64}}-l_{1}=0$ and $l_{3}\frac{c_{12}}{ c_{13}}+l_{2}=0$ by assumption so that \begin{equation*} \mathop{\rm Re}E(u)=\mathop{\rm Re}\big(l_{4}\frac{c_{14}}{c_{64}}u(a) \overline{u}(b)+l_{3}\frac{c_{14}}{c_{13}}u(b)\overline{u}(a)\big). \end{equation*} But $l_{4}c_{13}=l_{3}c_{46}$ is equivalent to $l_{4}\frac{c_{14}}{c_{64}} =-l_{3}\frac{c_{14}}{c_{13}}$. Therefore, \begin{equation*} \mathop{\rm Re}E(u)=l_{4}\frac{c_{14}}{c_{64}}\mathop{\rm Re}\big(u(a) \overline{u}(b)-u(b)\overline{u}(a)\big)\equiv 0. \end{equation*} The theorem is proved. 
\end{proof} Now, we close this section with an example, that shows that there are operators (defined on $D_2(A)$), equipped with nonseparated boundary conditions, that are accretive, but not symmetric. \begin{example} \label{ex5} \rm Let the operator $Au=-u''$ be equipped with boundary conditions \begin{gather*} \alpha_{11}u(a)+\alpha_{15}u'(b) +\alpha_{16}u''(b)=0, \\ \alpha_{22}u'(a)+\alpha_{23}u''(a)+\alpha_{24}u(b)=0. \end{gather*} Note that $\mathbf{\alpha }_1=(\alpha_{11},0,0,0,\alpha _{15},\alpha_{16})$, $\mathbf{\alpha }_2=(0,\alpha_{22},\alpha_{23}, \alpha_{24},0,0)\in \mathbb{R}^{6}\setminus \{0\}$. We assume $\alpha_{22}<0$, $\alpha_{15},\alpha _{16},\alpha_{23}>0$, $\alpha_{11},\alpha_{24}\neq 0$ and \begin{equation} \alpha_{11}\alpha_{22}=\alpha_{15}\alpha_{24}. \label{e3.18} \end{equation} Then conditions \eqref{e3.12}, (C4), (C5), (C6) are satisfied for \begin{equation} \delta_1=-\frac{\alpha_{23}}{\alpha_{22}}>0,\quad \delta_2=\frac{\alpha_{16}}{\alpha_{15}}>0. \label{e3.19} \end{equation} Therefore, $A$ is accretive, so that $-\overline{A}$ generates a $( C_{0})$ contraction semigroup on $\mathcal{H}$. On the other hand, (C4) and (C5) are satisfied for this choice of $\delta_1,\delta_2$ in \eqref{e3.19}, so it follows from Remark \ref{rmk3} that if $A$ were symmetric, then (C2) would have to hold as well, that is, $l_{3}c_{46}=-l_{4}c_{13}$, that is equivalent to \begin{equation} -\alpha_{15}\alpha_{24}=\alpha_{11}\alpha_{22}. \label{e3.20} \end{equation} Adding \eqref{e3.20} to \eqref{e3.18}, we get a contradiction because of the choice of $\mathbf{\alpha }_1=(\alpha_{11},0,0,0,\alpha_{15},\alpha_{16})$, $\mathbf{\alpha }_2=(0,\alpha_{22},\alpha_{23},\alpha_{24},0,0)$ in $\mathbb{R}^{6}\setminus \{0\}$. In conclusion, we have an example of an operator equipped with nonseparated boundary conditions that is accretive, but not symmetric. 
\end{example} \subsection*{Acknowledgments} The author would like to express his gratitude to Professors Jerome and Gisele Goldstein, for their attention to this paper and the valuable suggestions given. \begin{thebibliography}{99} \bibitem{b1} Binding, P. A., Browne, P. J. and Watson, B. A., \emph{Spectral problems for non-linear Sturm-Liouville equations with eigenparameter dependent boundary conditions}, Canad. J. Math. 52 (2000), 248--264. \bibitem{b2} Binding, P. A., Browne, P. J., \emph{Sturm-Liouville problems with non-separated eigenvalue dependent boundary conditions}, Proc. Royal Soc. Edin. 130A (2000), 239--247. \bibitem{b3} Binding, P. A., Browne, P. J. and Seddighi, K., \emph{ Sturm-Liouville problems with eigenparameter dependent boundary conditions}, Proc. Edin. Math. Soc. 37 (1993), 52--72. \bibitem{d1} Dijksma, A., \emph{Eigenfunction expansions for a class of $J$ -self-adjoint ordinary differential operators with boundary conditions containing the eigenvalue parameter}, Proc. Royal Soc. Edin. 86A (1980), 1--27. \bibitem{e1} Etkin, A., \emph{On an abstract boundary-value problem with the eigenvalue parameter in the boundary conditions}, Fields Institute Communications, vol. 25 (2000). \bibitem{f1} Favini, A., Goldstein, G. R., Goldstein, J. A. and Romanelli, S., \emph{The heat equation with generalized Wentzell boundary condition}, J. Evol. Equations 2 (2002), 1--19. \bibitem{g1} Goldstein, G. R., \emph{Derivation and physical interpretation of general boundary conditions}, preprint. \bibitem{h1} Hellwig, G., \emph{Differential Operators of Mathematical Physics}, Addison-Wesley (1964). \bibitem{r1} Russakovskii, E. M., \emph{The matrix Sturm-Liouville problem with spectral parameter in the boundary conditions}. Algebraic and operator aspects, Trans. Moscow Math. Soc., 159--184, (1996). \bibitem{r2} Russakovskii, E. 
M., \emph{Operator treatment of boundary problems with spectral parameter entering via polynomials in the boundary condition}, Funktsional'nyi Analiz i Ego Prilozheniya, vol. 9 (1975), 91--92. \end{thebibliography} \end{document}