\documentclass[11pt]{article}

% Packages

% Packages langues et encodage
\usepackage[english]{babel}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{csquotes}

% Packages maths
\usepackage{amsfonts}
\usepackage{mathtools}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage[g]{esvect}
\usepackage[mathscr]{eucal}
\usepackage{stmaryrd}
\usepackage{subfig}
\usepackage[all]{xy}

% Packages mise en page
\usepackage{fancyhdr}
\usepackage[top=3cm, left=3cm, right=3cm, bottom=2cm]{geometry}
\usepackage[pdftex,pdfborder={0 0 0}]{hyperref}

% Packages graphiques
\usepackage{pgf,tikz}
\usetikzlibrary{arrows}

% Packages autres
\usepackage{color}
\usepackage{enumerate}
\usepackage{multicol}
\usepackage{pdfpages}
\usepackage{soul}


% Definition des environnements
\theoremstyle{plain}
	\newtheorem*{thm}{Theorem}
	\newtheorem*{cor}{Corollary}
	\newtheorem*{lem}{Lemma}
	\newtheorem*{prop}{Proposition}

\theoremstyle{definition}
	\newtheorem*{dfn}{Definition}
	\newtheorem*{ntn}{Notation}
	\newtheorem*{dfns}{Definitions}
	\newtheorem*{ntns}{Notations}
	\newtheorem*{rem}{Remark}
	\newtheorem*{rems}{Remarks}
	\newtheorem*{ex}{Example}
	\newtheorem*{cex}{Counter-example}
	\newtheorem*{exs}{Examples}
	\newtheorem*{cexs}{Counter-examples}
	\newtheorem{exo}{Exercise}


% Definition des commandes
\newcommand{\B}{\mathbb{B}}
\newcommand{\C}{\mathbb{C}}
\newcommand{\D}{\mathbb{D}}
\newcommand{\F}{\mathbb{F}}
\newcommand{\K}{\mathbb{K}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\Q}{\mathbb{Q}}
\newcommand{\R}{\mathbb{R}}
\newcommand{\T}{\mathbb{T}}
\newcommand{\Z}{\mathbb{Z}}

\renewcommand{\H}{\mathbb{H}}
\renewcommand{\P}{\mathbb{P}}
\renewcommand{\S}{\mathbb{S}}

\newcommand{\acts}{\curvearrowright}
\newcommand{\dx}{\dmesure\!}
\newcommand{\deron}[2]{\frac{\partial #1}{\partial #2}}
\newcommand{\derond}[2]{\frac{\partial^2 #1}{\partial #2 ^2}}
\newcommand{\deronc}[3]{\frac{\partial^2 #1}{\partial #2 \partial #3}}
\newcommand{\grad}{\vv{\gradient}}
\newcommand{\lint}{\llbracket}
\newcommand{\rint}{\rrbracket}
\newcommand{\leqclosed}{\trianglelefteqslant}
\newcommand{\mvert}{\mathrel{}\middle|\mathrel{}}
\newcommand{\norm}[1]{\left\lvert #1 \right\rvert}
\newcommand{\Norm}[1]{\left\lVert #1 \right\rVert}
\newcommand{\prsc}[2]{\left\langle #1\,, #2 \right\rangle}
\newcommand{\rmes}[1]{\norm{\dmesure\! V_{#1}}}

\renewcommand{\bar}{\overline}
\renewcommand{\epsilon}{\varepsilon}
\renewcommand{\geq}{\geqslant}
\renewcommand{\leq}{\leqslant}
\renewcommand{\tilde}{\widetilde}
\renewcommand{\triangleleft}{\vartriangleleft}

%\renewcommand{\FrenchLabelItem}{\textbullet}


% Definition des operateurs
\DeclareMathOperator{\aff}{Aff}
\DeclareMathOperator{\aut}{Aut}
\DeclareMathOperator{\bij}{Bij}
\DeclareMathOperator{\card}{Card}
\DeclareMathOperator{\dmesure}{d}
\DeclareMathOperator{\End}{End}
\DeclareMathOperator{\fix}{Fix}
\DeclareMathOperator{\id}{id}
\DeclareMathOperator{\Id}{Id}
\DeclareMathOperator{\im}{Im}
\DeclareMathOperator{\stab}{Stab}
\DeclareMathOperator{\Sym}{Sym}
\DeclareMathOperator{\tr}{Tr}
\DeclareMathOperator{\vect}{Span}

\setlength{\parindent}{0pt}
\setlength{\parskip}{5pt}
\setlength{\headsep}{10pt}
\setlength{\headheight}{16pt}

\renewcommand{\headrulewidth}{0.5pt}
\renewcommand{\footrulewidth}{0pt}

\begin{document}

\thispagestyle{fancy}
\lhead{Differential Geometry 2017--2018}
\rhead{ENS de Lyon}
\begin{center}
{\Large Geometric meaning of connections (Solution)}
\end{center}
\vspace{-7mm}
\rule{\linewidth}{0.5pt}

\begin{enumerate}
\item Let $x \in M$ and $y \in E_x$, we denote by $\Pi_y$ the projection onto $V_y$ along $H_y$ in $T_yE$. For any $s \in \Gamma(E)$, we define $\forall x \in M$, $\nabla_xs := \Pi_{s(x)}(d_xs)$. We have $\nabla_xs : T_xM \to V_y \simeq E_x$, hence $\nabla s \in \Gamma(T^*M \otimes E)$ and $\nabla : \Gamma(E) \to \Gamma(T^*M \otimes E)$. Let us check that $\nabla$ is a connection.

\paragraph*{Homogeneity.}
Let $\lambda \in \R$ and $s \in \Gamma(E)$. Let $x \in M$, we denote $y=s(x)$, we have $d_xs = \nabla_xs + (d_xs - \nabla_xs)$ with $\nabla_xs$ taking values in $V_y$ and $(d_xs - \nabla_xs)$ taking values in $H_y$. We have 
\[d_x(\lambda s) = d_x(M_\lambda \circ s) = d_yM_\lambda \circ d_xs = d_yM_\lambda(\nabla_xs) + d_yM_\lambda(d_xs - \nabla_xs).\]

We assumed that $d_yM_\lambda(H_y) = H_{\lambda y}$, hence $d_yM_\lambda(d_xs - \nabla_xs)$ takes values in $H_{\lambda y}$. Besides, $p \circ M_\lambda = p$, hence $d_{\lambda y}p \circ d_yM_\lambda = d_yp$, and $d_yM_\lambda(V_y) = d_yM_\lambda(\ker d_yp) \subset \ker d_{\lambda y}p = V_{\lambda y}$. In fact $d_yM_\lambda(V_y) = V_{\lambda y}$ by a dimension argument. Thus $d_yM_\lambda(\nabla_xs)$ takes values in $V_{\lambda y}$ and we have $\nabla_x(\lambda s) = d_yM_\lambda(\nabla_xs)$.

Recall that $V_y \simeq E_x \simeq V_{\lambda y}$ canonically, and under this identification $d_yM_\lambda$ is the multiplication by $\lambda$ in $E_x$. Thus $\nabla_x(\lambda s) = \lambda\nabla_xs$.

\paragraph*{Additivity.}
Now, let $s_1,s_2 \in \Gamma(E)$, let $x \in M$, we denote $y_1=s_1(x)$ and $y_2=s_2(x)$. We have:
\begin{align*}
&d_x(s_1+s_2) = d_x(A\circ(s_1,s_2)) = d_{(y_1,y_2)}A\circ d_x(s_1,s_2)\\
\intertext{and}
&d_x(s_1,s_2) =(d_xs_1,d_xs_2) = (\nabla_xs_1,\nabla_xs_2) + (d_xs_1 - \nabla_xs_1, d_xs_2 - \nabla_xs_2).
\end{align*}
On the one hand, $(\nabla_xs_1,\nabla_xs_2)$ takes values in $\left(V_{y_1}\times V_{y_2}\right) \subset T_{(y_1,y_2)}\Delta^*(E \times E)$. Note that:
\[T_{(y_1,y_2)}\Delta^*(E \times E) = \left\{(v_1,v_2) \in T_{y_1}E \times T_{y_2}E \mvert d_{y_1}p\cdot v_1 = d_{y_2}p\cdot v_2\right\}.\]
Since, $\Delta \circ p \circ A = (p,p): \Delta^*(E\times E) \to M\times M$ and $\Delta$ is an immersion, we have $d_{(y_1,y_2)}A(\ker (d_{y_1}p,d_{y_2}p)) \subset \ker d_{A(y_1,y_2)}p$. Thus $d_{(y_1,y_2)}A(V_{y_1}\times V_{y_2})$ is a subspace of $V_{y_1+y_2}$ and $d_{(y_1,y_2)}A \circ (\nabla_xs_1,\nabla_xs_2)$ takes values in $V_{y_1+y_2}$.

On the other hand, $(d_xs_1 - \nabla_xs_1, d_xs_2 - \nabla_xs_2)$ takes values in $H_{y_1}+H_{y_2}$. Moreover,
\[ d_{y_i}p\circ(d_xs_i - \nabla_xs_i) = d_{y_i}p\circ d_xs_i = d_x (p\circ s_i)=\Id,\]
which means that the image of $(d_xs_1 - \nabla_xs_1, d_xs_2 - \nabla_xs_2)$ is in $T_{(y_1,y_2)}\Delta^*\left(E\times E\right)$. Since $H$ is linear, the image of $d_{(y_1,y_2)}A \circ (d_xs_1 - \nabla_xs_1, d_xs_2 - \nabla_xs_2)$ is included in $H_{y_1 + y_2}$.

Finally, we get that:
\begin{align*}
\nabla_x(s_1+s_2) &= \Pi_{y_1+y_2} \circ \left( d_{(y_1,y_2)}A \circ (\nabla_xs_1,\nabla_xs_2) + d_{(y_1,y_2)}A \circ (d_xs_1 - \nabla_xs_1, d_xs_2 - \nabla_xs_2)\right)\\
&= d_{(y_1,y_2)}A \circ (\nabla_xs_1,\nabla_xs_2).
\end{align*}
Under the canonical identifications $V_{y_1+y_2} \simeq E_x$ and $V_{y_1} \simeq E_x \simeq V_{y_2}$, $d_{(y_1,y_2)}A$ reads as the addition of $E_x$. Hence, $\nabla_x(s_1+s_2) = \nabla_xs_1 + \nabla_xs_2$ and $\nabla$ is linear.

\paragraph*{Leibniz's rule.}
Let $s \in \Gamma(E)$ and $f \in \mathcal{C}^\infty(M)$. Let $x \in M$, we denote $y = s(x)$ and $\lambda=f(x)$. Let $\varphi_{\vert U}:E_{\vert U} \to U \times \R^r$ be a local trivialization of $E$ on a neighborhood $U$ of~$x$. We have $\varphi_{\vert U}\circ s = (\Id_U,\sigma)$ for some smooth $\sigma:U\to \R^r$. Then, $\varphi_{\vert U}\circ(fs)=(\Id_U,f\sigma)$ and:
\begin{align*}
d_{\lambda y}\varphi_{\vert U} \circ d_x(fs) &= d_x(\Id_U,f\sigma) = (\Id_{T_xU},d_xf \otimes \sigma(x) + f(x) d_x\sigma)\\
&= (\Id_{T_xU}, \lambda d_x\sigma) + d_xf \otimes (0,\sigma(x)).
\end{align*}
We have $\varphi_{\vert U} \circ M_\lambda \circ s = (\Id_U,\lambda \sigma)$, hence $d_{\lambda y}\varphi_{\vert U} \circ d_yM_\lambda \circ d_xs = (\Id_{T_xU}, \lambda d_x\sigma)$. Moreover, if we see $s(x) \in E_x$ as an element of $V_{\lambda y} \subset T_{\lambda y}E$, we have $d_{\lambda y}\varphi_{\vert U}\cdot s(x) = (0,\sigma(x))$. Finally, we get: $d_{\lambda y}\varphi_{\vert U} \circ d_x(fs) = d_{\lambda y}\varphi_{\vert U} \circ d_yM_\lambda \circ d_xs + d_{\lambda y}\varphi_{\vert U} (d_xf \otimes s(x))$, and
\begin{equation}
\label{differential}
d_x(fs) = d_yM_\lambda \circ d_xs + d_xf \otimes s(x).
\end{equation}
We have already seen that $d_yM_\lambda \circ d_xs = d_yM_\lambda(\nabla_xs) + d_yM_\lambda(d_xs - \nabla_xs)$ where the first term takes values in $V_{\lambda y}$ and the second one takes values in $H_{\lambda y}$. Since $s(x) \in E_x \simeq V_{\lambda y}$, we get $\nabla_x(fs) = d_yM_\lambda(\nabla_xs) + d_xf \otimes s(x)$. Using once again that $V_y \simeq E_x \simeq V_{\lambda y}$ and the fact that $d_yM_\lambda$ reads as the multiplication by $\lambda$ of $E_x$ under these identifications, we proved that $\nabla_x(fs) = f(x)\nabla_xs + d_xf \otimes s(x)$.

\paragraph*{Conclusion.}
$\nabla$ defined by $\nabla_xs = \Pi_{s(x)} \circ d_xs$ is a $\R$-linear map $\Gamma(E) \to \Gamma(T^*M\otimes E)$ that satisfies Leibniz's rule. Hence it is a connection on $E$.

\item Let $s \in \Gamma(E)$, $x\in M$ and $y =s(x)$. Since $d_yp \circ d_xs = \Id_{T_xM}$, $d_xs$ is injective from $T_xM$ to $T_yE$ and its image is transverse to $\ker d_yp = V_y$. Thus $d_xs(T_xM)$ is a horizontal direction in $T_yE$ \dots \ that depends heavily on $s$. The point is to prove that the image of $d_xs$ is the same for all $s\in \Gamma(E)$ such that $s(x)=y$ and $\nabla_xs=0$. Then we can define $H_y$ as the image of $d_xs$ for any such section.

\paragraph*{Sections with vanishing derivative.}
Let $(e_1,\dots,e_r)$ be a local frame defined on a neighborhood $U$ of $x$ and let $(x^1,\dots,x^n)$ be local coordinates on $U$ centered at $x$. We denote by $(\Gamma_{ij}^k)$ the Christoffel symbols of $\nabla$ associated with the frame $(e_i)$ and these coordinates. Let $s= \sum_{i=1}^r f^ie_i$ be a smooth section of $E_{\vert U}$. Then, $\nabla_xs$ equals:
\begin{align*}
\sum_{i=1}^r d_xf^i \otimes e_i(x) + f^i(x) \nabla e_i(x) &= \sum_{i=1}^r d_xf^i \otimes e_i(x) + f^i(x) \sum_{j=1}^n\sum_{k=1}^r \Gamma_{ji}^k(x) \dx x^j\otimes e_k(x)\\
&= \sum_{i=1}^r \sum_{j=1}^n \left(\deron{f^i}{x_j}(x) + \sum_{k=1}^r \Gamma_{jk}^i(x)f^k(x)\right) \dx x^j \otimes e_i(x).
\end{align*}
Thus $s(x)=y=\sum y^ie_i(x)$ and $\nabla_xs=0$ if and only if:
\begin{equation}
\label{condition}
\left\{
\begin{aligned}
&\forall i \in \{1,\dots,r\}, & &f^i(x)=y^i\\
&\forall i \in \{1,\dots,r\}, \forall j \in \{1,\dots,n\}, &&\deron{f^i}{x_j}(x) = -\sum_{k=1}^r \Gamma_{jk}^i(x)y^k.
\end{aligned}
\right.
\end{equation}
First, this proves that there exists $s$ such that $s(x)=y$ and $\nabla_xs=0$. We define such a section locally using the frame $(e_i)$ and the coordinates $(x^1,\dots,x^n)$ by $s = \sum f^ie_i$ with:
\begin{equation*}
\forall i \in \{1,\dots,r\}, \qquad f^i(x_1,\dots,x_n) = y^i - \sum_{j=1}^n x^j\left(\sum_{k=1}^r \Gamma_{jk}^i(0)y^k\right).
\end{equation*}
Then we extend $s$ into a global section of $E$ using a smooth bump function that equals~$1$ in a neighborhood of $x$ and whose support is contained in $U$.

Let $s \in \Gamma(E)$ be such that $s(x)=y$ and $\nabla_xs=0$. We write $s = \sum f^ie_i$ in a local chart. Using Eq.~\eqref{differential}, which is still valid in this context, we get:
\begin{equation*}
d_xs = \sum d_x(f^ie_i) = \sum d_{e_i(x)}M_{f^i(x)}\circ d_x e_i + d_xf^i \otimes e_i(x).
\end{equation*}
Then, by Eq.~\eqref{condition}, in local coordinates we get:
\begin{equation}
\label{expression ds}
d_xs = \sum_{i=1}^r \left( d_x(y^ie_i) - \sum_{j=1}^n \sum_{k=1}^r \Gamma_{jk}^i(x) y^k \dx x^j \otimes e_i(x) \right).
\end{equation}
Note that the right-hand side no longer depends on $f=(f^1,\dots,f^r)$. It only depends on $\nabla$ and our choice of coordinates. Thus all sections $s \in \Gamma(E)$ such that $s(x)=y$ and $\nabla_xs=0$ have the same differential. We define $H_y := d_xs(T_xM)$ for any such section. We have already seen that for any $y \in E$, $H_y$ is transverse to $V_y$ and that $d_xs$ is injective. Then $\dim(H_y)=\dim(M)=n$, and since $\dim(V_y) =r$ we have $H_y \oplus V_y = T_yE$.

\paragraph*{Horizontal sub-bundle}
Let $y \in E$, we denote $x=p(y)$. Let $(e_1,\dots,e_r)$ be a local frame around $x$ and let $(x^1,\dots,x^n)$ be local coordinates defined on the same neighborhood $U$ of $x$. From the definition of $H_y$, we see that $(d_xs\cdot \deron{}{x_1},\dots,d_xs\cdot \deron{}{x_n})$ is a basis of $H_y$, where $d_xs:T_xM \to T_yE$ is defined by Eq.~\eqref{expression ds}. Note that we don't use the fact that it is the differential of something, the notation $d_xs$ is formal here.

For any $j \in \{1,\dots,n\}$, we have: $\displaystyle d_xs\cdot \deron{}{x_j} = \sum_{i=1}^r \left( d_x(y^ie_i)\cdot \deron{}{x_j} - \sum_{k=1}^r \Gamma_{jk}^i(x) y^k e_i(x) \right)$.

We define smooth local vector fields $X_1,\dots,X_n$ on $E_{\vert U}$ by:
\begin{equation*}
X_j: y \longmapsto \sum_{i=1}^r \left( d_{p(y)}(y^i e_i)\cdot \deron{}{x_j} - \sum_{k=1}^r \Gamma_{jk}^i(p(y)) y^k e_i(p(y)) \right).
\end{equation*}
Then, for any $y \in E_{\vert U}$, $(X_1(y),\dots,X_n(y))$ is a basis of $H_y$. For $j \in \{1,\dots,r\}$, we define $X_{n+j} : y \mapsto e_j(p(y))\in E_{p(y)}\simeq V_y \subset T_yE$. Then $X_{n+1},\dots,X_{n+r}$ are smooth vector fields on $E_{\vert U}$ such that $(X_{n+1}(y),\dots,X_{n+r}(y))$ is a basis of $V_y$ for any $y \in E_{\vert U}$. Thus $(X_1,\dots,X_{n+r})$ is a local frame for $TE$ on $E_{\vert U}$ such that $\forall y \in E_{\vert U}$, $(X_1(y),\dots,X_n(y))$ is a basis of $H_y$. This proves that $H \to E$ is a horizontal sub-bundle of $TE \to E$.

\paragraph*{Linearity of $H$}
We now need to check that $H$ is linear. Let $y \in E$, $x = p(y)$ and $\lambda \in \R$. There exists $s \in \Gamma(E)$ such that $s(x)=y$ and $\nabla_xs=0$. Then $M_\lambda\circ s(x) = M_\lambda(y)$ and $\nabla_x(M_\lambda\circ s)=\nabla_x(\lambda s) =\lambda \nabla_xs=0$, the operator $\nabla$ being $\R$-linear. By definition $H_{M_\lambda(y)}=d_x(M_\lambda \circ s)(T_xM)= (d_{y}M_\lambda \circ d_x s) (T_xM)= d_yM_\lambda(H_y)$.

Similarly, let $y_1,y_2\in E$ such that $p(y_1)=p(y_2)=x \in M$. For $i \in \{1,2\}$, let $s_i \in \Gamma(E)$ such that $s_i(x)=y_i$ and $\nabla_xs_i =0$, so that $H_{y_i}=d_xs_i(T_xM)$. Then $A\circ (s_1,s_2) \in \Gamma(E)$ is such that $(A\circ (s_1,s_2))(x) = y_1 + y_2$ and $\nabla_x(A\circ (s_1,s_2))=\nabla_x(s_1+s_2)=\nabla_xs_1+\nabla_xs_2=0$. Thus $H_{y_1+y_2} = d_x(A\circ (s_1,s_2))(T_xM) = d_{(y_1,y_2)}A \circ d_x(s_1,s_2)(T_xM)$. One can check that:
\[d_x(s_1,s_2)(T_xM) = (d_xs_1,d_xs_2)(T_xM) = \left(H_{y_1}\times H_{y_2}\right) \cap T_{(y_1,y_2)}\Delta^*(E\times E).\]
This shows that $H$ is compatible with $A$ and concludes the proof of the linearity of $H$.

\item Recall that the zero section of $E$ is $z:M \to E$ defined by $z(x) = 0 \in E_x$. Since $z$ and $p$ are smooth and $p \circ z = \Id_M$, $z$ is an embedding of $M$ into $E$. Indeed, $z$ is an immersive injection and is proper. Let us denote $Z=z(M)$ the image of the zero section.

Let $y \in Z$ and $x =p(y)$. Then $y=z(x)$ and $T_yE=T_yZ \oplus V_y$. Indeed, $d_yp \circ d_xz = \Id_{T_xM}$ so that $T_yZ = d_xz(T_xM)$ has dimension $n$ and is transverse to $V_y = \ker(d_yp)$. That is, we already have a canonical horizontal direction in $T_yE$ which is $T_yZ$.

Let $\nabla$ be any connection on $E$ and let $H$ be the associated linear horizontal sub-bundle of $TE$. Let $f:M\to \R$ be constant equal to $0$. Let also $x \in M$ and $y=z(x) \in E$. We have: $\nabla_xz = \nabla_x(fz) = d_xf \otimes z(x) + f(x)\nabla_x z =0$. Then $z \in \Gamma(E)$ is such that $z(x)=y$ and $\nabla_xz=0$. Thus, by the previous question, $H_y = d_xz(T_xM) = T_yZ$.

In conclusion, let $s \in \Gamma(E)$ and $x\in M$ be such that $y=s(x) =0$. For any connection $\nabla$, the associated horizontal direction in $T_yE$ is $H_y = T_yZ$. Since $\nabla_xs$ is the projection of $d_xs$ onto $V_y$ along $H_y$, it does not depend on the choice of $\nabla$.

\item We defined $h \in \Gamma(E^*\otimes E^*)$. We can also see $h$ as a smooth map from $\Delta^*(E\times E)$ to $\R$, where $\Delta:M\to M\times M$ is defined by $x \mapsto (x,x)$. We define similarly $\Delta_E:E \to E \times E$ by $\Delta_E(y)=(y,y)$. For any $R >0$, we denote by $\mathcal{T}_R$ the tube of radius $R$ in $E$:
\[\mathcal{T}_R := \left\{y \in E \mvert h_{p(y)}(y,y)=R^2 \right\}=(h \circ \Delta_E)^{-1}(R^2).\]

We will prove that $\mathcal{T}_R$ is a smooth hypersurface of $E$ for any $R>0$ and that, if $\nabla$ is a metric connection on $(E,h)$ and $H$ is the associated linear horizontal sub-bundle of $TE$, then for every $y \in \mathcal{T}_R$, $H_y$ is tangent to $\mathcal{T}_R$ at $y$.

Note that we say nothing about what happens along $Z$ (that we can think of as $\mathcal{T}_0$) but, by the previous question, $H_y$ does not depend on $\nabla$ if $y \in Z$. In particular, it does not depend on whether $\nabla$ is compatible with $h$.

\paragraph*{Tubes.} First note that $h \circ \Delta_E : E \to \R_+$ is smooth. Let $R>0$ and let $y \in \mathcal{T}_R$. For any $t>0$, we have $h\circ\Delta_E(ty)=h(ty,ty)=t^2h(y,y)=t^2R^2$. Taking the derivative of this expression at $t=1$ we get: $d_y(h\circ \Delta_E)\cdot y = 2R^2$ (recall that $y \in E_x \simeq V_y \subset T_yE$). Hence $d_y(h\circ \Delta_E)\neq 0$. Thus $h\circ \Delta_E$ is a submersion on $E \setminus Z$ and, for any $R>0$, $\mathcal{T}_R$ is a smooth hypersurface of $E$.

\paragraph*{Tangency.} Let $R>0$ and $y \in \mathcal{T}_R$, we denote $x = p(y)$. Let $\nabla$ be a connection on $E$ that is compatible with $h$ and let $H$ denote the associated horizontal sub-bundle of $TE$. Let $s \in \Gamma(E)$ such that $s(x)=y$ and $\nabla_xs=0$, so that $H_y = d_xs(T_xM)$. We have:
\begin{equation*}
d_y(h\circ \Delta_E) \circ d_xs = d_x(h\circ\Delta_E\circ s) = d_x(h(s,s)) = 2h_x(\nabla_xs,s(x)) = 0,
\end{equation*}
where we used the compatibility of $\nabla$ with $h$, the symmetry of $h$, and $\nabla_xs=0$. Finally,
\begin{equation*}
H_y = d_xs(T_xM) \subset \ker d_y(h\circ \Delta_E) = T_y\mathcal{T}_R.
\end{equation*}
That is, the horizontal sub-bundle $H \to E$ associated with a connection compatible with the metric $h$ on $E$ is everywhere tangent to the tubes of constant radius in $(E,h)$. Note that this is also true for $R=0$ since $\mathcal{T}_0=Z$ and, for any $y\in Z$, $H_y = T_yZ$.
\end{enumerate}

\end{document}