% \rmfamily removed: font declarations are not permitted before \documentclass and prevent compilation.

% Farey Series and the Riemann Hypothesis

\documentclass[mathematics,article,submit,oneauthor,pdftex,10pt,a4paper]{mdpi}

\theoremstyle{mdpi}
\newcounter{thm}
\setcounter{thm}{0}
\newcounter{ex}
\setcounter{ex}{0}
\newcounter{re}
\setcounter{re}{0}

\newtheorem{Theorem}[thm]{Theorem}
\newtheorem{Lemma}[thm]{Lemma}
\newtheorem{Corollary}[thm]{Corollary}
\newtheorem{Proposition}[thm]{Proposition}

\theoremstyle{mdpidefinition}
\newtheorem{Characterization}[thm]{Characterization}
\newtheorem{Property}[thm]{Property}
\newtheorem{Problem}[thm]{Problem}
\newtheorem{Example}[ex]{Example}
\newtheorem{ExamplesandDefinitions}[ex]{Examples and Definitions}
\newtheorem{Remark}[re]{Remark}
\newtheorem{Definition}[thm]{Definition}

\Title{Farey Sequences and the Riemann Hypothesis}

\Author{Darrell Cox}

\address{Grayson College}

\corres{Correspondence: dlcox@graysoncable.com}

\firstnote{P.O. Box 2171, Denison, TX 75021}

\abstract{
	Relationships between the Farey sequence and the Riemann hypothesis other than the Franel--Landau theorem are discussed.  Whether a function similar to Chebyshev's second function is square-root close to a line having a slope different from 1 is discussed.  The nontrivial zeros of the Riemann zeta function can be used to approximate many functions in analytic number theory.  For example, it could be said that the nontrivial zeta function zeros and the M\"{o}bius function generate in essence the same function---the Mertens function.  A different approach is to start with a sequence that is analogous to the nontrivial zeros of the zeta function and follow the same procedure with both this sequence and the nontrivial zeros of the zeta function to generate in essence the same function.  A procedure for generating such a function is given.}

\keyword{Riemann hypothesis, Farey sequence, Gauss sum associated with a Dirichlet character}
	
\begin{document}

\section{Introduction} 

\noindent The Farey sequence $F_{x}$ of order $x$ is the ascending series of irreducible fractions between 0 and 1 whose denominators do not exceed $x$.  In this article, the fraction $0/1$ is not considered to be in the Farey sequence.  Let $A(x)$ denote the number of fractions in $F_{x}$.   $A(x)=\sum_{i=1}^{x}\phi(i)$ where $\phi$ is Euler's totient function.  For $v=1, 2, 3,..., A(x)$ let $\delta_{v}$ denote the amount by which the $v$th term of the Farey sequence differs from $v/A(x)$.  Franel (in collaboration with Landau)~\cite{fl} proved that the Riemann hypothesis is equivalent to the statement that $|\delta_{1}|+|\delta_{2}|+...+|\delta_{A(x)}|=o(x^{\frac{1}{2}+\epsilon})$ for all $\epsilon>0$ as $x\rightarrow\infty$.  Let $M(x)$ denote the Mertens function ($M(x)=\sum_{i=1}^{x}\mu(i)$ where $\mu(i)$ is the M\"{o}bius function).  Littlewood~\cite{li} proved that the Riemann hypothesis is equivalent to the statement that for every $\epsilon>0$ the function $M(x)x^{-(1/2)-\epsilon}$ approaches zero as $x\rightarrow\infty$.  Mertens conjectured that $|M(x)|<\sqrt x$.  This was disproved by Odlyzko and te Riele~\cite{or}.

\section{Shorter Intervals of Farey Points}

\noindent Let $r_{1}$, $r_{2}$, ..., $r_{A(x)}$ denote the terms of the Farey sequence of order $x$ and let $h(\xi)$ denote the number of $r_{v}$ less than or equal to $\xi$.  Kanemitsu and Yoshimoto~\cite{k} proved that each of the estimates $\sum_{r_{v}\le 1/3}(r_{v}-h(\frac{1}{3})/(2A(x)))=O(x^{1/2+\epsilon})$ and $\sum_{r_{v}\le 1/4}(r_{v}-h(\frac{1}{4})/(2A(x)))=O(x^{1/2+\epsilon})$ is equivalent to the Riemann hypothesis.  Let $n=4$, 5, 6, ..., and let $j=\lfloor n/2 \rfloor$.  Let $y_{x}(n)$ denote the number of fractions less than $1/n$ and let $z_{x}(n)$ denote the number of fractions greater than $1/n$ and less than $2/n$ in a Farey sequence of order $x$.  (If $x\le n$, set $y_{x}$ to 0.  If $x\le j$, set $z_{x}$ to 0.  If $x>j$ and $x<n$, set $z_{x}$ to $x-j$.  If $x=n$, set $z_{x}$ to $j-1$ if $n$ is even or $j$ if $n$ is odd.)  Franel proved that $M(x)=\sum_{v=1}^{A(x)}e^{2\pi ir_{v}}$, so there should be some discernible relationship between $M(x)$ and $y_{x}(4)-z_{x}(4)$.  The ``curve'' of $y_{x}(4)-z_{x}(4)$ values resembles that of $M(x)$ in that the peaks and valleys occur roughly at the same places and have about the same heights and depths.  See Figure 1 for a plot of $M(x)$ for $x=1$, 2, 3, ..., 5000.  See Figure 2 for a plot of $y_{x}(4)-z_{x}(4)$ for $x=1$, 2, 3, ..., 5000.  Let $h_{x}(n)$ denote $\sum_{i=1}^{x}(z_{\lfloor x/i \rfloor}(n)-y_{\lfloor x/i \rfloor}(n))$.\\

\begin{Theorem} $h_{x+n}(n)=h_{x}(n)+\lfloor (n-1)/2 \rfloor$
\end{Theorem}

\begin{proof} The value of $h_{x}(4)-h_{x-1}(4)$ is determined by the distribution of the fractions $1/x$, $2/x$, $3/x$, ..., $\lfloor (x-1)/2 \rfloor/x$ about 1/4.  The difference in the number of fractions after 1/4 and before 1/4 is 0 unless 4 divides $x+1$, in which case it is 1.  Similar arguments are applicable for $n>4$.
\end{proof}

\noindent While $\sum_{i=1}^{x}M(\lfloor x/i \rfloor)$ has only one value (1), $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(n)-z_{\lfloor x/i \rfloor}(n)+\lfloor (n-1)/2 \rfloor/n)$ has up to $n$ values.  (For $n=4$, these values are 1/2, 1/4, 0, or $-1/4$.) 

\section{More Comparisons of $M(x)$ and $y_{x}(n)-z_{x}(n)$}

\noindent  Let $\Lambda(i)$ denote the Mangoldt function ($\Lambda(i)$ equals $\log(p)$ if $i=p^{m}$ for some prime $p$ and some $m\ge 1$ or $0$ otherwise). Let $\psi(x)$ denote the second Chebyshev function ($\psi(x)=\sum_{i\le x}\Lambda(i)$).  Mertens~\cite{m1} proved that:

\begin{Theorem} \normalfont $\sum_{i=1}^{x}M(\lfloor x/i \rfloor)\log(i)=\psi(x)$
\end{Theorem}

\noindent  Additional comparisons of $M(x)$ and $y_{x}(n)-z_{x}(n)$ can then be made by replacing $M(\lfloor x/i \rfloor)$ by $y_{\lfloor x/i \rfloor}(n)-z_{\lfloor x/i \rfloor}(n)+\lfloor (n-1)/2 \rfloor/n$ in formulas such as $\sum_{i=1}^{x}M(\lfloor x/i \rfloor)\log(i)=\psi(x)$. See Figure 3 for a plot of $\psi(x)$ and $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(4)-z_{\lfloor x/i \rfloor}(4)+1/4)\log(i)$ for $x=1$, 2, 3, ..., 5000 (the prime number theorem is equivalent to the limit relation $\lim_{x\rightarrow \infty}\psi(x)/x=1$).  For a linear least-squares fit of $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(4)-z_{\lfloor x/i \rfloor}(4)+1/4)\log(i)$ for $x=1$, 2, 3, ..., 5000, $p_{1}=0.2188$ with a 95\% confidence interval of (0.2186, 0.219), $p_{2}=0.9636$ with a 95\% confidence interval of (0.3775, 1.55), SSE=5.582e+5, R-square=0.9989, and RMSE=10.57.  Let $\sigma_{x}(i)$ denote the sum of positive divisors function ($\sigma_{x}(i)=\sum_{d|i}d^{x}$).  Let $\lambda(i)$ denote the Liouville function ($\lambda(1)=1$ or if $i=p_{1}^{a_{1}}\cdots p_{k}^{a_{k}}$, $\lambda(i)=(-1)^{a_{1}+...+a_{k}}$).  Let $L(x)=\sum_{i\le x}\lambda(i)$.  Let $H(x)=\sum_{i\le x}\mu(i)\log(i)$.  ($H(x)/(x\log(x))\rightarrow 0$ as $x\rightarrow\infty$ and $\lim_{x\rightarrow\infty}(M(x)/x-H(x)/(x\log(x)))=0$.  See pp.~91--92 of Apostol's~\cite{ta} book.)  Other relationships that are useful for comparing $M(x)$ and $y_{x}(n)-z_{x}(n)$ are:

\begin{Theorem} $\sum_{i=1}^{x}M(\lfloor x/i \rfloor)\log(i)\sigma_{0}(i)/2=\log(x!)$
\end{Theorem}

\begin{Theorem} $\sum_{i=1}^{x}M(\lfloor x/i \rfloor)\sigma_{0}(i)=x$
\end{Theorem} 

\begin{Theorem} $\sum_{i=1}^{x}M(\lfloor x/i \rfloor)\sigma_{1}(i)=x(x+1)/2$ 
\end{Theorem} 

\begin{Theorem} $\sum_{i=1}^{x}M(\lfloor x/i \rfloor)\sigma_{2}(i)=x(x+1)(2x+1)/6$
\end{Theorem}

\begin{Theorem} $\sum_{i=1}^{x}M(\lfloor x/i \rfloor)\Lambda(i)=-H(x)$
\end{Theorem}

\begin{Theorem} $\sum_{i=1}^{x}M(\lfloor x/i \rfloor)$ where the  summation is over $i$ values that are perfect squares equals $L(x)$
\end{Theorem}

\noindent  (See Cox~\cite{c} for proofs of Theorems 3 through 8.)  See Figure 4 for a plot of $\log(x!)$ and $4.38\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(4)-z_{\lfloor x/i \rfloor}(4)+1/4)\log(i)\sigma_{0}(i)/2$ (superimposed on each other) for $x=1$, 2, 3, ..., 1000.  See Figure 5 for a plot of $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(5)-z_{\lfloor x/i \rfloor}(5)+2/5)\sigma_{0}(i)$ for $x=1$, 2, 3, ..., 1000.  For a linear least-squares fit of $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(5)-z_{\lfloor x/i \rfloor}(5)+2/5)\sigma_{0}(i)$ for $x=1$, 2, 3, ..., 1000, $p_{1}=0.3734$ with a 95\% confidence interval of (0.3731, 0.3738), $p_{2}=0.1249$ with a 95\% confidence interval of ($-0.08543$, 0.3353), SSE=2863, R-square=0.9998, and RMSE=1.694.  See Figure 6 for a plot of $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(6)-z_{\lfloor x/i \rfloor}(6)+1/3)\sigma_{1}(i)$ for $x=1$, 2, 3, ..., 200.  For a quadratic least-squares fit of $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(6)-z_{\lfloor x/i \rfloor}(6)+1/3)\sigma_{1}(i)$ for $x=1$, 2, 3, ..., 200, SSE=2.531e+4, R-square=1, and RMSE=11.33.  See Figure 7 for a plot of $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(7)-z_{\lfloor x/i \rfloor}(7)+3/7)\sigma_{2}(i)$ for $x=1$, 2, 3, ..., 100.  For a cubic least-squares fit of $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(7)-z_{\lfloor x/i \rfloor}(7)+3/7)\sigma_{2}(i)$ for $x=1$, 2, 3, ..., 100, SSE=1.454e+6, R-square=1, and RMSE=123.1.  See Figure 8 for a plot of $\frac{1}{x\log(x)}\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(4)-z_{\lfloor x/i \rfloor}(4)+1/4)\Lambda(i)$ for $x=2$, 3, 4, ..., 5000.  See Figure 9 for a plot of $L(x)$ and $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(4)-z_{\lfloor x/i \rfloor}(4)+1/4)$ where the summation is over $i$ values that are perfect squares for $x=1$, 2, 3, ..., 1000.  (P$\acute{o}$lya conjectured that $L(x)\le0$ for $x\ge 2$.  This was disproved by Haselgrove~\cite{h}.)   

\section{$\lim_{n\rightarrow\infty}(y_{n}(n)-z_{n}(n))/n$ and Similar Limits}

\noindent See Figure 10 for a plot of $y_{x}(65)-z_{x}(65)$ for $x=2$, 3, 4, ..., 1625.  See Figure 11 for a plot of $y_{x}(200)-z_{x}(200)$ for $x=2$, 3, 4, ..., 5000.  See Figure 12 for a plot of $y_{x}(200)-z_{x}(200)$ for $x=100$, 200, 300, ..., 5000.  Note that the values of $y_{x}(200)-z_{x}(200)$ in the $x$ intervals of (100, 200), (200, 300), (300, 400), ..., can be approximated by linear interpolation.  For even $n$, the limits of $(y_{n/2}(n)-z_{n/2}(n))/n$, $(y_{n}(n)-z_{n}(n))/n$, $(y_{3n/2}(n)-z_{3n/2}(n))/n$, ...., as $n\rightarrow\infty$ appear to be $-\frac{1}{2}$, $-\frac{1}{4}$, $-\frac{1}{3}$, $-\frac{1}{6}$, $-\frac{2}{5}$, $-\frac{2}{15}$, $-\frac{31}{105}$, $-\frac{29}{140}$, $-\frac{19}{42}$, $-\frac{41}{420}$, $-\frac{76}{385}$, $-\frac{201}{1540}$, $-\frac{751}{1430}$, $-\frac{1109}{4004}$, $-\frac{4436}{15015}$, $-\frac{857}{13411}$, $-\frac{3700}{12213}$, $-\frac{721}{17163}$, $-\frac{738}{2897}$, .... (these values are based on data collected for $n=10,000,000,000$).  $\frac{0}{1}$ is considered to be the first limit.  Let $\delta_{1}(1)$, $\delta_{1}(2)$, $\delta_{1}(3)$, ..., denote these limits and let $\delta_{m}(x)$, $m=2$, 3, 4, ..., denote the limits and $m-1$ values that have been linearly interpolated between successive limits.  See Figure 13 for a plot of $\sum_{i=1}^{x}\delta_{4}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 79 (including $\frac{0}{1}$, 20 limits were used).  For a linear least-squares fit of $\sum_{i=1}^{x}\delta_{4}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 79, $p_{1}=-0.1276$ with a 95\% confidence interval of ($-0.1287$, $-0.1264$), $p_{2}=0.05253$ with a 95\% confidence interval of ($-8.043$e$-5$, 0.1051), SSE=1.041, R-square=0.9984, and RMSE=0.1163.  See Figure 14 for a plot of $\sum_{i=1}^{x}(\delta_{4}(\lfloor x/i \rfloor)+0.1276)\log(i)$ for $x=1$, 2, 3, ..., 79.  
For a linear least-squares fit of $\sum_{i=1}^{x}(\delta_{4}(\lfloor x/i \rfloor)+0.1276)\log(i)$ for $x=1$, 2, 3, ..., 79, SSE=2.415, R-square=0.9949, and RMSE=0.1771.  See Figure 15 for a plot of $\sum_{i=1}^{x}(\delta_{4}(\lfloor x/i \rfloor)+0.1276)\sigma_{0}(i)$ for $x=1$, 2, 3, ..., 79.  For a linear least-squares fit of $\sum_{i=1}^{x}(\delta_{4}(\lfloor x/i \rfloor)+0.1276)\sigma_{0}(i)$ for $x=1$, 2, 3, ..., 79, SSE=5.123, R-square=0.9892, and RMSE=0.2579.  See Figure 16 for a plot of $\sum_{i=1}^{x}(\delta_{4}(\lfloor x/i \rfloor)+0.1276)\sigma_{1}(i)$ for $x=1$, 2, 3, ..., 79.  For a quadratic least-squares fit of $\sum_{i=1}^{x}(\delta_{4}(\lfloor x/i \rfloor)+0.1276)\sigma_{1}(i)$ for $x=1$, 2, 3, ..., 79, SSE=92.93, R-square=0.9999, and RMSE=1.106.  See Figure 17 for a plot of $\sum_{i=1}^{x}(\delta_{4}(\lfloor x/i \rfloor)+0.1276)\sigma_{2}(i)$ for $x=1$, 2, 3, ..., 79.  For a cubic least-squares fit of $\sum_{i=1}^{x}(\delta_{4}(\lfloor x/i \rfloor)+0.1276)\sigma_{2}(i)$ for $x=1$, 2, 3, ..., 79, SSE=6289, R-square=1, and RMSE=9.157.  See Figure 18 for a plot of $\log(x!)$ and $8.7\sum_{i=1}^{x}(\delta_{4}(\lfloor x/i \rfloor)+0.1276)\log(i)\sigma_{0}(i)/2$ (superimposed on each other) for $x=1$, 2, 3, ..., 79.  See Figure 19 for a plot of $\frac{1}{x\log(x)}\sum_{i=1}^{x}(\delta_{4}(\lfloor x/i \rfloor)+0.1276)\Lambda(i)$ for $x=2$, 3, 4, ..., 79.

\section{An Analogue of $\psi(x)$}

\noindent  A reformulation of the Riemann hypothesis is that $\psi(x)$ is essentially square-root close to the function $f(x)=x$.  See Figure 20 for a plot of $\sqrt{0.17x}$, $-\sqrt{0.17x}$, and $\psi(x)-x$ for $x=1$, 2, 3, ..., 999. See Figure 21 for a plot of $\sum_{i=1}^{x}\delta_{1}(\lfloor x/i \rfloor)$ and $\sum_{i=1}^{x}\delta_{1}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 999 (these values were computed using 1000 approximate limits accurate to about 6 decimal places).  For a linear least-squares fit of $\sum_{i=1}^{x}\delta_{1}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 999, $p_{1}=-0.1704$ with a 95\% confidence interval of ($-0.1076$, $-0.1703$), $p_{2}=0.04535$ with a 95\% confidence interval of ($-0.03865$, 0.1293), SSE=455.6, R-square=0.9998, and RMSE=0.676.  For a linear least-squares fit of $\sum_{i=1}^{x}\delta_{1}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 999, $p_{1}=-0.17$ with a 95\% confidence interval of ($-0.1705$, $-0.1695$), $p_{2}=0.2791$ with a 95\% confidence interval of ($-0.009398$, 0.5676), SSE=5374, R-square=0.9978, and RMSE=2.322.  See Figure 22 for a plot of $\sqrt{0.17x}$, $-\sqrt{0.17x}$, and $0.17x+\sum_{i=1}^{x}\delta_{1}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 999.  $\psi(x)$ appears to deviate from $x$ more than $\sum_{i=1}^{x}\delta_{1}(\lfloor x/i \rfloor)\Lambda(i)$ deviates from $-0.17x$.  See Figure 23 for a plot of the $p_{1}$ values of the linear least-squares fits of $\sum_{i=1}^{x}\delta_{1}(\lfloor x/i \rfloor)$, $\sum_{i=1}^{x}\delta_{2}(\lfloor x/i \rfloor)$, $\sum_{i=1}^{x}\delta_{3}(\lfloor x/i \rfloor)$, ..., $\sum_{i=1}^{x}\delta_{40}(\lfloor x/i \rfloor)$ for respective $x$ values up to 999, 1999, 2999, ..., 39999.  See Figure 24 for a plot of the corresponding $p_{2}$ values.  Denote these $p_{1}$ and $p_{2}$ values by $p_{1}(n)$ and $p_{2}(n)$, $n=1$, 2, 3, ..., 40.  See Figure 25 for a plot of $\sqrt{-np_{1}(n)}$ versus $\log(n)$ for $n=1$, 2, 3, ..., 40.  
For a linear least-squares fit of these quantities, $p_{1}=0.2208$ with a 95\% confidence interval of (0.219, 0.2226), $p_{2}=0.4146$ with a 95\% confidence interval of (0.4093, 0.4198), SSE=0.0009158, R-square=0.9994, and RMSE=0.004909.  See Figure 26 for a plot of $np_{2}(n)$ for $n=1$, 2, 3, ..., 40.  For a quadratic least-squares fit of $np_{2}(n)$ for $n=1$, 2, 3, ..., 40, $p_{1}=0.0004067$ with a 95\% confidence interval of (0.0003452, 0.0004683), $p_{2}=0.08821$ with a 95\% confidence interval of (0.0856, 0.09081), $p_{3}=-0.1282$ with a 95\% confidence interval of ($-0.1513$, $-0.105$), SSE=0.01937, R-square=0.9997, and RMSE=0.02288. \\

\noindent  See Figure 27 for a plot of the $p_{1}$ values of the linear least-squares fits of $\sum_{i=1}^{x}\delta_{1}(\lfloor x/i \rfloor)\Lambda(i)$, $\sum_{i=1}^{x}\delta_{2}(\lfloor x/i \rfloor)\Lambda(i)$, $\sum_{i=1}^{x}\delta_{3}(\lfloor x/i \rfloor)\Lambda(i)$, ..., $\sum_{i=1}^{x}\delta_{40}(\lfloor x/i \rfloor)\Lambda(i)$ for respective $x$ values up to 999, 1999, 2999, ..., 39999.  (The $p_{1}$ values are the same as the above $p_{1}$ values for the first three or four decimal places.)  See Figure 28 for a plot of the corresponding $p_{2}$ values (the values are erratic, possibly due to the small number of approximate limits used).  Denote these $p_{1}$ and $p_{2}$ values by $p'_{1}(n)$ and $p'_{2}(n)$, $n=1$, 2, 3, ..., 40.  See Figure 29 for a plot of $\sqrt{-np'_{1}(n)}$ versus $\log(n)$ for $n=1$, 2, 3, ..., 40.  For a linear least-squares fit of these quantities, $p_{1}=0.2209$ with a 95\% confidence interval of (0.2191, 0.2227), $p_{2}=0.4145$ with a 95\% confidence interval of (0.4092, 0.4198), SSE=0.0009235, R-square=0.9994, and RMSE=0.00493.  See Figure 30 for a plot of $np'_{2}(n)$ for $n=1$, 2, 3, ..., 40.  For a linear least-squares fit of $np'_{2}(n)$ for $n=1$, 2, 3, ..., 40, $p_{1}=0.6569$ with a 95\% confidence interval of (0.6251, 0.6886), $p_{2}=-0.3898$ with a 95\% confidence interval of ($-1.137$, 0.3578), SSE=49.89, R-square=0.9788, and RMSE=1.146.  \\

\noindent  See Figure 31 for a plot of $\sum_{i=1}^{x}\delta_{100}(\lfloor x/i \rfloor)$ and $\sum_{i=1}^{x}\delta_{100}(\lfloor x/i \rfloor)\Lambda(i)$ (superimposed on each other) for $x=1$, 2, 3, ..., 99999.  For a linear least-squares fit of $\sum_{i=1}^{x}\delta_{100}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 99999, $p_{1}=-0.01936$ with a 95\% confidence interval of ($-0.01936$, $-0.01936$), $p_{2}=0.1094$ with a 95\% confidence interval of (0.1034, 0.1154), SSE=2.347e+4, R-square=1, and RMSE=0.4845.  For a linear least-squares fit of $\sum_{i=1}^{x}\delta_{100}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 99999, $p_{1}=-0.01936$ with a 95\% confidence interval of ($-0.01936$, $-0.01936$), $p_{2}=0.6391$ with a 95\% confidence interval of (0.6198, 0.6583), SSE=2.415e+4, R-square=1, and RMSE=1.554.  See Figure 32 for a plot of $\frac{1}{10}\sqrt{0.01936x}$, $0.01936x+\sum_{i=1}^{x}\delta_{100}(\lfloor x/i \rfloor)$, and  $0.01936x+\sum_{i=1}^{x}\delta_{100}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 20000.  The peaks and valleys of the two corresponding curves occur at about the same places. \\

\noindent  For a linear least-squares fit of $\sum_{i=1}^{x}\delta_{500}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 50000, $p_{1}=-0.005467$ with a 95\% confidence interval of ($-0.005467$, $-0.005467$), $p_{2}=0.1394$ with a 95\% confidence interval of (0.1354, 0.1435), SSE=2664, R-square=1, and RMSE=0.2308.  For a linear least-squares fit of $\sum_{i=1}^{x}\delta_{500}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 50000, $p_{1}=-0.005469$ with a 95\% confidence interval of ($-0.005469$, $-0.005469$), $p_{2}=0.5193$ with a 95\% confidence interval of (0.5138, 0.5249), SSE=5027, R-square=1, and RMSE=0.3171.  See Figure 33 for a plot of $\frac{1}{10}\sqrt{0.005469x}$, $0.005467x+\sum_{i=1}^{x}\delta_{500}(\lfloor x/i \rfloor)$, and $0.005469x+\sum_{i=1}^{x}\delta_{500}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 20000.  The peaks and valleys of the curves in this plot resemble the peaks and valleys of the curves in the first fifth of Figure 32.  See Figure 34 for a comparison of the $0.01936x+\sum_{i=1}^{x}\delta_{100}(\lfloor x/i \rfloor)$ and $0.005467x+\sum_{i=1}^{x}\delta_{500}(\lfloor x/i \rfloor)$ values (each successive $0.01936x+\sum_{i=1}^{x}\delta_{100}(\lfloor x/i \rfloor)$ value has been written 5 times). The two curves are almost the same.  See Figure 35 for a comparison of the $0.01936x+\sum_{i=1}^{x}\delta_{100}(\lfloor x/i \rfloor)\Lambda(i)$ and $0.005469x+\sum_{i=1}^{x}\delta_{500}(\lfloor x/i \rfloor)\Lambda(i)$ values (each successive $0.01936x+\sum_{i=1}^{x}\delta_{100}(\lfloor x/i \rfloor)\Lambda(i)$ value has been written 5 times).  The two curves are roughly the same. \\

\noindent  For a linear least-squares fit of $\sum_{i=1}^{x}\delta_{2500}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 50000, $p_{1}=-0.001414$ with a 95\% confidence interval of ($-0.001414$, $-0.001414$), $p_{2}=0.1175$ with a 95\% confidence interval of (0.1156, 0.1194), SSE=597.2, R-square=1, and RMSE=0.1093.  For a linear least-squares fit of $\sum_{i=1}^{x}\delta_{2500}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 50000, $p_{1}=-0.001416$ with a 95\% confidence interval of ($-0.001416$, $-0.001416$), $p_{2}=0.5083$ with a 95\% confidence interval of (0.5072, 0.5094), SSE=208.3, R-square=1, and RMSE=0.06454.  See Figure 36 for a plot of $\frac{1}{10}\sqrt{0.001416x}$, $0.001414x+\sum_{i=1}^{x}\delta_{2500}(\lfloor x/i \rfloor)$, and $0.001416x+\sum_{i=1}^{x}\delta_{2500}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 20000.  The peaks and valleys of the curves in this plot resemble the peaks and valleys of the curves in the first fifth of Figure 33.  See Figure 37 for a comparison of the $0.005467x+\sum_{i=1}^{x}\delta_{500}(\lfloor x/i \rfloor)$ values and $0.001414x+\sum_{i=1}^{x}\delta_{2500}(\lfloor x/i \rfloor)$ values (each successsive  $0.005467x+\sum_{i=1}^{x}\delta_{500}(\lfloor x/i \rfloor)$ value has been written 5 times).  The two curves are almost the same.  See Figure 38 for a comparison of  $0.005469x+\sum_{i=1}^{x}\delta_{500}(\lfloor x/i \rfloor)\Lambda(i)$ and $0.001416x+\sum_{i=1}^{x}\delta_{2500}(\lfloor x/i \rfloor)\Lambda(i)$ values  (each $0.005469x+\sum_{i=1}^{x}\delta_{500}(\lfloor x/i \rfloor)\Lambda(i)$ value has been written 5 times).  The two curves are roughly the same.  \\ 

\noindent $-p'_{1}(10)$ equals 0.08617.  See Figure 39 for a plot of $\sqrt{0.08617x}$ and $0.08617x+\sum_{i=1}^{x}\delta_{10}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 20.  For this range of $x$ values (up to $2 \cdot 10$), $\sqrt{0.08617x}$ is greater than $0.08617x+\sum_{i=1}^{x}\delta_{10}(\lfloor x/i \rfloor)\Lambda(i)$.  See Figure 40 for a plot of $\sqrt{0.01936x}$ and $0.01936x+\sum_{i=1}^{x}\delta_{100}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 200.  For this range of $x$ values (up to $2 \cdot 100$), $\sqrt{0.01936x}$ is greater than $0.01936x+\sum_{i=1}^{x}\delta_{100}(\lfloor x/i \rfloor)\Lambda(i)$.  This appears to be the case for arbitrary $p'_{1}(n)$ values and corresponding ranges of $x$ values.  \\

\noindent \textbf{Conjecture 1.} $\sqrt{-p'_{1}(n)x}> -p'_{1}(n)x+\sum_{i=1}^{x}\delta_{n}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., $2n$ and $n=1$, 2, 3, .... \\

\noindent $0.17x+\sum_{i=1}^{x}\delta_{1}(\lfloor x/i \rfloor)\Lambda(i)$ may eventually exceed $\sqrt{0.17x}$.  (Using 13000 approximate limits accurate to about 3 decimal places gives a $p'_{1}$ value of $-0.1695$ and a $p'_{2}$ value of $-1.693$ for a linear least-squares fit of $\sum_{i=1}^{x}\delta_{1}(\lfloor x/i \rfloor)\Lambda(i)$, $x=1$, 2, 3, ..., 12999.  See Figure 41 for a plot of $\sqrt{0.1695x}$, $-\sqrt{0.1695x}$, and $0.1695x+\sum_{i=1}^{x}\delta_{1}(\lfloor x/i \rfloor)\Lambda(i)$, $x=1$, 2, 3, ..., 12999.)  As previously shown, $\sqrt{-p'_{1}(n)}\approx (0.2209\log(n)+0.4145)/\sqrt{n}$, $n=1$, 2, 3, ....  The ratio of this value for $n=100$ to the value for $n=1$ is approximately 0.338 (0.14/0.4145), but based on the above empirical evidence, the $-p'_{1}(1)x+\sum_{i=1}^{x}\delta_{1}(\lfloor x/i \rfloor)\Lambda(i)$ values are ``stretched'' by a factor of 100 to approximately give the $-p'_{1}(100)x+\sum_{i=1}^{x}\delta_{100}(\lfloor x/i \rfloor)\Lambda(i)$ values.  See Figure 42 for a plot of $\sqrt{-p'_{1}(1)x}$, $-\sqrt{-p'_{1}(1)x}$, $\sqrt{-p'_{1}(100)x}$, $-\sqrt{-p'_{1}(100)x}$, $-p'_{1}(1)x+\sum_{i=1}^{x}\delta_{1}(\lfloor x/i \rfloor)\Lambda(i)$, and $-p'_{1}(100)x+\sum_{i=1}^{x}\delta_{100}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 999.  If $-p'_{1}(n)x+\sum_{i=1}^{x}\delta_{n}(\lfloor x/i \rfloor)\Lambda(i)$ is $O(\sqrt{x})$, it should be possible to use this technique to find a $\sqrt{-p'_{1}(n)x}$ upper bound.

\section{Convolutions Involving Ramanujan's Sum}

\noindent Let $c_{k}(x)$ denote Ramanujan's sum ($c_{k}(x):=\sum_{m \bmod k,\ (m,k)=1}e^{2\pi imx/k}$). \\

\noindent \textbf{Conjecture 2.} $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(n)-z_{\lfloor x/i \rfloor}(n)+\lfloor (n-1)/2 \rfloor/n)c_{k}(i)$ is a periodic function with period $nk$. \\

\noindent See Figure 43 for a plot of $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(n)-z_{\lfloor x/i \rfloor}(n)+\lfloor (n-1)/2 \rfloor/n)c_{k}(i)$ where $n=13$, $k=13$, and $x=1$, 2, 3, ..., 169 and $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(n)-z_{\lfloor x/i \rfloor}(n)+\lfloor (n-1)/2 \rfloor/n)$ where $n=169$ and $x=1$, 2, 3, ..., 169.  See Figure 44 for a plot of $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(n)-z_{\lfloor x/i \rfloor}(n)+\lfloor (n-1)/2 \rfloor/n)c_{k}(i)$ where $n=12$, $k=10$, and $x=1$, 2, 3, ..., 120 and $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(n)-z_{\lfloor x/i \rfloor}(n)+\lfloor (n-1)/2 \rfloor/n)$ where $n=120$ and $x=1$, 2, 3, ..., 120.  See Figure 45 for a plot of the real parts of the Fourier coefficients of $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(n)-z_{\lfloor x/i \rfloor}(n)+\lfloor (n-1)/2 \rfloor/n)c_{k}(i)$ where $n=4$, $k=19$, and $x=1$, 2, 3, ..., 76. \\

\noindent  See Figure 46 for a plot of $\sum_{i=1}^{x}M(\lfloor x/i \rfloor)c_{k}(i)$ where $k=150$ and $x=1$, 2, 3, ..., 300. \\

\noindent \textbf{Conjecture 3.} $\sum_{i=1}^{x}M(\lfloor x/i \rfloor)c_{k}(i)=\phi(k)$ for $x\ge k$. \\

\noindent See Figure 47 for a plot of $\sum_{i=1}^{x}c_{k}(\lfloor x/i \rfloor)$ for $k=17$ and $x=1$, 2, 3, ..., 500. \\

\noindent \textbf{Conjecture 4.} When $k$ is prime, the $\sum_{i=1}^{x}c_{k}(\lfloor x/i \rfloor)$, $x=1$, 2, 3, ..., values fall on the line segments $y=-x'+kn$, $0<x'<(n+1)k^{2}$, $n=0$, 1, 2, ....  If $k>2$, at least one value falls on every line segment.  \\

\noindent  See Figure 48 for a plot of $\sum_{i=1}^{x}c_{k}(\lfloor x/i \rfloor)$ for $k=7$ and $x=1$, 2, 3, ..., 10000.  For a linear least-squares fit of $\sum_{i=1}^{x}c_{k}(\lfloor x/i \rfloor)$ for $k=7$ and $x=1$, 2, 3, ..., 10000, $p_{1}=-0.7868$ with a 95\% confidence interval of ($-0.787$, $-0.7866$), $p_{2}=-0.2229$ with a 95\% confidence interval of ($-1.563$, 1.118), SSE=1.169e+7, R-square=0.9998, and RMSE=34.19.  See Figure 49 for a plot of $\sum_{i=1}^{x}(c_{k}(\lfloor x/i \rfloor)+0.7868)\log(i)$ for $k=7$ and $x=1$, 2, 3, ..., 2000.  For a linear least-squares fit of $\sum_{i=1}^{x}(c_{k}(\lfloor x/i \rfloor)+0.7868)\log(i)$ for $k=7$ and $x=1$, 2, 3, ..., 2000, SSE=6.434e+6, R-square=0.924, and RMSE=56.75.  For such convolutions with $\log(i)$ and $\sigma_{0}(i)$, the points are typically scattered when $k$ is prime, accounting for the relatively poor linear least-squares fits.  See Figure 50 for a plot of $\sum_{i=1}^{x}(c_{k}(\lfloor x/i \rfloor)+0.7868)\sigma_{1}(i)$ for $k=7$ and $x=1$, 2, 3, ..., 2000.  For a quadratic least-squares fit of $\sum_{i=1}^{x}(c_{k}(\lfloor x/i \rfloor)+0.7868)\sigma_{1}(i)$ for $k=7$ and $x=1$, 2, 3, ..., 2000, SSE=5.623e+9, R-square=0.9999, and RMSE=1678.  \\

\noindent See Figure 51 for a plot of $\sum_{i=1}^{x}c_{k}(\lfloor x/i \rfloor)$ for $k=6$ and $x=1$, 2, 3, ..., 10000.  For a linear least-squares fit of $\sum_{i=1}^{x}c_{k}(\lfloor x/i \rfloor)$ for $k=6$ and $x=1$, 2, 3, ..., 10000, $p_{1}=0.1862$ with a 95\% confidence interval of (.1861, 0.1863), $p_{2}=0.1341$ with a 95\% confidence interval of ($-0.2456$, 0.5139), SSE=9.379e+5, R-square=0.9997, and RMSE=9.686.  See Figure 52 for a plot of $\sum_{i=1}^{x}(c_{k}(\lfloor x/i \rfloor)-0.1862)\sigma_{0}(i)$ for $k=6$ and $x=1$, 2, 3, ..., 2000.  For a linear least-squares fit of $\sum_{i=1}^{x}(c_{k}(\lfloor x/i \rfloor)-0.1862)\sigma_{0}(i)$ for $k=6$ and $x=1$, 2, 3, ..., 2000, SSE=1.419e+6, R-square=0.9821, and RMSE=26.65.  See Figure 53 for a plot of $\sum_{i=1}^{x}(c_{k}(\lfloor x/i \rfloor)-0.1862)\sigma_{2}(i)$ for $k=6$ and $x=1$, 2, 3, ..., 2000.  For a cubic least-squares fit of $\sum_{i=1}^{x}(c_{k}(\lfloor x/i \rfloor)-0.1862)\sigma_{2}(i)$ for $k=6$ and $x=1$, 2, 3, ..., 2000, SSE=5.281e+13, R-square=1, and RMSE=1.627e+5.  See Figure 54 for a plot of $2.65\sum_{i=1}^{x}(c_{k}(\lfloor x/i \rfloor)-0.1862)\log(i)\sigma_{0}(i)/2$ and $\log(x!)$ (superimposed on each other) for $k=6$ and $x=1$, 2, 3, ..., 2000.  See Figure 55 for a plot of $\frac{x}{\log(x)}\sum_{i=1}^{x}(c_{k}(\lfloor x/i \rfloor)-0.1862)\Lambda(i)$ for $k=6$ and $x=2$, 3, 4, ..., 2000.\\

\noindent  See Figure 56 for a plot of $\sum_{i=1}^{x}c_{k}(\lfloor x/i \rfloor)M(i)$ for $k=6$ and $x=1$, 2, 3, ..., 1000.  See Figure 57 for a plot of $\sum_{i=1}^{x}c_{k}(\lfloor x/i \rfloor)(y_{i}(n)-z_{i}(n))$ for $k=6$, $n=4$, and $x=1$, 2, 3, ..., 1000.  The oscillations of these two functions appear to be due to the first non-trivial zero of the Riemann zeta function.  See Figure 58 for a plot of $\sum_{i=1}^{x}(c_{k}(\lfloor x/i \rfloor)+1)(y_{i}(n)-z_{i}(n))$ for $k=17$, $n=50$, and $x=1$, 2, 3, ..., 1500. 

\section{Convolutions Involving Gauss Sums Associated with Dirichlet Characters}

\noindent $\chi_{3}(n)$ for $n=1$, 2, 3, ..., 7 (a Dirichlet character mod 7) equal $1$, $\omega^{2}$, $\omega$, $-\omega$, $-\omega^{2}$, $-1$, and $0$ respectively where $\omega=e^{\pi i/3}$.  Let $G(n,\chi)$ denote the Gauss sum associated with the Dirichlet character $\chi$ ($G(n,\chi)=\sum_{m=1}^{k}\chi(m)e^{2\pi imn/k}$).  See Figure 59 for a plot of the real and imaginary components of $\sum_{i=1}^{x}G(\lfloor x/i \rfloor, \chi)$ for $\chi_{3}$ mod 7 and $x=1$, 2, 3, ..., 10000. For a linear least-squares fit of the real components, $p_{1}=-0.9076$ with a 95\% confidence interval of ($-0.9077$, $-0.9075$), $p_{2}=-0.5372$ with a 95\% confidence interval of ($-1.155$, 0.0835), SSE=2.481e+6, R-square=1, and RMSE=15.75.  For a linear least-squares fit of the imaginary components, $p_{1}=0.8163$ with a 95\% confidence interval of (0.8163, 0.8164), $p_{2}=0.434$ with a 95\% confidence interval of (0.0005785, 0.8675), SSE=1.222e+6, R-square=1, and RMSE=11.06.  See Figure 60 for a plot of the real components of $\sum_{i=1}^{x}(G(\lfloor x/i \rfloor, \chi)+0.9076)\log(i)$ for $x=1$, 2, 3, ..., 10000.  For a linear least-squares fit of the real components, $p_{1}=-0.7922$ with a 95\% confidence interval of ($-0.7968$, $-0.7875$), $p_{2}=0.5904$ with a 95\% confidence interval of ($-2.107$, 3.288), SSE=4.707e+5, R-square=0.9911, and RMSE=21.72.  See Figure 61 for a plot of the imaginary components of $\sum_{i=1}^{x}(G(\lfloor x/i \rfloor, \chi)-0.8163)\log(i)$ for $x=1$, 2, 3, ..., 10000.  For a linear least-squares fit of the imaginary components, $p_{1}=0.426$ with a 95\% confidence interval of ($0.4228$, $0.4292$), $p_{2}=-0.3983$ with a 95\% confidence interval of ($-2.256$, 1.459), SSE=2.232e+5, R-square=0.9855, and RMSE=14.95.  See Figure 62 for a plot of the real components of $\sum_{i=1}^{x}(G(\lfloor x/i \rfloor, \chi)+0.9076)\sigma_{1}(i)$ for $x=1$, 2, 3, ..., 1000.  
For a quadratic least-squares fit of the real components, $p_{1}=-0.5349$ with a 95\% confidence interval of ($-0.5355$, $-0.5344$), $p_{2}=-0.5832$ with a 95\% confidence interval of ($-1.132$, $-0.03472$), $p_{3}=6.079$ with a 95\% confidence interval of ($-112.8$, 125), SSE=4.049e+8, R-square=1, and RMSE=637.3.  (Note that the $p_{1}$ and $p_{2}$ values are almost equal.)  See Figure 63 for a plot of the imaginary components of $\sum_{i=1}^{x}(G(\lfloor x/i \rfloor, \chi)-0.8763)\sigma_{1}(i)$ for $x=1$, 2, 3, ..., 1000.  For a quadratic least-squares fit of the imaginary components, $p_{1}=0.1821$ with a 95\% confidence interval of (0.1818, 0.1824), $p_{2}=0.1867$ with a 95\% confidence interval of ($-0.129$, 0.5024), $p_{3}=-0.6868$ with a 95\% confidence interval of ($-69.11$, 67.73), SSE=1.341e+8, R-square=1, and RMSE=366.8. \\

\noindent  For a linear least-squares fit of the real components of $\sum_{i=1}^{x}G(\lfloor x/i \rfloor, \chi)$ for a Dirichlet character mod 13 and $x=1$, 2, 3, ..., 10000, $p_{1}=-1.247$ with a 95\% confidence interval of ($-1.247$, $-1.247$), $p_{2}=-0.7454$ with a 95\% confidence interval of ($-1.438$, $-0.05244$), SSE=3.123e+6, R-square=1, and RMSE=17.67.  For a linear least-squares fit of the imaginary components, $p_{1}=0.08855$ with a 95\% confidence interval of (0.08847, 0.08863), $p_{2}=0.004716$ with a 95\% confidence interval of ($-0.4692$, 0.4787), SSE=1.461e+6, R-square=0.9978, and RMSE=12.09.  See Figure 64 for a plot of the real components of $\sum_{i=1}^{x}(G(\lfloor x/i \rfloor, \chi)+1.247)\log(i)\sigma_{0}(i)/2$ for the Dirichlet character mod 13, the imaginary components of $\sum_{i=1}^{x}(G(\lfloor x/i \rfloor, \chi)-0.08855)\log(i)\sigma_{0}(i)/2$ for the Dirichlet character mod 13, $-1.25\log(x!)$, and $-0.2289\log(x!)$ for $x=1$, 2, 3, ..., 1000. 

\section{Limits Associated with Convolutions of Gauss Sums Associated with Dirichlet Characters}

\noindent $\chi_{2}(n)$ for $n=1$, 2, 3, ..., 11 (a Dirichlet character mod 11) equal $1$, $\omega$, $-\omega^{3}$, $\omega^{2}$, $\omega^{4}$, $-\omega^{4}$, $-\omega^{2}$, $\omega^{3}$, $-\omega$, $-1$, and $0$ respectively where $\omega=e^{\pi i/5}$.  See Figure 65 for a plot of the real and imaginary components of $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(n)-z_{\lfloor x/i \rfloor}(n)+\lfloor (n-1)/2 \rfloor/n)G(i, \chi)$ where $n=200$, $\chi$ is the Dirichlet character mod 11, and $x=1$, 2, 3, ..., 2000.  See Figure 66 for a plot of the real components of $\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(n)-z_{\lfloor x/i \rfloor}(n)+\lfloor (n-1)/2 \rfloor/n)G(i, \chi)$ where $n=40$, $\chi$ is the Dirichlet character mod 11, and $x=1$, 2, 3, ..., 100 superimposed on a plot of the replicated (two times) real components of $2\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(n)-z_{\lfloor x/i \rfloor}(n)+\lfloor (n-1)/2 \rfloor/n)G(i, \chi)$ where $n=20$, $\chi$ is the Dirichlet character mod 11, and $x=1$, 2, 3, ..., 50 .  See Figure 67 for a plot of the real components of $\frac{1}{n}\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(n)-z_{\lfloor x/i \rfloor}(n)+\lfloor (n-1)/2 \rfloor/n)G(i, \chi)$ where $n=400$, $\chi$ is the Dirichlet character mod 11, and $x=n$, $2n$, $3n$, ..., $100n$ superimposed on a plot of the real components of $\frac{1}{n}\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(n)-z_{\lfloor x/i \rfloor}(n)+\lfloor (n-1)/2 \rfloor/n)G(i, \chi)$ where $n=200$, $\chi$ is the Dirichlet character mod 11, and $x=n$, $2n$, $3n$, ..., $100n$.  As $n\rightarrow\infty$, the real (and imaginary) components of $\frac{1}{n}\sum_{i=1}^{x}(y_{\lfloor x/i \rfloor}(n)-z_{\lfloor x/i \rfloor}(n)+\lfloor (n-1)/2 \rfloor/n)G(i, \chi)$, $x=n$, $2n$, $3n$, ..., where $\chi$ is the Dirichlet character mod 11 appear to approach limits.  
For $n=10,000,000$, these values are 0.4776508, 0.3240762, 0.8212111, $-0.4973424$, $-0.06077291$, 0.6700821, 0.8753683, $-0.1347543$, 0.4056038, $-0.3244519$, $0.3555685$, 0.5669565, 0.8245811, 0.1917227, $-0.0002897262$, $-0.6083173$, 0.6168052, 0.8068949, 0.4895114, $-0.2735334$, ....  Let $\epsilon_{1}(1)$, $\epsilon_{1}(2)$, $\epsilon_{1}(3)$, ..., denote these approximate limits and let $\epsilon_{m}(x)$, $m=2$, 3, 4, ..., denote the limits and $m-1$ values that have been linearly interpolated between successive limits.  See Figure 68 for a plot of $\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 2000 (201 limits were used).  For a linear least-squares fit of $\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 2000, $p_{1}=0.449$ with a 95\% confidence interval of ($0.4489$, $0.4491$), $p_{2}=0.02979$ with a 95\% confidence interval of ($-0.006429$, 0.1239), SSE=2297, R-square=1, and RMSE=1.072.  See Figure 69 for a plot of $\sum_{i=1}^{x}(\epsilon_{10}(\lfloor x/i \rfloor)-0.449)\log(i)$ for $x=1$, 2, 3, ..., 2000.  For a linear least-squares fit of $\sum_{i=1}^{x}(\epsilon_{10}(\lfloor x/i \rfloor)-0.449)\log(i)$ for $x=1$, 2, 3, ..., 2000, SSE=3075, R-square=0.9977, and RMSE=1.241.  See Figure 70 for a plot of $\sum_{i=1}^{x}(\epsilon_{10}(\lfloor x/i \rfloor)-0.449)\sigma_{0}(i)$ for $x=1$, 2, 3, ..., 2000.  For a linear least-squares fit of $\sum_{i=1}^{x}(\epsilon_{10}(\lfloor x/i \rfloor)-0.4490)\sigma_{0}(i)$ for $x=1$, 2, 3, ..., 2000, SSE=4094, R-square=0.9876, and RMSE=1.811.  See Figure 71 for a plot of $\sum_{i=1}^{x}(\epsilon_{10}(\lfloor x/i \rfloor)-0.449)\sigma_{1}(i)$ for $x=1$, 2, 3, ..., 2000.  
For a quadratic least-squares fit of $\sum_{i=1}^{x}(\epsilon_{10}(\lfloor x/i \rfloor)-0.449)\sigma_{1}(i)$ for $x=1$, 2, 3, ..., 2000, $p_{1}=0.01704$ with a 95\% confidence interval of (0.01704, 0.01704), $p_{2}=0.01579$ with a 95\% confidence interval of (0.01202, 0.01956), $p_{3}=0.3728$ with a 95\% confidence interval of ($-1.261$, 2.007), SSE=3.073e+5, R-square=1, and RMSE=12.41.  (Note that the $p_{1}$ and $p_{2}$ values are almost equal.)  See Figure 72 for a plot of $\sum_{i=1}^{x}(\epsilon_{10}(\lfloor x/i \rfloor)-0.449)\sigma_{2}(i)$ for $x=1$, 2, 3, ..., 2000.  For a cubic least-squares fit of $\sum_{i=1}^{x}(\epsilon_{10}(\lfloor x/i \rfloor)-0.449)\sigma_{2}(i)$ for $x=1$, 2, 3, ..., 2000, SSE=5.258e+8, R-square=1, and RMSE=1623.  See Figure 73 for a plot of $\log(x!)$ and $23.9\sum_{i=1}^{x}(\epsilon_{10}(\lfloor x/i \rfloor)-0.449)\log(i)\sigma_{0}(i)/2$ (superimposed on each other) for $x=1$, 2, 3, ..., 2000.  See Figure 74 for a plot of $\frac{1}{x\log(x)}\sum_{i=1}^{x}(\epsilon_{10}(\lfloor x/i \rfloor)-0.449)\Lambda(i)$ for $x=2$, 3, 4, ..., 2000. \\

\noindent See Figure 75 for a plot of $\sum_{i=1}^{x}\epsilon_{1}(\lfloor x/i \rfloor)$ and $\sum_{i=1}^{x}\epsilon_{1}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 60.  The peaks and valleys of the two curves fall roughly at the same places.  See Figure 76 for a plot of $\sum_{i=1}^{x}\epsilon_{1}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 200.  For a linear least-squares fit of $\sum_{i=1}^{x}\epsilon_{1}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 200, $p_{1}=0.3944$ with a 95\% confidence interval of ($0.3902$, $0.3986$), $p_{2}=0.0452$ with a 95\% confidence interval of ($-0.4404$, 0.5308), SSE=595.7, R-square=0.9943, and RMSE=1.735.  See Figure 77 for a plot of $\sum_{i=1}^{x}\epsilon_{1}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 200.  For a linear least-squares fit of $\sum_{i=1}^{x}\epsilon_{1}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 200, $p_{1}=0.3877$ with a 95\% confidence interval of ($0.3818$, $0.3936$), $p_{2}=0.1115$ with a 95\% confidence interval of ($-0.5763$, 0.7992), SSE=1195, R-square=0.9882, and RMSE=2.457.  Let $p_{1}(n)$ and $p_{2}(n)$ denote the $p_{1}$ and $p_{2}$ values of the linear least-squares fits of $\sum_{i=1}^{x}\epsilon_{1}(\lfloor x/i \rfloor)$, $\sum_{i=1}^{x}\epsilon_{2}(\lfloor x/i \rfloor)$, $\sum_{i=1}^{x}\epsilon_{3}(\lfloor x/i \rfloor)$, ..., $\sum_{i=1}^{x}\epsilon_{40}(\lfloor x/i \rfloor)$ for respective $x$ values up to 200, 400, 600, ..., 8000.  See Figure 78 for a plot of $np_{1}(n)$ for $n=1$, 2, 3, ..., 40.  For a linear least-squares fit of $np_{1}(n)$ for $n=1$, 2, 3, ..., 40, $p_{1}=0.4693$ with a 95\% confidence interval of (0.4684, 0.4702), $p_{2}=-0.1823$ with a 95\% confidence interval of ($-0.2032$, $-0.1614$), SSE=0.03882, R-square=1, and RMSE=0.03196.  See Figure 79 for a plot of $np_{2}(n)$ for $n=1$, 2, 3, ..., 40.  
For a linear least-squares fit of $np_{2}(n)$ for $n=1$, 2, 3, ..., 40, $p_{1}=0.02812$ with a 95\% confidence interval of (0.02778, 0.02846), $p_{2}=0.03972$ with a 95\% confidence interval of (0.03166, 0.04778), SSE=0.005795, R-square=0.9986, and RMSE=0.01235.  For a linear least-squares fit of $\sum_{i=1}^{x}\epsilon_{20}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 4000, the respective $p_{1}$ and $p_{2}$ values are 0.4588 and 0.02951.  For a linear least-squares fit of  $\sum_{i=1}^{x}\epsilon_{40}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 8000, the respective $p_{1}$ and $p_{2}$ values are 0.4658 and 0.0292.  See Figure 80 for a plot of $-0.4588x-0.02951+\sum_{i=1}^{x}\epsilon_{20}(\lfloor x/i \rfloor)$ (where each value is replicated twice) for $x=1$, 2, 3, ..., 1000 and $-0.4658x-0.0292+\sum_{i=1}^{x}\epsilon_{40}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 2000 (superimposed on each other).  The peaks and valleys of the two curves occur at the same places and have almost the same magnitudes.  See Figure 81 for a plot of the difference in the two curves.  Another way to compare such curves is to use the smoothed $p_{1}$ and $p_{2}$ values given by the linear least-squares fits of the $np_{1}$ and $np_{2}$ values (although the normalized value of $\sum_{i=1}^{x}\epsilon_{n}(\lfloor x/i \rfloor)$ may no longer be zero).  The smoothed $p_{1}(n)$ values (denoted by $p''_{1}(n)$) would then be $(0.4693n-0.1823)/n$ and the smoothed $p_{2}(n)$ values (denoted by $p''_{2}(n)$) would then be $(0.02812n+0.03972)/n$.  See Figure 82 for the curves corresponding to those given in Figure 80.  See Figure 83 for the difference in the two curves given by using the smoothed $p_{1}$ and $p_{2}$ values.  This approach is useful for determining the rate of growth of $-p_{1}(n)x-p_{2}(n)+\sum_{i=1}^{x}\epsilon_{n}(\lfloor x/i \rfloor)$ due to small errors in the estimation of the slope.  
See Figure 84 for a plot of $-p''_{1}(10)x-p''_{2}(10)+\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)$ (where each value is replicated eight times) for $x=1$, 2, 3, ..., 105 and $-p''_{1}(80)x-p''_{2}(80)+\sum_{i=1}^{x}\epsilon_{80}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 840.  See Figure 85 for a plot of the difference between these two curves.  For a linear least-squares fit of the difference, $p_{1}=-0.00374$ with a 95\% confidence interval of ($-0.003762$, $-0.003718$), $p_{2}=0.005433$ with a 95\% confidence interval of ($-0.005125$, 0.01599), SSE=5.082, R-square=0.9927, and RMSE=0.07788.  When the $-p''_{1}(10)x-p''_{2}(10)+\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)$ values are replicated two, three, four, five, six, seven, and eight times and the corresponding differences are made, the respective $p_{1}$ values of the linear least-squares fits of the differences are 0.00036, $-0.0005961$, $-0.001535$, $-0.002293$, $-0.002793$, $-0.003305$, and $-0.00374$.  Denote these values by $d(1)$, $d(2)$, $d(3)$, $d(4)$, $d(5)$, $d(6)$, and $d(7)$.  See Figure 86 for a plot $\sqrt{d(1)-d(n)}$ versus $\log(n)$ for $n=1$, 2, 3, 4, 5, 6, and 7.  For a quadratic least-squares fit of these quantities, $p_{1}=-0.008379$ with a 95\% confidence interval of ($-0.01046$, $-0.0063$), $p_{2}=0.0487$ with a 95\% confidence interval of (0.04443, 0.05297), $p_{3}=0.0002989$ with a 95\% confidence interval of ($-0.00167$, 0.002268), SSE=2.122e-6, R-square=0.9993, and RMSE=0.0007284.   \\

\noindent  See Figure 87 for a plot of the $p_{1}$ values of the linear least-squares fits of $\sum_{i=1}^{x}\epsilon_{1}(\lfloor x/i \rfloor)\Lambda(i)$, $\sum_{i=1}^{x}\epsilon_{2}(\lfloor x/i \rfloor)\Lambda(i)$, $\sum_{i=1}^{x}\epsilon_{3}(\lfloor x/i \rfloor)\Lambda(i)$, ..., $\sum_{i=1}^{x}\epsilon_{40}(\lfloor x/i \rfloor)\Lambda(i)$ for respective $x$ values up to 200, 400, 600, ..., 8000.  Denote these $p_{1}$ values by $p'_{1}(n)$, $n=1$, 2, 3, ..., 40.  See Figure 88 for a plot of $np'_{1}(n)$ for $n=1$, 2, 3, ..., 40.  For a linear least-squares fit of $np'_{1}(n)$ for $n=1$, 2, 3, ..., 40, $p_{1}=0.4694$ with a 95\% confidence interval of (0.4685, 0.4703), $p_{2}=-0.1883$ with a 95\% confidence interval of ($-0.2099$, $-0.1666$), SSE=0.04194, R-square=1, and RMSE=0.03322.  See Figure 89 for a plot of the corresponding $p_{2}$ values (the intercepts are erratic and appear to oscillate around zero).  See Figure 90 for a plot of $-p'_{1}(10)x+\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)\Lambda(i)$ and $-p'_{1}(20)x+\sum_{i=1}^{x}\epsilon_{20}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 100.  The peaks and valleys of the two curves occur at the same places and have about the same magnitudes. See Figure 91 for a plot of $-p'_{1}(20)x+\sum_{i=1}^{x}\epsilon_{20}(\lfloor x/i \rfloor)\Lambda(i)+p'_{1}(10)x-\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)\Lambda(i)$,  $-p'_{1}(30)x+\sum_{i=1}^{x}\epsilon_{30}(\lfloor x/i \rfloor)\Lambda(i)+p'_{1}(10)x-\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)\Lambda(i)$,  $-p'_{1}(40)x+\sum_{i=1}^{x}\epsilon_{40}(\lfloor x/i \rfloor)\Lambda(i)+p'_{1}(10)x-\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)\Lambda(i)$, ...,  $-p'_{1}(100)x+\sum_{i=1}^{x}\epsilon_{100}(\lfloor x/i \rfloor)\Lambda(i)+p'_{1}(10)x-\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 60.  Denote the above values at $x=22$ by $d(n)$, $n=1$, 2, 3, ..., 9 respectively.  
See Figure 92 for a plot of $d(n)$ for $n=1$, 2, 3, ..., 9.  For a quadratic least-squares fit of $d(n)$, $n=1$, 2, 3, ..., 9, $p_{1}=0.001386$ with a 95\% confidence interval of (0.0009563, 0.001815), $p_{2}=-0.02663$ with a 95\% confidence interval of ($-0.03103$, $-0.02222$), $p_{3}=0.02756$ with a 95\% confidence interval of (0.01797, 0.03715), SSE=5.695e-5, R-square=0.9945, and RMSE=0.003081. Similar results, valid for a larger range of $x$ values, can be obtained by using smoothed $p'_{1}(n)$ values.  Denote the smoothed $p'_{1}(n)$ values (given by $(0.4694n-0.1883)/n$, $n=1$, 2, 3, ...) by $p''_{1}(n)$.  See Figure 93 for a plot of $-p''_{1}(20)x+\sum_{i=1}^{x}\epsilon_{20}(\lfloor x/i \rfloor)\Lambda(i)+p''_{1}(10)x-\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)\Lambda(i)$,  $-p''_{1}(30)x+\sum_{i=1}^{x}\epsilon_{30}(\lfloor x/i \rfloor)\Lambda(i)+p''_{1}(10)x-\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)\Lambda(i)$,  $-p''_{1}(40)x+\sum_{i=1}^{x}\epsilon_{40}(\lfloor x/i \rfloor)\Lambda(i)+p''_{1}(10)x-\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)\Lambda(i)$, ...,  $-p''_{1}(100)x+\sum_{i=1}^{x}\epsilon_{100}(\lfloor x/i \rfloor)\Lambda(i)+p''_{1}(10)x-\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 1000.  Denote the $p_{1}$ values of the linear least-squares fits of the above values by $d(n)$, $n=1$, 2, 3, ..., 9.  See Figure 94 for a plot of $\sqrt{d(n)}$ versus $\log(n)$ for $n=1$, 2, 3, ..., 9.  For a quadratic least-squares fit of these quantities, $p_{1}=-0.006534$ with a 95\% confidence interval of ($-0.007059$, $-0.006009$), $p_{2}=0.04247$ with a 95\% confidence interval of (0.04122, 0.04371), $p_{3}=0.1372$ with a 95\% confidence interval of (0.1365, 0.1378), SSE=4.947e-7, R-square=0.9998, and RMSE=0.0002872.      \\

\noindent  See Figure 95 for a plot of $-p'_{1}(10)x+\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)\Lambda(i)$ (where each value is replicated eight times) for $x=1$, 2, 3, ..., 105 and $-p'_{1}(80)x+\sum_{i=1}^{x}\epsilon_{80}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 840.  See Figure 96 for a plot of the difference between these two curves.  For a linear least-squares fit of the difference, $p_{1}=-0.4147$ with a 95\% confidence interval of ($-0.4154$, $-0.4141$), $p_{2}=0.2906$ with a 95\% confidence interval of ($-0.007371$, 0.5886), SSE=4049, R-square=0.9995, and RMSE=2.198. When the $-p'_{1}(10)x+\sum_{i=1}^{x}\epsilon_{10}(\lfloor x/i \rfloor)\Lambda(i)$ values are replicated two, three, four, five, six, seven, and eight times and the corresponding differences are made, the respective $p_{1}$ values of the linear least-squares fits of the differences are $-0.2344$, $-0.3135$, $-0.3523$, $-0.3779$, $-0.3935$, $-0.4048$, and $-0.4147$.  (See Figure 97 for a plot of all these difference curves.)  Denote these values by $d(1)$, $d(2)$, $d(3)$, $d(4)$, $d(5)$, $d(6)$, and $d(7)$.  See Figure 98 for a plot $\sqrt{-d(n)}$ versus $\log(n)$ for $n=1$, 2, 3, 4, 5, 6, and 7.  For a quadratic least-squares fit of these quantities, $p_{1}=-0.02134$ with a 95\% confidence interval of ($-0.0229$, $-0.01978$), $p_{2}=0.1234$ with a 95\% confidence interval of (0.1202, 0.1266), $p_{3}=0.4843$ with a 95\% confidence interval of (0.4828, 0.4857), SSE=1.192e-6, R-square=0.9999, and RMSE=0.0005485.   

\section{Convolutions Involving Nontrivial Zeros of the Riemann Zeta Function}

\noindent  Let $\theta_{1}$, $\theta_{2}$, $\theta_{3}$, .... denote the imaginary parts of the nontrivial zeros of the zeta function.  Let $\kappa_{1}(1)$, $\kappa_{1}(2)$, $\kappa_{1}(3)$, ..., denote $\log(\theta_{1})$, $\log(\theta_{2})$, $\log(\theta_{3})$, ... and let $\kappa_{m}(x)$, $m=2$, 3, 4, ..., denote these values and $m-1$ values that have been linearly interpolated between successive values.  See Figure 99 for a plot of $\sum_{i=1}^{x}\kappa_{3}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 3000 (1001 zeta function zeros from Andrew Odlyzko's~\cite{ao} tables were used).  For a linear least-squares fit of $\sum_{i=1}^{x}\kappa_{3}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 3000, $p_{1}=2.894$ with a 95\% confidence interval of ($2.894$, $2.894$), $p_{2}=-1.927$ with a 95\% confidence interval of ($-1.943$, $-1.91$), SSE=163.6, R-square=1, and RMSE=0.2336.  See Figure 100 for a plot of $\sum_{i=1}^{x}(\kappa_{3}(\lfloor x/i \rfloor)-2.894)\log(i)$ for $x=1$, 2, 3, ..., 3000.  For a linear least-squares fit of $\sum_{i=1}^{x}(\kappa_{3}(\lfloor x/i \rfloor)-2.894)\log(i)$ for $x=1$, 2, 3, ..., 3000, $p_{1}=-0.4148$ with a 95\% confidence interval of ($-0.4149$, $-0.4148$), $p_{2}=2.761$ with a 95\% confidence interval of (2.708, 2.815), SSE=1649, R-square=1, and RMSE=0.7416.  See Figure 101 for a plot of $\sum_{i=1}^{x}(\kappa_{3}(\lfloor x/i \rfloor)-2.894)\sigma_{0}(i)$ for $x=1$, 2, 3, ..., 3000.  For a linear least-squares fit of $\sum_{i=1}^{x}(\kappa_{3}(\lfloor x/i \rfloor)-2.894)\sigma_{0}(i)$ for $x=1$, 2, 3, ..., 3000, $p_{1}=-0.4144$ with a 95\% confidence interval of ($-0.4145$, $-0.4143$), $p_{2}=0.2982$ with a 95\% confidence interval of (0.1788, 0.4176), SSE=8335, R-square=1, and RMSE=1.667.  See Figure 102 for a plot of $\sum_{i=1}^{x}(\kappa_{3}(\lfloor x/i \rfloor)-2.894)\sigma_{1}(i)$ for $x=1$, 2, 3, ..., 3000.  
For a quadratic least-squares fit of $\sum_{i=1}^{x}(\kappa_{3}(\lfloor x/i \rfloor)-2.894)\sigma_{1}(i)$ for $x=1$, 2, 3, ..., 3000, $p_{1}=-0.1477$ with a 95\% confidence interval of ($-0.1477$, $-0.1477$), $p_{2}=-0.1478$ with a 95\% confidence interval of ($-0.1566$, $-0.1389$), $p_{3}=0.08416$ with a 95\% confidence interval of ($-5.657$, 5.825),  SSE=8.553e+6, R-square=1, and RMSE=53.42.  (Note that the $p_{1}$ and $p_{2}$ values are almost equal.)  See Figure 103 for a plot of $\sum_{i=1}^{x}(\kappa_{3}(\lfloor x/i \rfloor)-2.894)\sigma_{2}(i)$ for $x=1$, 2, 3, ..., 3000.  For a cubic least-squares fit of $\sum_{i=1}^{x}(\kappa_{3}(\lfloor x/i \rfloor)-2.894)\sigma_{2}(i)$ for $x=1$, 2, 3, ..., 3000, SSE=2.891e+12, R-square=1, and RMSE=3.107e+4.  See Figure 104 for a plot of $-\log(x!)$ and $2.65\sum_{i=1}^{x}(\kappa_{3}(\lfloor x/i \rfloor)-2.894)\log(i)\sigma_{0}(i)/2$ (superimposed on each other) for $x=1$, 2, 3, ..., 3000.  See Figure 105 for a plot of $\frac{1}{x\log(x)}\sum_{i=1}^{x}(\kappa_{3}(\lfloor x/i \rfloor)-2.894)\Lambda(i)$ for $x=2$, 3, 4, ..., 3000.  Other than giving more accurate results, the convolutions of $\kappa_{m}(x)$ have the same properties as those of $\epsilon_{m}(x)$. \\

\noindent See Figure 106 for a plot of $\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)$ and $\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)\Lambda(i)$ (superimposed on each other) for $x=1$, 2, 3, ..., 20000 (20001 zeta function zeros were used).  For a linear least-squares fit of $\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 20000, $p_{1}=2.763$ with a 95\% confidence interval of ($2.763$, $2.763$), $p_{2}=-2.269$ with a 95\% confidence interval of ($-2.274$, $-2.263$), SSE=902.8, R-square=1, and RMSE=0.2125.  For a linear least-squares fit of $\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 20000, $p_{1}=2.762$ with a 95\% confidence interval of ($2.762$, $2.762$), $p_{2}=-5.208$ with a 95\% confidence interval of ($-6.866$, $-3.55$), SSE=7.153e+7, R-square=1, and RMSE=59.87.  See Figure 107 for a plot of the $p_{1}$ values of the linear least-squares fits of $\sum_{i=1}^{x}\kappa_{1}(\lfloor x/i \rfloor)$, $\sum_{i=1}^{x}\kappa_{2}(\lfloor x/i \rfloor)$, $\sum_{i=1}^{x}\kappa_{3}(\lfloor x/i \rfloor)$, ..., $\sum_{i=1}^{x}\kappa_{40}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 20000.  Denote these $p_{1}$ and $p_{2}$ values by $p_{1}(n)$ and $p_{2}(n)$, $n=1$, 2, 3, ..., 40.  See Figure 108 for a plot of $np_{1}(n)$ for $n=1$, 2, 3, ..., 40.  For a linear least-squares fit of $np_{1}(n)$ for $n=1$, 2, 3, ..., 40, $p_{1}=2.673$ with a 95\% confidence interval of (2.67, 2.676), $p_{2}=0.824$ with a 95\% confidence interval of (0.7509, 0.897), SSE=0.4766, R-square=1, and RMSE=0.112.  See Figure 109 for a plot of $np_{2}(n)$ for $n=1$, 2, 3, ..., 40.  For a quadratic least-squares fit of $np_{2}(n)$ for $n=1$, 2, 3, ..., 40, $p_{1}=0.01105$ with a 95\% confidence interval of (0.01016, 0.01194), $p_{2}=-2.136$ with a 95\% confidence interval of ($-2.173$, $-2.098$), $p_{3}=-2.03$ with a 95\% confidence interval of ($-2.364$, $-1.695$), SSE=4.042, R-square=0.9997, and RMSE=0.3305.  
For a linear least-squares fit of $\sum_{i=1}^{x}\kappa_{20}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 20000, the respective $p_{1}$ and $p_{2}$ values are 2.719 and $-2.013$.  For a linear least-squares fit of  $\sum_{i=1}^{x}\kappa_{40}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 20000, the respective $p_{1}$ and $p_{2}$ values are 2.69 and $-1.757$.  See Figure 110 for a plot of $-2.719x+2.013+\sum_{i=1}^{x}\kappa_{20}(\lfloor x/i \rfloor)$ (where each value is replicated twice) for $x=1$, 2, 3, ..., 1000 and $-2.69x+1.757+\sum_{i=1}^{x}\kappa_{40}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 2000 (superimposed on each other).  See Figure 111 for a plot of the difference in the two curves.  For a linear least-squares fit of the difference, $p_{1}=-0.0004672$ with a 95\% confidence interval of ($-0.0004708$, $-0.0004636$), $p_{2}=0.2681$ with a 95\% confidence interval of (0.264, 0.2723), SSE=4.395, R-square=0.9707, and RMSE=0.0469. The smoothed $p_{1}(n)$ values would be $(2.673n+0.824)/n$ and the smoothed $p_{2}(n)$ values would be $(0.01105n^{2}-2.136n-2.03)/n$, $n=1$, 2, 3, ....  Denote these values by $p''_{1}(n)$ and $p''_{2}(n)$, $n=1$, 2, 3, ....  See Figure 112 for the curves corresponding to those given in Figure 110.  See Figure 113 for a plot of the difference in the two curves.  For a linear least-squares fit of the difference, $p_{1}=0.005533$ with a 95\% confidence interval of (0.005529, 0.005536), $p_{2}=0.2851$ with a 95\% confidence interval of (0.2809, 0.2892), SSE=4.447, R-square=0.9998, and RMSE=0.04718.  See Figure 114 for a plot of $-p''_{1}(10)x-p''_{2}(10)+\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)$ (where each value is replicated eight times) for $x=1$, 2, 3, ..., 105 and $-p''_{1}(80)x-p''_{2}(80)+\sum_{i=1}^{x}\kappa_{80}(\lfloor x/i \rfloor)$ for $x=1$, 2, 3, ..., 840.  See Figure 115 for a plot of the difference between these two curves.  
For a linear least-squares fit of the difference, $p_{1}=0.01142$ with a 95\% confidence interval of (0.01141, 0.01144), $p_{2}=0.9804$ with a 95\% confidence interval of (0.9732, 0.9877), SSE=2.399, R-square=0.9996, and RMSE=0.0535.  When the $-p''_{1}(10)x-p''_{2}(10)+\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)$ values are replicated two, three, four, five, six, seven, and eight times and the corresponding differences are made, the respective $p_{1}$ values of the linear least-squares fits of the differences are $-0.0007004$, 0.002501, 0.005195, 0.007298, 0.008966, 0.01031, and 0.01142.  (See Figure 116 for a plot of these differences.)  Denote these values by $d(1)$, $d(2)$, $d(3)$, $d(4)$, $d(5)$, $d(6)$, and $d(7)$.  See Figure 117 for a plot of $\sqrt{d(n)-d(1)}$ versus $\log(n)$ for $n=1$, 2, 3, 4, 5, 6, and 7.  For a quadratic least-squares fit of these quantities, $p_{1}=-0.01688$ with a 95\% confidence interval of ($-0.02197$, $-0.01179$), $p_{2}=0.08835$ with a 95\% confidence interval of (0.0779, 0.09879), $p_{3}=0.0008116$ with a 95\% confidence interval of ($-0.004008$, 0.005631), SSE=1.271e-5, R-square=0.9986, and RMSE=0.001783.  Note the similarity of this curve to the corresponding curve given in Figure 86.   \\ 

\noindent  See Figure 118 for a plot of the $p_{1}$ values of the linear least-squares fits of $\sum_{i=1}^{x}\kappa_{1}(\lfloor x/i \rfloor)\Lambda(i)$, $\sum_{i=1}^{x}\kappa_{2}(\lfloor x/i \rfloor)\Lambda(i)$, $\sum_{i=1}^{x}\kappa_{3}(\lfloor x/i \rfloor)\Lambda(i)$, ..., $\sum_{i=1}^{x}\kappa_{40}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 20000.  Denote these $p_{1}$ and $p_{2}$ values by $p'_{1}(n)$ and $p'_{2}(n)$, $n=1$, 2, 3, ..., 40.  See Figure 119 for a plot of $np'_{1}(n)$ for $n=1$, 2, 3, ..., 40.  For a linear least-squares fit of $np'_{1}(n)$ for $n=1$, 2, 3, ..., 40, $p_{1}=2.672$ with a 95\% confidence interval of (2.669, 2.675), $p_{2}=0.8237$ with a 95\% confidence interval of (0.7509, 0.8965), SSE=0.4732, R-square=1, and RMSE=0.1116.  See Figure 120 for a plot of $np'_{2}(n)$ for $n=1$, 2, 3, ..., 40.  For a quadratic least-squares fit of $np'_{2}(n)$ for $n=1$, 2, 3, ..., 40, $p_{1}=0.03749$ with a 95\% confidence interval of (0.03416, 0.04082), $p_{2}=-4.593$ with a 95\% confidence interval of ($-4.734$, $-4.452$), $p_{3}=-8.21$ with a 95\% confidence interval of ($-9.462$, $-6.958$), SSE=56.74, R-square=0.9989, and RMSE=1.238.  See Figure 121 for a plot of $-p'_{1}(10)x-p'_{2}(10)+\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)\Lambda(i)$ and $-p'_{1}(20)x-p'_{2}(20)+\sum_{i=1}^{x}\kappa_{20}(\lfloor x/i \rfloor)\Lambda(i)$ (superimposed on each other)  for $x=1$, 2, 3, ..., 100.  The peaks and valleys of the two curves occur at the same places and have almost the same magnitudes.  See Figure 122 for a plot of $-p'_{1}(20)x-p'_{2}(20)+\sum_{i=1}^{x}\kappa_{20}(\lfloor x/i \rfloor)\Lambda(i) +p'_{1}(10)x+p'_{2}(10)-\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 1000.  
See Figure 123 for a plot of $-p'_{1}(20)x-p'_{2}(20)+\sum_{i=1}^{x}\kappa_{20}(\lfloor x/i \rfloor)\Lambda(i) +p'_{1}(10)x+p'_{2}(10)-\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)\Lambda(i)$, $-p'_{1}(30)x-p'_{2}(30)+\sum_{i=1}^{x}\kappa_{30}(\lfloor x/i \rfloor)\Lambda(i) +p'_{1}(10)x+p'_{2}(10)-\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)\Lambda(i)$, $-p'_{1}(40)x-p'_{2}(40)+\sum_{i=1}^{x}\kappa_{40}(\lfloor x/i \rfloor)\Lambda(i) +p'_{1}(10)x+p'_{2}(10)-\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)\Lambda(i)$, ..., $-p'_{1}(110)x-p'_{2}(110)+\sum_{i=1}^{x}\kappa_{110}(\lfloor x/i \rfloor)\Lambda(i) +p'_{1}(10)x+p'_{2}(10)-\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 100.  The beginning values of these curves (at $x=1$) are $-0.897$, $-1.432$, $-1.794$, $-2.061$, $-2.281$, $-2.466$, $-2.605$, $-2.757$, $-2.865$, and $-2.98$.  Denote these values by $d(n)$, $n=1$, 2, 3, ..., 10.  See Figure 124 for a plot of $nd(n)$ for $n=1$, 2, 3, ..., 10.  For a quadratic least-squares fit of $nd(n)$ for $n=1$, 2, 3, ..., 10, $p_{1}=-0.1129$ with a 95\% confidence interval of ($-0.1313$, $-0.09448$), $p_{2}=-2.015$ with a 95\% confidence interval of ($-2.222$, $-1.807$), $p_{3}=1.48$ with a 95\% confidence interval of (0.9831, 1.978), SSE=0.2239, R-square=0.9997, and RMSE=0.1788.    \\

\noindent The smoothed $p'_{1}(n)$ values would be $(2.672n+0.8237)/n$ and the smoothed $p'_{2}(n)$ values would be $(0.03749n^{2}-4.593n-8.21)/n$, $n=1$, 2, 3, ....  Denote these values by $p''_{1}(n)$ and $p''_{2}(n)$, $n=1$, 2, 3, ....  See Figure 125 for a plot of $-p''_{1}(20)x-p''_{2}(20)+\sum_{i=1}^{x}\kappa_{20}(\lfloor x/i \rfloor)\Lambda(i) +p''_{1}(10)x+p''_{2}(10)-\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)\Lambda(i)$,  $-p''_{1}(30)x-p''_{2}(30)+\sum_{i=1}^{x}\kappa_{30}(\lfloor x/i \rfloor)\Lambda(i) +p''_{1}(10)x+p''_{2}(10)-\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)\Lambda(i)$, $-p''_{1}(40)x-p''_{2}(40)+\sum_{i=1}^{x}\kappa_{40}(\lfloor x/i \rfloor)\Lambda(i) +p''_{1}(10)x+p''_{2}(10)-\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)\Lambda(i)$, ..., and $-p''_{1}(110)x-p''_{2}(110)+\sum_{i=1}^{x}\kappa_{110}(\lfloor x/i \rfloor)\Lambda(i) +p''_{1}(10)x+p''_{2}(10)-\sum_{i=1}^{x}\kappa_{10}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 1000.  The endpoints of these curves (at $x=1000$) are 2.4217, 6.3512, 9.3307, 11.5999, 13.3032, 14.6796, 15.7949, 16.7158, 17.5157, and 18.1906.  Denote these values by $d(n)$, $n=1$, 2, 3, ..., 10.  See Figure 126 for a plot of $\sqrt{d(n)}$ versus $\log(n)$ for $n=1$, 2, 3, ..., 10.  For a quadratic least-squares fit of these quantities, $p_{1}=-0.1618$ with a 95\% confidence interval of ($-0.1853$, $-0.1382$), $p_{2}=1.56$ with a 95\% confidence interval of (1.501, 1.619), $p_{3}=1.544$ with a 95\% confidence interval of (1.51, 1.578), SSE=0.001596, R-square=0.9998, and RMSE=0.0151. 

\section{A Common Function} 

\noindent  For a linear least-squares fit of $\sum_{i=1}^{x}\kappa_{100}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 20000 (where 20001 zeta function zeros are used), $p_{1}=2.668$ and $p_{2}=-2.249$.  For a linear least-squares fit of $\sum_{i=1}^{x}\epsilon_{100}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 20000 (where 201 limits are used), $p_{1}=0.4714$ and $p_{2}=0.7928$.  See Figure 127 for a plot of $-2.668x+\sum_{i=1}^{x}\kappa_{100}(\lfloor x/i \rfloor)\Lambda(i)$ and $5.66(-0.4714x+\sum_{i=1}^{x}\epsilon_{100}(\lfloor x/i \rfloor)\Lambda(i))$ for $x=1$, 2, 3, ..., 100.  The peaks and valleys of the two curves occur at the same places and have almost the same magnitudes.  Only 201 approximate limits of unknown accuracy were used for the $\epsilon$ convolutions (double-precision floating point arithmetic was used, but any effect due to cumulative errors wasn't investigated).  This may account for the erratic $y$-intercepts in some of the linear least-squares fits.  Even when 1001 zeta function zeros are used, the $y$-intercepts in some of the linear least-squares fits of the $\kappa$ convolutions are erratic.  For a linear least-squares fit of $\sum_{i=1}^{x}\epsilon_{100}(\lfloor x/i \rfloor)\Lambda(i)$ for $x=1$, 2, 3, ..., 10000 where 101 of the limits corresponding to the imaginary parts of the Dirichlet character modulo 11 are used, $p_{1}=-1.628$ and $p_{2}=3.075$.  See Figure 128 for a plot of $-2.668x+\sum_{i=1}^{x}\kappa_{100}(\lfloor x/i \rfloor)\Lambda(i)$ and $-1.64(1.628x+\sum_{i=1}^{x}\epsilon_{100}(\lfloor x/i \rfloor)\Lambda(i))$ for $x=1$, 2, 3, ..., 100.  The peaks and valleys of the two curves occur at the same places and have almost the same magnitudes.  

\section{Materials and Methods}

\noindent  Software for computing the $\epsilon$ and $\kappa$ (and other) convolutions is attached.  The 201 limits for the $\epsilon$ convolutions are included.  

\begin{thebibliography}{99}

\bibitem{fl} Franel, J. and Landau, E., Les suites de Farey et le probl\`{e}me des nombres premiers, \emph{G\"{o}ttinger Nachr.}, 198-206 (1924)

\bibitem{li} Littlewood, J. E., Quelques cons\'{e}quences de l'hypoth\`{e}se que la fonction $\zeta(s)$ n'a pas de z\'{e}ros dans le demi-plan Re$(s)>1/2$. \emph{C. R. Acad. Sci. Paris} \textbf{154}, 263--266 (1912)

\bibitem{or} Odlyzko, A. M., and te Riele, H. J. J., Disproof of the Mertens Conjecture, \emph{ Journal f\"{u}r die reine und angewandte Mathematik}, \bfseries 357\normalfont:138-160 (1985)

\bibitem{k} Kanemitsu, S. and Yoshimoto, M., Farey series and the Riemann hypothesis, \emph{Acta Arith.}, \normalfont LXXV.4 (1996)

\bibitem{m1} Mertens, F., \"{U}ber eine zahlentheoretische Funktion, \emph{Akademie Wissenschaftlicher Wien Mathematik-Naturlich Kleine Sitzungsber}, \bfseries 106 \normalfont (1897) 761-830

\bibitem{ta} Apostol, T. M., \emph{Introduction to Analytic Number Theory}, Springer, 1976

\bibitem{c} Cox, D., ``www.darrellcox.website/bound.pdf"

\bibitem{h} Haselgrove, C. B., A disproof of a conjecture of P\'{o}lya, \emph{Mathematika}, \textbf{5}(02), 141--145 (1958)

\bibitem{ao} Odlyzko, A. M., ``www.dtc.umn.edu/\~{}odlyzko/zeta\_tables/index.html''

\end{thebibliography}

\end{document}