From b0569c5058776517bd2417986d7588c9b9f50dc2 Mon Sep 17 00:00:00 2001 From: Santo Cariotti Date: Mon, 17 Jun 2024 13:53:03 +0200 Subject: eof --- conclusion.tex | 2 +- content.tex | 7 ++++--- edge.tex | 12 ++++++++++++ k8s.tex | 2 +- main.tex | 3 ++- tests.tex | 30 ++++++++++++++++++++++-------- 6 files changed, 42 insertions(+), 14 deletions(-) create mode 100644 edge.tex diff --git a/conclusion.tex b/conclusion.tex index 53141cc..8c1b624 100644 --- a/conclusion.tex +++ b/conclusion.tex @@ -1,6 +1,6 @@ \begin{frame}{Conclusion} \begin{itemize} \item Kubespray takes longer to instantiate new function instances. - \item K3s and MicroK8s, removing unnecessary components, have reduced the deployment time and complexity, improving performance for the majority of serverless edge workloads. \pause \textbf{\alert{We can say that they are comparable.}} + \item K3s and MicroK8s, removing unnecessary components, have reduced the deployment time and complexity, improving performance for the majority of serverless edge workloads. \pause \textbf{\alert{We can say that they are comparable for our serverless performance evaluation.}} \end{itemize} \end{frame} \ No newline at end of file diff --git a/content.tex b/content.tex index 0b2b574..b326ab1 100644 --- a/content.tex +++ b/content.tex @@ -2,9 +2,10 @@ \begin{enumerate} \item<1-> What is Kubernetes? - \item<2-> What is Serverless? - \item<3-> How can we combine both? - \item<4-> Which K8s distribution is more efficient for serveless development? + \item<2-> What is ``the edge''? + \item<3-> What is Serverless? + \item<4-> How can we combine them? + \item<5-> Which K8s distribution is more efficient for serverless development? 
\end{enumerate} \end{frame} \ No newline at end of file diff --git a/edge.tex b/edge.tex new file mode 100644 index 0000000..065a7dc --- /dev/null +++ b/edge.tex @@ -0,0 +1,12 @@ +% -------------------- frame 1 ------------------- +\begin{frame}{Edge computing} + +We deal with low-powered and resource-constrained devices so as to have: + +\begin{itemize} +\item<1-> Faster response times for latency-sensitive apps. +\item<2-> Increased user privacy by sending only filtered data upstream to the cloud. +\item<3-> Reduced network congestion. +\end{itemize} + +\end{frame} \ No newline at end of file diff --git a/k8s.tex b/k8s.tex index bd3a7bc..974a52c 100644 --- a/k8s.tex +++ b/k8s.tex @@ -4,7 +4,7 @@ \centering \includegraphics[width=0.2\linewidth]{static/Kubernetes_logo_without_workmark.svg.png} \end{figure} - The development was started by Google in 2014, but is now developed by Cloud Native Computing Foundation. + The development was started by Google in 2014, but is now maintained by Cloud Native Computing Foundation. It is the most widely used container orchestrator. \end{frame} diff --git a/main.tex b/main.tex index 401f8fe..1db0766 100644 --- a/main.tex +++ b/main.tex @@ -18,13 +18,14 @@ \title[Kubernetes distributions for the edge: serverless performance evaluation]{Kubernetes distributions for the edge: serverless performance evaluation \footnotesize{[1]}} \author[]{Santo Cariotti} -\date[]{University of Bologna, 2024-06-17} +\date[]{University of Bologna\\ Jun 17, 2024} \begin{document} \frame{\titlepage} \input{content} \input{k8s} +\input{edge} \input{serverless} \input{tests} \input{conclusion} diff --git a/tests.tex b/tests.tex index a7dd0d4..4469c2c 100644 --- a/tests.tex +++ b/tests.tex @@ -33,6 +33,18 @@ Let's choose 14 tests from the FunctionBench serverless benchmarking suite [4]. 5 tests for each Kubernetes distribution. Executed using the recommended OpenFaaS of-watchdog template for Python 3.7. 
+\pause + +We'll see performance results for: + +\begin{itemize} + \item Cold start + \item Serial execution + \item Parallel execution using a single replica + \item Parallel execution using native OpenFaaS auto scaling + \item Parallel execution using Kubernetes HPA +\end{itemize} + \end{frame} % -- frame 4 -- @@ -57,13 +69,13 @@ Kubespray exhibits a 15\% increase compared to both K3s and MicroK8s. Is the performance difference between K3s and MicroK8s statistically significant? \pause -Using a Mann-Whitney U test with \(\alpha = 0.05\) and +Using a Mann-Whitney \(U\) test with \(\alpha = 0.05\) and \begin{itemize} \item H0: the two populations are equal - \item H0: the two populations are not equal + \item H1: the two populations are not equal \end{itemize} -we have a \(p\)-value = 0.202, so we can't reject the null hypothesis. +we have a \(p\)-value \(= 0.202 > 0.05\), so we can't reject the null hypothesis. \end{frame} @@ -84,16 +96,17 @@ Once again, Kubespray results are slower than both K3s and MicroK8s. % -- frame 9 -- \begin{frame}{Serial execution performance — Results} -Is the performance difference between K3s and MicroK8s statistically significant? +How can I compare these three results? \pause Using a Kruskal-Wallis test with \(\alpha = 0.05\) and \begin{itemize} \item H0: the population medians are equal - \item H0: the population medians are not equal + \item H1: the population medians are not equal \end{itemize} -where the null hypothesis failed to be rejected for the video-processing test. Keeping the same hypothesis we can perform the Mann-Whitney U test where, this time, the null hypothesis is rejected in 10 of the 14 tests. The null hypothesis can't be rejected for 3 CPU and 1 network benchmarks. +where the null hypothesis failed to be rejected for the video-processing test. \pause +\\ Keeping the same hypothesis we can perform the Mann-Whitney \(U\) test for both K3s and MicroK8s and, this time, the null hypothesis is rejected in 10 of the 14 tests. 
The null hypothesis can't be rejected for 3 CPU and 1 network benchmarks. \end{frame} % -- frame 10 -- @@ -116,13 +129,14 @@ This time Kubespray has better performance than both K3s and MicroK8s for 6 of t \begin{frame}{Parallel execution using native OpenFaaS auto scaling} Each function is invoked for a fixed amount of time using varying concurrency to determine the performance of the auto-scaling behavior. \pause +\\ It tests the number of successful invocations per second for the last 10 seconds: if the number is larger than 5, it scales up the number of function instances up to a preconfigured maximum. \end{frame} % -- frame 13 -- \begin{frame}{Parallel execution using native OpenFaaS auto scaling — Results} -Performances are tests by 1 request per second from 6 concurrent workers for more than 200 seconds, successfully reaching the defined threshold for the maximum number of replicas. +Performance is tested by 1 req/s from 6 concurrent workers for more than 200s, successfully reaching the defined threshold for the maximum number of replicas.\\ The current number of deployed replicas are not taken but this leads to suboptimal scaling decision scaling to maximum number of configured replicas or not scaling at all under a consistent load. \end{frame} @@ -142,7 +156,7 @@ We find out results for three different execution strategy: % -- frame 15 -- \begin{frame}{Parallel execution using Kubernetes Horizonal Pod Autoscaler — Results} -Kubespray exhibits higher response times across the three tests, while the results obtained from K3S and MicroK8s are similar. +Kubespray exhibits higher response times across the three tests, while the results obtained from K3s and MicroK8s are similar. \begin{figure} \centering -- cgit v1.2.3-71-g8e6c