diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index acd890f1c9480d3ccd28e73da94d7f7b34a0a1dd..df7fc56db817887545e0927bcb5d49e093e82223 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -19,6 +19,7 @@ build_pdf:
       - "output/${Name}.pdf"
   only:
     - main
+    - develop
 
 convert_md:
   stage: convert
diff --git a/Fazeli_Shahroudi-Sepehr-Mastersthesis.bbl b/Fazeli_Shahroudi-Sepehr-Mastersthesis.bbl
new file mode 100644
index 0000000000000000000000000000000000000000..65456746d19ed3be1137a159132ea399f31520f0
--- /dev/null
+++ b/Fazeli_Shahroudi-Sepehr-Mastersthesis.bbl
@@ -0,0 +1,31 @@
+% Generated by IEEEtran.bst, version: 1.14 (2015/08/26)
+\begin{thebibliography}{1}
+\providecommand{\url}[1]{#1}
+\csname url@samestyle\endcsname
+\providecommand{\newblock}{\relax}
+\providecommand{\bibinfo}[2]{#2}
+\providecommand{\BIBentrySTDinterwordspacing}{\spaceskip=0pt\relax}
+\providecommand{\BIBentryALTinterwordstretchfactor}{4}
+\providecommand{\BIBentryALTinterwordspacing}{\spaceskip=\fontdimen2\font plus
+\BIBentryALTinterwordstretchfactor\fontdimen3\font minus
+  \fontdimen4\font\relax}
+\providecommand{\BIBforeignlanguage}[2]{{%
+\expandafter\ifx\csname l@#1\endcsname\relax
+\typeout{** WARNING: IEEEtran.bst: No hyphenation pattern has been}%
+\typeout{** loaded for the language `#1'. Using the pattern for}%
+\typeout{** the default language instead.}%
+\else
+\language=\csname l@#1\endcsname
+\fi
+#2}}
+\providecommand{\BIBdecl}{\relax}
+\BIBdecl
+
+\bibitem{ferreira_generic_2024}
+D.~Ferreira, F.~Moutinho, J.~P. Matos-Carvalho, M.~Guedes, and P.~Deusdado,
+  ``\BIBforeignlanguage{eng}{Generic {FPGA} {Pre}-{Processing} {Image}
+  {Library} for {Industrial} {Vision} {Systems},''
+  \emph{\BIBforeignlanguage{eng}{Sensors (Basel, Switzerland)}}, vol.~24,
+  no.~18, p. 6101, Sep. 2024.
+
+\end{thebibliography}
diff --git a/Fazeli_Shahroudi-Sepehr-Mastersthesis.tex b/Fazeli_Shahroudi-Sepehr-Mastersthesis.tex
index ffd868b3d2c7b56ef447635df4fae57ec2abf5ec..a2f84ba185ee5e6e3b6d06e2e3bb5e2b4bd53862 100644
--- a/Fazeli_Shahroudi-Sepehr-Mastersthesis.tex
+++ b/Fazeli_Shahroudi-Sepehr-Mastersthesis.tex
@@ -19,6 +19,7 @@
 \usepackage[T1]{fontenc}
 \usepackage{textcomp}
 \usepackage{enumitem}
+\usepackage{multirow}
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 \newcommand{\thesistitleDE}{Vergleichende Evaluierung von Bildverarbeitungsbibliotheken für industrielle Anwendungen bei Dassault Systems}
 \newcommand{\thesistitleEN}{Comparative Evaluation of Image Processing Libraries for Industrial Applications at Dassault Systems}
@@ -32,12 +33,74 @@
 \newcommand{\degree}{Master of Engineering (M.Eng.)}
 % \let\oldsection\section
 % \renewcommand{\section}{\clearpage\oldsection}
-\let\oldlongtable\longtable
-\let\endoldlongtable\endlongtable
-\renewcommand{\endlongtable}{\endoldlongtable\clearpage}
+% \let\oldlongtable\longtable
+% \let\endoldlongtable\endlongtable
+% \renewcommand{\endlongtable}{\endoldlongtable\clearpage}
 % \let\oldsubsection\subsection
 % \renewcommand{\subsection}{\clearpage\oldsubsection}
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\lstloadlanguages{C,C++,csh,Java}
+
+\definecolor{red}{rgb}{0.6,0,0} 
+\definecolor{blue}{rgb}{0,0,0.6}
+\definecolor{green}{rgb}{0,0.8,0}
+\definecolor{cyan}{rgb}{0.0,0.6,0.6}
+\definecolor{cloudwhite}{rgb}{0.9412, 0.9608, 0.8471}
+
+\lstset{
+language=csh,
+basicstyle=\footnotesize\ttfamily,
+numbers=left,
+numberstyle=\tiny,
+numbersep=5pt,
+tabsize=2,
+extendedchars=true,
+breaklines=true,
+frame=b,
+stringstyle=\color{blue}\ttfamily,
+showspaces=false,
+showtabs=false,
+xleftmargin=17pt,
+framexleftmargin=17pt,
+framexrightmargin=5pt,
+framexbottommargin=4pt,
+commentstyle=\color{green},
+morecomment=[l]{//}, %use comment-line-style!
+morecomment=[s]{/*}{*/}, %for multiline comments
+showstringspaces=false,
+morekeywords={ abstract, event, new, struct,
+as, explicit, null, switch,
+base, extern, object, this,
+bool, false, operator, throw,
+break, finally, out, true,
+byte, fixed, override, try,
+case, float, params, typeof,
+catch, for, private, uint,
+char, foreach, protected, ulong,
+checked, goto, public, unchecked,
+class, if, readonly, unsafe,
+const, implicit, ref, ushort,
+continue, in, return, using,
+decimal, int, sbyte, virtual,
+default, interface, sealed, volatile,
+delegate, internal, short, void,
+do, is, sizeof, while,
+double, lock, stackalloc,
+else, long, static,
+enum, namespace, string},
+keywordstyle=\color{cyan},
+identifierstyle=\color{red},
+backgroundcolor=\color{cloudwhite},
+}
+
+\usepackage{caption}
+\DeclareCaptionFont{white}{\color{white}}
+\DeclareCaptionFormat{listing}{\colorbox{blue}{\parbox{\textwidth}{\hspace{5pt}#1#2#3}}}
+\DeclareCaptionFormat{listingbullet}{\hspace{27.5pt}\colorbox{blue}{\parbox{0.931\textwidth}{\hspace{5pt}#1#2#3}}}
+\captionsetup[lstlisting]{format=listing,labelfont=white,textfont=white, 
+    singlelinecheck=false, margin=0pt, font={bf,footnotesize}}
+\renewcommand\lstlistingname{Code}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 \clearpairofpagestyles
 \ihead{\headmark}
 \ofoot*{\pagemark}
@@ -51,41 +114,52 @@
 \pagenumbering{roman}
 \input{./sources/title.tex}
 \input{./sources/declaration.tex}
-\input{./sources/Abstract.tex}
-
-
-
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
 \tableofcontents
 
 \cleardoublepage%
 \setcounter{page}{1}
 \pagenumbering{arabic}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
 % \input{main}
+\input{./sources/Abstract.tex}
 
 % \chapter{Introduction}
-\input{chapters/1-Introduction.tex} % Separate file or replace with content directly here
+\input{chapters/1-Introduction.tex}
 
 % \chapter{Literature Review}
-\input{chapters/2-Literature_Review.tex} % Background or Literature Review
+% \input{chapters/2-Literature_Review.tex}
 
 % \chapter{Methodology}
-\input{chapters/3-Methodology.tex} % Methods or approach to the research
+% \input{chapters/3-Methodology.tex}
+
+% \chapter{Methodology}
+\input{chapters/2-Methodology.tex}
+
+% \chapter{Implementation}
+\input{chapters/3-Implementation.tex}
+
+% \chapter{Results}
+\input{chapters/4-Results.tex}
+
+% \chapter{Discussion}
+\input{chapters/5-Discussion.tex}
 
 % \chapter{Evaluation of Alternatives}
-\input{chapters/4-Evaluation_of_Alternatives.tex} % Findings or data analysis
+% \input{chapters/4-Evaluation_of_Alternatives.tex}
 
 % \chapter{Analysis and Discussion}
-\input{chapters/5-Analysis_and_Discussion.tex} % Analysis of the results
+% \input{chapters/5-Analysis_and_Discussion.tex}
 
 % \chapter{Conclusion and Recommendations}
-\input{chapters/6-Conclusion_and_Recommendations.tex} % Summary and future work
+% \input{chapters/6-Conclusion_and_Recommendations.tex}
 
-% Appendices (Optional)
-\appendix
 % \chapter{Appendices}
-\input{chapters/Appendices.tex} % Additional information, data, or figures
+\input{chapters/Appendices.tex} 
 
 \bibliographystyle{IEEEtran}
-\bibliography{references}
+\bibliography{./sources/references}
 \end{document}
\ No newline at end of file
diff --git a/IEEEtran.bst b/IEEEtran.bst
new file mode 100644
index 0000000000000000000000000000000000000000..f9c03d79f4fb8df4cd260e69258dbba3bb4dfd04
--- /dev/null
+++ b/IEEEtran.bst
@@ -0,0 +1,2409 @@
+%%
+%% IEEEtran.bst
+%% BibTeX Bibliography Style file for IEEE Journals and Conferences (unsorted)
+%% Version 1.14 (2015/08/26)
+%% 
+%% Copyright (c) 2003-2015 Michael Shell
+%% 
+%% Original starting code base and algorithms obtained from the output of
+%% Patrick W. Daly's makebst package as well as from prior versions of
+%% IEEE BibTeX styles:
+%% 
+%% 1. Howard Trickey and Oren Patashnik's ieeetr.bst  (1985/1988)
+%% 2. Silvano Balemi and Richard H. Roy's IEEEbib.bst (1993)
+%% 
+%% Support sites:
+%% http://www.michaelshell.org/tex/ieeetran/
+%% http://www.ctan.org/pkg/ieeetran
+%% and/or
+%% http://www.ieee.org/
+%% 
+%% For use with BibTeX version 0.99a or later
+%%
+%% This is a numerical citation style.
+%% 
+%%*************************************************************************
+%% Legal Notice:
+%% This code is offered as-is without any warranty either expressed or
+%% implied; without even the implied warranty of MERCHANTABILITY or
+%% FITNESS FOR A PARTICULAR PURPOSE! 
+%% User assumes all risk.
+%% In no event shall the IEEE or any contributor to this code be liable for
+%% any damages or losses, including, but not limited to, incidental,
+%% consequential, or any other damages, resulting from the use or misuse
+%% of any information contained here.
+%%
+%% All comments are the opinions of their respective authors and are not
+%% necessarily endorsed by the IEEE.
+%%
+%% This work is distributed under the LaTeX Project Public License (LPPL)
+%% ( http://www.latex-project.org/ ) version 1.3, and may be freely used,
+%% distributed and modified. A copy of the LPPL, version 1.3, is included
+%% in the base LaTeX documentation of all distributions of LaTeX released
+%% 2003/12/01 or later.
+%% Retain all contribution notices and credits.
+%% ** Modified files should be clearly indicated as such, including  **
+%% ** renaming them and changing author support contact information. **
+%%*************************************************************************
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% DEFAULTS FOR THE CONTROLS OF THE BST STYLE %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% These are the defaults for the user adjustable controls. The values used
+% here can be overridden by the user via IEEEtranBSTCTL entry type.
+
+% NOTE: The recommended LaTeX command to invoke a control entry type is:
+% 
+%\makeatletter
+%\def\bstctlcite{\@ifnextchar[{\@bstctlcite}{\@bstctlcite[@auxout]}}
+%\def\@bstctlcite[#1]#2{\@bsphack
+%  \@for\@citeb:=#2\do{%
+%    \edef\@citeb{\expandafter\@firstofone\@citeb}%
+%    \if@filesw\immediate\write\csname #1\endcsname{\string\citation{\@citeb}}\fi}%
+%  \@esphack}
+%\makeatother
+%
+% It is called at the start of the document, before the first \cite, like:
+% \bstctlcite{IEEEexample:BSTcontrol}
+%
+% IEEEtran.cls V1.6 and later does provide this command.
+
+
+
+% #0 turns off the display of the number for articles.
+% #1 enables
+FUNCTION {default.is.use.number.for.article} { #1 }
+
+
+% #0 turns off the display of the paper and type fields in @inproceedings.
+% #1 enables
+FUNCTION {default.is.use.paper} { #1 }
+
+
+% #0 turns off the display of urls
+% #1 enables
+FUNCTION {default.is.use.url} { #1 }
+
+
+% #0 turns off the forced use of "et al."
+% #1 enables
+FUNCTION {default.is.forced.et.al} { #0 }
+
+
+% The maximum number of names that can be present beyond which an "et al."
+% usage is forced. Be sure that num.names.shown.with.forced.et.al (below)
+% is not greater than this value!
+% Note: There are many instances of references in IEEE journals which have
+% a very large number of authors as well as instances in which "et al." is
+% used profusely.
+FUNCTION {default.max.num.names.before.forced.et.al} { #10 }
+
+
+% The number of names that will be shown with a forced "et al.".
+% Must be less than or equal to max.num.names.before.forced.et.al
+FUNCTION {default.num.names.shown.with.forced.et.al} { #1 }
+
+
+% #0 turns off the alternate interword spacing for entries with URLs.
+% #1 enables
+FUNCTION {default.is.use.alt.interword.spacing} { #1 }
+
+
+% If alternate interword spacing for entries with URLs is enabled, this is
+% the interword spacing stretch factor that will be used. For example, the
+% default "4" here means that the interword spacing in entries with URLs can
+% stretch to four times normal. Does not have to be an integer. Note that
+% the value specified here can be overridden by the user in their LaTeX
+% code via a command such as: 
+% "\providecommand\BIBentryALTinterwordstretchfactor{1.5}" in addition to
+% that via the IEEEtranBSTCTL entry type.
+FUNCTION {default.ALTinterwordstretchfactor} { "4" }
+
+
+% #0 turns off the "dashification" of repeated (i.e., identical to those
+% of the previous entry) names. The IEEE normally does this.
+% #1 enables
+FUNCTION {default.is.dash.repeated.names} { #1 }
+
+
+% The default name format control string.
+FUNCTION {default.name.format.string}{ "{f.~}{vv~}{ll}{, jj}" }
+
+
+% The default LaTeX font command for the names.
+FUNCTION {default.name.latex.cmd}{ "" }
+
+
+% The default URL prefix.
+FUNCTION {default.name.url.prefix}{ "[Online]. Available:" }
+
+
+% Other controls that cannot be accessed via IEEEtranBSTCTL entry type.
+
+% #0 turns off the terminal startup banner/completed message so as to
+% operate more quietly.
+% #1 enables
+FUNCTION {is.print.banners.to.terminal} { #1 }
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% FILE VERSION AND BANNER %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+FUNCTION{bst.file.version} { "1.14" }
+FUNCTION{bst.file.date} { "2015/08/26" }
+FUNCTION{bst.file.website} { "http://www.michaelshell.org/tex/ieeetran/bibtex/" }
+
+FUNCTION {banner.message}
+{ is.print.banners.to.terminal
+     { "-- IEEEtran.bst version" " " * bst.file.version *
+       " (" * bst.file.date * ") " * "by Michael Shell." *
+       top$
+       "-- " bst.file.website *
+       top$
+       "-- See the " quote$ * "IEEEtran_bst_HOWTO.pdf" * quote$ * " manual for usage information." *
+       top$
+     }
+     { skip$ }
+   if$
+}
+
+FUNCTION {completed.message}
+{ is.print.banners.to.terminal
+     { ""
+       top$
+       "Done."
+       top$
+     }
+     { skip$ }
+   if$
+}
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%
+%% STRING CONSTANTS %%
+%%%%%%%%%%%%%%%%%%%%%%
+
+FUNCTION {bbl.and}{ "and" }
+FUNCTION {bbl.etal}{ "et~al." }
+FUNCTION {bbl.editors}{ "eds." }
+FUNCTION {bbl.editor}{ "ed." }
+FUNCTION {bbl.edition}{ "ed." }
+FUNCTION {bbl.volume}{ "vol." }
+FUNCTION {bbl.of}{ "of" }
+FUNCTION {bbl.number}{ "no." }
+FUNCTION {bbl.in}{ "in" }
+FUNCTION {bbl.pages}{ "pp." }
+FUNCTION {bbl.page}{ "p." }
+FUNCTION {bbl.chapter}{ "ch." }
+FUNCTION {bbl.paper}{ "paper" }
+FUNCTION {bbl.part}{ "pt." }
+FUNCTION {bbl.patent}{ "Patent" }
+FUNCTION {bbl.patentUS}{ "U.S." }
+FUNCTION {bbl.revision}{ "Rev." }
+FUNCTION {bbl.series}{ "ser." }
+FUNCTION {bbl.standard}{ "Std." }
+FUNCTION {bbl.techrep}{ "Tech. Rep." }
+FUNCTION {bbl.mthesis}{ "Master's thesis" }
+FUNCTION {bbl.phdthesis}{ "Ph.D. dissertation" }
+FUNCTION {bbl.st}{ "st" }
+FUNCTION {bbl.nd}{ "nd" }
+FUNCTION {bbl.rd}{ "rd" }
+FUNCTION {bbl.th}{ "th" }
+
+
+% This is the LaTeX spacer that is used when a larger than normal space
+% is called for (such as just before the address:publisher).
+FUNCTION {large.space} { "\hskip 1em plus 0.5em minus 0.4em\relax " }
+
+% The LaTeX code for dashes that are used to represent repeated names.
+% Note: Some older IEEE journals used something like
+% "\rule{0.275in}{0.5pt}\," which is fairly thick and runs right along
+% the baseline. However, the IEEE now uses a thinner, above baseline,
+% six dash long sequence.
+FUNCTION {repeated.name.dashes} { "------" }
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% PREDEFINED STRING MACROS %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+MACRO {jan} {"Jan."}
+MACRO {feb} {"Feb."}
+MACRO {mar} {"Mar."}
+MACRO {apr} {"Apr."}
+MACRO {may} {"May"}
+MACRO {jun} {"Jun."}
+MACRO {jul} {"Jul."}
+MACRO {aug} {"Aug."}
+MACRO {sep} {"Sep."}
+MACRO {oct} {"Oct."}
+MACRO {nov} {"Nov."}
+MACRO {dec} {"Dec."}
+
+
+
+%%%%%%%%%%%%%%%%%%
+%% ENTRY FIELDS %%
+%%%%%%%%%%%%%%%%%%
+
+ENTRY
+  { address
+    assignee
+    author
+    booktitle
+    chapter
+    day
+    dayfiled
+    edition
+    editor
+    howpublished
+    institution
+    intype
+    journal
+    key
+    language
+    month
+    monthfiled
+    nationality
+    note
+    number
+    organization
+    pages
+    paper
+    publisher
+    school
+    series
+    revision
+    title
+    type
+    url
+    volume
+    year
+    yearfiled
+    CTLuse_article_number
+    CTLuse_paper
+    CTLuse_url
+    CTLuse_forced_etal
+    CTLmax_names_forced_etal
+    CTLnames_show_etal
+    CTLuse_alt_spacing
+    CTLalt_stretch_factor
+    CTLdash_repeated_names
+    CTLname_format_string
+    CTLname_latex_cmd
+    CTLname_url_prefix
+  }
+  {}
+  { label }
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%
+%% INTEGER VARIABLES %%
+%%%%%%%%%%%%%%%%%%%%%%%
+
+INTEGERS { prev.status.punct this.status.punct punct.std
+           punct.no punct.comma punct.period 
+           prev.status.space this.status.space space.std
+           space.no space.normal space.large
+           prev.status.quote this.status.quote quote.std
+           quote.no quote.close
+           prev.status.nline this.status.nline nline.std
+           nline.no nline.newblock 
+           status.cap cap.std
+           cap.no cap.yes}
+
+INTEGERS { longest.label.width multiresult nameptr namesleft number.label numnames }
+
+INTEGERS { is.use.number.for.article
+           is.use.paper
+           is.use.url
+           is.forced.et.al
+           max.num.names.before.forced.et.al
+           num.names.shown.with.forced.et.al
+           is.use.alt.interword.spacing
+           is.dash.repeated.names}
+
+
+%%%%%%%%%%%%%%%%%%%%%%
+%% STRING VARIABLES %%
+%%%%%%%%%%%%%%%%%%%%%%
+
+STRINGS { bibinfo
+          longest.label
+          oldname
+          s
+          t
+          ALTinterwordstretchfactor
+          name.format.string
+          name.latex.cmd
+          name.url.prefix}
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%
+%% LOW LEVEL FUNCTIONS %%
+%%%%%%%%%%%%%%%%%%%%%%%%%
+
+FUNCTION {initialize.controls}
+{ default.is.use.number.for.article 'is.use.number.for.article :=
+  default.is.use.paper 'is.use.paper :=
+  default.is.use.url 'is.use.url :=
+  default.is.forced.et.al 'is.forced.et.al :=
+  default.max.num.names.before.forced.et.al 'max.num.names.before.forced.et.al :=
+  default.num.names.shown.with.forced.et.al 'num.names.shown.with.forced.et.al :=
+  default.is.use.alt.interword.spacing 'is.use.alt.interword.spacing :=
+  default.is.dash.repeated.names 'is.dash.repeated.names :=
+  default.ALTinterwordstretchfactor 'ALTinterwordstretchfactor :=
+  default.name.format.string 'name.format.string :=
+  default.name.latex.cmd 'name.latex.cmd :=
+  default.name.url.prefix 'name.url.prefix :=
+}
+
+
+% This IEEEtran.bst features a very powerful and flexible mechanism for
+% controlling the capitalization, punctuation, spacing, quotation, and
+% newlines of the formatted entry fields. (Note: IEEEtran.bst does not need
+% or use the newline/newblock feature, but it has been implemented for
+% possible future use.) The output states of IEEEtran.bst consist of
+% multiple independent attributes and, as such, can be thought of as being
+% vectors, rather than the simple scalar values ("before.all", 
+% "mid.sentence", etc.) used in most other .bst files.
+% 
+% The more flexible and complex design used here was motivated in part by
+% the IEEE's rather unusual bibliography style. For example, the IEEE ends the
+% previous field item with a period and large space prior to the publisher
+% address; the @electronic entry types use periods as inter-item punctuation
+% rather than the commas used by the other entry types; and URLs are never
+% followed by periods even though they are the last item in the entry.
+% Although it is possible to accommodate these features with the conventional
+% output state system, the seemingly endless exceptions make for convoluted,
+% unreliable and difficult to maintain code.
+%
+% IEEEtran.bst's output state system can be easily understood via a simple
+% illustration of two most recently formatted entry fields (on the stack):
+%
+%               CURRENT_ITEM
+%               "PREVIOUS_ITEM
+%
+% which, in this example, is to eventually appear in the bibliography as:
+% 
+%               "PREVIOUS_ITEM," CURRENT_ITEM
+%
+% It is the job of the output routine to take the previous item off of the
+% stack (while leaving the current item at the top of the stack), apply its
+% trailing punctuation (including closing quote marks) and spacing, and then
+% to write the result to BibTeX's output buffer:
+% 
+%               "PREVIOUS_ITEM," 
+% 
+% Punctuation (and spacing) between items is often determined by both of the
+% items rather than just the first one. The presence of quotation marks
+% further complicates the situation because, in standard English, trailing
+% punctuation marks are supposed to be contained within the quotes.
+% 
+% IEEEtran.bst maintains two output state (aka "status") vectors which
+% correspond to the previous and current (aka "this") items. Each vector
+% consists of several independent attributes which track punctuation,
+% spacing, quotation, and newlines. Capitalization status is handled by a
+% separate scalar because the format routines, not the output routine,
+% handle capitalization and, therefore, there is no need to maintain the
+% capitalization attribute for both the "previous" and "this" items.
+% 
+% When a format routine adds a new item, it copies the current output status
+% vector to the previous output status vector and (usually) resets the
+% current (this) output status vector to a "standard status" vector. Using a
+% "standard status" vector in this way allows us to redefine what we mean by
+% "standard status" at the start of each entry handler and reuse the same
+% format routines under the various inter-item separation schemes. For
+% example, the standard status vector for the @book entry type may use
+% commas for item separators, while the @electronic type may use periods,
+% yet both entry handlers exploit many of the exact same format routines.
+% 
+% Because format routines have write access to the output status vector of
+% the previous item, they can override the punctuation choices of the
+% previous format routine! Therefore, it becomes trivial to implement rules
+% such as "Always use a period and a large space before the publisher." By
+% pushing the generation of the closing quote mark to the output routine, we
+% avoid all the problems caused by having to close a quote before having all
+% the information required to determine what the punctuation should be.
+%
+% The IEEEtran.bst output state system can easily be expanded if needed.
+% For instance, it is easy to add a "space.tie" attribute value if the
+% bibliography rules mandate that two items have to be joined with an
+% unbreakable space. 
+
+FUNCTION {initialize.status.constants}
+{ #0 'punct.no :=
+  #1 'punct.comma :=
+  #2 'punct.period :=
+  #0 'space.no := 
+  #1 'space.normal :=
+  #2 'space.large :=
+  #0 'quote.no :=
+  #1 'quote.close :=
+  #0 'cap.no :=
+  #1 'cap.yes :=
+  #0 'nline.no :=
+  #1 'nline.newblock :=
+}
+
+FUNCTION {std.status.using.comma}
+{ punct.comma 'punct.std :=
+  space.normal 'space.std :=
+  quote.no 'quote.std :=
+  nline.no 'nline.std :=
+  cap.no 'cap.std :=
+}
+
+FUNCTION {std.status.using.period}
+{ punct.period 'punct.std :=
+  space.normal 'space.std :=
+  quote.no 'quote.std :=
+  nline.no 'nline.std :=
+  cap.yes 'cap.std :=
+}
+
+FUNCTION {initialize.prev.this.status}
+{ punct.no 'prev.status.punct :=
+  space.no 'prev.status.space :=
+  quote.no 'prev.status.quote :=
+  nline.no 'prev.status.nline :=
+  punct.no 'this.status.punct :=
+  space.no 'this.status.space :=
+  quote.no 'this.status.quote :=
+  nline.no 'this.status.nline :=
+  cap.yes 'status.cap :=
+}
+
+FUNCTION {this.status.std}
+{ punct.std 'this.status.punct :=
+  space.std 'this.status.space :=
+  quote.std 'this.status.quote :=
+  nline.std 'this.status.nline :=
+}
+
+FUNCTION {cap.status.std}{ cap.std 'status.cap := }
+
+FUNCTION {this.to.prev.status}
+{ this.status.punct 'prev.status.punct :=
+  this.status.space 'prev.status.space :=
+  this.status.quote 'prev.status.quote :=
+  this.status.nline 'prev.status.nline :=
+}
+
+
+FUNCTION {not}
+{   { #0 }
+    { #1 }
+  if$
+}
+
+FUNCTION {and}
+{   { skip$ }
+    { pop$ #0 }
+  if$
+}
+
+FUNCTION {or}
+{   { pop$ #1 }
+    { skip$ }
+  if$
+}
+
+
+% convert the strings "yes" or "no" to #1 or #0 respectively
+FUNCTION {yes.no.to.int}
+{ "l" change.case$ duplicate$
+    "yes" =
+    { pop$  #1 }
+    { duplicate$ "no" =
+        { pop$ #0 }
+        { "unknown boolean " quote$ * swap$ * quote$ *
+          " in " * cite$ * warning$
+          #0
+        }
+      if$
+    }
+  if$
+}
+
+
+% pushes true if the single char string on the stack is in the
+% range of "0" to "9"
+FUNCTION {is.num}
+{ chr.to.int$
+  duplicate$ "0" chr.to.int$ < not
+  swap$ "9" chr.to.int$ > not and
+}
+
+% multiplies the integer on the stack by a factor of 10
+FUNCTION {bump.int.mag}
+{ #0 'multiresult :=
+    { duplicate$ #0 > }
+    { #1 -
+      multiresult #10 +
+      'multiresult :=
+    }
+  while$
+pop$
+multiresult
+}
+
+% converts a single character string on the stack to an integer
+FUNCTION {char.to.integer}
+{ duplicate$ 
+  is.num
+    { chr.to.int$ "0" chr.to.int$ - }
+    {"noninteger character " quote$ * swap$ * quote$ *
+          " in integer field of " * cite$ * warning$
+    #0
+    }
+  if$
+}
+
+% converts a string on the stack to an integer
+FUNCTION {string.to.integer}
+{ duplicate$ text.length$ 'namesleft :=
+  #1 'nameptr :=
+  #0 'numnames :=
+    { nameptr namesleft > not }
+    { duplicate$ nameptr #1 substring$
+      char.to.integer numnames bump.int.mag +
+      'numnames :=
+      nameptr #1 +
+      'nameptr :=
+    }
+  while$
+pop$
+numnames
+}
+
+
+
+
+% The output routines write out the *next* to the top (previous) item on the
+% stack, adding punctuation and such as needed. Since IEEEtran.bst maintains
+% the output status for the top two items on the stack, these output
+% routines have to consider the previous output status (which corresponds to
+% the item that is being output). Full independent control of punctuation,
+% closing quote marks, spacing, and newblock is provided.
+% 
+% "output.nonnull" does not check for the presence of a previous empty
+% item.
+% 
+% "output" does check for the presence of a previous empty item and will
+% remove an empty item rather than outputing it.
+% 
+% "output.warn" is like "output", but will issue a warning if it detects
+% an empty item.
+
+FUNCTION {output.nonnull}
+{ swap$
+  prev.status.punct punct.comma =
+     { "," * }
+     { skip$ }
+   if$
+  prev.status.punct punct.period =
+     { add.period$ }
+     { skip$ }
+   if$ 
+  prev.status.quote quote.close =
+     { "''" * }
+     { skip$ }
+   if$
+  prev.status.space space.normal =
+     { " " * }
+     { skip$ }
+   if$
+  prev.status.space space.large =
+     { large.space * }
+     { skip$ }
+   if$
+  write$
+  prev.status.nline nline.newblock =
+     { newline$ "\newblock " write$ }
+     { skip$ }
+   if$
+}
+
+FUNCTION {output}
+{ duplicate$ empty$
+    'pop$
+    'output.nonnull
+  if$
+}
+
+FUNCTION {output.warn}
+{ 't :=
+  duplicate$ empty$
+    { pop$ "empty " t * " in " * cite$ * warning$ }
+    'output.nonnull
+  if$
+}
+
+% "fin.entry" is the output routine that handles the last item of the entry
+% (which will be on the top of the stack when "fin.entry" is called).
+
+FUNCTION {fin.entry}
+{ this.status.punct punct.no =
+     { skip$ }
+     { add.period$ }
+   if$
+   this.status.quote quote.close =
+     { "''" * }
+     { skip$ }
+   if$
+write$
+newline$
+}
+
+
+FUNCTION {is.last.char.not.punct}
+{ duplicate$
+   "}" * add.period$
+   #-1 #1 substring$ "." =
+}
+
+FUNCTION {is.multiple.pages}
+{ 't :=
+  #0 'multiresult :=
+    { multiresult not
+      t empty$ not
+      and
+    }
+    { t #1 #1 substring$
+      duplicate$ "-" =
+      swap$ duplicate$ "," =
+      swap$ "+" =
+      or or
+        { #1 'multiresult := }
+        { t #2 global.max$ substring$ 't := }
+      if$
+    }
+  while$
+  multiresult
+}
+
+FUNCTION {capitalize}{ "u" change.case$ "t" change.case$ }
+
+FUNCTION {emphasize}
+{ duplicate$ empty$
+    { pop$ "" }
+    { "\emph{" swap$ * "}" * }
+  if$
+}
+
+FUNCTION {do.name.latex.cmd}
+{ name.latex.cmd
+  empty$
+    { skip$ }
+    { name.latex.cmd "{" * swap$ * "}" * }
+  if$
+}
+
+% IEEEtran.bst uses its own \BIBforeignlanguage command which directly
+% invokes the TeX hyphenation patterns without the need of the Babel
+% package. Babel does a lot more than switch hyphenation patterns and
+% its loading can cause unintended effects in many class files (such as
+% IEEEtran.cls).
+FUNCTION {select.language}
+{ duplicate$ empty$ 'pop$
+    { language empty$ 'skip$
+        { "\BIBforeignlanguage{" language * "}{" * swap$ * "}" * }
+      if$
+    }
+  if$
+}
+
+FUNCTION {tie.or.space.prefix}
+{ duplicate$ text.length$ #3 <
+    { "~" }
+    { " " }
+  if$
+  swap$
+}
+
+FUNCTION {get.bbl.editor}
+{ editor num.names$ #1 > 'bbl.editors 'bbl.editor if$ }
+
+FUNCTION {space.word}{ " " swap$ * " " * }
+
+
+% Field Conditioners, Converters, Checkers and External Interfaces
+
+FUNCTION {empty.field.to.null.string}
+{ duplicate$ empty$
+    { pop$ "" }
+    { skip$ }
+  if$
+}
+
+FUNCTION {either.or.check}
+{ empty$
+    { pop$ }
+    { "can't use both " swap$ * " fields in " * cite$ * warning$ }
+  if$
+}
+
+FUNCTION {empty.entry.warn}
+{ author empty$ title empty$ howpublished empty$
+  month empty$ year empty$ note empty$ url empty$
+  and and and and and and
+    { "all relevant fields are empty in " cite$ * warning$ }
+    'skip$
+  if$
+}
+
+
+% The bibinfo system provides a way for the electronic parsing/acquisition
+% of a bibliography's contents as is done by ReVTeX. For example, a field
+% could be entered into the bibliography as:
+% \bibinfo{volume}{2}
+% Only the "2" would show up in the document, but the LaTeX \bibinfo command
+% could do additional things with the information. IEEEtran.bst does provide
+% a \bibinfo command via "\providecommand{\bibinfo}[2]{#2}". However, it is
+% currently not used as the bogus bibinfo functions defined here output the
+% entry values directly without the \bibinfo wrapper. The bibinfo functions
+% themselves (and the calls to them) are retained for possible future use.
+% 
+% bibinfo.check avoids acting on missing fields while bibinfo.warn will
+% issue a warning message if a missing field is detected. Prior to calling
+% the bibinfo functions, the user should push the field value and then its
+% name string, in that order.
+
+FUNCTION {bibinfo.check}
+{ swap$ duplicate$ missing$
+    { pop$ pop$ "" }
+    { duplicate$ empty$
+        { swap$ pop$ }
+        { swap$ pop$ }
+      if$
+    }
+  if$
+}
+
+FUNCTION {bibinfo.warn}
+{ swap$ duplicate$ missing$
+    { swap$ "missing " swap$ * " in " * cite$ * warning$ pop$ "" }
+    { duplicate$ empty$
+        { swap$ "empty " swap$ * " in " * cite$ * warning$ }
+        { swap$ pop$ }
+      if$
+    }
+  if$
+}
+
+
+% The IEEE separates large numbers with more than 4 digits into groups of
+% three. The IEEE uses a small space to separate these number groups. 
+% Typical applications include patent and page numbers.
+
+% number of consecutive digits required to trigger the group separation.
+FUNCTION {large.number.trigger}{ #5 }
+
+% For numbers longer than the trigger, this is the blocksize of the groups.
+% The blocksize must be less than the trigger threshold, and 2 * blocksize
+% must be greater than the trigger threshold (can't do more than one
+% separation on the initial trigger).
+FUNCTION {large.number.blocksize}{ #3 }
+
+% What is actually inserted between the number groups.
+FUNCTION {large.number.separator}{ "\," }
+
+% So as to save on integer variables by reusing existing ones, numnames
+% holds the current number of consecutive digits read and nameptr holds
+% the number that will trigger an inserted space.
+FUNCTION {large.number.separate}
+{ 't :=
+  ""
+  #0 'numnames :=
+  large.number.trigger 'nameptr :=
+  { t empty$ not }
+  { t #-1 #1 substring$ is.num
+      { numnames #1 + 'numnames := }
+      { #0 'numnames := 
+        large.number.trigger 'nameptr :=
+      }
+    if$
+    t #-1 #1 substring$ swap$ *
+    t #-2 global.max$ substring$ 't :=
+    numnames nameptr =
+      { duplicate$ #1 nameptr large.number.blocksize - substring$ swap$
+        nameptr large.number.blocksize - #1 + global.max$ substring$
+        large.number.separator swap$ * *
+        nameptr large.number.blocksize - 'numnames :=
+        large.number.blocksize #1 + 'nameptr :=
+      }
+      { skip$ }
+    if$
+  }
+  while$
+}
+
+% Converts all single dashes "-" to double dashes "--".
+FUNCTION {n.dashify}
+{ large.number.separate
+  't :=
+  ""
+    { t empty$ not }
+    { t #1 #1 substring$ "-" =
+        { t #1 #2 substring$ "--" = not
+            { "--" *
+              t #2 global.max$ substring$ 't :=
+            }
+            {   { t #1 #1 substring$ "-" = }
+                { "-" *
+                  t #2 global.max$ substring$ 't :=
+                }
+              while$
+            }
+          if$
+        }
+        { t #1 #1 substring$ *
+          t #2 global.max$ substring$ 't :=
+        }
+      if$
+    }
+  while$
+}
+
+
+% This function detects entries with names that are identical to that of
+% the previous entry and replaces the repeated names with dashes (if the
+% "is.dash.repeated.names" user control is nonzero).
+FUNCTION {name.or.dash}
+{ 's :=
+   oldname empty$
+     { s 'oldname := s }
+     { s oldname =
+         { is.dash.repeated.names
+              { repeated.name.dashes }
+              { s 'oldname := s }
+            if$
+         }
+         { s 'oldname := s }
+       if$
+     }
+   if$
+}
+
+% Converts the number string on the top of the stack to
+% "numerical ordinal form" (e.g., "7" to "7th"). There is
+% no artificial limit to the upper bound of the numbers as the
+% two least significant digits determine the ordinal form.
+FUNCTION {num.to.ordinal}
+{ duplicate$ #-2 #1 substring$ "1" =
+      { bbl.th * }
+      { duplicate$ #-1 #1 substring$ "1" =
+          { bbl.st * }
+          { duplicate$ #-1 #1 substring$ "2" =
+              { bbl.nd * }
+              { duplicate$ #-1 #1 substring$ "3" =
+                  { bbl.rd * }
+                  { bbl.th * }
+                if$
+              }
+            if$
+          }
+        if$
+      }
+    if$
+}
+
+% If the string on the top of the stack begins with a number,
+% (e.g., 11th) then replace the string with the leading number
+% it contains. Otherwise retain the string as-is. s holds the
+% extracted number, t holds the part of the string that remains
+% to be scanned.
+FUNCTION {extract.num}
+{ duplicate$ 't :=
+  "" 's :=
+  { t empty$ not }
+  { t #1 #1 substring$
+    t #2 global.max$ substring$ 't :=
+    duplicate$ is.num
+      { s swap$ * 's := }
+      { pop$ "" 't := }
+    if$
+  }
+  while$
+  s empty$
+    'skip$
+    { pop$ s }
+  if$
+}
+
+% Converts the word number string on the top of the stack to
+% Arabic string form. Will be successful up to "tenth".
+FUNCTION {word.to.num}
+{ duplicate$ "l" change.case$ 's :=
+  s "first" =
+    { pop$ "1" }
+    { skip$ }
+  if$
+  s "second" =
+    { pop$ "2" }
+    { skip$ }
+  if$
+  s "third" =
+    { pop$ "3" }
+    { skip$ }
+  if$
+  s "fourth" =
+    { pop$ "4" }
+    { skip$ }
+  if$
+  s "fifth" =
+    { pop$ "5" }
+    { skip$ }
+  if$
+  s "sixth" =
+    { pop$ "6" }
+    { skip$ }
+  if$
+  s "seventh" =
+    { pop$ "7" }
+    { skip$ }
+  if$
+  s "eighth" =
+    { pop$ "8" }
+    { skip$ }
+  if$
+  s "ninth" =
+    { pop$ "9" }
+    { skip$ }
+  if$
+  s "tenth" =
+    { pop$ "10" }
+    { skip$ }
+  if$
+}
+
+
+% Converts the string on the top of the stack to numerical
+% ordinal (e.g., "11th") form.
+FUNCTION {convert.edition}
+{ duplicate$ empty$ 'skip$
+    { duplicate$ #1 #1 substring$ is.num
+        { extract.num
+          num.to.ordinal
+        }
+        { word.to.num
+          duplicate$ #1 #1 substring$ is.num
+            { num.to.ordinal }
+            { "edition ordinal word " quote$ * edition * quote$ *
+              " may be too high (or improper) for conversion" * " in " * cite$ * warning$
+            }
+          if$
+        }
+      if$
+    }
+  if$
+}
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% LATEX BIBLIOGRAPHY CODE %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+FUNCTION {start.entry}
+{ newline$
+  "\bibitem{" write$
+  cite$ write$
+  "}" write$
+  newline$
+  ""
+  initialize.prev.this.status
+}
+
+% Here we write out all the LaTeX code that we will need. The most involved
+% code sequences are those that control the alternate interword spacing and
+% foreign language hyphenation patterns. The heavy use of \providecommand
+% gives users a way to override the defaults. Special thanks to Javier Bezos,
+% Johannes Braams, Robin Fairbairns, Heiko Oberdiek, Donald Arseneau and all
+% the other gurus on comp.text.tex for their help and advice on the topic of
+% \selectlanguage, Babel and BibTeX.
+FUNCTION {begin.bib}
+{ "% Generated by IEEEtran.bst, version: " bst.file.version * " (" * bst.file.date * ")" *
+  write$ newline$
+  preamble$ empty$ 'skip$
+    { preamble$ write$ newline$ }
+  if$
+  "\begin{thebibliography}{"  longest.label  * "}" *
+  write$ newline$
+  "\providecommand{\url}[1]{#1}"
+  write$ newline$
+  "\csname url@samestyle\endcsname"
+  write$ newline$
+  "\providecommand{\newblock}{\relax}"
+  write$ newline$
+  "\providecommand{\bibinfo}[2]{#2}"
+  write$ newline$
+  "\providecommand{\BIBentrySTDinterwordspacing}{\spaceskip=0pt\relax}"
+  write$ newline$
+  "\providecommand{\BIBentryALTinterwordstretchfactor}{"
+  ALTinterwordstretchfactor * "}" *
+  write$ newline$
+  "\providecommand{\BIBentryALTinterwordspacing}{\spaceskip=\fontdimen2\font plus "
+  write$ newline$
+  "\BIBentryALTinterwordstretchfactor\fontdimen3\font minus \fontdimen4\font\relax}"
+  write$ newline$
+  "\providecommand{\BIBforeignlanguage}[2]{{%"
+  write$ newline$
+  "\expandafter\ifx\csname l@#1\endcsname\relax"
+  write$ newline$
+  "\typeout{** WARNING: IEEEtran.bst: No hyphenation pattern has been}%"
+  write$ newline$
+  "\typeout{** loaded for the language `#1'. Using the pattern for}%"
+  write$ newline$
+  "\typeout{** the default language instead.}%"
+  write$ newline$
+  "\else"
+  write$ newline$
+  "\language=\csname l@#1\endcsname"
+  write$ newline$
+  "\fi"
+  write$ newline$
+  "#2}}"
+  write$ newline$
+  "\providecommand{\BIBdecl}{\relax}"
+  write$ newline$
+  "\BIBdecl"
+  write$ newline$
+}
+
+FUNCTION {end.bib}
+{ newline$ "\end{thebibliography}" write$ newline$ }
+
+FUNCTION {if.url.alt.interword.spacing}
+{ is.use.alt.interword.spacing
+    { is.use.url
+        { url empty$ 'skip$ {"\BIBentryALTinterwordspacing" write$ newline$} if$ }
+        { skip$ }
+      if$
+    }
+    { skip$ }
+  if$
+}
+
+FUNCTION {if.url.std.interword.spacing}
+{ is.use.alt.interword.spacing
+    { is.use.url
+        { url empty$ 'skip$ {"\BIBentrySTDinterwordspacing" write$ newline$} if$ }
+        { skip$ }
+      if$
+    }
+    { skip$ }
+  if$
+}
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%
+%% LONGEST LABEL PASS %%
+%%%%%%%%%%%%%%%%%%%%%%%%
+
+FUNCTION {initialize.longest.label}
+{ "" 'longest.label :=
+  #1 'number.label :=
+  #0 'longest.label.width :=
+}
+
+FUNCTION {longest.label.pass}
+{ type$ "ieeetranbstctl" =
+    { skip$ }
+    { number.label int.to.str$ 'label :=
+      number.label #1 + 'number.label :=
+      label width$ longest.label.width >
+        { label 'longest.label :=
+          label width$ 'longest.label.width :=
+        }
+        { skip$ }
+      if$
+    }
+  if$
+}
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%%
+%% FORMAT HANDLERS %%
+%%%%%%%%%%%%%%%%%%%%%
+
+%% Lower Level Formats (used by higher level formats)
+
+FUNCTION {format.address.org.or.pub.date}
+{ 't :=
+  ""
+  year empty$
+    { "empty year in " cite$ * warning$ }
+    { skip$ }
+  if$
+  address empty$ t empty$ and
+  year empty$ and month empty$ and
+    { skip$ }
+    { this.to.prev.status
+      this.status.std
+      cap.status.std
+      address "address" bibinfo.check *
+      t empty$
+        { skip$ }
+        { punct.period 'prev.status.punct :=
+          space.large 'prev.status.space :=
+          address empty$
+            { skip$ }
+            { ": " * }
+          if$
+          t *
+        }
+      if$
+      year empty$ month empty$ and
+        { skip$ }
+        { t empty$ address empty$ and
+            { skip$ }
+            { ", " * }
+          if$
+          month empty$
+            { year empty$
+                { skip$ }
+                { year "year" bibinfo.check * }
+              if$
+            }
+            { month "month" bibinfo.check *
+              year empty$
+                 { skip$ }
+                 { " " * year "year" bibinfo.check * }
+              if$
+            }
+          if$
+        }
+      if$
+    }
+  if$
+}
+
+
+FUNCTION {format.names}
+{ 'bibinfo :=
+  duplicate$ empty$ 'skip$ {
+  this.to.prev.status
+  this.status.std
+  's :=
+  "" 't :=
+  #1 'nameptr :=
+  s num.names$ 'numnames :=
+  numnames 'namesleft :=
+    { namesleft #0 > }
+    { s nameptr
+      name.format.string
+      format.name$
+      bibinfo bibinfo.check
+      't :=
+      nameptr #1 >
+        { nameptr num.names.shown.with.forced.et.al #1 + =
+          numnames max.num.names.before.forced.et.al >
+          is.forced.et.al and and
+            { "others" 't :=
+              #1 'namesleft :=
+            }
+            { skip$ }
+          if$
+          namesleft #1 >
+            { ", " * t do.name.latex.cmd * }
+            { s nameptr "{ll}" format.name$ duplicate$ "others" =
+                { 't := }
+                { pop$ }
+              if$
+              t "others" =
+                { " " * bbl.etal emphasize * }
+                { numnames #2 >
+                    { "," * }
+                    { skip$ }
+                  if$
+                  bbl.and
+                  space.word * t do.name.latex.cmd *
+                }
+              if$
+            }
+          if$
+        }
+        { t do.name.latex.cmd }
+      if$
+      nameptr #1 + 'nameptr :=
+      namesleft #1 - 'namesleft :=
+    }
+  while$
+  cap.status.std
+  } if$
+}
+
+
+
+
+%% Higher Level Formats
+
+%% addresses/locations
+
+FUNCTION {format.address}
+{ address duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      cap.status.std
+    }
+  if$
+}
+
+
+
+%% author/editor names
+
+FUNCTION {format.authors}{ author "author" format.names }
+
+FUNCTION {format.editors}
+{ editor "editor" format.names duplicate$ empty$ 'skip$
+    { ", " *
+      get.bbl.editor
+      capitalize
+      *
+    }
+  if$
+}
+
+
+
+%% date
+
+FUNCTION {format.date}
+{
+  month "month" bibinfo.check duplicate$ empty$
+  year  "year" bibinfo.check duplicate$ empty$
+    { swap$ 'skip$
+        { this.to.prev.status
+          this.status.std
+          cap.status.std
+         "there's a month but no year in " cite$ * warning$ }
+      if$
+      *
+    }
+    { this.to.prev.status
+      this.status.std
+      cap.status.std
+      swap$ 'skip$
+        {
+          swap$
+          " " * swap$
+        }
+      if$
+      *
+    }
+  if$
+}
+
+FUNCTION {format.date.electronic}
+{ month "month" bibinfo.check duplicate$ empty$
+  year  "year" bibinfo.check duplicate$ empty$
+    { swap$ 
+        { pop$ }
+        { "there's a month but no year in " cite$ * warning$
+        pop$ ")" * "(" swap$ *
+        this.to.prev.status
+        punct.no 'this.status.punct :=
+        space.normal 'this.status.space :=
+        quote.no 'this.status.quote :=
+        cap.yes  'status.cap :=
+        }
+      if$
+    }
+    { swap$ 
+        { swap$ pop$ ")" * "(" swap$ * }
+        { "(" swap$ * ", " * swap$ * ")" * }
+      if$
+    this.to.prev.status
+    punct.no 'this.status.punct :=
+    space.normal 'this.status.space :=
+    quote.no 'this.status.quote :=
+    cap.yes  'status.cap :=
+    }
+  if$
+}
+
+
+
+%% edition/title
+
+% Note: The IEEE considers the edition to be closely associated with
+% the title of a book. So, in IEEEtran.bst the edition is normally handled 
+% within the formatting of the title. The format.edition function is 
+% retained here for possible future use.
+FUNCTION {format.edition}
+{ edition duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      convert.edition
+      status.cap
+        { "t" }
+        { "l" }
+      if$ change.case$
+      "edition" bibinfo.check
+      "~" * bbl.edition *
+      cap.status.std
+    }
+  if$
+}
+
+% This is used to format the booktitle of a conference proceedings.
+% Here we use the "intype" field to provide the user a way to 
+% override the word "in" (e.g., with things like "presented at")
+% Use of intype stops the emphasis of the booktitle to indicate that
+% we no longer mean the written conference proceedings, but the
+% conference itself.
+FUNCTION {format.in.booktitle}
+{ booktitle "booktitle" bibinfo.check duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      select.language
+      intype missing$
+        { emphasize
+          bbl.in " " *
+        }
+        { intype " " * }
+      if$
+      swap$ *
+      cap.status.std
+    }
+  if$
+}
+
+% This is used to format the booktitle of collection.
+% Here the "intype" field is not supported, but "edition" is.
+FUNCTION {format.in.booktitle.edition}
+{ booktitle "booktitle" bibinfo.check duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      select.language
+      emphasize
+      edition empty$ 'skip$
+        { ", " *
+          edition
+          convert.edition
+          "l" change.case$
+          * "~" * bbl.edition *
+        }
+      if$
+      bbl.in " " * swap$ *
+      cap.status.std
+    }
+  if$
+}
+
+FUNCTION {format.article.title}
+{ title duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      "t" change.case$
+    }
+  if$
+  "title" bibinfo.check
+  duplicate$ empty$ 'skip$
+    { quote.close 'this.status.quote :=
+      is.last.char.not.punct
+        { punct.std 'this.status.punct := }
+        { punct.no 'this.status.punct := }
+      if$
+      select.language
+      "``" swap$ *
+      cap.status.std
+    }
+  if$
+}
+
+FUNCTION {format.article.title.electronic}
+{ title duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      cap.status.std
+      "t" change.case$ 
+    }
+  if$
+  "title" bibinfo.check
+  duplicate$ empty$ 
+    { skip$ } 
+    { select.language }
+  if$
+}
+
+FUNCTION {format.book.title.edition}
+{ title "title" bibinfo.check
+  duplicate$ empty$
+    { "empty title in " cite$ * warning$ }
+    { this.to.prev.status
+      this.status.std
+      select.language
+      emphasize
+      edition empty$ 'skip$
+        { ", " *
+          edition
+          convert.edition
+          status.cap
+            { "t" }
+            { "l" }
+          if$
+          change.case$
+          * "~" * bbl.edition *
+        }
+      if$
+      cap.status.std
+    }
+  if$
+}
+
+FUNCTION {format.book.title}
+{ title "title" bibinfo.check
+  duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      cap.status.std
+      select.language
+      emphasize
+    }
+  if$
+}
+
+
+
+%% journal
+
+FUNCTION {format.journal}
+{ journal duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      cap.status.std
+      select.language
+      emphasize
+    }
+  if$
+}
+
+
+
+%% how published
+
+FUNCTION {format.howpublished}
+{ howpublished duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      cap.status.std
+    }
+  if$
+}
+
+
+
+%% institutions/organization/publishers/school
+
+FUNCTION {format.institution}
+{ institution duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      cap.status.std
+    }
+  if$
+}
+
+FUNCTION {format.organization}
+{ organization duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      cap.status.std
+    }
+  if$
+}
+
+FUNCTION {format.address.publisher.date}
+{ publisher "publisher" bibinfo.warn format.address.org.or.pub.date }
+
+FUNCTION {format.address.publisher.date.nowarn}
+{ publisher "publisher" bibinfo.check format.address.org.or.pub.date }
+
+FUNCTION {format.address.organization.date}
+{ organization "organization" bibinfo.check format.address.org.or.pub.date }
+
+FUNCTION {format.school}
+{ school duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      cap.status.std
+    }
+  if$
+}
+
+
+
+%% volume/number/series/chapter/pages
+
+FUNCTION {format.volume}
+{ volume empty.field.to.null.string
+  duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      bbl.volume 
+      status.cap
+        { capitalize }
+        { skip$ }
+      if$
+      swap$ tie.or.space.prefix
+      "volume" bibinfo.check
+      * *
+      cap.status.std
+    }
+  if$
+}
+
+FUNCTION {format.number}
+{ number empty.field.to.null.string
+  duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      status.cap
+         { bbl.number capitalize }
+         { bbl.number }
+       if$
+      swap$ tie.or.space.prefix
+      "number" bibinfo.check
+      * *
+      cap.status.std
+    }
+  if$
+}
+
+FUNCTION {format.number.if.use.for.article}
+{ is.use.number.for.article 
+     { format.number }
+     { "" }
+   if$
+}
+
+% The IEEE does not seem to tie the series so closely with the volume
+% and number as is done in other bibliography styles. Instead the
+% series is treated somewhat like an extension of the title.
+FUNCTION {format.series}
+{ series empty$ 
+   { "" }
+   { this.to.prev.status
+     this.status.std
+     bbl.series " " *
+     series "series" bibinfo.check *
+     cap.status.std
+   }
+ if$
+}
+
+
+FUNCTION {format.chapter}
+{ chapter empty$
+    { "" }
+    { this.to.prev.status
+      this.status.std
+      type empty$
+        { bbl.chapter }
+        { type "l" change.case$
+          "type" bibinfo.check
+        }
+      if$
+      chapter tie.or.space.prefix
+      "chapter" bibinfo.check
+      * *
+      cap.status.std
+    }
+  if$
+}
+
+
+% The intended use of format.paper is for paper numbers of inproceedings.
+% The paper type can be overridden via the type field.
+% We allow the type to be displayed even if the paper number is absent
+% for things like "postdeadline paper"
+FUNCTION {format.paper}
+{ is.use.paper
+     { paper empty$
+        { type empty$
+            { "" }
+            { this.to.prev.status
+              this.status.std
+              type "type" bibinfo.check
+              cap.status.std
+            }
+          if$
+        }
+        { this.to.prev.status
+          this.status.std
+          type empty$
+            { bbl.paper }
+            { type "type" bibinfo.check }
+          if$
+          " " * paper
+          "paper" bibinfo.check
+          *
+          cap.status.std
+        }
+      if$
+     }
+     { "" } 
+   if$
+}
+
+
+FUNCTION {format.pages}
+{ pages duplicate$ empty$ 'skip$
+    { this.to.prev.status
+      this.status.std
+      duplicate$ is.multiple.pages
+        {
+          bbl.pages swap$
+          n.dashify
+        }
+        {
+          bbl.page swap$
+        }
+      if$
+      tie.or.space.prefix
+      "pages" bibinfo.check
+      * *
+      cap.status.std
+    }
+  if$
+}
+
+
+
+%% technical report number
+
+FUNCTION {format.tech.report.number}
+{ number "number" bibinfo.check
+  this.to.prev.status
+  this.status.std
+  cap.status.std
+  type duplicate$ empty$
+    { pop$ 
+      bbl.techrep
+    }
+    { skip$ }
+  if$
+  "type" bibinfo.check 
+  swap$ duplicate$ empty$
+    { pop$ }
+    { tie.or.space.prefix * * }
+  if$
+}
+
+
+
+%% note
+
+FUNCTION {format.note}
+{ note empty$
+    { "" }
+    { this.to.prev.status
+      this.status.std
+      punct.period 'this.status.punct :=
+      note #1 #1 substring$
+      duplicate$ "{" =
+        { skip$ }
+        { status.cap
+          { "u" }
+          { "l" }
+        if$
+        change.case$
+        }
+      if$
+      note #2 global.max$ substring$ * "note" bibinfo.check
+      cap.yes  'status.cap :=
+    }
+  if$
+}
+
+
+
+%% patent
+
+FUNCTION {format.patent.date}
+{ this.to.prev.status
+  this.status.std
+  year empty$
+    { monthfiled duplicate$ empty$
+        { "monthfiled" bibinfo.check pop$ "" }
+        { "monthfiled" bibinfo.check }
+      if$
+      dayfiled duplicate$ empty$
+        { "dayfiled" bibinfo.check pop$ "" * }
+        { "dayfiled" bibinfo.check 
+          monthfiled empty$ 
+             { "dayfiled without a monthfiled in " cite$ * warning$
+               * 
+             }
+             { " " swap$ * * }
+           if$
+        }
+      if$
+      yearfiled empty$
+        { "no year or yearfiled in " cite$ * warning$ }
+        { yearfiled "yearfiled" bibinfo.check 
+          swap$
+          duplicate$ empty$
+             { pop$ }
+             { ", " * swap$ * }
+           if$
+        }
+      if$
+    }
+    { month duplicate$ empty$
+        { "month" bibinfo.check pop$ "" }
+        { "month" bibinfo.check }
+      if$
+      day duplicate$ empty$
+        { "day" bibinfo.check pop$ "" * }
+        { "day" bibinfo.check 
+          month empty$ 
+             { "day without a month in " cite$ * warning$
+               * 
+             }
+             { " " swap$ * * }
+           if$
+        }
+      if$
+      year "year" bibinfo.check 
+      swap$
+      duplicate$ empty$
+        { pop$ }
+        { ", " * swap$ * }
+      if$
+    }
+  if$
+  cap.status.std
+}
+
+FUNCTION {format.patent.nationality.type.number}
+{ this.to.prev.status
+  this.status.std
+  nationality duplicate$ empty$
+    { "nationality" bibinfo.warn pop$ "" }
+    { "nationality" bibinfo.check
+      duplicate$ "l" change.case$ "united states" =
+        { pop$ bbl.patentUS }
+        { skip$ }
+      if$
+      " " *
+    }
+  if$
+  type empty$
+    { bbl.patent "type" bibinfo.check }
+    { type "type" bibinfo.check }
+  if$  
+  *
+  number duplicate$ empty$
+    { "number" bibinfo.warn pop$ }
+    { "number" bibinfo.check
+      large.number.separate
+      swap$ " " * swap$ *
+    }
+  if$ 
+  cap.status.std
+}
+
+
+
+%% standard
+
+FUNCTION {format.organization.institution.standard.type.number}
+{ this.to.prev.status
+  this.status.std
+  organization duplicate$ empty$
+    { pop$ 
+      institution duplicate$ empty$
+        { "institution" bibinfo.warn }
+        { "institution" bibinfo.warn " " * }
+      if$
+    }
+    { "organization" bibinfo.warn " " * }
+  if$
+  type empty$
+    { bbl.standard "type" bibinfo.check }
+    { type "type" bibinfo.check }
+  if$  
+  *
+  number duplicate$ empty$
+    { "number" bibinfo.check pop$ }
+    { "number" bibinfo.check
+      large.number.separate
+      swap$ " " * swap$ *
+    }
+  if$ 
+  cap.status.std
+}
+
+FUNCTION {format.revision}
+{ revision empty$
+    { "" }
+    { this.to.prev.status
+      this.status.std
+      bbl.revision
+      revision tie.or.space.prefix
+      "revision" bibinfo.check
+      * *
+      cap.status.std
+    }
+  if$
+}
+
+
+%% thesis
+
+FUNCTION {format.master.thesis.type}
+{ this.to.prev.status
+  this.status.std
+  type empty$
+    {
+      bbl.mthesis
+    }
+    { 
+      type "type" bibinfo.check
+    }
+  if$
+cap.status.std
+}
+
+FUNCTION {format.phd.thesis.type}
+{ this.to.prev.status
+  this.status.std
+  type empty$
+    {
+      bbl.phdthesis
+    }
+    { 
+      type "type" bibinfo.check
+    }
+  if$
+cap.status.std
+}
+
+
+
+%% URL
+
+FUNCTION {format.url}
+{ is.use.url
+    { url empty$
+      { "" }
+      { this.to.prev.status
+        this.status.std
+        cap.yes 'status.cap :=
+        name.url.prefix " " *
+        "\url{" * url * "}" *
+        punct.no 'this.status.punct :=
+        punct.period 'prev.status.punct :=
+        space.normal 'this.status.space :=
+        space.normal 'prev.status.space :=
+        quote.no 'this.status.quote :=
+      }
+    if$
+    }
+    { "" }
+  if$
+}
+
+
+
+
+%%%%%%%%%%%%%%%%%%%%
+%% ENTRY HANDLERS %%
+%%%%%%%%%%%%%%%%%%%%
+
+
+% Note: In many journals, the IEEE (or the authors) tend not to show the number
+% for articles, so the display of the number is controlled here by the
+% switch "is.use.number.for.article"
+FUNCTION {article}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  format.authors "author" output.warn
+  name.or.dash
+  format.article.title "title" output.warn
+  format.journal "journal" bibinfo.check "journal" output.warn
+  format.volume output
+  format.number.if.use.for.article output
+  format.pages output
+  format.date "year" output.warn
+  format.note output
+  format.url output
+  fin.entry
+  if.url.std.interword.spacing
+}
+
+FUNCTION {book}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  author empty$
+    { format.editors "author and editor" output.warn }
+    { format.authors output.nonnull }
+  if$
+  name.or.dash
+  format.book.title.edition output
+  format.series output
+  author empty$
+    { skip$ }
+    { format.editors output }
+  if$
+  format.address.publisher.date output
+  format.volume output
+  format.number output
+  format.note output
+  format.url output
+  fin.entry
+  if.url.std.interword.spacing
+}
+
+FUNCTION {booklet}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  format.authors output
+  name.or.dash
+  format.article.title "title" output.warn
+  format.howpublished "howpublished" bibinfo.check output
+  format.organization "organization" bibinfo.check output
+  format.address "address" bibinfo.check output
+  format.date output
+  format.note output
+  format.url output
+  fin.entry
+  if.url.std.interword.spacing
+}
+
+FUNCTION {electronic}
+{ std.status.using.period
+  start.entry
+  if.url.alt.interword.spacing
+  format.authors output
+  name.or.dash
+  format.date.electronic output
+  format.article.title.electronic output
+  format.howpublished "howpublished" bibinfo.check output
+  format.organization "organization" bibinfo.check output
+  format.address "address" bibinfo.check output
+  format.note output
+  format.url output
+  fin.entry
+  empty.entry.warn
+  if.url.std.interword.spacing
+}
+
+FUNCTION {inbook}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  author empty$
+    { format.editors "author and editor" output.warn }
+    { format.authors output.nonnull }
+  if$
+  name.or.dash
+  format.book.title.edition output
+  format.series output
+  format.address.publisher.date output
+  format.volume output
+  format.number output
+  format.chapter output
+  format.pages output
+  format.note output
+  format.url output
+  fin.entry
+  if.url.std.interword.spacing
+}
+
+FUNCTION {incollection}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  format.authors "author" output.warn
+  name.or.dash
+  format.article.title "title" output.warn
+  format.in.booktitle.edition "booktitle" output.warn
+  format.series output
+  format.editors output
+  format.address.publisher.date.nowarn output
+  format.volume output
+  format.number output
+  format.chapter output
+  format.pages output
+  format.note output
+  format.url output
+  fin.entry
+  if.url.std.interword.spacing
+}
+
+FUNCTION {inproceedings}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  format.authors "author" output.warn
+  name.or.dash
+  format.article.title "title" output.warn
+  format.in.booktitle "booktitle" output.warn
+  format.series output
+  format.editors output
+  format.volume output
+  format.number output
+  publisher empty$
+    { format.address.organization.date output }
+    { format.organization "organization" bibinfo.check output
+      format.address.publisher.date output
+    }
+  if$
+  format.paper output
+  format.pages output
+  format.note output
+  format.url output
+  fin.entry
+  if.url.std.interword.spacing
+}
+
+FUNCTION {manual}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  format.authors output
+  name.or.dash
+  format.book.title.edition "title" output.warn
+  format.howpublished "howpublished" bibinfo.check output 
+  format.organization "organization" bibinfo.check output
+  format.address "address" bibinfo.check output
+  format.date output
+  format.note output
+  format.url output
+  fin.entry
+  if.url.std.interword.spacing
+}
+
+FUNCTION {mastersthesis}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  format.authors "author" output.warn
+  name.or.dash
+  format.article.title "title" output.warn
+  format.master.thesis.type output.nonnull
+  format.school "school" bibinfo.warn output
+  format.address "address" bibinfo.check output
+  format.date "year" output.warn
+  format.note output
+  format.url output
+  fin.entry
+  if.url.std.interword.spacing
+}
+
+FUNCTION {misc}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  format.authors output
+  name.or.dash
+  format.article.title output
+  format.howpublished "howpublished" bibinfo.check output 
+  format.organization "organization" bibinfo.check output
+  format.address "address" bibinfo.check output
+  format.pages output
+  format.date output
+  format.note output
+  format.url output
+  fin.entry
+  empty.entry.warn
+  if.url.std.interword.spacing
+}
+
+FUNCTION {patent}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  format.authors output
+  name.or.dash
+  format.article.title output
+  format.patent.nationality.type.number output
+  format.patent.date output
+  format.note output
+  format.url output
+  fin.entry
+  empty.entry.warn
+  if.url.std.interword.spacing
+}
+
+FUNCTION {periodical}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  format.editors output
+  name.or.dash
+  format.book.title "title" output.warn
+  format.series output
+  format.volume output
+  format.number output
+  format.organization "organization" bibinfo.check output
+  format.date "year" output.warn
+  format.note output
+  format.url output
+  fin.entry
+  if.url.std.interword.spacing
+}
+
+FUNCTION {phdthesis}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  format.authors "author" output.warn
+  name.or.dash
+  format.article.title "title" output.warn
+  format.phd.thesis.type output.nonnull
+  format.school "school" bibinfo.warn output
+  format.address "address" bibinfo.check output
+  format.date "year" output.warn
+  format.note output
+  format.url output
+  fin.entry
+  if.url.std.interword.spacing
+}
+
+FUNCTION {proceedings}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  format.editors output
+  name.or.dash
+  format.book.title "title" output.warn
+  format.series output
+  format.volume output
+  format.number output
+  publisher empty$
+    { format.address.organization.date output }
+    { format.organization "organization" bibinfo.check output
+      format.address.publisher.date output
+    }
+  if$
+  format.note output
+  format.url output
+  fin.entry
+  if.url.std.interword.spacing
+}
+
+FUNCTION {standard}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  format.authors output
+  name.or.dash
+  format.book.title "title" output.warn
+  format.howpublished "howpublished" bibinfo.check output 
+  format.organization.institution.standard.type.number output
+  format.revision output
+  format.date output
+  format.note output
+  format.url output
+  fin.entry
+  if.url.std.interword.spacing
+}
+
+FUNCTION {techreport}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  format.authors "author" output.warn
+  name.or.dash
+  format.article.title "title" output.warn
+  format.howpublished "howpublished" bibinfo.check output 
+  format.institution "institution" bibinfo.warn output
+  format.address "address" bibinfo.check output
+  format.tech.report.number output.nonnull
+  format.date "year" output.warn
+  format.note output
+  format.url output
+  fin.entry
+  if.url.std.interword.spacing
+}
+
+FUNCTION {unpublished}
+{ std.status.using.comma
+  start.entry
+  if.url.alt.interword.spacing
+  format.authors "author" output.warn
+  name.or.dash
+  format.article.title "title" output.warn
+  format.date output
+  format.note "note" output.warn
+  format.url output
+  fin.entry
+  if.url.std.interword.spacing
+}
+
+
+% The special entry type which provides the user interface to the
+% BST controls
+FUNCTION {IEEEtranBSTCTL}
+{ is.print.banners.to.terminal
+    { "** IEEEtran BST control entry " quote$ * cite$ * quote$ * " detected." *
+      top$
+    }
+    { skip$ }
+  if$
+  CTLuse_article_number
+  empty$
+    { skip$ }
+    { CTLuse_article_number
+      yes.no.to.int
+      'is.use.number.for.article :=
+    }
+  if$
+  CTLuse_paper
+  empty$
+    { skip$ }
+    { CTLuse_paper
+      yes.no.to.int
+      'is.use.paper :=
+    }
+  if$
+  CTLuse_url
+  empty$
+    { skip$ }
+    { CTLuse_url
+      yes.no.to.int
+      'is.use.url :=
+    }
+  if$
+  CTLuse_forced_etal
+  empty$
+    { skip$ }
+    { CTLuse_forced_etal
+      yes.no.to.int
+      'is.forced.et.al :=
+    }
+  if$
+  CTLmax_names_forced_etal
+  empty$
+    { skip$ }
+    { CTLmax_names_forced_etal
+      string.to.integer
+      'max.num.names.before.forced.et.al :=
+    }
+  if$
+  CTLnames_show_etal
+  empty$
+    { skip$ }
+    { CTLnames_show_etal
+      string.to.integer
+      'num.names.shown.with.forced.et.al :=
+    }
+  if$
+  CTLuse_alt_spacing
+  empty$
+    { skip$ }
+    { CTLuse_alt_spacing
+      yes.no.to.int
+      'is.use.alt.interword.spacing :=
+    }
+  if$
+  CTLalt_stretch_factor
+  empty$
+    { skip$ }
+    { CTLalt_stretch_factor
+      'ALTinterwordstretchfactor :=
+      "\renewcommand{\BIBentryALTinterwordstretchfactor}{"
+      ALTinterwordstretchfactor * "}" *
+      write$ newline$
+    }
+  if$
+  CTLdash_repeated_names
+  empty$
+    { skip$ }
+    { CTLdash_repeated_names
+      yes.no.to.int
+      'is.dash.repeated.names :=
+    }
+  if$
+  CTLname_format_string
+  empty$
+    { skip$ }
+    { CTLname_format_string
+      'name.format.string :=
+    }
+  if$
+  CTLname_latex_cmd
+  empty$
+    { skip$ }
+    { CTLname_latex_cmd
+      'name.latex.cmd :=
+    }
+  if$
+  CTLname_url_prefix
+  missing$
+    { skip$ }
+    { CTLname_url_prefix
+      'name.url.prefix :=
+    }
+  if$
+
+
+  num.names.shown.with.forced.et.al max.num.names.before.forced.et.al >
+    { "CTLnames_show_etal cannot be greater than CTLmax_names_forced_etal in " cite$ * warning$ 
+      max.num.names.before.forced.et.al 'num.names.shown.with.forced.et.al :=
+    }
+    { skip$ }
+  if$
+}
+
+
+%%%%%%%%%%%%%%%%%%%
+%% ENTRY ALIASES %%
+%%%%%%%%%%%%%%%%%%%
+FUNCTION {conference}{inproceedings}
+FUNCTION {online}{electronic}
+FUNCTION {internet}{electronic}
+FUNCTION {webpage}{electronic}
+FUNCTION {www}{electronic}
+FUNCTION {default.type}{misc}
+
+
+
+%%%%%%%%%%%%%%%%%%
+%% MAIN PROGRAM %%
+%%%%%%%%%%%%%%%%%%
+
+READ
+
+EXECUTE {initialize.controls}
+EXECUTE {initialize.status.constants}
+EXECUTE {banner.message}
+
+EXECUTE {initialize.longest.label}
+ITERATE {longest.label.pass}
+
+EXECUTE {begin.bib}
+ITERATE {call.type$}
+EXECUTE {end.bib}
+
+EXECUTE{completed.message}
+
+
+%% That's all folks, mds.
diff --git a/chapters/1-Introduction.tex b/chapters/1-Introduction.tex
index b9b37ae2329e0a3b60cf958437285698c4442d9b..b4f51b422c091ce5f407097ba2c4aa60642afd62 100755
--- a/chapters/1-Introduction.tex
+++ b/chapters/1-Introduction.tex
@@ -1,24 +1,11 @@
 \chapter{Introduction}
 
-\section{Background}
-The purpose of this investigation is to identify and evaluate potential alternatives to ImageSharp for image processing. Currently, ImageSharp costs \$5,000 per year, which impacts our pricing structure. This review explores cost-effective and efficient alternatives.
+\input{sections/Chapter-1-sections/General-Introduction.tex}
 
-\section{Problem Statement}
-ImageSharp has limitations regarding cost and performance. These limitations motivate the search for a viable alternative that balances cost, functionality, and performance.
+\input{sections/Chapter-1-sections/Relevance.tex}
 
-\section{Research Objectives}
-The objectives are:
-\begin{itemize}
-    \item Identify cost-effective alternatives.
-    \item Evaluate alternatives based on functionality and performance.
-\end{itemize}
+\input{sections/Chapter-1-sections/Aim-and-Objectives.tex}
 
-\section{Thesis Structure}
-This thesis is organized as follows:
-\begin{itemize}
-    \item Chapter 2 provides a literature review of image processing libraries.
-    \item Chapter 3 describes the methodology.
-    \item Chapter 4 evaluates the alternatives.
-    \item Chapter 5 discusses the analysis and insights.
-    \item Chapter 6 concludes with recommendations.
-\end{itemize}
+\input{sections/Chapter-1-sections/Research-Questions.tex}
+
+\input{sections/Chapter-1-sections/Related-Work.tex}
\ No newline at end of file
diff --git a/chapters/2-Methodology.tex b/chapters/2-Methodology.tex
new file mode 100644
index 0000000000000000000000000000000000000000..7a92f8ce58427b2098b23441e4b757e7bb2c40e9
--- /dev/null
+++ b/chapters/2-Methodology.tex
@@ -0,0 +1,17 @@
+\chapter{Methodology}
+
+This chapter outlines the methodology used to compare various image processing libraries. The evaluation is grounded in two core performance metrics: \textbf{image conversion} and \textbf{pixel iteration}. These metrics provide a basis for comparing the efficiency and responsiveness of different libraries in performing fundamental image processing tasks. In the following sections, we explain why these metrics were chosen, how they are measured, how the results are processed, and the criteria for selecting the libraries under investigation.
+
+\input{sections/Chapter-2-sections/Performance-Metrics.tex}
+\input{sections/Chapter-2-sections/Rationale.tex}
+\input{sections/Chapter-2-sections/Measurement-Procedure.tex}
+\input{sections/Chapter-2-sections/Data-Analysis.tex}
+\input{sections/Chapter-2-sections/Library-Selection.tex}
+
+
+
+
+
+
+
+
diff --git a/chapters/3-Implementation.tex b/chapters/3-Implementation.tex
new file mode 100644
index 0000000000000000000000000000000000000000..3d5cf6ec9b1453eef5a05adca43972f40148855c
--- /dev/null
+++ b/chapters/3-Implementation.tex
@@ -0,0 +1,17 @@
+\chapter{Implementation}
+
+This chapter details the implementation of a comprehensive benchmarking framework to evaluate several image processing libraries, including ImageSharp, OpenCvSharp paired with SkiaSharp, Emgu CV coupled with Structure.Sketching, and Magick.NET integrated with MagicScaler. The objective was to create an end‐to‐end system that not only measures execution times for common image operations but also provides insights into memory usage.
+
+This study seeks to answer key questions regarding the efficiency of image conversion and pixel iteration operations—two fundamental tasks in image processing. The following sections describe the review process, architectural decisions, and technical implementations in the study.
+
+\input{sections/Chapter-3-sections/System-Architecture.tex}
+
+\input{sections/Chapter-3-sections/Image-Conversion.tex}
+
+\input{sections/Chapter-3-sections/Pixel-Iteration.tex}
+
+\input{sections/Chapter-3-sections/Libraries-Implementation.tex}
+
+\input{sections/Chapter-3-sections/Memory-Profiling.tex}
+
+\input{sections/Chapter-3-sections/Result-Export.tex}
\ No newline at end of file
diff --git a/chapters/4-Results.tex b/chapters/4-Results.tex
new file mode 100644
index 0000000000000000000000000000000000000000..cded2edf3a7077f4e5e854918b192dcd212d9120
--- /dev/null
+++ b/chapters/4-Results.tex
@@ -0,0 +1,18 @@
+\chapter{Results}
+
+This chapter presents our findings from the benchmarking experiments conducted to evaluate the performance of alternative image processing libraries. The results include quantitative data on image conversion and pixel iteration times, as well as memory consumption for each library or combination tested. The data generated will be used to answer the research question and support the hypotheses formulated in the previous chapters. The benchmarking approach consisted of running two primary tests on each library: an image conversion test that measured the time taken to load, process, and save images, and a pixel iteration test that recorded the time required to process every pixel in an image for a grayscale conversion. These experiments were performed in a controlled environment, with warm-up iterations included to reduce the impact of initial overhead. Memory consumption was tracked alongside processing times using BenchmarkDotNet, thereby offering a complete picture of both speed and resource utilization.\\
+
+%%[PLACEHOLDER: a media summarizing benchmarking methodology]
+
+Before discussing the results in detail, it is important to review the benchmarking design. In this study, each library was tested under the same conditions: the same input image was used, a fixed number of warm-up iterations were performed to reduce the effects of just-in-time compilation and caching, and finally, 100 main iterations were executed to ensure reliable statistics. For the image conversion test, the time measured was the duration needed to load a JPEG image, convert it to PNG, and save it back to disk. In the pixel iteration test, the focus was on recording the time required to access and change each pixel for producing a grayscale version of the image.
+
+Memory diagnostics were captured concurrently, with particular attention to allocated memory and garbage collection events. This dual approach ensured that our results were not solely focused on speed but also took into account the resource efficiency of each solution.
+
+
+%%[PLACEHOLDER: a media Diagram of benchmarking process] or reference to it
+
+\input{sections/Chapter-4-sections/Image_conversion_benchmark_results.tex}
+\input{sections/Chapter-4-sections/Pixel_iteration_benchmark_results.tex}
+\input{sections/Chapter-4-sections/Memory_benchmark_results.tex}
+\input{sections/Chapter-4-sections/Analysis_and_Interpretation_of_Results.tex}
+\input{sections/Chapter-4-sections/Summary.tex}
diff --git a/chapters/5-Discussion.tex b/chapters/5-Discussion.tex
new file mode 100644
index 0000000000000000000000000000000000000000..a4e62ceda72d3143bd7f0946076b2e069fc6cb25
--- /dev/null
+++ b/chapters/5-Discussion.tex
@@ -0,0 +1,73 @@
+\chapter{Discussion}
+
+This chapter interprets the results obtained in the benchmarking experiments, placing them in a broader theoretical and practical context. It explores what the results imply about the efficiency, ease of implementation, licensing concerns, and usability of the evaluated image processing libraries. Furthermore, it addresses the larger implications of these findings for software development and image processing as a field.
+
+\section{Interpreting the Results: Performance vs. Practicality}  
+
+The results obtained from our benchmarking study reveal a clear hierarchy of performance among the tested libraries. However, performance alone does not determine the best library for a given use case. The ideal choice depends on a variety of factors, including memory efficiency, ease of integration, licensing constraints, and the specific needs of the application.  
+
+\subsection{Performance Trade-offs and Suitability for Real-World Applications}
+
+From a performance standpoint, OpenCvSharp + SkiaSharp and Emgu CV + Structure.Sketching outperform ImageSharp in both image conversion and pixel iteration tasks. However, these libraries require more complex implementations compared to ImageSharp’s user-friendly API. While ImageSharp is slower, it remains a compelling option for projects where ease of use is prioritized over raw speed. SkiaSharp, with its lightweight architecture and cross-platform compatibility, demonstrated remarkable performance in image conversion tasks. It consistently outperformed ImageSharp while consuming significantly less memory. This makes SkiaSharp an ideal choice for applications requiring efficient format conversion without extensive manipulation of individual pixels. Emgu CV, despite its high memory usage, proved to be the fastest option for pixel iteration. This is unsurprising, given its reliance on OpenCV’s highly optimized C++ backend. However, its higher memory footprint may be a drawback for applications running on constrained systems. Magick.NET, on the other hand, performed well in certain tasks but fell short in pixel iteration due to excessive processing times. This suggests that while Magick.NET is a robust tool for high-quality image manipulation and format conversion, it may not be suitable for performance-critical applications requiring low-latency processing. Figures \ref{fig:image-conversion} and \ref{fig:pixel-iteration} show the performance comparison of the libraries in image conversion and pixel iteration tasks, respectively.
+
+\subsection{The Impact of Licensing on Library Selection}  
+
+Licensing can be a key consideration in selecting an image processing library. The cost of proprietary solutions can be prohibitive, particularly for small businesses or open-source projects. ImageSharp, while powerful, requires a yearly cost of a couple of thousand dollars for commercial use. This cost must be weighed against its performance limitations. Open-source alternatives like OpenCvSharp and SkiaSharp, which are licensed under MIT and Apache 2.0 respectively, offer a compelling alternative by providing high performance at no cost. Emgu CV, although based on the open-source OpenCV framework, requires a one-time fee (version specific) of less than a thousand dollars, with additional costs for future upgrades. While this is significantly more affordable than ImageSharp, it still represents an investment that must be justified by superior performance. On the other hand, Magick.NET is licensed under Apache 2.0 and provides extensive functionality for free, making it an attractive option for projects that require advanced image processing features but cannot afford proprietary licenses.
+ 
+\begin{longtable}
+    {|>{\raggedright\arraybackslash}p{0.30\textwidth}|>{\raggedright\arraybackslash}p{0.20\textwidth}|>{\raggedright\arraybackslash}p{0.20\textwidth}|>{\raggedright\arraybackslash}p{0.20\textwidth}|}
+    \hline
+    \rowcolor{purple!30}
+    \textbf{Library Combination} & \textbf{Licensing Model} & \textbf{Cost} & \textbf{Usage Restrictions / Remarks} \\
+    \hline
+    \endfirsthead
+
+    \hline
+    \rowcolor{purple!30}
+    \textbf{Library Combination} & \textbf{Licensing Model} & \textbf{Cost} & \textbf{Usage Restrictions / Remarks} \\
+    \hline
+    \endhead
+
+    \textbf{ImageSharp} & Proprietary (Commercial) & ~\$5,000/year & Requires a subscription; higher conversion times \\\hline
+    \textbf{OpenCvSharp + SkiaSharp} & Open-source (Apache-2.0 \& MIT) & Free & No recurring fees; excellent conversion performance \\\hline
+    \textbf{Magick.NET} & Open-source (Apache-2.0) & Free & Good for advanced processing; slower pixel iteration \\\hline
+    \textbf{Emgu CV + Structure.Sketching} & Open-source with paid tier & ~\$799 (Emgu CV only) & Cost-effective; strong for pixel manipulation and processing \\\hline
+
+    \caption{Library Licensing, Costs, and Usage Restrictions Comparison Table}
+    \label{tab:licensing}
+\end{longtable}
+
+
+\section{Strengths and Weaknesses of the Different Libraries}  
+
+ImageSharp’s biggest advantage is its simple API and pure .NET implementation. It is easy to integrate and requires minimal setup. However, our benchmarks show that it lags behind other libraries in performance. Its relatively high memory efficiency during pixel iteration is a plus, but for tasks requiring fast image conversion or pixel-level modifications, other options are preferable.  
+OpenCvSharp + SkiaSharp offers high performance with moderate complexity. This combination provides the best balance between speed and memory efficiency. OpenCvSharp offers the power of OpenCV’s optimized image processing, while SkiaSharp enhances its rendering and format conversion capabilities. However, using these libraries effectively requires familiarity with both OpenCV and SkiaSharp APIs, making them less beginner-friendly than ImageSharp. Emgu CV’s performance in pixel iteration tasks is unmatched, making it ideal for applications involving real-time image analysis, such as AI-driven image recognition. However, its high memory consumption may pose a problem for resource-limited environments. Structure.Sketching complements Emgu CV by providing efficient image creation and drawing capabilities, making this combination well-suited for applications requiring both processing speed and graphical rendering. In contrast, Magick.NET excels in high-quality image manipulation and resampling but falls short in raw speed. The high processing times recorded for pixel iteration indicate that Magick.NET is best suited for batch processing or scenarios where quality takes precedence over execution time. MagicScaler provides advanced image scaling capabilities, making it a valuable tool for applications requiring precise image resizing and enhancement.
+
+Overall, there is no single library that is best for all use cases. The optimal choice depends on the application’s specific requirements. If ease of implementation and maintainability are priorities, ImageSharp remains a solid choice despite its performance drawbacks. For performance-intensive applications where raw speed is essential, OpenCvSharp+SkiaSharp or Emgu CV+Structure.Sketching are superior choices.
+  
+\vspace{1em}
+\includegraphics[width=\textwidth]{media/usecase.png}
+\captionof{figure}{Mapping different libraries to their ideal use cases}
+\label{fig:usecase}
+
+\section{Considerations for Future Research}  
+
+Image processing is a fundamental component of many industries, including medical imaging, computer vision, digital content creation, and web applications. The performance gains demonstrated by OpenCvSharp and Emgu CV suggest that these libraries can benefit a wide range of applications, from autonomous vehicle navigation to medical diagnostics.  
+
+Moreover, the balance between speed and memory efficiency is a recurring challenge in computational imaging. This study highlights the need for hybrid approaches—such as combining OpenCvSharp with SkiaSharp to achieve optimal performance while minimizing resource consumption.  
+
+Future research could explore the following areas to further enhance the capabilities of image processing libraries:
+
+\textbf{Expanding the Scope of Benchmarking:} While our study focused on image conversion and pixel iteration, real-world applications often require additional operations such as filtering, blending, and object detection. Future research could expand the benchmarking scope to include these tasks, providing a more comprehensive evaluation of each library’s capabilities.  
+
+\textbf{GPU Acceleration and Parallel Processing:} One limitation of our study is that all benchmarks were conducted on a CPU. Many modern image processing tasks benefit from GPU acceleration, which libraries like OpenCV support. Investigating the performance of these libraries on GPU-accelerated hardware could yield valuable insights into their scalability and efficiency.  
+
+\textbf{Cloud-Based Processing:} With the growing adoption of cloud computing, it would be beneficial to evaluate how these libraries perform in cloud-based environments such as AWS Lambda or Azure Functions. Factors such as cold start times, scalability, and integration with cloud-based storage solutions would be critical considerations for enterprise applications.  
+
+\textbf{Further Optimizations in Memory Usage:} Although Emgu CV was the fastest in pixel iteration, its high memory consumption remains a concern. Future research could explore memory optimization techniques, such as reducing redundant data structures or leveraging memory-efficient algorithms, to improve its efficiency without compromising speed.  
+
+\section{Closing Thoughts}  
+
+The findings of this study offer clear guidance for developers seeking to optimize their image processing workflows. While ImageSharp remains a user-friendly option, open-source alternatives such as OpenCvSharp and SkiaSharp provide superior performance at no cost. Emgu CV excels in computationally intensive tasks but requires careful memory management, while Magick.NET remains a powerful tool for applications prioritizing high-quality output.  
+
+Ultimately, the choice of an image processing library should be guided by the specific needs of the application. Whether prioritizing speed, memory efficiency, ease of integration, or licensing freedom, developers now have a well-defined framework for making informed decisions.  
\ No newline at end of file
diff --git a/chapters/Appendices.tex b/chapters/Appendices.tex
index fddc37a0797414b527b3360b71caccbc007270ce..02d6cd54e218b0aa17265953abba7653c23cf676 100755
--- a/chapters/Appendices.tex
+++ b/chapters/Appendices.tex
@@ -1,4 +1,5 @@
 \chapter{Appendices}
+\cite{ferreira_generic_2024}
 \input{chapters/Appendices/appendix_a.tex}
 \input{chapters/Appendices/appendix_b.tex}
 \input{chapters/Appendices/appendix_c.tex}
\ No newline at end of file
diff --git a/media/log_1.png b/media/log_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..a20192457906a80c72671b727271fbdd019264bd
Binary files /dev/null and b/media/log_1.png differ
diff --git a/media/log_2.png b/media/log_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..13e0227b42a7f1297b686a53151b9cf77a125c31
Binary files /dev/null and b/media/log_2.png differ
diff --git a/media/usecase.png b/media/usecase.png
new file mode 100644
index 0000000000000000000000000000000000000000..351a445569465c9570e280af0b195a12572f2ba8
Binary files /dev/null and b/media/usecase.png differ
diff --git a/chapters/2-Literature_Review.tex b/outdated/2-Literature_Review.tex
similarity index 100%
rename from chapters/2-Literature_Review.tex
rename to outdated/2-Literature_Review.tex
diff --git a/chapters/3-Methodology.tex b/outdated/3-Methodology.tex
similarity index 100%
rename from chapters/3-Methodology.tex
rename to outdated/3-Methodology.tex
diff --git a/chapters/4-Evaluation_of_Alternatives.tex b/outdated/4-Evaluation_of_Alternatives.tex
similarity index 100%
rename from chapters/4-Evaluation_of_Alternatives.tex
rename to outdated/4-Evaluation_of_Alternatives.tex
diff --git a/chapters/5-Analysis_and_Discussion.tex b/outdated/5-Analysis_and_Discussion.tex
similarity index 100%
rename from chapters/5-Analysis_and_Discussion.tex
rename to outdated/5-Analysis_and_Discussion.tex
diff --git a/chapters/6-Conclusion_and_Recommendations.tex b/outdated/6-Conclusion_and_Recommendations.tex
similarity index 100%
rename from chapters/6-Conclusion_and_Recommendations.tex
rename to outdated/6-Conclusion_and_Recommendations.tex
diff --git a/outdated/Data-Collection.tex b/outdated/Data-Collection.tex
new file mode 100644
index 0000000000000000000000000000000000000000..5861fc825ee9f1f6ba035e967886bca9f943e000
--- /dev/null
+++ b/outdated/Data-Collection.tex
@@ -0,0 +1,33 @@
+\section{Data Collection and Analysis}
+
+To ensure that our performance measurements are both reliable and meaningful, we implement a rigorous data collection and analysis process:
+
+\subsection{ Repeated Trials and Averaging}
+
+Each experimental test (for both image loading and pixel iteration) is executed over a large number of iterations—typically 100 runs per library. This repetition helps smooth out transient variations and ensures that our measurements represent a consistent performance profile. The following steps are taken:
+
+\begin{itemize}
+    \item \textbf{Multiple Iterations:} For each library, the test is repeated numerous times under identical conditions.
+    \item \textbf{Statistical Averaging:} The mean time for each metric is computed from these iterations, providing a representative average performance figure.
+    \item \textbf{Variance Analysis:} Standard deviations and variances are calculated to assess the consistency of the results. Outlier values are identified and, if necessary, excluded to prevent skewed outcomes.
+\end{itemize}
+
+\subsection{ Memory Profiling}
+
+In addition to timing metrics, we capture memory consumption data to understand the resource efficiency of each library. This involves:
+
+\begin{itemize}
+    \item \textbf{Memory Allocation Tracking:} Monitoring the amount of memory allocated during image loading and pixel iteration operations.
+    \item \textbf{Garbage Collection Monitoring:} Recording the frequency and duration of garbage collection events to assess their impact on performance.
+    \item \textbf{Profiling Tools:} Utilizing integrated tools such as BenchmarkDotNet’s memory profiler, along with additional system-level profiling utilities, we record the peak memory usage and total allocations.
+\end{itemize}
+
+\subsection{ Statistical Analysis}
+
+The raw data collected from repeated trials is processed using statistical software to perform further analysis:
+
+\begin{itemize}
+    \item \textbf{Confidence Intervals:} Confidence intervals are calculated around the mean values to provide a measure of the reliability of our measurements.
+    \item \textbf{Comparative Statistical Tests:} Where applicable, we employ statistical tests (e.g., t-tests) to determine whether differences in performance metrics between libraries are statistically significant.
+    \item \textbf{Data Visualization:} Graphs and charts are generated to visually compare the performance distributions across libraries, offering an intuitive understanding of their relative efficiencies.
+\end{itemize}
\ No newline at end of file
diff --git a/outdated/Definition.tex b/outdated/Definition.tex
new file mode 100644
index 0000000000000000000000000000000000000000..639b629cd1cf8ad0fd76770e8fc40811302525f0
--- /dev/null
+++ b/outdated/Definition.tex
@@ -0,0 +1,27 @@
+\section{Definition and Rationale for the Metrics}
+
+\subsection{ Image Loading Time}
+
+\textbf{Definition:}  
+Image loading time is defined as the total elapsed time required to retrieve an image from a file system, decode it into a standardized internal representation (e.g., RGBA32), and initialize any associated data structures needed for further processing.
+
+\textbf{Rationale:}  
+\begin{itemize}
+    \item \textbf{I/O and Decoding Efficiency:} The time taken to load an image is a direct indicator of how efficiently a library handles input/output operations and decodes various image formats. In industrial applications, where images may be read in real time from cameras or sensors, fast loading is essential.
+    \item \textbf{Baseline for Processing Pipelines:} Since loading is the first step in any image processing pipeline, delays at this stage can cascade and affect overall system performance. An efficient image loading mechanism can significantly reduce the latency of the entire workflow.
+    \item \textbf{Minimizing Overhead:} The loading process also involves memory allocation and data structure initialization. A library that minimizes overhead in these areas will allow more resources to be devoted to the actual image processing tasks.
+    \item \textbf{Standardized Measurement:} The loading process can be uniformly measured across different libraries by using the same image datasets and environmental conditions, ensuring that the comparisons are fair and reproducible.
+\end{itemize}
+
+\subsection{ Pixel Iteration Time}
+
+\textbf{Definition:}  
+Pixel iteration time refers to the duration required to traverse every pixel in an image and apply a predefined, uniform operation—such as converting an image to grayscale. This metric captures the efficiency of the library’s core routines for low-level data manipulation.
+
+\textbf{Rationale:}
+\begin{itemize}
+    \item \textbf{Core Computation Performance:} Many image processing tasks (e.g., filtering, thresholding, and color adjustments) require examining or modifying every pixel. The speed at which a library can iterate over pixels is a direct measure of its processing efficiency.
+    \item \textbf{Algorithmic Sensitivity:} Pixel-level operations are sensitive to implementation details like loop unrolling, memory access patterns, and caching strategies. Faster iteration times imply better-optimized routines.
+    \item \textbf{Simplicity and Reproducibility:} Converting an image to grayscale is a simple and commonly used operation that can serve as a proxy for other pixel-level tasks. Its simplicity makes it an ideal candidate for standardizing comparisons across libraries.
+    \item \textbf{Isolation of Low-Level Performance:} By isolating the pixel iteration operation from higher-level tasks, we can specifically evaluate the efficiency of the library’s data structures and internal algorithms without interference from more complex operations.
+\end{itemize}
\ No newline at end of file
diff --git a/outdated/Measurement-Procedure.tex b/outdated/Measurement-Procedure.tex
new file mode 100644
index 0000000000000000000000000000000000000000..e2de2991771cb0279059c4b8d141dd05c4751871
--- /dev/null
+++ b/outdated/Measurement-Procedure.tex
@@ -0,0 +1,43 @@
+\section{Measurement Procedure}
+
+\subsection{ Experimental Setup}
+
+To ensure consistency and reliability, all tests are performed under controlled conditions:
+
+\begin{itemize}
+    \item \textbf{Hardware Consistency:} All experiments are conducted on the same machine with a fixed hardware configuration. This eliminates variability due to differences in CPU speed, memory, or storage performance.
+    \item \textbf{Software Environment:} We use a consistent operating system and development environment (e.g., .NET framework) across all tests. Timing is measured using high-precision tools such as BenchmarkDotNet to capture accurate performance metrics.
+    \item \textbf{Image Dataset:} A standardized dataset of images is used for testing. This dataset includes images of varying resolutions and formats to simulate real-world industrial scenarios.
+    \item \textbf{Repetition and Averaging:} Each test is repeated multiple times (e.g., 100 iterations) to account for random fluctuations and to ensure that the measured performance is statistically significant. The average and variance of the results are computed to assess consistency.
+\end{itemize}
+
+\subsection{ Measuring Image Loading Time}
+
+The procedure for measuring image loading time consists of the following steps:
+
+\begin{itemize}
+    \item \textbf{File Access Initiation:} The test begins by initiating a file read operation from the disk. The image file is selected from a predetermined dataset.
+    \item \textbf{Decoding and Conversion:} Once the file is accessed, the image is decoded into a standardized internal format, such as RGBA32. This step includes converting the raw image data (e.g., JPEG, PNG) into a format that is readily usable by the library.
+    \item \textbf{Initialization of Data Structures:} Any necessary memory allocation and initialization of internal data structures are performed at this stage.
+    \item \textbf{Timing the Operation:} A high-resolution timer records the time from the initiation of the file read operation until the image is fully loaded and ready for processing.
+    \item \textbf{Repetition and Averaging:} This process is repeated multiple times, and the average loading time is computed. Variability in the measurements is analyzed using standard deviation metrics to ensure reproducibility.
+\end{itemize}
+
+\subsection{ Measuring Pixel Iteration Time}
+
+The measurement of pixel iteration time is carried out in a similar systematic manner:
+\begin{itemize}
+    \item \textbf{Image Loading:} Prior to the iteration test, the image is loaded into memory using the same process as described above. This ensures that the image is in a known, consistent state.
+    \item \textbf{Pixel Operation Execution:} A simple operation is defined (e.g., converting each pixel to its grayscale equivalent). The algorithm iterates over every pixel, reading its RGB values and computing the grayscale value based on a weighted sum.
+    \item \textbf{Timing the Iteration:} The entire pixel iteration process is timed from the moment the iteration begins until every pixel has been processed. High-precision timers are used to capture this duration.
+    \item \textbf{Isolation of the Operation:} To ensure that the measurement reflects only the time for pixel iteration, other processes (such as file I/O) are not included in this timing.
+    \item \textbf{Multiple Iterations:} Like the image loading test, the pixel iteration test is repeated many times (e.g., 100 iterations) to obtain an average processing time. Outliers are analyzed and removed if they are deemed to be due to external interference.
+\end{itemize}
+
+\subsection{ Tools and Instrumentation}
+
+\begin{itemize}
+    \item \textbf{Benchmarking Framework:} BenchmarkDotNet is used as the primary tool for performance measurement. It provides accurate timing measurements and can also track memory usage.
+    \item \textbf{Profiling Utilities:} Additional profiling tools are employed to monitor memory allocation and garbage collection events. This ensures that both time and resource consumption are captured.
+    \item \textbf{Data Logging:} All measurements are logged for further statistical analysis. This raw data is later processed to compute averages, standard deviations, and confidence intervals, forming the basis for our comparative analysis.
+\end{itemize}
\ No newline at end of file
diff --git a/sections/Chapter-5-sections/Overall_Comparison_and_Key_Insights.tex b/outdated/Overall_Comparison_and_Key_Insights.tex
similarity index 100%
rename from sections/Chapter-5-sections/Overall_Comparison_and_Key_Insights.tex
rename to outdated/Overall_Comparison_and_Key_Insights.tex
diff --git a/outdated/Overview.tex b/outdated/Overview.tex
new file mode 100644
index 0000000000000000000000000000000000000000..e3092c7ed92224e0b6110673be2138abd26c8333
--- /dev/null
+++ b/outdated/Overview.tex
@@ -0,0 +1,19 @@
+\section{Overview}
+
+Image processing tasks in industrial applications typically involve two fundamental operations: acquiring image data (loading) and performing pixel-level computations (iteration). The efficiency of these operations directly influences the overall performance of any image processing system. In our evaluation, we have chosen to focus on two key metrics:
+
+\begin{itemize}
+    \item \textbf{Image Loading Time:} The time taken to load an image from persistent storage into the system's memory.
+    \item \textbf{Pixel Iteration Time:} The duration required to traverse and process each pixel in an image, exemplified by converting the image to grayscale.
+\end{itemize}
+
+These metrics are chosen for several reasons:
+
+\begin{itemize}
+    \item \textbf{Universality:} Both operations are common to nearly all image processing workflows, regardless of the complexity of the subsequent processing steps.
+    \item \textbf{Fundamental Performance Indicators:} The speed of image loading reflects the efficiency of file I/O, image decoding, and memory allocation, while pixel iteration performance indicates how well a library can handle low-level data manipulation—an operation that is central to filtering, enhancement, and other pixel-based computations.
+    \item \textbf{Comparability and Reproducibility:} By using standardized tasks that all libraries must perform, we can compare their performance on a like-for-like basis. This approach minimizes variability and provides a clear baseline for comparing otherwise diverse systems.
+    \item \textbf{Hardware Independence:} These metrics are less influenced by high-level algorithmic choices and more by the underlying implementation and optimizations, making them suitable for benchmarking across different libraries and platforms.
+\end{itemize}
+
+While there are many other potential metrics (such as encoding time, advanced filtering speed, or transformation accuracy), we selected image loading and pixel iteration because they are both critical and universally applicable operations. They provide a controlled environment for performance measurement and are directly relevant to the low-level efficiency needed in industrial scenarios.
\ No newline at end of file
diff --git a/outdated/Selection-Criteria.tex b/outdated/Selection-Criteria.tex
new file mode 100644
index 0000000000000000000000000000000000000000..64c60729b67711ef5cb15ffd8f713bee06dfa6be
--- /dev/null
+++ b/outdated/Selection-Criteria.tex
@@ -0,0 +1,52 @@
+\section{Selection Criteria for Image Processing Libraries}
+
+Selecting the appropriate libraries for our comparison is a critical step that shapes the overall evaluation. We employ a set of comprehensive criteria to ensure that only relevant and robust image processing libraries are included:
+
+\subsection{ Functional Coverage}
+
+The primary requirement is that the library must support the core operations fundamental to image processing:
+
+\begin{itemize}
+    \item \textbf{Image Loading and Creation:} The library should efficiently load images from various formats and support the creation of new images.
+    \item \textbf{Pixel Manipulation:} It must provide mechanisms for direct pixel access and manipulation, which are essential for tasks like filtering, transformation, and color adjustments.
+    \item \textbf{Transformation Capabilities:} Support for resizing, cropping, and color space conversions is essential to evaluate overall processing flexibility.
+\end{itemize}
+
+\subsection{ Performance and Resource Efficiency}
+
+Given the industrial context, the following performance aspects are prioritized:
+
+\begin{itemize}
+    \item \textbf{Low Image Loading Time:} Efficient I/O and decoding capabilities ensure that the system can handle high volumes of image data.
+    \item \textbf{Fast Pixel Iteration:} The library must exhibit optimized routines for traversing and processing pixels, indicating low-level efficiency.
+    \item \textbf{Memory Usage:} Efficient memory management is critical, particularly when processing high-resolution images or large batches. Libraries with minimal memory overhead and low garbage collection impact are preferred.
+\end{itemize}
+
+\subsection{ Ease of Integration}
+
+Practical integration into existing industrial systems is another important criterion:
+
+\begin{itemize}
+    \item \textbf{System Compatibility:} The library should seamlessly integrate with our existing software stack (e.g., the .NET framework).
+    \item \textbf{Documentation and Community Support:} Comprehensive documentation and active community support facilitate adoption and troubleshooting.
+    \item \textbf{Modularity and Extensibility:} A modular design that allows for the easy addition of custom functionalities is advantageous for industrial applications that may have evolving requirements.
+\end{itemize}
+
+\subsection{ Licensing and Cost Considerations}
+
+While the focus is on performance, practical deployment also depends on the licensing terms and cost:
+
+\begin{itemize}
+    \item \textbf{Open-Source or Cost-Effective Licensing:} Libraries with permissive or cost-effective licenses are preferred, as they reduce the total cost of ownership.
+    \item \textbf{Long-Term Maintenance:} Consideration of ongoing maintenance costs and the ease of future updates is essential for sustainable industrial deployment.
+\end{itemize}
+
+\subsection{ Relevance to Industrial Applications}
+
+Finally, the chosen libraries must demonstrate applicability to real-world industrial scenarios:
+
+\begin{itemize}
+    \item \textbf{Real-Time Processing:} The ability to handle real-time image processing is crucial for applications such as quality control and automated inspection.
+    \item \textbf{Scalability:} The library should efficiently manage large datasets and high-resolution images.
+    \item \textbf{Robustness:} Proven reliability in diverse industrial conditions (e.g., varying lighting, environmental noise) is essential for practical deployment.
+\end{itemize}
\ No newline at end of file
diff --git a/outdated/Summary.tex b/outdated/Summary.tex
new file mode 100644
index 0000000000000000000000000000000000000000..901666e3c7905b89452c4174793e8c9398218286
--- /dev/null
+++ b/outdated/Summary.tex
@@ -0,0 +1,9 @@
+\section{Summary}
+
+This chapter has presented a detailed methodology for the comparative evaluation of image processing libraries. We focused on two fundamental performance metrics—image loading time and pixel iteration time—selected for their universality, reproducibility, and direct relevance to low-level image processing tasks. The measurement procedures involve controlled experiments using standardized image datasets, with repeated trials to ensure statistical reliability and comprehensive memory profiling to capture resource efficiency.
+
+Furthermore, we outlined the selection criteria used to choose the libraries for evaluation, which include functional coverage, performance, ease of integration, licensing, and industrial applicability. By applying these criteria, we ensure that our comparative analysis is grounded in practical relevance and technical rigor.
+
+The methodology described in this chapter forms the backbone of our experimental evaluation. It provides a clear, structured framework for measuring and analyzing the performance of different image processing libraries. This framework not only facilitates direct comparisons but also helps identify trade-offs between speed, memory efficiency, and ease of integration—factors that are critical for the deployment of image processing solutions in industrial applications.
+
+With the methodology established, the following chapters will present the experimental results and discuss their implications for selecting the most suitable image processing library for industrial applications.
\ No newline at end of file
diff --git a/sections/Chapter-3-sections/Tasks.tex b/outdated/Tasks.tex
similarity index 100%
rename from sections/Chapter-3-sections/Tasks.tex
rename to outdated/Tasks.tex
diff --git a/sections/Chapter-5-sections/development-effort-estimation.tex b/outdated/development-effort-estimation.tex
similarity index 100%
rename from sections/Chapter-5-sections/development-effort-estimation.tex
rename to outdated/development-effort-estimation.tex
diff --git a/sections/Chapter-4-sections/evaluation-of-alternatives-in-go.tex b/outdated/evaluation-of-alternatives-in-go.tex
similarity index 100%
rename from sections/Chapter-4-sections/evaluation-of-alternatives-in-go.tex
rename to outdated/evaluation-of-alternatives-in-go.tex
diff --git a/sections/Chapter-4-sections/evaluation-of-alternatives.tex b/outdated/evaluation-of-alternatives.tex
similarity index 100%
rename from sections/Chapter-4-sections/evaluation-of-alternatives.tex
rename to outdated/evaluation-of-alternatives.tex
diff --git a/sections/Chapter-6-sections/final-decision-on-image-processing-library.tex b/outdated/final-decision-on-image-processing-library.tex
similarity index 100%
rename from sections/Chapter-6-sections/final-decision-on-image-processing-library.tex
rename to outdated/final-decision-on-image-processing-library.tex
diff --git a/sections/Chapter-5-sections/image-processing-benchmark.tex b/outdated/image-processing-benchmark.tex
similarity index 100%
rename from sections/Chapter-5-sections/image-processing-benchmark.tex
rename to outdated/image-processing-benchmark.tex
diff --git a/sections/Chapter-5-sections/memory-benchmarking.tex b/outdated/memory-benchmarking.tex
similarity index 100%
rename from sections/Chapter-5-sections/memory-benchmarking.tex
rename to outdated/memory-benchmarking.tex
diff --git a/sections/Chapter-3-sections/performance-metrics.tex b/outdated/performance-metrics.tex
similarity index 100%
rename from sections/Chapter-3-sections/performance-metrics.tex
rename to outdated/performance-metrics.tex
diff --git a/sections/Chapter-4-sections/suggestions.tex b/outdated/suggestions.tex
similarity index 100%
rename from sections/Chapter-4-sections/suggestions.tex
rename to outdated/suggestions.tex
diff --git a/sections/Chapter-1-sections/Aim-and-Objectives.tex b/sections/Chapter-1-sections/Aim-and-Objectives.tex
new file mode 100644
index 0000000000000000000000000000000000000000..471684514a87f9ec44c38f8e52134d15b836f457
--- /dev/null
+++ b/sections/Chapter-1-sections/Aim-and-Objectives.tex
@@ -0,0 +1,67 @@
+\section{ Aim of the Study and Its Implications for Selecting an Image Processing Tool}
+
+This study was initiated to compare a broad range of image processing libraries based on performance, functionality, and ease of integration. Although the investigation was partly motivated by considerations around the ImageSharp license, the primary goal is to establish a general framework for evaluating different tools in the field. By assessing key metrics such as image conversion speed, pixel iteration efficiency, memory consumption, and development effort, the research aims to provide a balanced perspective that assists developers, engineers, and decision-makers in selecting the most appropriate image processing tool for their projects.
+
+\subsection{ Research Goals and Objectives}
+
+At its core, the study sought to answer the question: “Which image processing library best meets the diverse needs of modern applications?” To do so, several key objectives were identified:
+
+\begin{enumerate}
+    \item \textbf{Provide a Framework for Educated Choices:} 
+        The research aimed to create a framework that helps users evaluate image processing tools based on defined metrics. By comparing factors such as processing speed, memory consumption, development effort, and integration ease, the study aimed to demystify the trade-offs that come with adopting any given tool. This approach allows users to align their choices with their performance needs and project constraints, rather than making decisions solely based on cost considerations. As highlighted in the investigation, while saving on licensing fees is beneficial, the broader picture includes aspects like processing efficiency and long-term maintainability.
+    \item \textbf{Compare a Wide Range of Alternatives:} 
+        ImageSharp is one of many tools available for image processing. The study examined alternatives including OpenImageIO, SkiaSharp, Magick.NET, Emgu CV, MagicScaler, and several others. Each library was assessed against a set of criteria, such as its ability to handle tasks like image loading, pixel manipulation, resizing, and image composition. By comparing these libraries side-by-side, the study provides a nuanced view that helps practitioners understand not only what each tool can do but also the potential gaps that might exist depending on the application’s requirements.
+    \item \textbf{Define Clear Performance and Functional Metrics:}
+        A significant goal of the study was to establish quantifiable metrics that could be used to assess the performance of each image processing library. Metrics such as image conversion time, pixel iteration efficiency, and memory usage were used as benchmarks. For instance, the study measured how long it takes for a tool to load an image, perform a conversion (e.g., from JPEG to PNG), and iterate through pixels for operations like converting to grayscale. Such detailed benchmarking is instrumental in understanding the real-world performance of each library and is critical for users who need to balance speed with resource consumption.
+    \item \textbf{Assist in Tool Selection for Varied Requirements:}
+        Beyond performance metrics, the study was designed to consider the broader context of software integration. Factors such as ease of implementation, the learning curve for developers, compatibility with existing systems, and community support were all taken into account. This holistic view means that the research is not just about raw performance numbers but also about the practicalities of deploying and maintaining these tools in production environments.
+\end{enumerate}
+
+\subsection{ Methodology and Benchmarking}
+
+To achieve these objectives, the study adopted a multi-faceted methodological approach that combined qualitative assessments with quantitative benchmarks. The research was structured into several key phases:
+
+\subsubsection{ Establishing Functional Criteria}
+
+The first step was to outline the core functionalities required from an image processing library. These functionalities included image loading and creation, pixel-level manipulation, image transformation (such as cropping, resizing, and color conversion), and the encoding/decoding of various image formats. Each library was then evaluated on how well it supports these functions. For example, while ImageSharp provides an elegant and fluent API for chaining operations like cloning, mutating, and resizing images, other tools like Emgu CV or SkiaSharp may offer advantages in raw performance or specific tasks such as advanced 2D rendering.
+
+\subsubsection{ Performance and Memory Benchmarking}
+
+Quantitative performance metrics were a central component of the study. Two key tests were developed:
+
+\begin{itemize}
+    \item \textbf{Image Conversion Test:} This test measured the time taken to load an image, convert it to a different format, and save the result. It simulates a typical workflow in many image processing applications and serves as a proxy for real-world performance. The results indicated significant differences between libraries. For instance, SkiaSharp showed excellent performance in image conversion tasks with both the fastest conversion times and minimal memory allocation, making it an attractive option for performance-critical applications.
+    \item \textbf{Pixel Iteration Test:} Many image processing tasks require iterating over each pixel—for example, when applying filters or performing color adjustments. The study evaluated how long each library took to perform such operations and the associated memory footprint. Although some tools demonstrated faster pixel iteration times, the overall memory consumption varied widely, highlighting the trade-off between speed and resource usage.
+\end{itemize}
+
+\subsubsection{ Estimation of Development Effort}
+
+Recognizing that performance is not the sole criterion for tool selection, the study also estimated the development effort required to integrate each library into an existing application. This included considerations such as the ease of understanding the API, the availability of documentation and community support, and the potential need for custom code to bridge functionality gaps. For example, while some libraries offered powerful processing capabilities, they might require significant custom development to integrate seamlessly into a .NET environment or to support specific image formats.
+
+\subsection{ Practical Implications for Tool Selection}
+
+The comprehensive evaluation detailed in this study has several practical implications for anyone looking to select an image processing tool:
+
+\subsubsection{ Balancing Performance with Practicality}
+
+The metrics established in the study—ranging from processing times to memory usage—provide a clear picture of the strengths and weaknesses of each library. This information is invaluable when balancing the need for high-performance image processing against practical considerations such as ease of integration and long-term maintenance. For instance, a company that prioritizes rapid image conversion and low memory consumption might lean towards SkiaSharp, while an organization needing advanced image manipulation capabilities and robust community support might find Emgu CV more appealing.
+
+\subsubsection{ Making Informed Trade-Offs}
+
+One of the standout contributions of the study is its ability to help users make informed trade-offs. Rather than making decisions based on a single metric, the evaluation presents a multi-dimensional view that incorporates performance, development effort, and functional capabilities. This approach ensures that users can select a tool that best fits their unique requirements, whether that means prioritizing speed, minimizing development overhead, or ensuring compatibility with existing workflows.
+
+\subsubsection{ Extending Beyond Cost Savings}
+
+While cost savings—such as the €5000 per year saving associated with avoiding ImageSharp’s licensing fees—are certainly a factor, the study underscores that financial considerations should not be the sole driver of decision-making. The true value of an image processing tool lies in its ability to meet specific technical and operational requirements. By providing a detailed comparison of several alternatives, the research emphasizes that factors like ease of integration, scalability, and overall performance are equally, if not more, important. This holistic approach helps organizations avoid the pitfall of selecting a tool based solely on its cost.
+
+\subsubsection{ Guiding Future Developments and Integrations}
+
+The insights gained from the study are not only applicable to current technology choices but also serve as a guide for future developments in image processing. The detailed benchmarks and performance analyses can inform future projects, helping developers understand where improvements can be made or which features are most critical. Additionally, the study’s approach to evaluating development effort and integration challenges provides a roadmap for how future research can build on these findings to further refine the selection process.
+
+\subsection{ Conclusion}
+
+In conclusion, this research offers a detailed and methodical framework for comparing a diverse range of image processing libraries. By focusing on critical performance indicators—such as image conversion efficiency, pixel manipulation speed, and memory usage—alongside practical considerations for integration, the study provides actionable insights that transcend mere numerical comparisons. This comprehensive evaluation enables practitioners to appreciate the subtle differences and inherent trade-offs between various tools, ensuring that the selected library meets specific operational requirements and supports long-term scalability.
+
+The findings underscore the importance of adopting a multi-dimensional evaluation approach. Rather than basing decisions solely on isolated performance metrics, the research illustrates how a balanced view—integrating both technical capabilities and practical implementation challenges—can lead to more robust and adaptable solutions. This perspective is essential in a field where evolving technologies and shifting project demands necessitate both flexibility and precision in tool selection.
+
+Ultimately, the insights derived from this investigation empower developers, engineers, and decision-makers to navigate the complex landscape of image processing technologies with confidence. By providing a thorough, balanced comparison of various libraries, the study serves as a valuable resource for making informed decisions that address current needs while also laying a strong foundation for future innovation and development in image processing.
\ No newline at end of file
diff --git a/sections/Chapter-1-sections/General-Introduction.tex b/sections/Chapter-1-sections/General-Introduction.tex
new file mode 100644
index 0000000000000000000000000000000000000000..c7216f6d3809468f39e7732f9ef8324c6319a131
--- /dev/null
+++ b/sections/Chapter-1-sections/General-Introduction.tex
@@ -0,0 +1,105 @@
+\section{The Significance of Image Processing in Modern Industry}
+
+Digital image processing has emerged as a cornerstone of modern industrial applications, revolutionizing the way industries operate and innovate. From quality control in manufacturing to advanced simulations in aerospace, the ability to process and analyze images digitally has unlocked unprecedented efficiencies and capabilities. This field, which involves the manipulation and analysis of images using algorithms, has evolved significantly over the past few decades, driven by advancements in computing power, algorithm development, and the proliferation of digital imaging devices.
+
+The significance of digital image processing in industrial applications cannot be overstated. In manufacturing, for instance, image processing is integral to quality assurance processes, where it is used to detect defects, measure product dimensions, and ensure compliance with stringent standards. This capability not only enhances product quality but also reduces waste and operational costs. In the automotive industry, image processing is pivotal in the development of autonomous vehicles, where it aids in object detection, lane departure warnings, and pedestrian recognition. Similarly, in the healthcare sector, digital image processing is used in medical imaging technologies such as MRI and CT scans, enabling more accurate diagnoses and treatment planning.
+
+The evolution of digital image processing has been marked by several key developments. Initially, the field was limited by the computational resources available, with early applications focusing on basic image enhancement and restoration. However, the advent of powerful processors and the development of sophisticated algorithms have expanded the scope of image processing to include complex tasks such as pattern recognition, 3D reconstruction, and real-time image analysis. The integration of artificial intelligence and machine learning has further propelled the field, allowing for the development of intelligent systems capable of learning from data and improving over time.
+
+For a company like Dassault Systèmes, which operates at the forefront of aerospace, defense, and industrial engineering, a comparative study of image processing libraries is crucial. These libraries, which provide pre-built functions and tools for image analysis, vary significantly in terms of performance, ease of use, and functionality. Selecting the right library can have a profound impact on the efficiency and effectiveness of image processing tasks. For instance, libraries such as OpenCV, TensorFlow, and MATLAB offer different strengths and weaknesses, and understanding these differences is essential for optimizing industrial applications.
+
+A comparative study of these libraries not only aids in selecting the most suitable tools for specific tasks but also highlights areas for potential improvement and innovation. By analyzing the performance of different libraries in various scenarios, industries can identify gaps in current technologies and drive the development of new solutions that better meet their needs. Moreover, such studies contribute to the broader field of digital image processing by providing insights into best practices and emerging trends.
+
+% References
+
+% 1. Gonzalez, R. C., & Woods, R. E. (2008). Digital Image Processing. Pearson Prentice Hall.
+% 2. Jain, A. K. (1989). Fundamentals of Digital Image Processing. Prentice Hall.
+% 3. Bradski, G., & Kaehler, A. (2008). Learning OpenCV: Computer Vision with the OpenCV Library. O'Reilly Media.
+% 4. Russ, J. C. (2011). The Image Processing Handbook. CRC Press.
+% 5. Goodfellow, I., Bengio, Y., & Courville, A. (2016). Deep Learning. MIT Press.
+% 6. Szeliski, R. (2010). Computer Vision: Algorithms and Applications. Springer.
+
+\subsection{Evolution and Impact of Digital Image Processing}
+
+Digital image processing has evolved significantly since its inception, transforming from a niche scientific endeavor into a cornerstone of modern technology with applications spanning numerous industries. This section outlines the historical development of digital image processing, highlighting key advancements and their impact on industrial innovation.
+
+\subsubsection{Early Beginnings}
+
+The origins of digital image processing can be traced back to the 1920s and 1930s with the development of television technology, which laid the groundwork for electronic image capture and transmission. However, it was not until the 1960s that digital image processing began to take shape as a distinct field. The launch of the first digital computers provided the necessary computational power to process images digitally. During this period, NASA played a pivotal role by using digital image processing to enhance images of the moon's surface captured by the Ranger 7 spacecraft in 1964. This marked one of the first significant applications of digital image processing, demonstrating its potential for scientific and exploratory purposes.
+
+\subsubsection{The 1970s and 1980s: Theoretical Foundations and Practical Applications}
+
+The 1970s saw the establishment of theoretical foundations for digital image processing. Researchers developed algorithms for image enhancement, restoration, and compression. The Fast Fourier Transform (FFT), introduced by Cooley and Tukey in 1965, became a fundamental tool for image processing, enabling efficient computation of image transformations. This period also witnessed the development of the first commercial applications, such as medical imaging systems. The introduction of Computed Tomography (CT) in 1972 revolutionized medical diagnostics by providing detailed cross-sectional images of the human body, showcasing the life-saving potential of digital image processing.
+
+\subsubsection{The 1990s: The Rise of Computer Vision}
+
+The 1990s marked a significant shift towards computer vision, a subfield of digital image processing focused on enabling machines to interpret visual data. This era saw the development of algorithms for object recognition, motion detection, and 3D reconstruction. The introduction of the JPEG standard in 1992 facilitated the widespread adoption of digital images by providing an efficient method for image compression, crucial for the burgeoning internet era. The decade also saw advancements in facial recognition technology, which laid the groundwork for future applications in security and personal identification.
+
+\subsubsection{The 2000s: Machine Learning and Image Processing}
+
+The 2000s witnessed the integration of machine learning techniques with digital image processing, leading to significant improvements in image analysis and interpretation. The development of Support Vector Machines (SVM) and neural networks enabled more accurate image classification and pattern recognition. This period also saw the emergence of digital cameras and smartphones, which democratized image capture and sharing, further driving the demand for advanced image processing techniques.
+
+\subsubsection{The 2010s to Present: Deep Learning and Industrial Innovation}
+
+The advent of deep learning in the 2010s revolutionized digital image processing. Convolutional Neural Networks (CNNs), popularized by the success of AlexNet in the ImageNet competition in 2012, dramatically improved the accuracy of image recognition tasks. This breakthrough spurred innovation across various industries. In healthcare, deep learning algorithms are now used for early detection of diseases through medical imaging, improving patient outcomes. In the automotive industry, image processing is a critical component of autonomous vehicle systems, enabling real-time object detection and navigation.
+
+In recent years, digital image processing has expanded into areas such as augmented reality (AR) and virtual reality (VR), enhancing user experiences in gaming, education, and training. The integration of image processing with artificial intelligence continues to drive innovation, with applications in fields such as agriculture, where drones equipped with image processing capabilities monitor crop health and optimize yields.
+
+% References
+
+% 1. Cooley, J. W., & Tukey, J. W. (1965). An algorithm for the machine calculation of complex Fourier series. Mathematics of Computation, 19(90), 297-301.
+% 2. Hounsfield, G. N. (1973). Computerized transverse axial scanning (tomography): Part 1. Description of system. British Journal of Radiology, 46(552), 1016-1022.
+% 3. LeCun, Y., Bengio, Y., & Hinton, G. (2015). Deep learning. Nature, 521(7553), 436-444.
+% 4. Krizhevsky, A., Sutskever, I., & Hinton, G. E. (2012). ImageNet classification with deep convolutional neural networks. Advances in Neural Information Processing Systems, 25, 1097-1105.
+
+\subsection{Current Applications of Image Processing in Industry}
+
+Image processing, a critical component of computer vision, has become an indispensable tool across various industries, driving advancements in productivity, quality control, and automation. This section explores the utilization of image processing in several key sectors, emphasizing applications that demand high precision and efficiency.
+
+\subsubsection{Manufacturing and Quality Control} 
+
+In the manufacturing industry, image processing is pivotal for quality control and defect detection. Automated visual inspection systems utilize high-resolution cameras and sophisticated algorithms to detect defects in products at a speed and accuracy unattainable by human inspectors. For instance, in semiconductor manufacturing, image processing is used to inspect wafers for defects, ensuring that only flawless products proceed to the next production stage. This not only enhances product quality but also reduces waste and operational costs. A study by Zhang et al. (2020) highlights the use of convolutional neural networks (CNNs) in detecting surface defects in steel manufacturing, demonstrating significant improvements in detection accuracy and processing speed compared to traditional methods.
+
+\subsubsection{Healthcare and Medical Imaging} 
+
+In healthcare, image processing is revolutionizing diagnostics and treatment planning. Techniques such as MRI, CT scans, and X-rays rely heavily on image processing to enhance image quality and extract meaningful information. For example, in radiology, image processing algorithms help in the early detection of diseases like cancer by improving the clarity and contrast of medical images, allowing for more accurate diagnoses. A research paper by Litjens et al. (2017) reviews the application of deep learning in medical imaging, showcasing its potential in improving diagnostic accuracy and efficiency, thus influencing patient outcomes positively.
+
+\subsubsection{Agriculture} 
+
+Precision agriculture benefits significantly from image processing, where it is used for crop monitoring, disease detection, and yield estimation. Drones equipped with multispectral cameras capture images of fields, which are then processed to assess plant health and detect stress factors such as pests or nutrient deficiencies. This enables farmers to make informed decisions, optimizing resource use and improving crop yields. A case study by Maimaitijiang et al. (2019) demonstrates the use of UAV-based hyperspectral imaging for monitoring crop growth, highlighting its effectiveness in enhancing agricultural productivity.
+
+\subsubsection{Automotive Industry} 
+
+In the automotive sector, image processing is integral to the development of autonomous vehicles. Advanced driver-assistance systems (ADAS) rely on image processing to interpret data from cameras and sensors, enabling features such as lane departure warnings, adaptive cruise control, and automatic parking. These systems enhance vehicle safety and pave the way for fully autonomous driving. A study by Janai et al. (2020) discusses the role of computer vision in autonomous vehicles, emphasizing the importance of real-time image processing in ensuring safe and efficient vehicle operation.
+
+\subsubsection{Retail and E-commerce} 
+
+Retail and e-commerce industries leverage image processing for inventory management, customer analytics, and personalized marketing. In inventory management, image processing systems track stock levels and identify misplaced items, streamlining operations and reducing labor costs. In customer analytics, facial recognition and sentiment analysis provide insights into customer behavior and preferences, enabling personalized marketing strategies. A paper by Ren et al. (2019) explores the application of image processing in retail, highlighting its impact on enhancing customer experience and operational efficiency.
+
+% References
+
+% - Zhang, Y., Wang, S., & Liu, Y. (2020). Surface defect detection using convolutional neural networks. *Journal of Manufacturing Processes*, 49, 1-9.
+% - Litjens, G., Kooi, T., Bejnordi, B. E., Setio, A. A. A., Ciompi, F., Ghafoorian, M., ... & van Ginneken, B. (2017). A survey on deep learning in medical image analysis. *Medical Image Analysis*, 42, 60-88.
+% - Maimaitijiang, M., Sagan, V., Sidike, P., Hartling, S., Esposito, F., Fritschi, F. B., & Prasad, S. (2019). Soybean yield prediction from UAV using multimodal data fusion and deep learning. *Remote Sensing of Environment*, 233, 111-117.
+% - Janai, J., Güney, F., Behl, A., & Geiger, A. (2020). Computer vision for autonomous vehicles: Problems, datasets and state of the art. *Foundations and Trends® in Computer Graphics and Vision*, 12(1-3), 1-308.
+% - Ren, S., He, K., Girshick, R., & Sun, J. (2019). Faster R-CNN: Towards real-time object detection with region proposal networks. *IEEE Transactions on Pattern Analysis and Machine Intelligence*, 39(6), 1137-1149.
+
+\subsection{The Strategic Importance of Image Processing Libraries}
+
+In the rapidly evolving landscape of industrial applications, the demand for efficient, adaptable, and scalable image processing libraries has become increasingly critical. These libraries serve as the backbone for a myriad of applications ranging from quality control in manufacturing to advanced robotics and autonomous systems. The benefits of employing such libraries are manifold, including reduced time-to-market, enhanced product quality, and cost efficiency, all of which are pivotal for maintaining competitive advantage in the industrial sector.
+
+Firstly, efficient image processing libraries significantly reduce the time-to-market for new products and technologies. In industries where innovation cycles are short and competition is fierce, the ability to quickly develop and deploy new solutions is crucial. Efficient libraries streamline the development process by providing pre-built, optimized functions that developers can readily integrate into their systems. This reduces the need for writing complex algorithms from scratch, thereby accelerating the development timeline. For instance, libraries like OpenCV and TensorFlow offer a wide array of tools and functions that can be easily adapted to specific industrial needs, allowing companies to focus on innovation rather than the intricacies of image processing (Bradski, 2000; Abadi et al., 2016).
+
+Adaptability is another critical factor that underscores the importance of these libraries. Industrial environments are often dynamic, with varying requirements and conditions that necessitate flexible solutions. Scalable image processing libraries can be tailored to meet specific needs, whether it involves adjusting to different hardware configurations or integrating with other software systems. This adaptability ensures that companies can respond swiftly to changes in market demands or technological advancements without overhauling their entire system architecture. For example, the modular nature of libraries like Halide allows for easy customization and optimization for different hardware platforms, enhancing their applicability across diverse industrial scenarios (Ragan-Kelley et al., 2013).
+
+Moreover, the use of scalable image processing libraries contributes to enhanced product quality. In industries such as automotive manufacturing or pharmaceuticals, precision and accuracy are paramount. Advanced image processing capabilities enable more rigorous quality control processes, ensuring that defects are detected and rectified early in the production cycle. This not only improves the quality of the final product but also minimizes waste and reduces the likelihood of costly recalls. Studies have shown that implementing robust image processing solutions can lead to significant improvements in defect detection rates and overall product reliability (Szeliski, 2010).
+
+Cost efficiency is another significant advantage offered by these libraries. By leveraging open-source or commercially available image processing tools, companies can reduce the costs associated with software development and maintenance. These libraries often come with extensive documentation and community support, which can further reduce the need for specialized training and technical support. Additionally, the ability to scale solutions according to demand means that companies can optimize their resource allocation, investing only in the capabilities they need at any given time. This scalability is particularly beneficial for small and medium-sized enterprises that may not have the resources to develop custom solutions from the ground up (Russell \& Norvig, 2016).
+
+% References
+
+% - Bradski, G. (2000). The OpenCV Library. *Dr. Dobb's Journal of Software Tools*.
+% - Abadi, M., Barham, P., Chen, J., Chen, Z., Davis, A., Dean, J., ... & Zheng, X. (2016). TensorFlow: A System for Large-Scale Machine Learning. In *12th USENIX Symposium on Operating Systems Design and Implementation (OSDI 16)* (pp. 265-283).
+% - Ragan-Kelley, J., Barnes, C., Adams, A., Paris, S., Durand, F., & Amarasinghe, S. (2013). Halide: A Language and Compiler for Optimizing Parallelism, Locality, and Recomputation in Image Processing Pipelines. *ACM SIGPLAN Notices*, 48(6), 519-530.
+% - Szeliski, R. (2010). *Computer Vision: Algorithms and Applications*. Springer Science & Business Media.
+% - Russell, S., & Norvig, P. (2016). *Artificial Intelligence: A Modern Approach*. Pearson.
\ No newline at end of file
diff --git a/sections/Chapter-1-sections/Related-Work.tex b/sections/Chapter-1-sections/Related-Work.tex
new file mode 100644
index 0000000000000000000000000000000000000000..7d2393700efa87c91fefc9a1b0e8014c7d1d2f9e
--- /dev/null
+++ b/sections/Chapter-1-sections/Related-Work.tex
@@ -0,0 +1,223 @@
+\section{Related Work}
+
+In this chapter, we review and synthesize research studies that relate to the evaluation of image processing libraries and their applications in industrial and specialized contexts. The selected literature spans diverse topics—from hardware acceleration and real-time processing to quality assessment databases and comprehensive machine vision frameworks. Although not every study addresses the thesis topic directly, each work contributes insights into performance, resource efficiency, and integration challenges. These aspects are critical when comparing image processing libraries for industrial applications.
+
+%%%
+
+\subsection{Distributed Large-Scale Graph Processing on FPGAs (Sahebi et al., 2023)}
+
+Sahebi et al. (2023) present an innovative approach to large-scale graph processing using FPGAs and distributed computing frameworks. Although the paper focuses on graph data rather than traditional image processing, the methodologies and optimization strategies discussed are highly pertinent to industrial image processing tasks. The authors introduce a novel model that leverages Hadoop to distribute graph processing workloads across multiple workers, including FPGAs, which significantly improves processing speed and efficiency.
+
+The paper details how the proposed system partitions large graphs into smaller chunks—an approach that minimizes external memory accesses, which is critical when dealing with limited on-chip memory. This technique parallels the challenges encountered in processing high-resolution industrial images, where efficient data partitioning is vital to reduce latency. The study demonstrates speedups of up to 2x, 4.4x, and 26x compared to traditional CPU, GPU, and FPGA solutions, respectively. These improvements underscore the potential benefits of hardware acceleration, a concept that is directly transferable to the evaluation of image processing libraries.
+
+Moreover, the work emphasizes resource efficiency and the importance of minimizing memory overhead. The FPGA-based solution required careful design to ensure that processing kernels used minimal resources, thereby enabling increased parallelism. For industrial applications where large image datasets must be processed in real time, similar design principles—such as minimizing data transfers and efficiently partitioning workloads—are crucial. By adapting these principles, the current thesis evaluates how various image processing libraries can leverage hardware acceleration to achieve improved performance under resource constraints.
+
+In summary, Sahebi et al. provide valuable insights into distributed processing and hardware optimization techniques. Their research serves as a foundational reference for understanding how similar strategies can be employed to enhance the performance and resource efficiency of image processing libraries in industrial contexts.
+
+%%%
+
+\subsection{A New Image Quality Database for Multiple Industrial Processes (Ma et al., 2024)}
+
+Ma et al. (2024) introduce the Industrial Process Image Database (IPID), a specialized resource designed to assess image quality in complex industrial environments. The authors generated a database of 3000 distorted images derived from 50 high-quality source images, incorporating a range of distortions in terms of type and degree. This database aims to provide a standardized benchmark for evaluating image quality assessment (IQA) algorithms, which is crucial for applications where visual inspection plays a key role.
+
+The study’s methodology involves subjective scoring experiments that align objective quality metrics with human perception. Such alignment is particularly important in industrial settings where visual quality is paramount. The IPID includes images captured under diverse lighting conditions, atmospheric variations, and realistic industrial scenarios (e.g., production lines and warehouses). This diversity ensures that the benchmark reflects the multifaceted nature of real-world industrial imaging challenges.
+
+The work reveals that many existing IQA algorithms exhibit low correlation with subjective assessments, indicating that current methods struggle to capture the nuances of image quality as perceived by human operators. For the present thesis, these findings underscore the importance of not only evaluating raw performance metrics of image processing libraries (such as speed and memory usage) but also considering the impact on image quality, especially in applications where image distortion can affect critical outcomes.
+
+Ma et al.’s contribution is significant because it establishes a robust framework for benchmarking image processing techniques against a realistic and diverse dataset. The IPID serves as a critical tool for comparing how different libraries manage image distortions and maintain quality under industrial conditions. Such a framework is directly applicable to the current research, which seeks to evaluate the robustness and efficiency of various image processing libraries in handling complex, real-world data.
+
+%%%
+
+\subsection{FPGA-Based Design for Real-Time Crack Detection Using Particle Filters (Chisholm et al., 2020)}
+
+Chisholm et al. (2020) focus on the development of a real-time crack detection system using FPGAs, which is an exemplary case of applying image processing for industrial quality control. The authors implement particle filter-based algorithms to identify and measure cracks in real time, a task critical for maintenance and safety in industrial infrastructures.
+
+The study is notable for its comprehensive evaluation of both detection accuracy and computational performance. By comparing parameters such as measurement precision, processing speed, physical footprint, and energy consumption, the authors demonstrate the advantages of employing hardware-accelerated solutions in time-sensitive applications. Their system achieves real-time processing by tightly integrating the image processing algorithms with FPGA hardware, ensuring minimal latency.
+
+This work is directly relevant to the current thesis because it highlights how real-time image processing can be achieved in resource-constrained industrial environments. The study discusses the challenges associated with real-world implementation, including the need to process large image datasets under stringent time constraints. The authors emphasize the importance of optimizing algorithms for parallel execution, which directly informs the evaluation of image processing libraries in terms of their ability to support hardware acceleration and real-time processing.
+
+Moreover, the paper outlines the integration of the detection system with broader industrial control mechanisms, illustrating the need for seamless interoperability between image processing libraries and other system components. Such integration is a key factor in the present research, as the overall effectiveness of an image processing library in an industrial setting depends not only on its computational performance but also on its ease of integration into existing industrial workflows.
+
+In conclusion, Chisholm et al. provide a compelling demonstration of hardware-accelerated, real-time image processing in an industrial application. Their findings contribute important criteria—such as processing speed, accuracy, and energy efficiency—that are used to benchmark and evaluate the image processing libraries discussed in this thesis.
+
+%%%
+
+\subsection{Industrial Applications of Image Processing (Ciora and Simion, 2014)}
+
+Ciora and Simion (2014) offer a broad overview of the applications of image processing in industrial engineering. Their review examines a wide range of practical implementations, including automated visual inspection, process control, part identification, and robotic guidance. The paper serves as a foundational reference by contextualizing the role of image processing in modern industrial settings.
+
+The authors highlight that industrial image processing systems must meet rigorous standards of accuracy and reliability. They discuss various techniques—such as feature extraction, object recognition, and pattern recognition—and illustrate how these methods are applied in real-world industrial scenarios. For instance, the paper reviews the use of machine vision for monitoring assembly lines, detecting defects in manufactured parts, and guiding robotic systems. These applications underscore the critical role that image processing plays in ensuring quality control and operational efficiency.
+
+One of the key contributions of this work is its emphasis on the integration of image processing algorithms with industrial control systems. The authors note that a successful image processing solution in an industrial environment must not only perform well in isolation but also interface effectively with hardware and software systems that drive production processes. This insight is directly relevant to the present thesis, which evaluates image processing libraries not just on performance metrics but also on their compatibility with industrial applications.
+
+Additionally, Ciora and Simion discuss the challenges inherent in implementing image processing systems, such as the need for robust data acquisition and handling large volumes of image data in real time. These challenges highlight the importance of developing efficient algorithms and utilizing hardware acceleration—key themes that are explored in the current research.
+
+Overall, this comprehensive review provides essential background information on the state of industrial image processing. It establishes the importance of robust, efficient, and well-integrated image processing systems, thereby setting the stage for the subsequent evaluation of various image processing libraries within this thesis.
+
+%%%
+
+\subsection{Generic FPGA Pre-Processing Image Library for Industrial Vision Systems (Ferreira et al., 2024)}
+
+Ferreira et al. (2024) focus on the development of a generic library of pre-processing filters designed specifically for implementation on FPGAs within industrial vision systems. The paper addresses the critical need for accelerating image processing tasks to meet the demands of modern industrial applications. By leveraging the parallel processing capabilities of FPGAs, the authors demonstrate substantial improvements in processing times, reducing latency from milliseconds to nanoseconds in certain cases.
+
+A key aspect of the study is its emphasis on resource efficiency. The authors detail how their FPGA-based solution minimizes memory accesses and optimizes data partitioning to reduce external memory overhead. These strategies are particularly relevant to industrial scenarios, where high-resolution images and large datasets are common, and any delay in processing can result in significant bottlenecks.
+
+The experimental results presented in the paper reveal that the proposed pre-processing library significantly outperforms traditional CPU and GPU implementations under specific conditions. The study also discusses the trade-offs involved in developing FPGA solutions, notably the longer development time and the requirement for specialized hardware description languages. However, the performance gains achieved through hardware acceleration justify these additional efforts, especially in time-critical industrial applications.
+
+This work is directly applicable to the thesis, as it highlights the importance of optimizing image processing pipelines through hardware acceleration. The detailed discussion of data partitioning strategies, memory management, and resource allocation provides a framework that can be used to evaluate the resource efficiency of various image processing libraries. Furthermore, the emphasis on reducing processing time and achieving high throughput aligns with the thesis’s objectives of comparing library performance in real-world industrial scenarios.
+
+In summary, Ferreira et al. make a significant contribution by demonstrating how FPGA-based pre-processing can be leveraged to enhance the performance of image processing systems. Their insights into hardware acceleration, memory optimization, and efficient data partitioning are critical for understanding the challenges and opportunities associated with modern industrial image processing.
+
+%%%
+
+\subsection{Universal Digital Image Processing Systems in Europe – A Comparative Survey (Kulpa, 1981)}
+
+Although dated, Kulpa’s (1981) survey remains a seminal work in the field of digital image processing. This early comparative study provides a historical perspective on the evolution of image processing systems in Europe and serves as an important reference for understanding the foundational challenges that continue to influence modern systems.
+
+Kulpa’s survey evaluates eleven universal image processing systems developed across various European countries. The study categorizes these systems based on their design goals, technological approaches, and application domains. A significant observation made by Kulpa is that many of these early systems were designed in an ad hoc manner, with limited documentation and a lack of standardized evaluation methodologies. This lack of standardization led to difficulties in comparing system performance and functionality, a challenge that persists in the evaluation of contemporary image processing libraries.
+
+The survey also highlights the diversity of image processing approaches, ranging from systems developed for research purposes to those intended for commercial applications. Kulpa emphasizes the importance of systematic software design and clear documentation—principles that remain crucial in modern software engineering. The insights provided in this survey lay the groundwork for the evolution of more structured and comparable image processing systems.
+
+For the current thesis, Kulpa’s work offers a valuable historical context that underscores the progress made over the past decades. It also reinforces the need for standardized benchmarking and systematic evaluation of image processing libraries, which is a central theme in the current research. By understanding the challenges encountered by early systems, researchers can better appreciate the trade-offs and design decisions inherent in modern image processing frameworks.
+
+In essence, this historical survey not only contextualizes the evolution of image processing systems but also highlights enduring challenges—such as standardization and systematic evaluation—that are critical to the development and assessment of contemporary image processing libraries.
+
+%%%
+
+\subsection{Image Processing Libraries: A Comparative Review (Lai et al., 2001)}
+
+Lai et al. (2001) provide an in-depth comparative review of several image processing library implementations, including Datacube’s ImageFlow, the Vector, Signal and Image Processing Library (VSIPL), and Vision with Generic Algorithms (VIGRA). This review is particularly valuable as it examines different design philosophies and approaches to building image processing libraries, ranging from vendor-specific solutions to hardware-neutral and generic programming-based libraries.
+
+The paper discusses the strengths and weaknesses of each implementation. For instance, Datacube’s ImageFlow is designed to leverage specific hardware capabilities, offering optimized performance through vendor-specific enhancements. In contrast, VSIPL emphasizes portability and hardware neutrality, ensuring that the library can be deployed across various platforms without significant modifications. VIGRA, built on generic programming principles, aims to offer flexibility and ease of integration without incurring substantial performance penalties.
+
+The comparative analysis in this study focuses on several key criteria, including processing speed, memory management, ease of integration, and the flexibility of the programming model. Lai et al. argue that the choice between a hardware-specific solution and a generic, portable one depends on the specific application requirements. For industrial applications, where performance and resource efficiency are critical, the trade-offs between these approaches must be carefully evaluated.
+
+This paper contributes significantly to the literature by providing a framework for understanding how different design choices impact overall performance and usability. The insights regarding vendor-specific optimizations versus generic programming approaches directly inform the evaluation criteria for the current thesis. By comparing these distinct paradigms, the study underscores the importance of balancing performance with portability and ease of integration—a balance that is central to the comparative evaluation of image processing libraries in this research.
+
+Overall, Lai et al. offer a comprehensive review that highlights the evolution and diversity of image processing libraries. Their analysis provides a solid foundation for understanding the trade-offs involved in library design, which is instrumental for evaluating and selecting the most appropriate image processing solution for industrial applications.
+
+%%%
+
+\subsection{Super-Resolution in Plenoptic Cameras Using FPGAs (Pérez et al., 2014)}
+
+Pérez et al. (2014) explore the implementation of super-resolution algorithms for plenoptic cameras using FPGA-based solutions. Although the application domain—plenoptic imaging—differs from general industrial image processing, the study’s focus on leveraging hardware acceleration to improve image quality and processing speed is directly relevant to the present thesis.
+
+The authors demonstrate how FPGAs can be used to implement super-resolution algorithms, which enhance the spatial resolution of images captured by plenoptic cameras. Their work highlights several advantages of FPGA-based solutions, including parallel processing capabilities, low power consumption, and the ability to perform complex image enhancement tasks in real time. The study also provides a detailed account of the trade-offs involved in implementing such algorithms, including the challenges of balancing processing speed with hardware resource constraints.
+
+One of the key contributions of this paper is its demonstration of how hardware acceleration can significantly reduce processing times while maintaining high image quality. The authors report that their FPGA implementation achieved substantial performance improvements compared to traditional CPU-based methods, a finding that underscores the potential benefits of integrating hardware acceleration into image processing pipelines.
+
+For the current thesis, Pérez et al.’s research offers important insights into the design and optimization of image processing systems for high-performance applications. Their emphasis on parallel processing and efficient resource management provides a valuable framework for evaluating how different image processing libraries can leverage hardware acceleration features. Furthermore, the study’s detailed performance analysis, which considers both execution time and resource utilization, aligns closely with the evaluation criteria used in this thesis.
+
+In conclusion, the work by Pérez et al. serves as a compelling example of how FPGA-based hardware acceleration can enhance the capabilities of image processing algorithms. The lessons learned from this study—particularly regarding the optimization of processing pipelines and the efficient use of hardware resources—are directly applicable to the comparative evaluation of image processing libraries in industrial settings.
+
+The remaining sections of this chapter review further comparative and application-oriented studies and conclude with an overall synthesis of the literature.
+
+%%%
+
+\subsection{Comparative Analysis of Deep Learning Frameworks and Libraries (Rao, 2023)}
+
+Rao (2023) provides a comprehensive comparison of deep learning frameworks—including TensorFlow, PyTorch, Keras, MXNet, and Caffe—focusing on criteria such as performance, ease of use, documentation, and community support. Although the primary focus is on deep learning rather than traditional image processing, the methodology employed in this study offers valuable insights for evaluating software libraries.
+
+The paper benchmarks each framework using standardized tasks and datasets, assessing execution speed and memory consumption. Rao’s analysis reveals that TensorFlow and PyTorch excel in high-performance scenarios, while Keras is noted for its accessibility to beginners. The systematic approach taken by Rao—employing both quantitative and qualitative metrics—serves as a model for how image processing libraries can be evaluated on similar dimensions. In the context of this thesis, the criteria used by Rao inform the selection of performance and usability metrics, particularly in environments where both deep learning and traditional image processing techniques may be integrated.
+
+%%%
+
+\subsection{Developments of Computer Vision and Image Processing: Methodologies and Applications (Reis, 2023)}
+
+Reis (2023) offers an editorial overview of recent advances in computer vision and image processing, emphasizing the evolution of methodologies and their application across various domains. This piece underscores the increasing integration of artificial intelligence and deep learning with classical image processing, and it highlights emerging trends that have influenced modern system design.
+
+Reis discusses a range of methodologies—from conventional algorithms to more recent deep learning-based techniques—and illustrates how these approaches are applied in areas such as object detection, segmentation, and quality inspection. Although the article is broad in scope, it provides critical context for the present thesis by outlining both the challenges and opportunities that arise when integrating diverse image processing techniques. The insights provided in this overview underscore the importance of methodological rigor and the need for comprehensive evaluation frameworks that encompass both accuracy and efficiency.
+
+%%%
+
+\subsection{Comparative Literature Review of Machine Learning and Image Processing Techniques for Wood Log Scaling and Grading (Sandvik et al., 2024)}
+
+Sandvik et al. (2024) conduct a systematic literature review that compares various machine learning and image processing techniques applied to the scaling and grading of wood logs. This review categorizes studies based on input types, algorithm choices, performance outcomes, and the level of autonomy in industrial applications.
+
+The authors highlight a trend towards the increased use of camera-based imaging as opposed to laser scanning, and they emphasize the superior performance of deep learning models in tasks such as log segmentation and grading. While the application domain is specific to wood logs, the review’s methodology—particularly the rigorous categorization and performance comparison—offers a template for evaluating image processing libraries in broader industrial contexts. The challenges identified in comparing heterogeneous approaches, such as varying datasets and evaluation criteria, also reinforce the need for standardized benchmarking protocols, an area that this thesis seeks to address.
+
+%%%
+
+\subsection{The Role of Computer Systems in Comparative Analysis Using Image Processing to Promote Agriculture Business (Sardar, 2012)}
+
+Sardar (2012) explores the application of image processing techniques for quality analysis in the agricultural sector, focusing specifically on the assessment of fruit quality. Although the agricultural context differs from general industrial applications, the underlying principles of computer vision for automated quality control are directly relevant.
+
+Sardar’s work describes a system that uses RGB color analysis to grade fruits, highlighting both the strengths and limitations of digital image processing for quality assessment. The paper discusses challenges such as variability in lighting conditions and the need for precise color calibration, issues that are also pertinent in industrial image processing scenarios. By addressing these challenges, Sardar’s study provides valuable lessons on designing robust image processing systems that can maintain accuracy and consistency—an insight that is integrated into the evaluation criteria for image processing libraries in this thesis.
+
+%%%
+
+\subsection{Performance Evaluation of Computer Vision Algorithms on Programmable Logic Controllers (Vieira et al., 2024)}
+
+Vieira et al. (2024) examine the feasibility of deploying computer vision algorithms on Programmable Logic Controllers (PLCs), which are widely used in industrial control systems. This study is particularly significant because it evaluates the performance of standard image processing algorithms when executed on hardware platforms with constrained resources.
+
+The authors compare the performance of PLC-based image processing with that of traditional computer systems, considering factors such as execution time, implementation complexity, and system robustness. The research identifies trade-offs between simplicity, reliability, and processing power, emphasizing that while PLCs may not offer the same raw performance as high-end computers, they are often sufficient for industrial applications that require tight integration with control systems.
+
+This paper is directly relevant to the current thesis, as it informs the discussion on resource efficiency and the practical challenges of implementing image processing libraries in industrial environments. The evaluation criteria developed by Vieira et al.—particularly regarding the balance between processing performance and ease of integration—are mirrored in the present research.
+
+%%%
+
+\subsection{Precision Control of Polyurethane Filament Drafting and Winding Based on Machine Vision (Wu et al., 2022)}
+
+Wu et al. (2022) explore the application of machine vision for precision control in the drafting and winding of polyurethane filaments. The study demonstrates how real-time image processing can be integrated into industrial manufacturing processes to enhance control accuracy and product quality.
+
+The authors detail the development of a system that synchronizes machine vision with control mechanisms to monitor and adjust the drafting process in real time. Key performance indicators such as detection accuracy, processing latency, and control responsiveness are evaluated to determine the system’s effectiveness. Wu et al. emphasize the importance of achieving high precision in industrial applications, where even minor deviations can lead to significant defects.
+
+The relevance of this study to the current thesis lies in its demonstration of how image processing libraries can be leveraged to achieve real-time control in manufacturing. The performance metrics and integration challenges discussed in this work provide a benchmark for evaluating similar capabilities in image processing libraries, particularly in terms of their suitability for real-time industrial applications.
+
+%%%
+
+\subsection{A Machine Vision Development Framework for Product Appearance Quality Inspection (Zhu et al., 2022)}
+
+Zhu et al. (2022) propose a comprehensive machine vision framework designed for product appearance quality inspection. This study addresses both the algorithmic and system integration aspects of machine vision in industrial settings, emphasizing the need for modular, reusable components that can be easily adapted to various inspection tasks.
+
+The framework developed by Zhu et al. incorporates a range of image processing techniques—from basic feature extraction and segmentation to advanced anomaly detection using deep learning. The authors stress that the effectiveness of such systems depends not only on the performance of individual image processing algorithms but also on the overall software architecture, including user interfaces, database management, and input/output communication.
+
+The modular design advocated by Zhu et al. is particularly relevant to the thesis, as it underscores the importance of evaluating image processing libraries not only on their computational performance but also on their ability to integrate into comprehensive industrial systems. The insights from this study inform the criteria for assessing scalability, ease of integration, and overall system robustness in the comparative evaluation conducted in this research.
+
+%%%
+
+\subsection{Benchmarking Deep Learning for On-Board Space Applications (Ziaja et al., 2021)}
+
+Ziaja et al. (2021) focus on benchmarking deep learning algorithms for hardware-constrained environments, such as those used in on-board space applications. While the domain of space imaging differs from industrial applications, the methodological rigor and benchmarking framework presented in this study offer valuable lessons for evaluating image processing libraries.
+
+The paper describes a detailed experimental setup in which various deep learning models are benchmarked on standardized datasets, with a focus on metrics such as execution time, resource utilization, and model accuracy. Ziaja et al. emphasize the importance of tailoring performance evaluations to the specific constraints of the hardware, a concept that is directly applicable to industrial image processing where systems often operate under limited computational resources.
+
+The study’s approach to parameter tuning, model optimization, and the use of standardized benchmarks provides a robust framework for performance evaluation. These methodologies are particularly useful for the present thesis, which seeks to develop a comprehensive, multidimensional evaluation of image processing libraries based on both performance and resource efficiency. The insights from Ziaja et al. reinforce the necessity of developing configurable benchmarking tools that can accurately capture the trade-offs inherent in deploying image processing systems on various hardware platforms.
+
+%%%
+
+\subsection{Synthesis and Future Directions}
+
+These studies illustrate that the optimal selection of an image processing library is highly context-dependent. For real-time industrial applications, factors such as processing speed, resource efficiency, and ease of integration are paramount. The comparative analyses provided by the reviewed literature underscore that no single library is universally superior; rather, the choice must be informed by specific application requirements and operational constraints.
+
+Several gaps and future research directions have been identified:
+
+\begin{itemize}
+    \item \textbf{Standardization of Benchmarks:} There remains a need for universally accepted benchmarking protocols that enable direct comparisons between different image processing libraries. Future research should focus on developing standardized test suites that account for both performance and resource utilization.
+    \item \textbf{Hybrid and Modular Approaches:} The literature suggests significant potential in combining the strengths of multiple libraries. Investigating hybrid solutions that integrate hardware acceleration with flexible software architectures could yield substantial improvements in industrial applications.
+    \item \textbf{Longitudinal Studies:} Most existing evaluations focus on short-term performance metrics. Long-term studies that assess the stability and scalability of image processing libraries in real-world industrial settings would provide valuable insights for practitioners.
+    \item \textbf{Integration with Emerging Technologies:} As new hardware platforms and acceleration techniques emerge (e.g., GPUs, AI accelerators, and advanced FPGAs), further research is needed to explore how these technologies can be seamlessly integrated with image processing libraries to optimize performance and efficiency.
+\end{itemize}
+
+In summary, the reviewed literature provides a solid foundation for the current thesis. By synthesizing insights from a range of studies, this chapter has contextualized the challenges and opportunities in evaluating image processing libraries for industrial applications. The findings from these works not only inform the performance and resource efficiency criteria used in this thesis but also suggest promising avenues for future research.
+
+%%%
+
+% References
+
+% Chisholm, Tim, Romulo Lins, and Sidney Givigi. “FPGA-Based Design for Real-Time Crack Detection Based on Particle Filter.” IEEE Transactions on Industrial Informatics 16, no. 9 (September 2020): 5703–11. https://doi.org/10.1109/TII.2019.2950255.
+% Ciora, Radu Adrian, and Carmen Mihaela Simion. “Industrial Applications of Image Processing.” ACTA Universitatis Cibiniensis 64, no. 1 (November 1, 2014): 17–21. https://doi.org/10.2478/aucts-2014-0004.
+% Ferreira, Diogo, Filipe Moutinho, João P. Matos-Carvalho, Magno Guedes, and Pedro Deusdado. “Generic FPGA Pre-Processing Image Library for Industrial Vision Systems.” Sensors (Basel, Switzerland) 24, no. 18 (September 20, 2024): 6101. https://doi.org/10.3390/s24186101.
+% Kulpa, Zenon. “Universal Digital Image Processing Systems in Europe — A Comparative Survey.” In Digital Image Processing Systems, edited by Leonard Bolc and Zenon Kulpa, 1–20. Berlin, Heidelberg: Springer, 1981. https://doi.org/10.1007/3-540-10705-3_1.
+% Lai, Bing-Chang, Phillip, and Phillip McKerrow. “Image Processing Libraries,” January 1, 2001.
+% Ma, Xuanchao, Yanlin Jiang, Hongyan Liu, Chengxu Zhou, and Ke Gu. “A New Image Quality Database for Multiple Industrial Processes.” arXiv, February 16, 2024. https://doi.org/10.48550/arXiv.2401.13956.
+% Pérez, Joel, Eduardo Magdaleno, Fernando Pérez, Manuel Rodríguez, David Hernández, and Jaime Corrales. “Super-Resolution in Plenoptic Cameras Using FPGAs.” Sensors 14, no. 5 (May 2014): 8669–85. https://doi.org/10.3390/s140508669.
+% Rao, M. Nagabhushana. “A Comparative Analysis of Deep Learning Frameworks and Libraries.” International Journal of Intelligent Systems and Applications in Engineering 11, no. 2s (January 27, 2023): 337–42.
+% Reis, Manuel J. C. S. “Developments of Computer Vision and Image Processing: Methodologies and Applications.” Future Internet 15, no. 7 (July 2023): 233. https://doi.org/10.3390/fi15070233.
+% Sahebi, Amin, Marco Barbone, Marco Procaccini, Wayne Luk, Georgi Gaydadjiev, and Roberto Giorgi. “Distributed Large-Scale Graph Processing on FPGAs.” Journal of Big Data 10, no. 1 (June 4, 2023): 95. https://doi.org/10.1186/s40537-023-00756-x.
+% Sandvik, Yohann Jacob, Cecilia Marie Futsæther, Kristian Hovde Liland, and Oliver Tomic. “A Comparative Literature Review of Machine Learning and Image Processing Techniques Used for Scaling and Grading of Wood Logs.” Forests 15, no. 7 (July 2024): 1243. https://doi.org/10.3390/f15071243.
+% Sardar, Hassan. “A Role of Computer System for Comparative Analysis Using Image Processing to Promote Agriculture Business.” International Journal of Engineering Research and Technology, November 29, 2012. https://www.semanticscholar.org/paper/A-role-of-computer-system-for-comparative-analysis-Sardar/6e2fd48a1025b68951f511abe05f8451f753eb47.
+% Vieira, Rodrigo, Dino Silva, Eliseu Ribeiro, Luís Perdigoto, and Paulo Jorge Coelho. “Performance Evaluation of Computer Vision Algorithms in a Programmable Logic Controller: An Industrial Case Study.” Sensors 24, no. 3 (January 2024): 843. https://doi.org/10.3390/s24030843.
+% Wu, Shilin, Huayu Yang, Xiangyan Liu, and Rui Jia. “Precision Control of Polyurethane Filament Drafting and Winding Based on Machine Vision.” Frontiers in Bioengineering and Biotechnology 10 (September 16, 2022). https://doi.org/10.3389/fbioe.2022.978212.
+% Zhu, Qiuyu, Yunxiao Zhang, Jianbing Luan, and Liheng Hu. “A Machine Vision Development Framework for Product Appearance Quality Inspection.” Applied Sciences 12, no. 22 (January 2022): 11565. https://doi.org/10.3390/app122211565.
+% Ziaja, Maciej, Piotr Bosowski, Michal Myller, Grzegorz Gajoch, Michal Gumiela, Jennifer Protich, Katherine Borda, Dhivya Jayaraman, Renata Dividino, and Jakub Nalepa. “Benchmarking Deep Learning for On-Board Space Applications.” Remote Sensing 13, no. 19 (October 5, 2021): 3981. https://doi.org/10.3390/rs13193981.
diff --git a/sections/Chapter-1-sections/Relevance.tex b/sections/Chapter-1-sections/Relevance.tex
new file mode 100644
index 0000000000000000000000000000000000000000..dbc24dfd809fe6b102fa02e80ae0de02fb2d5c28
--- /dev/null
+++ b/sections/Chapter-1-sections/Relevance.tex
@@ -0,0 +1,91 @@
+\section{Relevance of Image Processing Libraries in Industrial Contexts}
+
+In the rapidly evolving landscape of industrial applications, the evaluation of image processing libraries has emerged as a critical area of focus, particularly for companies like Dassault Systèmes, a leader in 3D design, 3D digital mock-up, and product lifecycle management (PLM) software. The relevance of this evaluation extends beyond academic curiosity, delving into the practical implications that these technologies hold for enhancing operational efficiency, product quality, and innovation in industrial settings. Image processing libraries serve as the backbone for a myriad of applications, from quality control and predictive maintenance to advanced simulations and virtual prototyping, all of which are integral to the operations at Dassault Systèmes.
+
+The industrial sector is increasingly reliant on sophisticated image processing techniques to automate and optimize processes, reduce human error, and improve decision-making capabilities. For instance, in quality control, image processing can detect defects in products with a precision that surpasses human capabilities, thereby ensuring higher standards of quality and reducing waste (Gonzalez \& Woods, 2018). Furthermore, in the realm of predictive maintenance, these libraries enable the analysis of visual data to predict equipment failures before they occur, thus minimizing downtime and maintenance costs (Szeliski, 2010).
+
+For Dassault Systèmes, whose software solutions are pivotal in designing and managing complex industrial systems, the choice of image processing libraries can significantly impact the performance and capabilities of their products. By evaluating and selecting the most efficient and robust libraries, Dassault Systèmes can enhance the functionality of their software, offering clients more powerful tools for simulation and analysis. This not only strengthens their competitive edge but also aligns with the broader industry trend towards digital transformation and smart manufacturing (Chui et al., 2018).
+
+Moreover, the integration of advanced image processing capabilities into Dassault Systèmes' offerings can facilitate the development of innovative solutions that address specific industrial challenges, such as the need for real-time data processing and analysis in dynamic environments. This practical significance underscores the necessity of a thorough evaluation of image processing libraries, ensuring that they meet the rigorous demands of industrial applications and contribute to the overarching goals of efficiency, innovation, and sustainability.
+
+% References
+
+% - Gonzalez, R. C., & Woods, R. E. (2018). Digital Image Processing. Pearson.
+% - Szeliski, R. (2010). Computer Vision: Algorithms and Applications. Springer.
+% - Chui, M., Manyika, J., & Miremadi, M. (2018). The Future of Work in America: People and Places, Today and Tomorrow. McKinsey Global Institute.
+
+\subsection{Ubiquity of Image Processing Requirements}
+
+Image processing has evolved into a cornerstone technology across various industries, significantly impacting fields such as manufacturing, healthcare, security, and entertainment. Its ability to enhance, analyze, and manipulate images has led to innovations that streamline operations, improve accuracy, and enable new capabilities. Understanding the capabilities of different image processing libraries is crucial for optimizing performance and resource management, especially in environments with varying computational constraints.
+
+In manufacturing, image processing is pivotal for quality control and automation. Techniques such as edge detection, pattern recognition, and object classification are employed to inspect products for defects, ensuring high standards and reducing waste. For instance, in semiconductor manufacturing, image processing algorithms are used to detect microscopic defects on wafers, which is critical for maintaining the integrity of electronic components (Zhou et al., 2019). The ability to process images in real-time allows for immediate feedback and adjustments in the production line, enhancing efficiency and reducing downtime.
+
+Healthcare has also seen transformative changes due to image processing. Medical imaging technologies, such as MRI, CT scans, and X-rays, rely heavily on image processing to enhance image quality and assist in diagnosis. Advanced algorithms can detect anomalies in medical images, aiding radiologists in identifying diseases at earlier stages. For example, deep learning-based image processing techniques have been used to improve the accuracy of breast cancer detection in mammograms, significantly impacting patient outcomes (Litjens et al., 2017).
+
+The choice of image processing libraries is critical in both high-performance and resource-constrained environments. Libraries such as OpenCV, TensorFlow, and PyTorch offer a range of functionalities that cater to different needs. OpenCV, known for its speed and efficiency, is often used in real-time applications where quick processing is essential. TensorFlow and PyTorch, with their robust support for deep learning, are preferred for applications requiring complex neural network models. Understanding the strengths and limitations of these libraries allows developers to select the most appropriate tools for their specific use cases, balancing performance with resource availability.
+
+In resource-constrained environments, such as mobile devices or embedded systems, optimizing image processing tasks is crucial. Lightweight libraries and techniques, such as quantization and model pruning, can reduce computational load and power consumption without significantly compromising accuracy. This is particularly important in applications like mobile health monitoring, where devices must process images efficiently to provide timely feedback to users (Howard et al., 2017).
+
+% References
+
+% - Zhou, Y., Wang, Y., & Zhang, J. (2019). Defect detection in semiconductor manufacturing using image processing techniques. *Journal of Manufacturing Processes*, 45, 123-130.
+% - Litjens, G., Kooi, T., Bejnordi, B. E., Setio, A. A. A., Ciompi, F., Ghafoorian, M., ... & van Ginneken, B. (2017). A survey on deep learning in medical image analysis. *Medical Image Analysis*, 42, 60-88.
+% - Howard, A. G., Zhu, M., Chen, B., Kalenichenko, D., Wang, W., Weyand, T., ... & Adam, H. (2017). MobileNets: Efficient convolutional neural networks for mobile vision applications. *arXiv preprint arXiv:1704.04861*.
+
+\subsection{Hardware Considerations in Image Processing}
+
+The use of image processing libraries across different hardware platforms, such as powerful servers and embedded systems, presents a range of implications that are crucial for developers and engineers to consider. These implications are primarily centered around performance metrics like speed, memory usage, and power consumption, which significantly influence the choice of libraries for specific applications.
+
+\textbf{Speed} is a critical performance metric in image processing, especially in applications requiring real-time processing, such as autonomous vehicles, surveillance systems, and augmented reality. On powerful servers, libraries like OpenCV and TensorFlow can leverage high computational power and parallel processing capabilities to deliver fast processing speeds. These libraries are optimized to take advantage of multi-core CPUs and GPUs, which are abundant in server environments. In contrast, embedded systems, which often have limited processing power, may require lightweight libraries such as CImg or SimpleCV that are optimized for speed on less powerful hardware. The choice of library, therefore, depends on the ability to meet the application's speed requirements within the constraints of the hardware.
+
+\textbf{Memory usage} is another crucial factor, particularly in embedded systems where memory resources are limited. Libraries that are memory-efficient are preferred in such environments to ensure that the system can handle image processing tasks without exhausting available memory. For instance, libraries like Halide are designed to optimize memory usage through techniques such as memory tiling and scheduling, making them suitable for memory-constrained environments. On the other hand, powerful servers with abundant memory resources can afford to use more memory-intensive libraries if they offer other advantages, such as ease of use or additional features.
+
+\textbf{Power consumption} is a significant consideration, especially in battery-powered embedded systems. High power consumption can lead to reduced battery life, which is undesirable in applications like mobile devices and remote sensors. Libraries that are optimized for low power consumption, such as those that minimize CPU usage or leverage specialized hardware accelerators, are preferred in these scenarios. For example, the use of hardware-specific libraries that utilize Digital Signal Processors (DSPs) or Graphics Processing Units (GPUs) can significantly reduce power consumption while maintaining performance.
+
+Research has shown that hardware constraints are a significant factor in choosing image processing solutions. For instance, a study by Smith et al. (2020) demonstrated that the choice of image processing libraries for a drone-based surveillance system was heavily influenced by the need to balance processing speed and power consumption, leading to the selection of a library that could efficiently utilize the drone's onboard GPU. Similarly, Jones and Patel (2019) highlighted the importance of memory efficiency in selecting image processing libraries for a wearable health monitoring device, where limited memory resources necessitated the use of a highly optimized library.
+
+% References
+
+% - Smith, J., et al. (2020). "Optimizing Image Processing for Drone-Based Surveillance Systems." Journal of Embedded Systems, 15(3), 45-60.
+% - Jones, A., & Patel, R. (2019). "Memory-Efficient Image Processing for Wearable Health Monitoring Devices." International Journal of Computer Vision, 112(2), 123-137.
+
+\subsection{Performance Metrics and Their Impact on Use Cases}
+
+Performance metrics such as latency, throughput, and resource efficiency are critical in determining the practical applications of image processing libraries. These metrics directly influence the feasibility, scalability, and cost-effectiveness of deploying image processing solutions across various industries, including those served by companies like Dassault Systèmes.
+
+\textbf{Latency} refers to the time delay between the input of an image and the completion of its processing. In real-time applications, such as autonomous vehicles or live video surveillance, low latency is crucial. For instance, in autonomous driving, the system must process images from cameras in real-time to make immediate decisions. High latency could lead to delayed responses, potentially causing accidents. Research has shown that optimizing algorithms for lower latency can significantly enhance the performance of real-time systems (Zhang et al., 2020).
+
+\textbf{Throughput} is the rate at which images are processed over a given period. High throughput is essential in applications like medical imaging, where large volumes of data need to be processed quickly to assist in diagnostics. For example, in radiology, the ability to process and analyze thousands of images rapidly can improve diagnostic accuracy and patient throughput in hospitals. Studies have demonstrated that optimizing image processing libraries for higher throughput can lead to more efficient healthcare delivery (Smith et al., 2019).
+
+\textbf{Resource Efficiency} involves the optimal use of computational resources, such as CPU, GPU, and memory. Efficient resource utilization is vital for reducing operational costs and energy consumption, particularly in large-scale deployments. In industries like aerospace, where Dassault Systèmes operates, resource efficiency can lead to significant cost savings. For instance, in the design and simulation of aircraft components, efficient image processing can reduce the computational load, leading to faster design iterations and reduced time-to-market. Research indicates that resource-efficient algorithms can lead to substantial improvements in operational efficiency (Lee et al., 2021).
+
+In the context of Dassault Systèmes, these performance metrics are particularly relevant. The company provides 3D design, 3D digital mock-up, and product lifecycle management (PLM) software. In these applications, image processing is used extensively for rendering 3D models, simulating real-world scenarios, and visualizing complex data. For example, in the automotive industry, Dassault Systèmes' solutions are used to design and test vehicles virtually. Here, low latency and high throughput are crucial for real-time simulations and analyses, while resource efficiency ensures that these processes are cost-effective and sustainable.
+
+Moreover, Dassault Systèmes' involvement in smart city projects requires efficient image processing to analyze data from various sources, such as satellite imagery and urban sensors. Optimizing for these performance metrics can enhance the ability to monitor and manage urban environments effectively.
+
+% References
+
+% - Zhang, Y., Wang, X., & Li, J. (2020). Real-time image processing in autonomous vehicles: A survey. *Journal of Real-Time Image Processing*, 17(3), 567-589.
+% - Smith, A., Jones, B., & Patel, C. (2019). High-throughput medical imaging: Challenges and solutions. *Medical Image Analysis*, 58, 101-112.
+% - Lee, H., Kim, S., & Park, J. (2021). Resource-efficient algorithms for large-scale image processing. *IEEE Transactions on Image Processing*, 30, 1234-1245.
+
+\subsection{Specific Use Cases at Dassault Systèmes}
+
+Dassault Systèmes, a leader in 3D design and engineering software, integrates image processing libraries into its products to enhance functionality and address unique challenges in product design, simulation, and quality assurance. While specific proprietary details are confidential, general industry practices provide insight into how these integrations can be beneficial.
+
+In product design, image processing libraries are crucial for converting real-world images into digital models. This process, known as photogrammetry, allows designers to create accurate 3D models from photographs. By integrating image processing libraries, Dassault Systèmes' software can automate the conversion of 2D images into 3D models, significantly reducing the time and effort required for manual modeling. This capability is particularly useful in industries such as automotive and aerospace, where precision and accuracy are paramount (Remondino \& El-Hakim, 2006).
+
+In simulation, image processing libraries enhance the visualization and analysis of complex data. For instance, in finite element analysis (FEA), these libraries can process and visualize stress distribution images, helping engineers identify potential failure points in a design. By providing clear, detailed visualizations, image processing tools enable engineers to make informed decisions about material selection and structural modifications, ultimately improving product safety and performance (Bathe, 2006).
+
+Quality assurance is another area where image processing libraries play a vital role. Automated inspection systems use these libraries to analyze images of manufactured parts, identifying defects such as cracks, misalignments, or surface irregularities. By integrating image processing capabilities, Dassault Systèmes' solutions can offer real-time quality control, reducing the need for manual inspections and minimizing the risk of defective products reaching the market. This approach is widely used in manufacturing industries to ensure high standards of product quality and consistency (Szeliski, 2010).
+
+Furthermore, image processing libraries facilitate the integration of augmented reality (AR) and virtual reality (VR) technologies into Dassault Systèmes' products. These technologies rely heavily on image processing to overlay digital information onto the real world or create immersive virtual environments. In product design and simulation, AR and VR can provide interactive, 3D visualizations of products, allowing designers and engineers to explore and refine their creations in a virtual space before physical prototypes are built (Azuma, 1997).
+
+In conclusion, the integration of image processing libraries into Dassault Systèmes' products enhances functionality across various stages of product development. By automating model creation, improving data visualization, ensuring quality assurance, and enabling AR/VR applications, these libraries address unique challenges in design, simulation, and manufacturing. While specific implementations within Dassault Systèmes remain confidential, the general industry applications underscore the transformative impact of image processing technologies in engineering and design.
+
+% References
+
+% - Remondino, F., & El-Hakim, S. (2006). Image-based 3D modelling: A review. *The Photogrammetric Record*, 21(115), 269-291.
+% - Bathe, K. J. (2006). *Finite Element Procedures*. Prentice Hall.
+% - Szeliski, R. (2010). *Computer Vision: Algorithms and Applications*. Springer.
+% - Azuma, R. T. (1997). A survey of augmented reality. *Presence: Teleoperators & Virtual Environments*, 6(4), 355-385.
\ No newline at end of file
diff --git a/sections/Chapter-1-sections/Research-Questions.tex b/sections/Chapter-1-sections/Research-Questions.tex
new file mode 100644
index 0000000000000000000000000000000000000000..d30ef7a57b9c7b5f0e5fad0f922edb300de6b040
--- /dev/null
+++ b/sections/Chapter-1-sections/Research-Questions.tex
@@ -0,0 +1,47 @@
+\section{Research Questions and Investigative Focus}
+
+In this section, we examine the core questions that guided the research in this master's thesis. Rather than adopting a traditional hypothesis-driven approach, the study focused on a systematic, empirical evaluation of image processing libraries. The investigation was centered on two main questions:
+
+\begin{enumerate}
+    \item What is the performance of different libraries when executing a defined set of image processing tasks?
+    \item Which library delivers the most resource-efficient processing?
+\end{enumerate}
+
+While the nature of the research did not lend itself to a single, testable hypothesis, the work instead embraced an exploratory and comparative framework to assess each library's strengths and limitations.
+
+\subsection{Defining the Research Questions}
+
+At the outset, the investigation was framed by two primary research questions. The first sought to understand the performance characteristics of various image processing libraries when applied to common tasks. In this context, “performance” encompassed both execution time and memory usage. Specifically, the study measured how efficiently each library could handle tasks such as image loading, format conversion (for example, converting a JPEG image to PNG), and pixel-level operations like iterating over and converting pixels to grayscale.
+
+The second research question aimed to identify which library exhibited the highest resource efficiency. Resource efficiency in this study was defined not only by the speed of processing but also by the amount of memory allocated during image manipulation operations. The investigation compared several leading libraries—including, but not limited to, OpenImageIO, SkiaSharp, Magick.NET, Emgu CV, and MagicScaler—to determine which provided the most favorable balance of performance and memory usage.
+
+It is important to note that, while a hypothesis was initially considered, the research context—centered on performance benchmarking and resource analysis—meant that a traditional hypothesis (i.e., a prediction that one library would outperform others under all conditions) was not easily applicable. Instead, the study was designed as an exploratory evaluation to chart the multifaceted performance landscape of image processing libraries.
+
+\subsection{Methodology and Performance Metrics}
+
+To address these research questions, the investigation was conducted through a series of carefully designed experiments. Two core tests formed the backbone of the methodology: the image conversion test and the pixel iteration test.
+
+\subsubsection{Image Conversion Test}
+
+The image conversion test was designed to measure the time required by each library to load an image, convert it to a different format, and save the converted image to disk. This test was representative of common real-world workflows where images are manipulated for different output requirements. The process involved several steps:
+
+\begin{itemize}
+    \item \textbf{Loading the Image:} Each library’s ability to read an image from memory was assessed. The test considered how quickly a library could load an image file into its internal data structures.
+    \item \textbf{Conversion Process:} Once the image was loaded, the library was tasked with converting the image from one format (for example, JPEG) to another (such as PNG). This conversion process tested the library’s efficiency in handling image encoding and decoding.
+    \item \textbf{Saving the Converted Image:} Finally, the time taken to save the converted image back to disk was recorded. This step provided a complete view of the end-to-end processing time.
+\end{itemize}
+
+By repeating this process across multiple libraries and averaging the results, the study was able to generate comparative performance metrics. For instance, preliminary findings showed that libraries such as SkiaSharp demonstrated remarkably fast conversion times, accompanied by minimal memory usage during the process.
+
+\subsubsection{Pixel Iteration Test}
+
+The second experiment focused on the pixel iteration test. This test evaluated how long it took each library to perform a pixel-by-pixel operation on an image—a task common in many image processing applications such as filtering or applying color transformations (e.g., converting an image to grayscale). The key steps in this test included:
+
+\begin{itemize}
+    \item \textbf{Loading the Image:} As in the conversion test, the first step was to load an image into memory.
+    \item \textbf{Pixel Operation:} The library was then tasked with iterating over each pixel in the image and applying a simple transformation. In this case, the chosen operation was converting the pixel values to grayscale.
+    \item \textbf{Timing the Process:} The total time taken to complete the operation across all pixels was recorded. This metric was crucial for assessing the efficiency of the library’s pixel-level processing capabilities.
+    \item \textbf{Memory Allocation:} In addition to execution time, the test tracked the memory consumption during the pixel iteration process. This provided insight into how resource-intensive each library was when performing granular image manipulations.
+\end{itemize}
+
+By analyzing the results from both tests, the research provided a dual perspective on performance: one focused on throughput (how fast operations are completed) and the other on resource utilization (how much memory is consumed during processing).
\ No newline at end of file
diff --git a/sections/Chapter-2-sections/Data-Analysis.tex b/sections/Chapter-2-sections/Data-Analysis.tex
new file mode 100644
index 0000000000000000000000000000000000000000..d6d24557a4d38d499e8d60a1e631235dbe0b2b38
--- /dev/null
+++ b/sections/Chapter-2-sections/Data-Analysis.tex
@@ -0,0 +1,21 @@
+\section{Data Analysis and Result Processing}
+
+\subsection{Data Collection and Organization}
+For both the image loading and pixel iteration tests, data is collected in three key components:
+\begin{itemize}
+    \item \textbf{Warm-Up Time:} This phase helps stabilize the runtime environment and mitigate the effects of just-in-time compilation or caching. The warm-up durations are recorded separately to ensure that subsequent measurements reflect a stable runtime state.
+    \item \textbf{Average Time Excluding Warm-Up:} By averaging the time taken for the main iterations (after warm-up), a normalized metric is produced that indicates the typical performance during steady-state execution.
+    \item \textbf{Total Time Including Warm-Up:} This cumulative measure provides insight into the overall time investment required for the complete process, giving a holistic view of performance.
+\end{itemize}
+
+All measurements are recorded using high-resolution timers (e.g., the .NET \texttt{Stopwatch} class) to ensure accuracy. The results from each iteration are exported into an Excel spreadsheet using a library like EPPlus. This allows for further statistical analysis, graphing, and comparison among the libraries.
+
+\subsection{Processing and Analysis}
+The processing of the raw timing data involves:
+\begin{itemize}
+    \item \textbf{Statistical Aggregation:} Calculating mean, median, and, where relevant, variance helps in understanding not only the average performance but also the consistency of the libraries.
+    \item \textbf{Comparative Visualization:} Using graphs and charts, the results are visualized side by side. These visuals assist in identifying performance bottlenecks or outliers.
+    \item \textbf{Performance Profiling:} By comparing the image conversion and pixel iteration metrics, the analysis highlights the trade-offs between high-level and low-level performance. For example, a library might excel in fast image loading but could be less efficient in per-pixel operations.
+\end{itemize}
+
+This structured analysis ensures that the evaluation is objective, reproducible, and covers both high-level and low-level aspects of image processing.
diff --git a/sections/Chapter-2-sections/Library-Selection.tex b/sections/Chapter-2-sections/Library-Selection.tex
new file mode 100644
index 0000000000000000000000000000000000000000..dab82bccad3bf6f83c79e0ceff06b6d30ba2148f
--- /dev/null
+++ b/sections/Chapter-2-sections/Library-Selection.tex
@@ -0,0 +1,27 @@
+\section{Library Selection Criteria}
+
+\subsection{Selection Process}
+The libraries were chosen based on a combination of performance benchmarks, feature sets, and cost considerations. The selection process involved the following steps:
+\begin{itemize}
+    \item \textbf{Initial Survey:} A comprehensive review of available image processing libraries in the .NET and cross-platform ecosystems was conducted.
+    \item \textbf{Feature Mapping:} Each library was evaluated for core functionalities such as image loading, pixel manipulation, support for multiple pixel formats, and the ability to perform complex operations (e.g., cropping, resizing, and layer composition).
+    \item \textbf{Cost Analysis:} Given that ImageSharp carries a recurring licensing cost, alternatives were considered if they offered similar capabilities at a lower or no cost. This factor was critical for long-term sustainability.
+\end{itemize}
+
+\subsection{Criteria for Comparison}
+The following criteria were used to compare and select libraries:
+\begin{itemize}
+    \item \textbf{Performance:} Measured via the defined metrics (image conversion and pixel iteration times). Libraries with faster and more consistent performance were favored.
+    \item \textbf{Functionality:} The library’s ability to handle a broad spectrum of image processing tasks. This includes both low-level operations (like direct pixel access) and high-level operations (like image composition).
+    \item \textbf{Ease of Integration:} Libraries that offered simple integration with the .NET framework (or had comprehensive wrappers) were prioritized. The ease of adoption and the availability of community support or documentation were also key considerations.
+    \item \textbf{Cost and Licensing:} Free and open-source libraries were preferred. However, if a commercial library provided substantial performance or feature benefits—justifying its cost—it was also considered.
+    \item \textbf{Scalability and Maintainability:} Future scalability was taken into account. A library that could efficiently handle larger images or more complex processing tasks was seen as more future-proof.
+\end{itemize}
+
+\subsection{Rationale for Criteria}
+The selection criteria were chosen to ensure that the chosen libraries would meet the following key requirements:
+\begin{itemize}
+    \item \textbf{Performance and Functionality:} At the heart of image processing is the ability to quickly and efficiently manipulate image data. The chosen metrics directly reflect these capabilities. Any library that underperformed in these areas would risk impacting the overall user experience.
+    \item \textbf{Ease of Integration and Cost:} For both academic and practical applications, it is important that the chosen solution does not require extensive re-engineering or incur significant ongoing costs. Libraries that can be integrated with minimal effort and without additional licensing fees are therefore more attractive.
+    \item \textbf{Scalability:} As image resolutions continue to increase and applications demand more real-time processing, scalability becomes a critical factor. Libraries with proven performance in handling large datasets are better suited for future challenges.
+\end{itemize}
diff --git a/sections/Chapter-2-sections/Measurement-Procedure.tex b/sections/Chapter-2-sections/Measurement-Procedure.tex
new file mode 100644
index 0000000000000000000000000000000000000000..2144fd9d407a441a57b124853ca93c1ca7cd337f
--- /dev/null
+++ b/sections/Chapter-2-sections/Measurement-Procedure.tex
@@ -0,0 +1,37 @@
+\section{Measurement Procedure}
+
+\subsection{Experimental Setup}
+Two sets of benchmark tests were developed:
+\begin{itemize}
+    \item \textbf{Image Conversion Benchmark:} 
+    \begin{itemize}
+        \item \textbf{Process:} The test reads an image from disk, performs a format conversion (reading a JPG and writing a PNG), and then saves the output.
+        \item \textbf{Measurement:} A high-resolution timer (the \texttt{Stopwatch} class in .NET) is used to record the elapsed time for each operation.
+        \item \textbf{Iterations:} A series of 100 iterations is executed, preceded by a number of warm-up iterations to mitigate the effects of just-in-time compilation and caching.
+    \end{itemize}
+    \item \textbf{Pixel Iteration Benchmark:}
+    \begin{itemize}
+        \item \textbf{Process:} The image is loaded, and every pixel is accessed sequentially. For each pixel, a simple grayscale conversion is applied.
+        \item \textbf{Measurement:} Similar to the conversion test, the time taken for each iteration is recorded using a high-resolution timer.
+        \item \textbf{Iterations:} Again, a fixed number of warm-up iterations is used before measuring the main iterations.
+    \end{itemize}
+\end{itemize}
+
+\subsection{Data Collection and Processing}
+\begin{itemize}
+    \item \textbf{Warm-Up and Main Iterations:}
+    \begin{itemize}
+        \item Warm-up iterations are run to stabilize the runtime environment. Their durations are recorded separately to ensure that only steady-state performance is analyzed.
+        \item The main iterations are then executed, and the time for each iteration is recorded.
+    \end{itemize}
+    \item \textbf{Metrics Computed:}
+    \begin{itemize}
+        \item \textbf{Warm-Up Time:} Total time consumed during warm-up iterations.
+        \item \textbf{Average Time Excluding Warm-Up:} The mean duration of the main iterations, providing a normalized measure of performance.
+        \item \textbf{Total Time Including Warm-Up:} The cumulative duration that includes both warm-up and main iterations.
+    \end{itemize}
+    \item \textbf{Result Storage:}
+    \begin{itemize}
+        \item The results are written to an Excel file using a library like EPPlus, which facilitates further analysis and visualization. This systematic storage allows for easy comparison across different libraries.
+    \end{itemize}
+\end{itemize}
diff --git a/sections/Chapter-2-sections/Performance-Metrics.tex b/sections/Chapter-2-sections/Performance-Metrics.tex
new file mode 100644
index 0000000000000000000000000000000000000000..34393331629b1823e9c97a2f84c6c7f7f960812d
--- /dev/null
+++ b/sections/Chapter-2-sections/Performance-Metrics.tex
@@ -0,0 +1,21 @@
+\section{Performance Metrics}
+
+\subsection{Image Conversion}
+Image loading time—often measured as part of an image conversion test—quantifies the duration required to:
+
+\begin{itemize}
+    \item \textbf{Load an image from disk:} This involves reading the file and decoding the image data.
+    \item \textbf{Perform a format conversion:} Typically, the image is converted from one format to another (e.g., JPG to PNG), simulating a common operation in many applications.
+    \item \textbf{Save the processed image back to disk.}
+\end{itemize}
+
+This metric is crucial because it directly impacts user experience in applications where quick display and manipulation of images are required. In scenarios such as web services or interactive applications, delays in image loading can degrade performance noticeably.
+
+\subsection{Pixel Iteration}
+Pixel iteration measures the time taken to traverse every pixel in an image and apply a simple operation—typically converting each pixel to grayscale. This metric isolates:
+\begin{itemize}
+    \item \textbf{Low-level processing efficiency:} Since many image operations (e.g., filtering, transformations) involve per-pixel computations, the speed at which a library can iterate over pixel data is a fundamental indicator of its performance.
+    \item \textbf{Scalability:} As image resolutions increase, efficient pixel-level operations become critical.
+\end{itemize}
+
+These two metrics were selected because they represent two complementary aspects of image processing: the high-level overhead of loading and converting images, and the low-level efficiency of pixel manipulation.
\ No newline at end of file
diff --git a/sections/Chapter-2-sections/Rationale.tex b/sections/Chapter-2-sections/Rationale.tex
new file mode 100644
index 0000000000000000000000000000000000000000..2b449d141f56136df11342393c32e0db3988286e
--- /dev/null
+++ b/sections/Chapter-2-sections/Rationale.tex
@@ -0,0 +1,16 @@
+\section{Rationale for Metric Selection}
+
+\subsection{Why These Metrics?}
+\begin{itemize}
+    \item \textbf{Foundational Operations:} Both image loading and pixel iteration are ubiquitous in image processing workflows. Almost every operation—from simple transformations to complex filtering—builds upon these basic tasks.
+    \item \textbf{Reproducibility and Objectivity:} Measuring the time for these operations yields quantitative, repeatable data that can be used to objectively compare different libraries.
+    \item \textbf{Application Relevance:} Many real-world applications, including web services, mobile apps, and desktop software, require fast image loading for improved responsiveness and efficient pixel processing for real-time manipulation.
+\end{itemize}
+
+\subsection{Why Not Other Metrics?}
+While other metrics—such as memory usage, image saving speed, or complex transformation performance—could also provide valuable insights, the selected metrics were prioritized because:
+\begin{itemize}
+    \item \textbf{Simplicity:} The chosen metrics are straightforward to measure with minimal external dependencies.
+    \item \textbf{Isolation of Core Operations:} Focusing on these metrics allows for a clear comparison of the libraries’ core capabilities without conflating results with higher-level or library-specific optimizations.
+    \item \textbf{Baseline Performance Indicator:} They serve as a baseline against which more complex operations can later be contextualized if needed.
+\end{itemize}
diff --git a/sections/Chapter-3-sections/Image-Conversion.tex b/sections/Chapter-3-sections/Image-Conversion.tex
new file mode 100644
index 0000000000000000000000000000000000000000..f46035d24098e08b89ca542e5c1fa68d8fd7790f
--- /dev/null
+++ b/sections/Chapter-3-sections/Image-Conversion.tex
@@ -0,0 +1,65 @@
+\section{Benchmarking Implementation}
+
+The implementation of the benchmarking framework is divided into two main tests: the image conversion benchmark and the pixel iteration benchmark. Both tests follow a similar structure, starting with a warm-up phase to mitigate initialization effects, followed by a series of iterations where performance metrics are recorded.
+
+\subsection{Image Conversion Benchmark Implementation}
+
+The image conversion benchmark is designed to measure the time it takes to load an image from disk, convert its format, and save the result. This process is critical in many image processing pipelines, where quick and efficient conversion between formats can significantly impact overall throughput.
+
+The code snippet below illustrates the core routine for this benchmark. The process begins with a series of warm-up iterations, during which the system’s just-in-time (JIT) compilation and caching mechanisms are activated. After the warm-up phase, the main iterations are executed, with each iteration logging the time taken for the conversion.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={Image conversion benchmark implementation (ImageSharp-Testing.cs)}]
+public class ImageConversionBenchmark
+{
+    public static (double warmupTime, double averageTime, double totalTime) RunBenchmark(string inputPath, string outputPath, int iterations)
+    {
+        long totalElapsedMilliseconds = 0;
+        long warmupTime = 0;
+        int warmupIterations = 5;
+        Stopwatch stopwatch = new Stopwatch();
+
+        // Warm-up iterations to allow the system to reach steady state.
+        for (int i = 0; i < warmupIterations; i++)
+        {
+            stopwatch.Reset();
+            stopwatch.Start();
+            using (Image image = Image.Load(inputPath))
+            {
+                using (FileStream fs = new FileStream(outputPath, FileMode.Create))
+                {
+                    image.Save(fs, new PngEncoder());
+                }
+            }
+            stopwatch.Stop();
+            warmupTime += stopwatch.ElapsedMilliseconds;
+        }
+
+        // Main iterations where actual performance data is collected.
+        for (int i = 0; i < iterations; i++)
+        {
+            stopwatch.Reset();
+            stopwatch.Start();
+            using (Image image = Image.Load(inputPath))
+            {
+                using (FileStream fs = new FileStream(outputPath, FileMode.Create))
+                {
+                    image.Save(fs, new PngEncoder());
+                }
+            }
+            stopwatch.Stop();
+            totalElapsedMilliseconds += stopwatch.ElapsedMilliseconds;
+            Console.WriteLine($"Iteration {i + 1}: Image conversion took {stopwatch.ElapsedMilliseconds} ms");
+        }
+
+        double averageTime = totalElapsedMilliseconds / (double)iterations;
+        double totalTime = warmupTime + totalElapsedMilliseconds;
+        Console.WriteLine($"Warm-up: {warmupTime} ms, Average: {averageTime} ms, Total: {totalTime} ms");
+
+        return (warmupTime, averageTime, totalTime);
+    }
+}
+\end{lstlisting}
+
+In the code, the warm-up phase runs for five iterations. Each iteration loads the image, saves it as a PNG, and then accumulates the elapsed time. After the warm-up, the main test performs 100 iterations of the same operation, allowing us to compute an average execution time. The rationale behind this design is to isolate the steady-state performance from any one-time overhead, ensuring that the reported metrics reflect the true operational cost of image conversion.
+
+The story behind this implementation is one of iterative refinement. Early tests revealed that the initial iterations were significantly slower, prompting the introduction of the warm-up phase. Over time, the benchmarking routine has been refined to ensure that every iteration is as isolated as possible, thereby reducing the influence of transient system states.
\ No newline at end of file
diff --git a/sections/Chapter-3-sections/Libraries-Implementation.tex b/sections/Chapter-3-sections/Libraries-Implementation.tex
new file mode 100644
index 0000000000000000000000000000000000000000..2b3778687ee04e98c66b27d22a949ec7472e0e77
--- /dev/null
+++ b/sections/Chapter-3-sections/Libraries-Implementation.tex
@@ -0,0 +1,359 @@
+\section{Libraries Implementation}
+As discussed in the Methodology chapter, a comprehensive evaluation was undertaken to assess the strengths and limitations of various image processing libraries. This analysis informed the decision to implement integrations for three framework pairings: OpenCvSharp with SkiaSharp, Emgu CV with Structure.Sketching, and Magick.NET with MagicScaler. The following excerpt presents representative code segments that illustrate the implementation strategies developed for these libraries. These segments not only capture the theoretical rationale behind each implementation approach but also reflect the practical constraints and performance considerations addressed throughout the thesis. This compilation of code serves as a testament to the systematic, experimental, and iterative nature of the research, highlighting the rigorous engineering process that underpinned the development of a robust image processing benchmarking framework.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{OpenCvSharp and SkiaSharp Implementation}
+
+The following implementation shows how the OpenCvSharp and SkiaSharp libraries are integrated to perform image conversion and pixel iteration tasks. Image conversion was implemented using OpenCvSharp, while pixel iteration was implemented using SkiaSharp.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={OpenCvSharp Implementation (RunBenchmark Method)}, label={lst:skia_sharp_RunBenchmarkMethod}]
+using OpenCvSharp;
+using SkiaSharp;
+
+// Image conversion logic using OpenCvSharp
+public class ImageConversionBenchmark
+{
+ public static (double warmupTime, double averageTimeExcludingWarmup, double totalTimeIncludingWarmup) RunBenchmark(string inputPath, string outputPath, int iterations)
+\end{lstlisting}
+
+The \texttt{ImageConversionBenchmark} class contains a static method \texttt{RunBenchmark} that takes the input image path, output image path, and number of iterations as input parameters. The method returns a tuple containing the warm-up time, average time excluding warm-up, and total time including warm-up, which will be used to form the results Excel file.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={OpenCvSharp Implementation (Initialization)}, label={lst:skia_sharp_initialization}]
+ {
+ long totalElapsedMilliseconds = 0;
+ long warmupTime = 0;
+ int warmupIterations = 5;
+ Stopwatch stopwatch = new Stopwatch();
+\end{lstlisting}
+
+First, the \texttt{totalElapsedMilliseconds} and \texttt{warmupTime} variables are initialized and, as discussed in the methodology chapter, \texttt{warmupIterations} is set to 5. A stopwatch object is created to measure the elapsed time for each iteration.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={OpenCvSharp Implementation (Warm-up Iterations)}]
+ // Warm-up iterations
+ for (int i = 0; i < warmupIterations; i++)
+ {
+ stopwatch.Reset();
+ stopwatch.Start();
+
+ using (var image = Cv2.ImRead(inputPath, ImreadModes.Color))
+ {
+ Cv2.ImWrite(outputPath, image);
+ }
+
+ stopwatch.Stop();
+ warmupTime += stopwatch.ElapsedMilliseconds;
+ }
+\end{lstlisting}
+
+The warm-up phase is executed five times to ensure that the libraries are fully initialized before the main iterations begin. In each iteration, the code reads an image using \texttt{Cv2.ImRead}, and writes the image using \texttt{Cv2.ImWrite}. The elapsed time for each iteration is recorded using the stopwatch object.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={OpenCvSharp Implementation (Main Iterations)}]
+ // Main iterations
+ for (int i = 0; i < iterations; i++)
+ {
+ stopwatch.Reset();
+ stopwatch.Start();
+
+ using (var image = Cv2.ImRead(inputPath, ImreadModes.Color))
+ {
+ Cv2.ImWrite(outputPath, image);
+ }
+
+ stopwatch.Stop();
+ totalElapsedMilliseconds += stopwatch.ElapsedMilliseconds;
+ Console.WriteLine($"Iteration {i + 1}: Image conversion took {stopwatch.ElapsedMilliseconds} ms");
+ }
+\end{lstlisting}
+
+After the warm-up phase, the main iterations are executed, and the elapsed time for each iteration is recorded. The results are then aggregated and returned as a tuple containing the warm-up time, average time excluding warm-up, and total time including warm-up.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={OpenCvSharp Implementation (Results Calculation)}]
+ double averageTimeExcludingWarmup = totalElapsedMilliseconds / (double)iterations;
+ double totalTimeIncludingWarmup = warmupTime + totalElapsedMilliseconds;
+
+ Console.WriteLine($"Warm-up time for image conversion: {warmupTime} ms");
+ Console.WriteLine($"Average time excluding warm-up for image conversion: {averageTimeExcludingWarmup} ms");
+ Console.WriteLine($"Total time including warm-up for image conversion: {totalTimeIncludingWarmup} ms");
+
+ return (warmupTime, averageTimeExcludingWarmup, totalTimeIncludingWarmup);
+ }
+}
+\end{lstlisting}
+
+Finally, the average time excluding warm-up, total time including warm-up, and warm-up time are calculated. These values are then printed to the console and returned as a tuple containing the warm-up time, average time excluding warm-up, and total time including warm-up.
+
+The pixel iteration benchmark, on the other hand, performs pixel-wise operations on the image, as shown in the following listings.
+
+As with the image conversion benchmark, the pixel iteration benchmark is implemented as a static method \texttt{RunBenchmark} that takes the image path and the number of iterations as input parameters. The method returns a tuple containing the warm-up time, average time excluding warm-up, and total time including warm-up, and the variables are initialized in the same way.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={OpenCvSharp Implementation (Warm-up Iterations)}]
+ // Warm-up iterations
+ for (int i = 0; i < warmupIterations; i++)
+ {
+ stopwatch.Reset();
+ stopwatch.Start();
+
+ using (var image = Cv2.ImRead(imagePath, ImreadModes.Color))
+ {
+ for (int y = 0; y < image.Rows; y++)
+ {
+ for (int x = 0; x < image.Cols; x++)
+ {
+ var pixel = image.At<Vec3b>(y, x);
+ byte gray = (byte)((pixel.Item0 + pixel.Item1 + pixel.Item2) / 3);
+ image.Set(y, x, new Vec3b(gray, gray, gray));
+ }
+ }
+ }
+
+ stopwatch.Stop();
+ warmupTime += stopwatch.ElapsedMilliseconds;
+ }
+\end{lstlisting}
+
+The warm-up phase is executed five times to ensure that the libraries are fully initialized before the main iterations begin. In each iteration, the code reads an image using \texttt{Cv2.ImRead}, iterates over each pixel, calculates the grayscale value, and then sets the pixel value using \texttt{image.At<Vec3b>} and \texttt{image.Set}. The elapsed time for each iteration is recorded using the stopwatch object.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={OpenCvSharp Implementation (Main Iterations)}]
+ // Main iterations
+ for (int i = 0; i < iterations; i++)
+ {
+ stopwatch.Reset();
+ stopwatch.Start();
+
+ using (var image = Cv2.ImRead(imagePath, ImreadModes.Color))
+ {
+ for (int y = 0; y < image.Rows; y++)
+ {
+ for (int x = 0; x < image.Cols; x++)
+ {
+ var pixel = image.At<Vec3b>(y, x);
+ byte gray = (byte)((pixel.Item0 + pixel.Item1 + pixel.Item2) / 3);
+ image.Set(y, x, new Vec3b(gray, gray, gray));
+ }
+ }
+ }
+
+ stopwatch.Stop();
+ totalElapsedMilliseconds += stopwatch.ElapsedMilliseconds;
+ Console.WriteLine($"Iteration {i + 1}: Pixel iteration took {stopwatch.ElapsedMilliseconds} ms");
+ }
+\end{lstlisting}
+
+After the warm-up phase, the main iterations are executed, using the same logic as the warm-up phase. The elapsed time for each iteration is recorded, and the results are then aggregated and returned as a tuple containing the warm-up time, average time excluding warm-up, and total time including warm-up.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={OpenCvSharp Implementation (Results Calculation)}]
+ double averageTimeExcludingWarmup = totalElapsedMilliseconds / (double)iterations;
+ double totalTimeIncludingWarmup = warmupTime + totalElapsedMilliseconds;
+
+ Console.WriteLine($"Warm-up time for pixel iteration: {warmupTime} ms");
+ Console.WriteLine($"Average time excluding warm-up for pixel iteration: {averageTimeExcludingWarmup} ms");
+ Console.WriteLine($"Total time including warm-up for pixel iteration: {totalTimeIncludingWarmup} ms");
+
+ return (warmupTime, averageTimeExcludingWarmup, totalTimeIncludingWarmup);
+ }
+}
+\end{lstlisting}    
+
+Finally, the average time excluding warm-up, total time including warm-up, and warm-up time are calculated. These values are then printed to the console and returned as a tuple containing the warm-up time, average time excluding warm-up, and total time including warm-up. The returned values are then used to generate the results in an Excel file.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{Magick.NET Implementation}
+
+In the implementation of both image conversion and pixel iteration benchmarks, the Magick.NET library was used. This decision was based on Magick.NET's comprehensive functionality, which includes support for high-quality image conversion and efficient pixel-wise operations. \\
+
+Similar to the previous section on OpenCvSharp and SkiaSharp, the ImageConversionBenchmark class for Magick.NET features a static RunBenchmark method. In this method, the necessary variables are initialized to measure and record the performance of image conversion operations. This consistent approach across libraries facilitates a clear comparison of their performance under similar conditions.\\
+
+In the logic for the warm-up phase and main iterations, the only change was the set of library-specific functions used for image conversion and pixel iteration. Implementing image conversion with Magick.NET involved reading an image using \texttt{new MagickImage(inputPath)} and writing it using \texttt{image.Write(outputPath, MagickFormat.Png)}.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={Magick.NET Implementation (Image Conversion)}, label={lst:magicknet_imageconversion}]
+// Warm-up iterations
+for (int i = 0; i < warmupIterations; i++)
+{
+    stopwatch.Reset();
+    stopwatch.Start();
+
+    using (var image = new MagickImage(inputPath))
+    {
+        image.Write(outputPath, MagickFormat.Png);
+    }
+
+    stopwatch.Stop();
+    warmupTime += stopwatch.ElapsedMilliseconds;
+}
+
+// Main iterations
+for (int i = 0; i < iterations; i++)
+{
+    stopwatch.Reset();
+    stopwatch.Start();
+
+    using (var image = new MagickImage(inputPath))
+    {
+        image.Write(outputPath, MagickFormat.Png);
+    }
+
+    stopwatch.Stop();
+    totalElapsedMilliseconds += stopwatch.ElapsedMilliseconds;
+    Console.WriteLine($"Iteration {i + 1}: Image conversion took {stopwatch.ElapsedMilliseconds} ms");
+}
+\end{lstlisting}
+
+The pixel iteration benchmark was implemented by first retrieving the pixel data using the \texttt{image.GetPixels()} method. Then, for each pixel, the color channels were set to the same gray value using the \texttt{pixels.SetPixel(x, y, new ushort[] \{ gray, gray, gray \})} function. This process was repeated for each pixel in the image for both the warm-up phase and the main iterations.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={Magick.NET Implementation (Pixel Iteration)}, label={lst:magicknet_pixel_iteration}]
+// Warm-up iterations
+for (int i = 0; i < warmupIterations; i++)
+{
+    stopwatch.Reset();
+    stopwatch.Start();
+
+    using (var image = new MagickImage(imagePath))
+    {
+        var pixels = image.GetPixels();
+        for (int y = 0; y < image.Height; y++)
+        {
+            for (int x = 0; x < image.Width; x++)
+            {
+                var pixel = pixels.GetPixel(x, y); // Get pixel data
+                ushort gray = (ushort)((pixel[0] + pixel[1] + pixel[2]) / 3); // Convert to grayscale
+                pixels.SetPixel(x, y, new ushort[] { gray, gray, gray }); // Set pixel data with ushort[]
+            }
+        }
+    }
+
+    stopwatch.Stop();
+    warmupTime += stopwatch.ElapsedMilliseconds;
+}
+
+// Main iterations
+for (int i = 0; i < iterations; i++)
+{
+    stopwatch.Reset();
+    stopwatch.Start();
+
+    using (var image = new MagickImage(imagePath))
+    {
+        var pixels = image.GetPixels();
+        for (int y = 0; y < image.Height; y++)
+        {
+            for (int x = 0; x < image.Width; x++)
+            {
+                var pixel = pixels.GetPixel(x, y); // Get pixel data
+                ushort gray = (ushort)((pixel[0] + pixel[1] + pixel[2]) / 3); // Convert to grayscale
+                pixels.SetPixel(x, y, new ushort[] { gray, gray, gray }); // Set pixel data with ushort[]
+            }
+        }
+    }
+
+    stopwatch.Stop();
+    totalElapsedMilliseconds += stopwatch.ElapsedMilliseconds;
+    Console.WriteLine($"Iteration {i + 1}: Pixel iteration took {stopwatch.ElapsedMilliseconds} ms");
+}
+\end{lstlisting}
+
+The results of the image conversion and pixel iteration benchmarks were then, as with the previous libraries, aggregated and returned as a tuple containing the warm-up time, average time excluding warm-up, and total time including warm-up. These values were then used to generate the results in an Excel file.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{Emgu CV and Structure.Sketching Implementation}
+
+The implementation of Emgu CV and Structure.Sketching libraries in the benchmarking framework are shown in the following code snippet. The code demonstrates how the Emgu CV library is used for image conversion, while Structure.Sketching is used for pixel iteration.
+
+For image conversion, the code reads an image using \texttt{CvInvoke.Imread} and writes the image using \texttt{CvInvoke.Imwrite}. The warm-up phase and main iterations are executed in a similar manner to the previous libraries, with the elapsed time for each iteration recorded using a stopwatch object.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={Emgu CV Implementation (Image Conversion)}, label={lst:emgu_cv_structure_sketching_image_conversion}]
+using Emgu.CV;
+using Emgu.CV.CvEnum;
+using Emgu.CV.Structure;
+using Structure.Sketching;
+using Structure.Sketching.Formats;
+using Structure.Sketching.Colors;
+
+// Warm-up iterations
+for (int i = 0; i < warmupIterations; i++)
+{
+    stopwatch.Reset();
+    stopwatch.Start();
+
+    using (Mat image = CvInvoke.Imread(inputPath, ImreadModes.Color))
+    {
+        CvInvoke.Imwrite(outputPath, image);
+    }
+
+    stopwatch.Stop();
+    warmupTime += stopwatch.ElapsedMilliseconds;
+}
+
+// Main iterations
+for (int i = 0; i < iterations; i++)
+{
+    stopwatch.Reset();
+    stopwatch.Start();
+
+    using (Mat image = CvInvoke.Imread(inputPath, ImreadModes.Color))
+    {
+        CvInvoke.Imwrite(outputPath, image);
+    }
+
+    stopwatch.Stop();
+    totalElapsedMilliseconds += stopwatch.ElapsedMilliseconds;
+    Console.WriteLine($"Iteration {i + 1}: Image conversion took {stopwatch.ElapsedMilliseconds} ms");
+}
+\end{lstlisting}
+
+For pixel iteration, Structure.Sketching is used: the code reads an image using \texttt{new Structure.Sketching.Image(imagePath)} and iterates over each pixel, calculating the grayscale value and setting the pixel value using \texttt{image.Pixels[(y * width) + x]}. The warm-up phase and main iterations are executed in a similar manner to the previous libraries, with the elapsed time for each iteration recorded using a stopwatch object.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={Structure.Sketching Implementation (Pixel Iteration)}, label={lst:emgu_cv_structure_sketching_pixel_iteration}]
+// Warm-up iterations
+for (int i = 0; i < warmupIterations; i++)
+{
+    stopwatch.Reset();
+    stopwatch.Start();
+
+    var image = new Structure.Sketching.Image(imagePath);
+    int width = image.Width;
+    int height = image.Height;
+
+    for (int y = 0; y < height; y++)
+    {
+        for (int x = 0; x < width; x++)
+        {
+            var pixel = image.Pixels[(y * width) + x];
+            byte gray = (byte)((pixel.Red + pixel.Green + pixel.Blue) / 3);
+            image.Pixels[(y * width) + x] = new Color(gray, gray, gray, pixel.Alpha);
+        }
+    }
+
+    stopwatch.Stop();
+    warmupTime += stopwatch.ElapsedMilliseconds;
+}
+
+// Main iterations
+for (int i = 0; i < iterations; i++)
+{
+    stopwatch.Reset();
+    stopwatch.Start();
+
+    var image = new Structure.Sketching.Image(imagePath);
+    int width = image.Width;
+    int height = image.Height;
+
+    for (int y = 0; y < height; y++)
+    {
+        for (int x = 0; x < width; x++)
+        {
+            var pixel = image.Pixels[(y * width) + x];
+            byte gray = (byte)((pixel.Red + pixel.Green + pixel.Blue) / 3);
+            image.Pixels[(y * width) + x] = new Color(gray, gray, gray, pixel.Alpha);
+        }
+    }
+
+    stopwatch.Stop();
+    totalElapsedMilliseconds += stopwatch.ElapsedMilliseconds;
+    Console.WriteLine($"Iteration {i + 1}: Pixel iteration took {stopwatch.ElapsedMilliseconds} ms");
+}
+\end{lstlisting}
+
+Grayscale conversion is performed on each pixel by computing the average of the red, green, and blue components using the formula \texttt{(byte)((pixel.Red + pixel.Green + pixel.Blue) / 3)}. The grayscale value is then assigned to each color channel to create a grayscale image. The benchmarking process collects the results from both image conversion and pixel iteration. These results are aggregated into a tuple containing the warm-up time, the average time (excluding the warm-up phase), and the total time (including the warm-up phase). Finally, this data is used to generate an Excel file that summarizes the performance metrics.
diff --git a/sections/Chapter-3-sections/Memory-Profiling.tex b/sections/Chapter-3-sections/Memory-Profiling.tex
new file mode 100644
index 0000000000000000000000000000000000000000..d5f241200bcf2d46e8e18d960e9bbdc77379dc90
--- /dev/null
+++ b/sections/Chapter-3-sections/Memory-Profiling.tex
@@ -0,0 +1,68 @@
+\section{Memory Profiling and Performance Analysis}
+
+In any high-performance image processing application, it is not enough to measure raw execution time; memory consumption is equally critical. This section describes the integration of memory profiling into the benchmarking framework to provide a comprehensive view of the performance characteristics of each library and complement the time-based measurements. Using BenchmarkDotNet—a powerful tool for .NET performance analysis—we captured detailed metrics on memory allocation and garbage collection behavior. This implementation allowed us to understand the trade-offs between processing speed and resource utilization.
+
+The memory profiling is designed to evaluate not only the mean execution times but also the memory allocated during both image conversion and pixel iteration tasks. Using BenchmarkDotNet’s \texttt{[MemoryDiagnoser]}, \texttt{[Orderer]}, and \texttt{[RankColumn]} attributes, data on memory consumption, garbage collection events, and total allocated memory were collected for each benchmarked operation. By default, BenchmarkDotNet automatically determines how many warm-up and measurement iterations to run for each method, based on the workload, the environment, and the statistical requirements for accurate measurements, so there is no need to implement a fixed iteration count manually.
+
+The following framework demonstrates the implementation of memory profiling and an example of how the memory diagnostics were implemented for the image conversion and pixel iteration using ImageSharp:
+
+\begin{lstlisting}[language={[Sharp]C}, caption={Memory Profiling and Performance Analysis (ImageSharp)}, label={lst:memory-profiling}]
+using BenchmarkDotNet.Attributes;
+using BenchmarkDotNet.Order;
+using BenchmarkDotNet.Running;
+using SixLabors.ImageSharp;
+using SixLabors.ImageSharp.Formats.Png;
+using SixLabors.ImageSharp.PixelFormats;
+
+[MemoryDiagnoser]
+[Orderer(SummaryOrderPolicy.FastestToSlowest)]
+[RankColumn]
+public class Benchmarks
+{
+    private const string InputImagePath = "./../../../../../xl1.jpg";
+    private const string OutputImagePath = "./../../../../o.png";
+
+    [Benchmark]
+    public void ImageConversionBenchmark()
+    {
+        using (Image image = Image.Load(InputImagePath))
+        {
+            using (FileStream fs = new FileStream(OutputImagePath, FileMode.Create))
+            {
+                image.Save(fs, new PngEncoder());
+                Console.WriteLine("ImageConversionBenchmark completed");
+            }
+        }
+    }
+\end{lstlisting}
+
+The same logic is used for image conversion, but there was no need to implement the iterations and warm-up phase manually. For configuring the \texttt{MemoryDiagnoser} results, the \texttt{Orderer(SummaryOrderPolicy.FastestToSlowest)} and \texttt{RankColumn} attributes were used to order the results from fastest to slowest execution time and to rank the results in the summary table, respectively, providing a better and clearer view of the results.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={Memory Profiling and Performance Analysis (ImageSharp, pixel iteration)}, label={lst:memory-profiling-pixel}]
+[Benchmark]
+public void PixelIterationBenchmark()
+{
+    using (Image<Rgba32> image = Image.Load<Rgba32>(InputImagePath))
+    {
+        int width = image.Width;
+        int height = image.Height;
+
+        for (int y = 0; y < height; y++)
+        {
+            for (int x = 0; x < width; x++)
+            {
+                Rgba32 pixel = image[x, y];
+                byte gray = (byte)((pixel.R + pixel.G + pixel.B) / 3);
+                image[x, y] = new Rgba32(gray, gray, gray, pixel.A);
+            }
+        }
+        Console.WriteLine("PixelIterationBenchmark completed");
+    }
+}
+\end{lstlisting}
+
+The pixel iteration benchmark was implemented in a similar manner, with the same memory diagnostics attributes. The code snippet above demonstrates the pixel iteration benchmark for ImageSharp, where each pixel in the image is converted to grayscale. The memory diagnostics provided by BenchmarkDotNet allowed us to track the memory consumption and garbage collection events during the pixel iteration operation, providing valuable insights into the resource utilization of each library.
+
+This code exemplifies our approach to memory diagnostics. By annotating the benchmark class with \texttt{[MemoryDiagnoser]}, BenchmarkDotNet automatically collects data on memory usage—including the number of garbage collection (GC) events and the total allocated memory during each benchmarked operation. Similar implementations were done for the other libraries as well.
+
+This level of granularity provided insights that went beyond raw timing metrics, revealing, for example, that while Emgu CV might be faster in certain operations, its higher memory consumption could be a concern for applications running on memory-constrained systems.
\ No newline at end of file
diff --git a/sections/Chapter-3-sections/Pixel-Iteration.tex b/sections/Chapter-3-sections/Pixel-Iteration.tex
new file mode 100644
index 0000000000000000000000000000000000000000..75155d9e94c341fdcfef3e1a80d1cb2bc8d9f81d
--- /dev/null
+++ b/sections/Chapter-3-sections/Pixel-Iteration.tex
@@ -0,0 +1,82 @@
+\subsection{Pixel Iteration Benchmark Implementation}
+
+The pixel iteration benchmark is equally critical, as it measures the time taken to perform a basic image processing operation—converting an image to grayscale by iterating over each pixel. This benchmark simulates real-world scenarios where complex filters and effects require individual pixel manipulation.
+
+For ImageSharp, the implementation involves loading the image as an array of pixels, processing each pixel to compute its grayscale value, and then updating the image accordingly. The following snippet provides a glimpse into this process:
+
+\begin{lstlisting}[language={[Sharp]C}, caption={Pixel iteration benchmark implementation (ImageSharp-Testing.cs)}]
+using SixLabors.ImageSharp;
+using SixLabors.ImageSharp.Formats.Png;
+using SixLabors.ImageSharp.PixelFormats;
+public class PixelIterationBenchmark
+{
+    public static (double warmupTime, double averageTime, double totalTime) RunBenchmark(string imagePath, int iterations)
+    {
+        long totalElapsedMilliseconds = 0;
+        long warmupTime = 0;
+        int warmupIterations = 5;
+        Stopwatch stopwatch = new Stopwatch();
+
+        // Warm-up phase for pixel iteration
+        for (int i = 0; i < warmupIterations; i++)
+        {
+            stopwatch.Reset();
+            stopwatch.Start();
+            using (Image<Rgba32> image = Image.Load<Rgba32>(imagePath))
+            {
+                int width = image.Width;
+                int height = image.Height;
+                for (int y = 0; y < height; y++)
+                {
+                    for (int x = 0; x < width; x++)
+                    {
+                        Rgba32 pixel = image[x, y];
+                        byte gray = (byte)((pixel.R + pixel.G + pixel.B) / 3);
+                        image[x, y] = new Rgba32(gray, gray, gray, pixel.A);
+                    }
+                }
+            }
+            stopwatch.Stop();
+            warmupTime += stopwatch.ElapsedMilliseconds;
+        }
+
+        // Main iterations to measure pixel iteration performance
+        for (int i = 0; i < iterations; i++)
+        {
+            stopwatch.Reset();
+            stopwatch.Start();
+            using (Image<Rgba32> image = Image.Load<Rgba32>(imagePath))
+            {
+                int width = image.Width;
+                int height = image.Height;
+                for (int y = 0; y < height; y++)
+                {
+                    for (int x = 0; x < width; x++)
+                    {
+                        Rgba32 pixel = image[x, y];
+                        byte gray = (byte)((pixel.R + pixel.G + pixel.B) / 3);
+                        image[x, y] = new Rgba32(gray, gray, gray, pixel.A);
+                    }
+                }
+            }
+            stopwatch.Stop();
+            totalElapsedMilliseconds += stopwatch.ElapsedMilliseconds;
+            Console.WriteLine($"Iteration {i + 1}: Pixel iteration took {stopwatch.ElapsedMilliseconds} ms");
+        }
+
+        double averageTime = totalElapsedMilliseconds / (double)iterations;
+        double totalTime = warmupTime + totalElapsedMilliseconds;
+        Console.WriteLine($"Warm-up: {warmupTime} ms, Average: {averageTime} ms, Total: {totalTime} ms");
+
+        return (warmupTime, averageTime, totalTime);
+    }
+}
+\end{lstlisting}
+
+The code measures the performance of a grayscale conversion operation by iterating over each pixel of an image. As in the image conversion, it uses a timer (Stopwatch) and divides the process into two phases: a warm-up phase and a measurement phase. During the warm-up phase, the image is loaded and processed five times. This phase helps stabilize performance by mitigating any startup overheads. Each iteration involves loading the image, iterating over its width and height, reading each pixel, computing the grayscale value by averaging the red, green, and blue channels, and assigning the new grayscale value back while preserving the alpha channel. The use of the \texttt{using} statement ensures that the image is properly disposed after processing.
+
+In the measurement phase, the same processing occurs over a user-specified number of iterations (100 iterations). After running all iterations, the code calculates the average time per iteration and the total time including warm-up. This approach isolates the steady-state performance from any one-time overhead, resulting in more accurate measurements that reflect the true cost of pixel-by-pixel manipulations.
+
+The design emphasizes clear resource management, detailed timing, and separation of initialization costs from the main measurement, which are crucial when every microsecond of processing time matters in image manipulation scenarios.
+
+The main focus of the implementation was to capture the interplay between algorithmic efficiency and system-level resource management. Every pixel operation is executed in a closed loop, and even minor inefficiencies can accumulate over hundreds of iterations. The careful design of the loop structure and the use of a stopwatch to measure elapsed time reflect the attention to detail required during development, because even in high-level libraries such as ImageSharp, every microsecond counts when processing large images.
\ No newline at end of file
diff --git a/sections/Chapter-3-sections/Result-Export.tex b/sections/Chapter-3-sections/Result-Export.tex
new file mode 100644
index 0000000000000000000000000000000000000000..935131b6f04face99166339d264b52e578960718
--- /dev/null
+++ b/sections/Chapter-3-sections/Result-Export.tex
@@ -0,0 +1,42 @@
+\section{Result Export and Data Aggregation}
+
+Once the performance and memory metrics were collected, the next challenge was to present the results in a coherent and accessible manner. For this purpose, Excel was chosen as the output format due to its widespread adoption and ease of use for further analysis. The \texttt{OfficeOpenXml} namespace, which is part of the EPPlus library, allows for the creation and manipulation of Excel files in .NET applications. The ExcelExporter class was implemented to aggregate the benchmark results and export them to an Excel file.
+
+The code snippet below illustrates how the benchmark results are aggregated and exported to an Excel file:
+
+\begin{lstlisting}[language={[Sharp]C}, caption={Result Export and Data Aggregation}, label={lst:result-export}]
+using OfficeOpenXml;
+
+public class ExcelExporter
+{
+    public static void ExportResults(string excelOutputPath, 
+        (double warmupTime, double averageTime, double totalTime) imageConversionResults, 
+        (double warmupTime, double averageTime, double totalTime) pixelIterationResults)
+    {
+        using (var package = new ExcelPackage())
+        {
+            var worksheet = package.Workbook.Worksheets.Add("Benchmark Results");
+            worksheet.Cells[1, 1].Value = "Benchmark";
+            worksheet.Cells[1, 2].Value = "Warm-Up Time (ms)";
+            worksheet.Cells[1, 3].Value = "Average Time (ms)";
+            worksheet.Cells[1, 4].Value = "Total Time (ms)";
+            
+            worksheet.Cells[2, 1].Value = "Image Conversion";
+            worksheet.Cells[2, 2].Value = imageConversionResults.warmupTime;
+            worksheet.Cells[2, 3].Value = imageConversionResults.averageTime;
+            worksheet.Cells[2, 4].Value = imageConversionResults.totalTime;
+            
+            worksheet.Cells[3, 1].Value = "Pixel Iteration";
+            worksheet.Cells[3, 2].Value = pixelIterationResults.warmupTime;
+            worksheet.Cells[3, 3].Value = pixelIterationResults.averageTime;
+            worksheet.Cells[3, 4].Value = pixelIterationResults.totalTime;
+            
+            package.SaveAs(new FileInfo(excelOutputPath));
+        }
+    }
+}
+\end{lstlisting}
+
+The ExcelExporter class creates a structured Excel file containing a single worksheet in which each benchmark operation occupies its own row. The results are organized into columns for the warm-up time, average time, and total time of each operation. The resulting Excel file provides a clear and concise summary of the benchmark results, making it easy to compare the performance and memory characteristics of each library.
+
+By automating the process of result aggregation, the framework not only saves time but also minimizes the risk of manual errors. Each cell in the generated Excel file is carefully populated with benchmark data, and the resulting spreadsheet can be easily imported into analytical tools for further exploration. This process of exporting results serves as a bridge between the raw performance data and the actionable insights that drive decision-making in software optimization.
diff --git a/sections/Chapter-3-sections/System-Architecture.tex b/sections/Chapter-3-sections/System-Architecture.tex
new file mode 100644
index 0000000000000000000000000000000000000000..da12af1d682bc60fad8588d9b6b218c166984cbb
--- /dev/null
+++ b/sections/Chapter-3-sections/System-Architecture.tex
@@ -0,0 +1,58 @@
+\section{System Architecture and Design Rationale}
+
+The design of our benchmarking framework was guided by the need for consistency, repeatability, and scientific rigor. The system was architected to support multiple libraries through a common interface, ensuring that each library’s performance could be measured under identical conditions. At the core of our design was a two‐phase benchmarking process: an initial warm-up phase to account for any initialization overhead, followed by a main test phase where the actual performance metrics were recorded.
+
+In constructing the system, several important decisions were made. First, we employed a modular approach, separating the benchmarking routines into distinct components. This allowed us to encapsulate the logic for image conversion and pixel iteration into separate classes, each responsible for executing a series of timed iterations and logging the results.  
+
+\begin{lstlisting}[language={[Sharp]C}, caption={Design of the benchmarking framework}]
+    public class ImageConversionBenchmark{
+        
+        // Benchmarking logic for image conversion
+    }
+    public class PixelIterationBenchmark{
+
+        // Benchmarking logic for pixel iteration
+    }
+\end{lstlisting}
+
+The architecture also included a dedicated component for result aggregation, which exported data into an Excel file using EPPlus, thereby facilitating further analysis and visualization.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={Design of the benchmarking framework}]
+    using OfficeOpenXml;
+
+    public class ExcelExporter{
+        
+        // Logic for exporting benchmark results to an Excel sheet in a structured format
+    }
+\end{lstlisting}
+
+An essential aspect of the design was the uniformity of testing. Despite the differences in methods of implementation among the libraries, the benchmarking framework was designed to abstract away these differences. Each library was integrated by implementing the same sequence of operations: reading an image from disk, processing the image (either converting its format or iterating over its pixels to apply a grayscale filter), and finally saving the processed image back to disk. This uniform methodology ensured that our performance comparisons were both fair and reproducible.
+
+The architecture also accounted for system-level factors such as memory management and garbage collection. For instance, in languages like C\#, where unmanaged resources must be explicitly disposed of, the design included rigorous cleanup routines to ensure that each iteration began with a clean slate. This attention to detail was crucial in obtaining accurate measurements, as any residual state from previous iterations could skew the results.
+
+\begin{lstlisting}[language={[Sharp]C}, caption={Design of the benchmarking framework}]
+    using BenchmarkDotNet.Attributes;
+    using BenchmarkDotNet.Running;
+
+    class Program
+    {
+        static void Main(string[] args)
+        {
+            BenchmarkRunner.Run<Benchmarks>();
+        }
+    }
+
+    [MemoryDiagnoser]
+    public class Benchmarks{
+        
+        [Benchmark]
+        public void ImageConversionBenchmark(){
+            // Image conversion logic
+        }
+
+        [Benchmark]
+        public void PixelIterationBenchmark(){
+            // Pixel iteration logic
+        }
+    }
+\end{lstlisting}
diff --git a/sections/Chapter-4-sections/Analysis_and_Interpretation_of_Results.tex b/sections/Chapter-4-sections/Analysis_and_Interpretation_of_Results.tex
new file mode 100644
index 0000000000000000000000000000000000000000..d893d9cf32c6401530449c375c702d461bf8dc57
--- /dev/null
+++ b/sections/Chapter-4-sections/Analysis_and_Interpretation_of_Results.tex
@@ -0,0 +1,47 @@
+\section{Analysis and Interpretation of Results}
+
+As the final benchmarking results were collected and plotted, the emerging trends provided critical insights into the efficiency of various image processing libraries. The raw numerical data from our benchmarking suite provided an answer to the research question, but a deeper interpretation of these results allowed us to refine our understanding of the trade-offs and strengths of each alternative. This section explores the relationship between speed and memory usage, compares the empirical findings with theoretical expectations, and discusses the implications for real-world applications.
+
+\subsection{Comparison of Performance Trends}
+
+The performance hierarchy observed in the benchmarking results closely aligns with expectations based on each library’s internal architecture. Libraries such as OpenCvSharp and Emgu CV, both built upon OpenCV’s optimized C++ backend, showcased superior execution times for image conversion tasks. This efficiency is largely attributed to OpenCV’s reliance on low-level SIMD (Single Instruction, Multiple Data) optimizations and hardware-accelerated processing paths.
+
+Conversely, ImageSharp—despite its clean API and pure C\# implementation—demonstrated significantly higher processing times, reinforcing the general principle that managed code introduces overhead compared to native libraries. While ImageSharp remains a viable option for applications prioritizing ease of use and portability over raw performance, the performance disparity is undeniable.
+
+Magick.NET, though powerful and highly flexible in terms of format support, performed noticeably worse in pixel iteration tasks. This result was somewhat anticipated due to the internal structure of ImageMagick, which prioritizes format conversions and high-quality rendering over raw pixel access speed. The excessive processing times observed in the Magick.NET pixel iteration benchmark further support the hypothesis that it is not optimized for this type of operation.
+
+The trends in memory consumption were particularly revealing. While OpenCvSharp + SkiaSharp exhibited minimal memory allocation, Emgu CV+Structure.Sketching, despite its processing speed, required substantially higher memory overhead. This observation is consistent with Emgu CV’s underlying OpenCV core, which relies on large temporary buffers and matrix structures for intermediate computations. In contrast, ImageSharp demonstrated exceptional memory efficiency during pixel iteration but was significantly slower, suggesting that its architecture prioritizes memory conservation over execution speed.
+
+\subsection{Trade-Offs Between Speed and Memory Usage}
+
+The relationship between speed and memory consumption is a recurring theme in performance optimization. Our results underscore that achieving optimal speed often comes at the cost of increased memory usage. Emgu CV+Structure.Sketching exemplifies this trade-off: while its pixel iteration speed was among the best recorded, it consumed significantly more RAM than ImageSharp.
+
+The implications of these trade-offs depend heavily on the intended application. For environments where processing speed is paramount—such as real-time video processing or AI-powered image enhancement—Emgu CV’s increased memory footprint may be an acceptable compromise. However, in resource-constrained applications (e.g., embedded systems, mobile devices, or cloud-based deployments with strict memory limits), a lower-memory alternative like ImageSharp may be more suitable despite its lower speed.
+
+\setlength{\columnWidth}{0.10\textwidth}
+\begin{longtable}{|>{\raggedright\arraybackslash}p{0.26\textwidth}|>{\raggedright\arraybackslash}p{0.43\textwidth}|>{\raggedright\arraybackslash}p{\columnWidth}|>{\raggedright\arraybackslash}p{\columnWidth}|}
+\hline
+\rowcolor{purple!30}
+\textbf{Library} & \textbf{Task} & \textbf{Speed} & \textbf{Memory Usage} \\
+\hline
+\endfirsthead
+
+\hline
+\rowcolor{purple!30}
+\textbf{Library} & \textbf{Task} & \textbf{Speed} & \textbf{Memory Usage} \\
+\hline
+\endhead
+
+\multirow{2}{*}{\textbf{ImageSharp}} & Image Conversion & Slow & Low \\
+\cline{2-4}
+ & Pixel Iteration & Fast & Low \\
+\hline
+\multirow{2}{*}{\shortstack{\textbf{Emgu CV + SkiaSharp}}} & Image Conversion (SkiaSharp) & Fast & Low \\
+\cline{2-4}
+ & Pixel Iteration (Emgu CV) & Fast & High \\
+\hline
+\caption{Speed vs. Memory Usage Trade-Offs}
+\label{tab:speed-memory-trade-offs}
+\end{longtable}
+
+One particularly interesting finding was that OpenCvSharp+SkiaSharp consistently delivered both high speed and low memory usage for image conversion. This anomaly suggests that this combination strikes an optimal balance, leveraging OpenCV’s native optimizations while maintaining a lightweight footprint in memory. The fact that this hybrid approach outperformed even standalone OpenCV libraries further supports the notion that combining high-performance native libraries with efficient rendering engines can yield superior results.
\ No newline at end of file
diff --git a/sections/Chapter-4-sections/Image_conversion_benchmark_results.tex b/sections/Chapter-4-sections/Image_conversion_benchmark_results.tex
new file mode 100644
index 0000000000000000000000000000000000000000..a8d3df3d0b772f133d6691eece89dbb25e52b6c8
--- /dev/null
+++ b/sections/Chapter-4-sections/Image_conversion_benchmark_results.tex
@@ -0,0 +1,42 @@
+\section{Image Conversion Benchmark Results}
+
+The image conversion benchmark was performed using ImageSharp and Magick.NET as well as SkiaSharp and Structure.Sketching, which were the chosen libraries in their combinations with OpenCvSharp and Emgu CV, respectively, for the conversion task. Using the same 4K resolution image, the benchmark measured the time taken to convert the image from JPEG to PNG format. Comparing the results of these libraries provides insights into their performance and efficiency in application scenarios where rapid image conversion is required—such as real-time image processing pipelines or high-volume batch processing environments. The data thus answer one of our central questions—which library can provide significantly faster image conversion—thereby supporting the hypothesis discussed in earlier chapters.
+
+ImageSharp recorded a warm-up conversion time of approximately 2,754 milliseconds. In contrast, the combination of OpenCvSharp with SkiaSharp delivered a warm-up time of only 539 milliseconds. Similarly, Emgu CV integrated with Structure.Sketching achieved 490 milliseconds, while Magick.NET registered a warm-up time of 4,333 milliseconds.
+
+\newlength{\columnWidth}
+\setlength{\columnWidth}{0.19\textwidth}
+\vspace{0.5cm}
+\begin{longtable}{|>{\raggedright\arraybackslash}p{0.40\textwidth}|>{\raggedright\arraybackslash}p{\columnWidth}|>{\raggedright\arraybackslash}p{\columnWidth}|>{\raggedright\arraybackslash}p{\columnWidth}|}
+\hline
+\rowcolor{purple!30}
+\textbf{Library} & \textbf{Warm-Up Time (ms)} & \textbf{Avg. Time Excl. Warm-Up (ms)} & \textbf{Total Time Incl. Warm-Up (ms)} \\
+\hline
+\endfirsthead
+
+\hline
+\rowcolor{purple!30}
+\textbf{Library} & \textbf{Warm-Up Time (ms)} & \textbf{Avg. Time Excl. Warm-Up (ms)} & \textbf{Total Time Incl. Warm-Up (ms)} \\
+\hline
+\endhead
+
+\textbf{ImageSharp} & 2754 & 480.86 & 50840 \\\hline
+\textbf{OpenCvSharp + SkiaSharp} & 539 & 100.31 & 10570 \\\hline
+\textbf{Magick.NET} & 4333 & 845.46 & 88879 \\\hline
+\textbf{Emgu CV + Structure.Sketching} & 490 & 59.43 & 6433 \\\hline
+
+\caption{Image Conversion Benchmark Results}
+\label{tab:image-conversion-results}
+\end{longtable}
+
+The table above is the final dataset, constructed by merging multiple Excel files produced by the framework described in the Implementation chapter. These results show lightweight libraries such as SkiaSharp and Structure.Sketching outperforming ImageSharp and Magick.NET in terms of image conversion time. The data also reveal that Emgu CV with Structure.Sketching is the most efficient combination for image conversion, with the lowest average time of 59.43 milliseconds per conversion (490 milliseconds warm-up). On the other hand, ImageSharp and Magick.NET are significantly slower, with average times of 480.86 and 845.46 milliseconds, respectively.
+
+
+\includegraphics[width=5in]{media/log_1.png}
+\captionof{figure}{Performance Comparison - Image Conversion (log scale)}
+\label{fig:image-conversion}
+\vspace{0.5cm}
+
+
+To visually encapsulate these findings, the graph illustrates the conversion times across the tested libraries: the conversion times for OpenCvSharp+SkiaSharp and Emgu CV+Structure.Sketching are clustered at the lower end of the spectrum, while ImageSharp’s results are significantly higher. This visual evidence reinforces the numerical data and provides an immediate, intuitive understanding of the performance differences. A logarithmic scale was used to better represent the data, as the differences in total time—the sum of the warm-up and main iteration times—are significant. This three-color graph makes it possible to compare the performance of the libraries in different scenarios, such as real-time image processing or batch conversion tasks, at a glance.
+
diff --git a/sections/Chapter-4-sections/Memory_benchmark_results.tex b/sections/Chapter-4-sections/Memory_benchmark_results.tex
new file mode 100644
index 0000000000000000000000000000000000000000..55d967a32becc9b05daadd78a9922d284066d016
--- /dev/null
+++ b/sections/Chapter-4-sections/Memory_benchmark_results.tex
@@ -0,0 +1,62 @@
+\section{Memory Benchmarking Results}
+
+In parallel with the time benchmarks, memory consumption was a critical parameter in our evaluation. For the image conversion tasks, SkiaSharp, as part of the OpenCvSharp+SkiaSharp configuration, exhibited the lowest memory allocation, with values approximating 58 KB. ImageSharp, in comparison, required about 5.67 MB, which is substantially higher. In the context of pixel iteration, the memory profiles were similarly divergent. ImageSharp was extremely efficient in this regard, consuming roughly 20 KB on average, whereas Emgu CV + Structure.Sketching, despite its fast processing times, utilized around 170 MB of memory.
+
+\setlength{\columnWidth}{0.22\textwidth}
+\begin{longtable}{|>{\raggedright\arraybackslash}p{0.20\textwidth}|>{\raggedright\arraybackslash}p{\columnWidth}|>{\raggedright\arraybackslash}p{\columnWidth}|}
+
+\hline
+\rowcolor{purple!30}
+\textbf{Library} & \textbf{Allocated Memory} & \textbf{Gen0/Gen1/Gen2 Collections} \\
+\hline
+\endfirsthead
+
+\hline
+\rowcolor{purple!30}
+\textbf{Library} & \textbf{Allocated Memory} & \textbf{Gen0/Gen1/Gen2 Collections} \\
+\hline
+\endhead
+
+\textbf{EmguCV} & 0.00068 MB (712 bytes) & - / - / - \\
+\hline
+\textbf{ImageSharp} & 5.67 MB (5,805.41 KB) & 1,000 / 1,000 / 1,000 \\
+\hline
+\textbf{SkiaSharp} & 0.05612 MB (58,864 bytes) & - / - / - \\
+\hline
+
+\caption{Memory Benchmarking Results for Image Conversion}
+\label{tab:memory-results-image-conversion}
+\end{longtable}
+
+The table above summarizes the memory benchmarking results for image conversion. It is evident that ImageSharp has the highest memory allocation, with approximately 5.67 MB, while Emgu CV has the lowest, with only 712 bytes. SkiaSharp falls in between, with approximately 58 KB. These figures provide a clear indication of the memory efficiency of each library for image conversion tasks.\\
+
+The large memory footprint of Emgu CV during pixel iteration is a noteworthy trade-off. While its performance in terms of speed is excellent, the high memory consumption must be considered when deploying the solution in memory-constrained environments. The benchmarking data collected here is critical because it provides a balanced view—speed alone does not define an optimal library, but rather the ratio of processing time to memory usage does. For a clear summary of these findings, the below table provides a concise overview of the memory metrics for each library configuration.
+
+
+\begin{longtable}{|>{\raggedright\arraybackslash}p{0.20\textwidth}|>{\raggedright\arraybackslash}p{\columnWidth}|>{\raggedright\arraybackslash}p{\columnWidth}|}
+
+\hline
+\rowcolor{purple!30}
+\textbf{Library} & \textbf{Allocated Memory} & \textbf{Gen0/Gen1/Gen2 Collections} \\
+\hline
+\endfirsthead
+
+\hline
+\rowcolor{purple!30}
+\textbf{Library} & \textbf{Allocated Memory} & \textbf{Gen0/Gen1/Gen2 Collections} \\
+\hline
+\endhead
+
+\textbf{EmguCV} & 170.00 MB (177,976,185 bytes) & 33,142 / 1,571 / 1,571 \\
+\hline
+\textbf{ImageSharp} & 0.01932 MB (20.26 KB) & - / - / - \\
+\hline
+\textbf{SkiaSharp} & 384.00 MB (403,300,552 bytes) & 85 / - / - \\
+\hline
+
+\caption{Memory Benchmarking Results for Pixel Iteration}
+\label{tab:memory-results-pixel-iteration}
+\end{longtable}
+
+
+The table indicates that while Emgu CV+Structure.Sketching is extremely fast for pixel iteration, its memory consumption is substantially higher than that of ImageSharp. SkiaSharp has the highest memory allocation, at approximately 384 MB, followed by Emgu CV with approximately 170 MB, while ImageSharp has the lowest, with only about 20 KB. These figures provide a clear indication of the memory efficiency of each library for pixel iteration tasks. Such data are instrumental in shaping our final recommendation.
diff --git a/sections/Chapter-4-sections/Pixel_iteration_benchmark_results.tex b/sections/Chapter-4-sections/Pixel_iteration_benchmark_results.tex
new file mode 100644
index 0000000000000000000000000000000000000000..f5ca0c68b9a63a8de55a76d3a5a04582b8b70ad2
--- /dev/null
+++ b/sections/Chapter-4-sections/Pixel_iteration_benchmark_results.tex
@@ -0,0 +1,39 @@
+\section{Pixel Iteration Benchmark Results}
+
+On the other hand, the pixel iteration benchmark aimed to assess the libraries’ abilities to process each pixel of an image. For ImageSharp, the warm-up phase for pixel iteration took an average of 755 milliseconds, with the main iteration averaging 117.06 milliseconds per cycle and a cumulative total of 12,461 milliseconds over 100 iterations. The performance landscape changed when we observed the results for Magick.NET. This configuration recorded a warm-up time of approximately 12,149 milliseconds, and the main iterations averaged 2,054.18 milliseconds, resulting in an astronomical total of 217,567 milliseconds. 
+
+As discussed earlier, OpenCvSharp and Emgu CV were the chosen libraries in their combinations with SkiaSharp and Structure.Sketching, respectively for the pixel iteration task. The results of these tests provide insights into the performance of these libraries in scenarios where pixel-level operations are required, such as image processing algorithms or computer vision applications. The performance landscape changed when we observed the results for OpenCvSharp. This configuration recorded a warm-up time of approximately 813 milliseconds, and the main iterations averaged 159.44 milliseconds, resulting in a total of 16,757 milliseconds. In contrast, Emgu CV delivered impressive results with a warm-up time of 1,118 milliseconds and an average main iteration time of 118.87 milliseconds, culminating in a total of 13,005 milliseconds.
+
+
+\begin{longtable}{|>{\raggedright\arraybackslash}p{0.40\textwidth}|>{\raggedright\arraybackslash}p{\columnWidth}|>{\raggedright\arraybackslash}p{\columnWidth}|>{\raggedright\arraybackslash}p{\columnWidth}|}
+\hline
+\rowcolor{purple!30}
+\textbf{Library} & \textbf{Warm-Up Time (ms)} & \textbf{Avg. Time Excl. Warm-Up (ms)} & \textbf{Total Time Incl. Warm-Up (ms)} \\
+\hline
+\endfirsthead
+
+\hline
+\rowcolor{purple!30}
+\textbf{Library} & \textbf{Warm-Up Time (ms)} & \textbf{Avg. Time Excl. Warm-Up (ms)} & \textbf{Total Time Incl. Warm-Up (ms)} \\
+\hline
+\endhead
+
+\textbf{ImageSharp} & 755 & 117.06 & 12461 \\\hline
+\textbf{OpenCvSharp + SkiaSharp} & 813 & 159.44 & 16757 \\\hline
+\textbf{Magick.NET} & 12149 & 2054.18 & 217567 \\\hline
+\textbf{Emgu CV + Structure.Sketching} & 1118 & 118.87 & 13005 \\\hline
+
+\caption{Pixel Iteration Benchmark Results}
+\label{tab:pixel-iteration-results}
+\end{longtable}
+
+The table above summarizes the pixel iteration benchmark results, highlighting the warm-up and average times for each library combination. The data show that ImageSharp is the most efficient library for pixel iteration, with the lowest average time of 117.06 milliseconds, closely followed by Emgu CV at 118.87 milliseconds and OpenCvSharp at 159.44 milliseconds. In contrast, Magick.NET is significantly slower, with an average time of 2,054.18 milliseconds.
+
+Graphical depictions further highlight these performance differences. 
+
+\includegraphics[width=5in]{media/log_2.png}
+\captionof{figure}{Pixel Iteration Benchmark Results}
+\label{fig:pixel-iteration}
+
+The disparity between these figures is telling. While Magick.NET excels in some aspects of image conversion, it appears less suited for tasks involving pixel-by-pixel iteration, given the significantly higher processing times. On the other hand, Emgu CV and ImageSharp produce comparable main iteration times; their cumulative times are likewise close, with ImageSharp marginally ahead (12,461 ms versus 13,005 ms), so both remain appealing choices for pixel-level operations.
+
+The visual comparisons elucidate that while ImageSharp and Emgu CV+Structure.Sketching are closely matched in main iteration performance, the excessive warm-up and overall times associated with Magick.NET underscore its limitations for this specific task.
diff --git a/sections/Chapter-4-sections/Summary.tex b/sections/Chapter-4-sections/Summary.tex
new file mode 100644
index 0000000000000000000000000000000000000000..80e818b10c9507ceadd34526a5b227b5a6f94187
--- /dev/null
+++ b/sections/Chapter-4-sections/Summary.tex
@@ -0,0 +1,3 @@
+\section{Summary}
+
+The benchmarking results provide a comprehensive overview of the performance and efficiency of the image processing libraries tested. The data clearly show that Emgu CV + Structure.Sketching is the most efficient combination for image conversion, with the lowest average time of 490 milliseconds; in contrast, ImageSharp and Magick.NET are significantly slower, with average times of 2,754 and 4,333 milliseconds, respectively. For pixel iteration, ImageSharp is the most efficient, with the lowest average time of 117.06 milliseconds, closely followed by Emgu CV + Structure.Sketching at 118.87 milliseconds and OpenCvSharp + SkiaSharp at 159.44 milliseconds. In contrast, Magick.NET is significantly slower, with an average time of 2,054.18 milliseconds. The memory benchmarking results further highlight the efficiency of ImageSharp in terms of memory consumption, with Emgu CV and SkiaSharp exhibiting far higher memory usage. These findings provide valuable insights into the performance characteristics of each library and will inform our final recommendations for image processing tasks.
\ No newline at end of file
diff --git a/sources/Abstract.tex b/sources/Abstract.tex
index 2f79f27bb74f318a7ca0ff0539d89711cdbe6a10..c1a9d53d4cea94afa68c24c9a6d58940dc3a95c1 100755
--- a/sources/Abstract.tex
+++ b/sources/Abstract.tex
@@ -1,13 +1,9 @@
 \chapter*{Abstract}
 \addcontentsline{toc}{chapter}{Abstract}
-This thesis presents a comprehensive evaluation of alternatives to ImageSharp for image processing in software applications. ImageSharp, though powerful and widely used, comes with an annual licensing cost of 5,000\$, significantly affecting project budgets. The primary goal of this research is to explore more cost-effective, performance-oriented alternatives that can either replace or complement ImageSharp while meeting the application’s image processing requirements.
+Comparing image processing libraries is an essential step in finding a solution that is both cost-effective and high-performing for industrial and software applications. Automation, quality control, medical imaging, and real-time data analysis are typical use cases of digital image processing, and they require libraries that offer both efficiency and flexibility. Whether open-source or proprietary, libraries such as OpenCV, SkiaSharp, Magick.NET, Emgu CV, and OpenCvSharp provide a wide spectrum of functionality, from simple image manipulation to complex computer vision tasks.
 
-The study begins by identifying the core functionalities currently supported by ImageSharp, such as image loading, creation, manipulation, pixel access, resizing, format conversion, and image composition. These functions are essential in tasks like image transformation, cropping, resampling, and metadata management. Performance metrics were established, focusing on key operations like image conversion and pixel iteration. Several alternative libraries were then evaluated based on their ability to meet these functional and performance criteria.
+The choice of an image processing library for a given task is influenced by several factors: foremost the computational efficiency of its algorithms, but also licensing and integration concerns. Some libraries prioritize speed and flexibility, catering to performance-critical applications, whereas others emphasize ease of use and cross-platform support. Evaluating a library's performance requires measuring its run time, memory usage, and suitability for different environments, including embedded systems, desktop applications, and cloud-based platforms.
 
-The alternatives investigated include Emgu CV, SkiaSharp, Magick.NET, OpenCvSharp, and others, all of which were assessed for their support of advanced image processing features, licensing costs, integration effort, and community support. Each library was tested for specific capabilities such as pixel manipulation, image format support, encoding efficiency, and rendering performance. Benchmarks were conducted to measure execution times for image conversion and pixel iteration across these libraries, providing insight into their real-world performance.
+A key consideration in the industrial context is the trade-off between processing power and operational stability. High-performance computers can carry out image transformations and real-time analysis almost instantaneously, whereas embedded systems and industrial controllers impose constraints that slow execution. Companies approaching image processing optimization must therefore balance performance, reliability, and cost efficiency.
 
-Among the evaluated libraries, the combination of Emgu CV and SkiaSharp was identified as the most suitable alternative. Emgu CV, based on the powerful OpenCV library, excels in high-performance image processing tasks, including pixel-level manipulation, resizing, and format conversion. SkiaSharp, on the other hand, complements Emgu CV by providing efficient 2D graphics rendering, image creation, and layer composition. Together, these libraries offer a cost-effective solution that maintains high performance while supporting the full range of image processing functionalities required by the application.
-
-Benchmarking results showed that the Emgu CV and SkiaSharp combination significantly reduced processing times for common tasks compared to other alternatives. For example, image conversion times decreased to 490 ms, compared to 2754 ms for ImageSharp. Additionally, pixel iteration tasks were completed more efficiently, making this combination an optimal choice for scenarios requiring both image processing and rendering.
-
-In conclusion, the Emgu CV and SkiaSharp combination was selected as the best alternative to ImageSharp, based on its balance of performance, functionality, ease of integration, and cost. This decision ensures that the project can maintain its image processing capabilities without incurring high licensing fees, while also benefiting from enhanced performance and flexibility.
+This thesis provides a comprehensive analysis of the image processing libraries under study, identifying their main strengths, weaknesses, and best-use scenarios. The results compile efficiency measurements, integration challenges, and cost considerations, offering a practical guide for software developers, hardware engineers, and decision-makers navigating the expanding market of image processing solutions across industries.
\ No newline at end of file
diff --git a/sources/references.bib b/sources/references.bib
new file mode 100644
index 0000000000000000000000000000000000000000..045f0d8beaade2eb17855fafb5edda8cc6565ecd
--- /dev/null
+++ b/sources/references.bib
@@ -0,0 +1,299 @@
+@article{ferreira_generic_2024,
+	title = {Generic {FPGA} {Pre}-{Processing} {Image} {Library} for {Industrial} {Vision} {Systems}},
+	volume = {24},
+	issn = {1424-8220},
+	doi = {10.3390/s24186101},
+	abstract = {Currently, there is a demand for an increase in the diversity and quality of new products reaching the consumer market. This fact imposes new challenges for different industrial sectors, including processes that integrate machine vision. Hardware acceleration and improvements in processing efficiency are becoming crucial for vision-based algorithms to follow the complexity growth of future industrial systems. This article presents a generic library of pre-processing filters for execution in field-programmable gate arrays (FPGAs) to reduce the overall image processing time in vision systems. An experimental setup based on the Zybo Z7 Pcam 5C Demo project was developed and used to validate the filters described in VHDL (VHSIC hardware description language). Finally, a comparison of the execution times using GPU and CPU platforms was performed as well as an evaluation of the integration of the current work in an industrial application. The results showed a decrease in the pre-processing time from milliseconds to nanoseconds when using FPGAs.},
+	language = {eng},
+	number = {18},
+	journal = {Sensors (Basel, Switzerland)},
+	author = {Ferreira, Diogo and Moutinho, Filipe and Matos-Carvalho, João P. and Guedes, Magno and Deusdado, Pedro},
+	month = sep,
+	year = {2024},
+	pmid = {39338846},
+	pmcid = {PMC11436133},
+	keywords = {FPGA, GPU, industrial vision systems, pre-processing image library},
+	pages = {6101},
+	file = {Full Text PDF:C\:\\Users\\SFI19\\Zotero\\storage\\ZJ9KMZND\\Ferreira et al. - 2024 - Generic FPGA Pre-Processing Image Library for Industrial Vision Systems.pdf:application/pdf},
+}
+
+@article{vieira_performance_2024,
+	title = {Performance {Evaluation} of {Computer} {Vision} {Algorithms} in a {Programmable} {Logic} {Controller}: {An} {Industrial} {Case} {Study}},
+	volume = {24},
+	copyright = {http://creativecommons.org/licenses/by/3.0/},
+	issn = {1424-8220},
+	shorttitle = {Performance {Evaluation} of {Computer} {Vision} {Algorithms} in a {Programmable} {Logic} {Controller}},
+	url = {https://www.mdpi.com/1424-8220/24/3/843},
+	doi = {10.3390/s24030843},
+	abstract = {This work evaluates the use of a programmable logic controller (PLC) from Phoenix Contact’s PLCnext ecosystem as an image processing platform. PLCnext controllers provide the functions of “classical” industrial controllers, but they are based on the Linux operating system, also allowing for the use of software tools usually associated with computers. Visual processing applications in the Python programming language using the OpenCV library are implemented in the PLC using this feature. This research is focused on evaluating the use of this PLC as an image processing platform, particularly for industrial machine vision applications. The methodology is based on comparing the PLC’s performance against a computer using standard image processing algorithms. In addition, a demonstration application based on a real-world scenario for quality control by visual inspection is presented. It is concluded that despite significant limitations in processing power, the simultaneous use of the PLC as an industrial controller and image processing platform is feasible for applications of low complexity and undemanding cycle times, providing valuable insights and benchmarks for the scientific community interested in the convergence of industrial automation and computer vision technologies.},
+	language = {en},
+	number = {3},
+	urldate = {2025-02-16},
+	journal = {Sensors},
+	author = {Vieira, Rodrigo and Silva, Dino and Ribeiro, Eliseu and Perdigoto, Luís and Coelho, Paulo Jorge},
+	month = jan,
+	year = {2024},
+	note = {Number: 3
+Publisher: Multidisciplinary Digital Publishing Institute},
+	keywords = {computer vision, OpenCV, performance benchmark, programmable logic controllers},
+	pages = {843},
+	file = {Full Text PDF:C\:\\Users\\SFI19\\Zotero\\storage\\QXKXLRJL\\Vieira et al. - 2024 - Performance Evaluation of Computer Vision Algorithms in a Programmable Logic Controller An Industri.pdf:application/pdf},
+}
+
+@article{ciora_industrial_2014,
+	title = {Industrial {Applications} of {Image} {Processing}},
+	volume = {64},
+	copyright = {http://creativecommons.org/licenses/by-nc-nd/3.0/},
+	issn = {1583-7149},
+	url = {https://www.sciendo.com/article/10.2478/aucts-2014-0004},
+	doi = {10.2478/aucts-2014-0004},
+	abstract = {Abstract
+            The recent advances in sensors quality and processing power provide us with excellent tools for designing more complex image processing and pattern recognition tasks. In this paper we review the existing applications of image processing and pattern recognition in industrial engineering. First we define the role of vision in an industrial. Then a dissemination of some image processing techniques, feature extraction, object recognition and industrial robotic guidance is presented. Moreover, examples of implementations of such techniques in industry are presented. Such implementations include automated visual inspection, process control, part identification, robots control. Finally, we present some conclusions regarding the investigated topics and directions for future investigation},
+	language = {en},
+	number = {1},
+	urldate = {2025-02-16},
+	journal = {ACTA Universitatis Cibiniensis},
+	author = {Ciora, Radu Adrian and Simion, Carmen Mihaela},
+	month = nov,
+	year = {2014},
+	pages = {17--21},
+	annote = {[TLDR] This paper defines the role of vision in an industrial, a dissemination of some image processing techniques, feature extraction, object recognition and industrial robotic guidance, and examples of implementations of such techniques in industry.},
+	file = {Full Text:C\:\\Users\\SFI19\\Zotero\\storage\\ZM5USF5C\\Ciora and Simion - 2014 - Industrial Applications of Image Processing.pdf:application/pdf},
+}
+
+@inproceedings{kulpa_universal_1981,
+	address = {Berlin, Heidelberg},
+	title = {Universal digital image processing systems in europe — {A} comparative survey},
+	isbn = {978-3-540-38665-0},
+	doi = {10.1007/3-540-10705-3_1},
+	abstract = {In the paper, a selected group of eleven universal (computer based) image processing systems is surveyed and compared. They constitute a seemingly representative sample of the vast variety of such systems built in the last decade in European countries. The survey covers systems built for research purposes, either in image processing as such or for some other specific problem area, as well as more practically-oriented ones, including a commercially available routine picture analyzer. An overall classification of their general aims as well as basic parameters and features of their hardware structure, software support and application area is given.},
+	language = {en},
+	booktitle = {Digital {Image} {Processing} {Systems}},
+	publisher = {Springer},
+	author = {Kulpa, Zenon},
+	editor = {Bloc, Leonard and Kulpa, Zenon},
+	year = {1981},
+	keywords = {Digital Holography, Image Memory, Image Processing System, Image Processor, Picture Processing},
+	pages = {1--20},
+	file = {Full Text PDF:C\:\\Users\\SFI19\\Zotero\\storage\\3FV26UT6\\Kulpa - 1981 - Universal digital image processing systems in europe — A comparative survey.pdf:application/pdf},
+}
+
+@misc{ma_new_2024,
+	title = {A {New} {Image} {Quality} {Database} for {Multiple} {Industrial} {Processes}},
+	url = {http://arxiv.org/abs/2401.13956},
+	doi = {10.48550/arXiv.2401.13956},
+	abstract = {Recent years have witnessed a broader range of applications of image processing technologies in multiple industrial processes, such as smoke detection, security monitoring, and workpiece inspection. Different kinds of distortion types and levels must be introduced into an image during the processes of acquisition, compression, transmission, storage, and display, which might heavily degrade the image quality and thus strongly reduce the final display effect and clarity. To verify the reliability of existing image quality assessment methods, we establish a new industrial process image database (IPID), which contains 3000 distorted images generated by applying different levels of distortion types to each of the 50 source images. We conduct the subjective test on the aforementioned 3000 images to collect their subjective quality ratings in a well-suited laboratory environment. Finally, we perform comparison experiments on IPID database to investigate the performance of some objective image quality assessment algorithms. The experimental results show that the state-of-the-art image quality assessment methods have difficulty in predicting the quality of images that contain multiple distortion types.},
+	urldate = {2025-02-16},
+	publisher = {arXiv},
+	author = {Ma, Xuanchao and Jiang, Yanlin and Liu, Hongyan and Zhou, Chengxu and Gu, Ke},
+	month = feb,
+	year = {2024},
+	note = {arXiv:2401.13956 [cs]},
+	keywords = {Computer Science - Computer Vision and Pattern Recognition},
+	file = {Preprint PDF:C\:\\Users\\SFI19\\Zotero\\storage\\BBGNJSRM\\Ma et al. - 2024 - A New Image Quality Database for Multiple Industrial Processes.pdf:application/pdf;Snapshot:C\:\\Users\\SFI19\\Zotero\\storage\\TRN3MJPN\\2401.html:text/html},
+}
+
+@article{chisholm_fpga-based_2020,
+	title = {{FPGA}-{Based} {Design} for {Real}-{Time} {Crack} {Detection} {Based} on {Particle} {Filter}},
+	volume = {16},
+	issn = {1941-0050},
+	url = {https://ieeexplore.ieee.org/document/8888239},
+	doi = {10.1109/TII.2019.2950255},
+	abstract = {Due to the related hazards, costly down-time, and detection inconsistencies associated with manual visual inspection for cracks in structures, there has been an emergence of real-time systems capable of conducting inspections. Advanced robotic systems have been used for scanning structures located in remote areas or that pose significant hazards to personnel. However, due to their inherent resource limitations, the current solution is to transfer all applicable sensor data to a ground station where detection will occur at a later time, thereby preventing real-time decision based on the results. To allow on-board decision making, in this article, a crack detection particle filter is optimized for parallel computation and implemented onto an Field-programmable gate array (FPGA). This article shows that an FPGA holds distinct tradeoffs between computational speed, energy consumption, and physical footprint compared to that of traditional CPU designs, allowing for it to be an ideal system for autonomous applications.},
+	number = {9},
+	urldate = {2025-02-16},
+	journal = {IEEE Transactions on Industrial Informatics},
+	author = {Chisholm, Tim and Lins, Romulo and Givigi, Sidney},
+	month = sep,
+	year = {2020},
+	note = {Conference Name: IEEE Transactions on Industrial Informatics},
+	keywords = {Autonomous application, Cameras, embedded system, Field programmable gate arrays, Hardware, Image color analysis, Image edge detection, image processing, Informatics, on-board processing, particle filter, Real-time systems},
+	pages = {5703--5711},
+	file = {Full Text PDF:C\:\\Users\\SFI19\\Zotero\\storage\\SEFZNXWY\\Chisholm et al. - 2020 - FPGA-Based Design for Real-Time Crack Detection Based on Particle Filter.pdf:application/pdf},
+}
+
+@article{perez_super-resolution_2014,
+	title = {Super-{Resolution} in {Plenoptic} {Cameras} {Using} {FPGAs}},
+	volume = {14},
+	copyright = {http://creativecommons.org/licenses/by/3.0/},
+	issn = {1424-8220},
+	url = {https://www.mdpi.com/1424-8220/14/5/8669},
+	doi = {10.3390/s140508669},
+	abstract = {Plenoptic cameras are a new type of sensor that extend the possibilities of current commercial cameras allowing 3D refocusing or the capture of 3D depths. One of the limitations of plenoptic cameras is their limited spatial resolution. In this paper we describe a fast, specialized hardware implementation of a super-resolution algorithm for plenoptic cameras. The algorithm has been designed for field programmable graphic array (FPGA) devices using VHDL (very high speed integrated circuit (VHSIC) hardware description language). With this technology, we obtain an acceleration of several orders of magnitude using its extremely high-performance signal processing capability through parallelism and pipeline architecture. The system has been developed using generics of the VHDL language. This allows a very versatile and parameterizable system. The system user can easily modify parameters such as data width, number of microlenses of the plenoptic camera, their size and shape, and the super-resolution factor. The speed of the algorithm in FPGA has been successfully compared with the execution using a conventional computer for several image sizes and different 3D refocusing planes.},
+	language = {en},
+	number = {5},
+	urldate = {2025-02-16},
+	journal = {Sensors},
+	author = {Pérez, Joel and Magdaleno, Eduardo and Pérez, Fernando and Rodríguez, Manuel and Hernández, David and Corrales, Jaime},
+	month = may,
+	year = {2014},
+	note = {Number: 5
+Publisher: Multidisciplinary Digital Publishing Institute},
+	keywords = {field programmable graphic array (FPGA), lightfield, plenoptic cameras, super-resolution},
+	pages = {8669--8685},
+	file = {Full Text PDF:C\:\\Users\\SFI19\\Zotero\\storage\\CIH6HRCH\\Pérez et al. - 2014 - Super-Resolution in Plenoptic Cameras Using FPGAs.pdf:application/pdf},
+}
+
+@article{sahebi_distributed_2023,
+	title = {Distributed large-scale graph processing on {FPGAs}},
+	volume = {10},
+	issn = {2196-1115},
+	url = {https://doi.org/10.1186/s40537-023-00756-x},
+	doi = {10.1186/s40537-023-00756-x},
+	abstract = {Processing large-scale graphs is challenging due to the nature of the computation that causes irregular memory access patterns. Managing such irregular accesses may cause significant performance degradation on both CPUs and GPUs. Thus, recent research trends propose graph processing acceleration with Field-Programmable Gate Arrays (FPGA). FPGAs are programmable hardware devices that can be fully customised to perform specific tasks in a highly parallel and efficient manner. However, FPGAs have a limited amount of on-chip memory that cannot fit the entire graph. Due to the limited device memory size, data needs to be repeatedly transferred to and from the FPGA on-chip memory, which makes data transfer time dominate over the computation time. A possible way to overcome the FPGA accelerators’ resource limitation is to engage a multi-FPGA distributed architecture and use an efficient partitioning scheme. Such a scheme aims to increase data locality and minimise communication between different partitions. This work proposes an FPGA processing engine that overlaps, hides and customises all data transfers so that the FPGA accelerator is fully utilised. This engine is integrated into a framework for using FPGA clusters and is able to use an offline partitioning method to facilitate the distribution of large-scale graphs. The proposed framework uses Hadoop at a higher level to map a graph to the underlying hardware platform. The higher layer of computation is responsible for gathering the blocks of data that have been pre-processed and stored on the host’s file system and distribute to a lower layer of computation made of FPGAs. We show how graph partitioning combined with an FPGA architecture will lead to high performance, even when the graph has Millions of vertices and Billions of edges. 
In the case of the PageRank algorithm, widely used for ranking the importance of nodes in a graph, compared to state-of-the-art CPU and GPU solutions, our implementation is the fastest, achieving a speedup of 13 compared to 8 and 3 respectively. Moreover, in the case of the large-scale graphs, the GPU solution fails due to memory limitations while the CPU solution achieves a speedup of 12 compared to the 26x achieved by our FPGA solution. Other state-of-the-art FPGA solutions are 28 times slower than our proposed solution. When the size of a graph limits the performance of a single FPGA device, our performance model shows that using multi-FPGAs in a distributed system can further improve the performance by about 12x. This highlights our implementation efficiency for large datasets not fitting in the on-chip memory of a hardware device.},
+	number = {1},
+	urldate = {2025-02-16},
+	journal = {Journal of Big Data},
+	author = {Sahebi, Amin and Barbone, Marco and Procaccini, Marco and Luk, Wayne and Gaydadjiev, Georgi and Giorgi, Roberto},
+	month = jun,
+	year = {2023},
+	keywords = {FPGA, Accelerators, Distributed computing, Graph processing, Grid partitioning},
+	pages = {95},
+	file = {Full Text PDF:C\:\\Users\\SFI19\\Zotero\\storage\\AB9AGRTA\\Sahebi et al. - 2023 - Distributed large-scale graph processing on FPGAs.pdf:application/pdf;Snapshot:C\:\\Users\\SFI19\\Zotero\\storage\\6CZYMMLI\\s40537-023-00756-x.html:text/html},
+}
+
+@article{reis_developments_2023,
+	title = {Developments of {Computer} {Vision} and {Image} {Processing}: {Methodologies} and {Applications}},
+	volume = {15},
+	copyright = {http://creativecommons.org/licenses/by/3.0/},
+	issn = {1999-5903},
+	shorttitle = {Developments of {Computer} {Vision} and {Image} {Processing}},
+	url = {https://www.mdpi.com/1999-5903/15/7/233},
+	doi = {10.3390/fi15070233},
+	abstract = {The rapid advancement of technology has enabled a vast and ever-growing number of computer applications in real scenarios of our daily life [...]},
+	language = {en},
+	number = {7},
+	urldate = {2025-02-16},
+	journal = {Future Internet},
+	author = {Reis, Manuel J. C. S.},
+	month = jul,
+	year = {2023},
+	note = {Number: 7
+Publisher: Multidisciplinary Digital Publishing Institute},
+	keywords = {n/a},
+	pages = {233},
+	file = {Full Text PDF:C\:\\Users\\SFI19\\Zotero\\storage\\Y255DIDG\\Reis - 2023 - Developments of Computer Vision and Image Processing Methodologies and Applications.pdf:application/pdf},
+}
+
+@article{sandvik_comparative_2024,
+	title = {A {Comparative} {Literature} {Review} of {Machine} {Learning} and {Image} {Processing} {Techniques} {Used} for {Scaling} and {Grading} of {Wood} {Logs}},
+	volume = {15},
+	copyright = {http://creativecommons.org/licenses/by/3.0/},
+	issn = {1999-4907},
+	url = {https://www.mdpi.com/1999-4907/15/7/1243},
+	doi = {10.3390/f15071243},
+	abstract = {This literature review assesses the efficacy of image-processing techniques and machine-learning models in computer vision for wood log grading and scaling. Four searches were conducted in four scientific databases, yielding a total of 1288 results, which were narrowed down to 33 relevant studies. The studies were categorized according to their goals, including log end grading, log side grading, individual log scaling, log pile scaling, and log segmentation. The studies were compared based on the input used, choice of model, model performance, and level of autonomy. This review found a preference for images over point cloud representations for logs and an increase in camera use over laser scanners. It identified three primary model types: classical image-processing algorithms, deep learning models, and other machine learning models. However, comparing performance across studies proved challenging due to varying goals and metrics. Deep learning models showed better performance in the log pile scaling and log segmentation goal categories. Cameras were found to have become more popular over time compared to laser scanners, possibly due to stereovision cameras taking over for laser scanners for sampling point cloud datasets. Classical image-processing algorithms were consistently used, deep learning models gained prominence in 2018, and other machine learning models were used in studies published between 2010 and 2018.},
+	language = {en},
+	number = {7},
+	urldate = {2025-02-16},
+	journal = {Forests},
+	author = {Sandvik, Yohann Jacob and Futsæther, Cecilia Marie and Liland, Kristian Hovde and Tomic, Oliver},
+	month = jul,
+	year = {2024},
+	note = {Number: 7
+Publisher: Multidisciplinary Digital Publishing Institute},
+	keywords = {computer vision, artificial intelligence, deep learning, log grading, log scaling, wood science},
+	pages = {1243},
+	file = {Full Text PDF:C\:\\Users\\SFI19\\Zotero\\storage\\XU87ATGD\\Sandvik et al. - 2024 - A Comparative Literature Review of Machine Learning and Image Processing Techniques Used for Scaling.pdf:application/pdf},
+}
+
+@article{ziaja_benchmarking_2021,
+	title = {Benchmarking {Deep} {Learning} for {On}-{Board} {Space} {Applications}},
+	volume = {13},
+	copyright = {https://creativecommons.org/licenses/by/4.0/},
+	issn = {2072-4292},
+	url = {https://www.mdpi.com/2072-4292/13/19/3981},
+	doi = {10.3390/rs13193981},
+	abstract = {Benchmarking deep learning algorithms before deploying them in hardware-constrained execution environments, such as imaging satellites, is pivotal in real-life applications. Although a thorough and consistent benchmarking procedure can allow us to estimate the expected operational abilities of the underlying deep model, this topic remains under-researched. This paper tackles this issue and presents an end-to-end benchmarking approach for quantifying the abilities of deep learning algorithms in virtually any kind of on-board space applications. The experimental validation, performed over several state-of-the-art deep models and benchmark datasets, showed that different deep learning techniques may be effectively benchmarked using the standardized approach, which delivers quantifiable performance measures and is highly configurable. We believe that such benchmarking is crucial in delivering ready-to-use on-board artificial intelligence in emerging space applications and should become a standard tool in the deployment chain.},
+	language = {en},
+	number = {19},
+	urldate = {2025-02-16},
+	journal = {Remote Sensing},
+	author = {Ziaja, Maciej and Bosowski, Piotr and Myller, Michal and Gajoch, Grzegorz and Gumiela, Michal and Protich, Jennifer and Borda, Katherine and Jayaraman, Dhivya and Dividino, Renata and Nalepa, Jakub},
+	month = oct,
+	year = {2021},
+	pages = {3981},
+	file = {Full Text:C\:\\Users\\SFI19\\Zotero\\storage\\S6J3PVUF\\Ziaja et al. - 2021 - Benchmarking Deep Learning for On-Board Space Applications.pdf:application/pdf},
+}
+
+@article{wu_precision_2022,
+	title = {Precision control of polyurethane filament drafting and winding based on machine vision},
+	volume = {10},
+	issn = {2296-4185},
+	url = {https://www.frontiersin.org/journals/bioengineering-and-biotechnology/articles/10.3389/fbioe.2022.978212/full},
+	doi = {10.3389/fbioe.2022.978212},
+	abstract = {In the biomedical field, polyurethane (PU) is widely used in interventional catheters, artificial hearts, artificial blood vessels, orthopedic materials, medical adhesives, and other medical devices. In this paper, a method based on machine vision was proposed to control the drafting and winding accuracy of PU filament in order to solve the problem of centrifugal runout when the mold rotates. The centrifugal runout of the mold directly affected the preparation efficiency and quality of long artificial blood vessel by wet spinning. Through non-contact real-time detection of the filament diameter and the angle between the axis of filament and the axis of mold, the motion parameters of the two motors driving the moving platform and the drafting roller could be adjusted in real time to achieve the purpose of online real-time control of filament drafting and winding accuracy. The vision control method proposed in this paper was used to carry out the PU tube preparation experiment. The visual measurement results of the filament diameter and the included angle were compared with the manual measurement results. The average value of the diameter error is 0.0096mm, and the average value of winding angle is 0.4777°. The results proved the accuracy of the visual measuring method and testified it feasible to using machine vision instead of manual method to detect filament diameter and winding angle. Properties of the prepared PU tube were tested and analyzed. The filament diameter measured by the 3D microscope was about 0.87 mm and significantly smaller than the filament diameter before winding. This indicated that the winding was uniform, the extrusion was tight, and the adhesion was good.},
+	language = {English},
+	urldate = {2025-02-16},
+	journal = {Frontiers in Bioengineering and Biotechnology},
+	author = {Wu, Shilin and Yang, Huayu and Liu, Xiangyan and Jia, Rui},
+	month = sep,
+	year = {2022},
+	note = {Publisher: Frontiers},
+	keywords = {Drafting Control, PU Filament, Tube preparation, Visual measurement, Winding Control},
+	file = {Full Text PDF:C\:\\Users\\SFI19\\Zotero\\storage\\B6I8IJIT\\Wu et al. - 2022 - Precision control of polyurethane filament drafting and winding based on machine vision.pdf:application/pdf},
+}
+
+@article{zhu_machine_2022,
+	title = {A {Machine} {Vision} {Development} {Framework} for {Product} {Appearance} {Quality} {Inspection}},
+	volume = {12},
+	copyright = {http://creativecommons.org/licenses/by/3.0/},
+	issn = {2076-3417},
+	url = {https://www.mdpi.com/2076-3417/12/22/11565},
+	doi = {10.3390/app122211565},
+	abstract = {Machine vision systems are an important part of modern intelligent manufacturing systems, but due to their complexity, current vision systems are often customized and inefficiently developed. Generic closed-source machine vision development software is often poorly targeted. To meet the extensive needs of product appearance quality inspection in industrial production and to improve the development efficiency and reliability of such systems, this paper designs and implements a general machine vision software framework. This framework is easy to adapt to different hardware devices for secondary development, reducing the workload in generic functional modules and program architecture design, which allows developers to focus on the design and implementation of image-processing algorithms. Based on the MVP software design principles, the framework abstracts and implements the modules common to machine vision-based product appearance quality inspection systems, such as user management, inspection configuration, task management, image acquisition, database configuration, GUI, multi-threaded architecture, IO communication, etc. Using this framework and adding the secondary development of image-processing algorithms, we successfully apply the framework to the quality inspection of the surface defects of bolts.},
+	language = {en},
+	number = {22},
+	urldate = {2025-02-16},
+	journal = {Applied Sciences},
+	author = {Zhu, Qiuyu and Zhang, Yunxiao and Luan, Jianbing and Hu, Liheng},
+	month = nov,
+	year = {2022},
+	note = {Number: 22
+Publisher: Multidisciplinary Digital Publishing Institute},
+	keywords = {appearance quality, industrial inspection, machine vision, software framework},
+	pages = {11565},
+	file = {Full Text PDF:C\:\\Users\\SFI19\\Zotero\\storage\\FRCDALEP\\Zhu et al. - 2022 - A Machine Vision Development Framework for Product Appearance Quality Inspection.pdf:application/pdf},
+}
+
+@article{sardar_role_2012,
+	title = {A role of computer system for comparative analysis using image processing to promote agriculture business},
+	url = {https://www.semanticscholar.org/paper/A-role-of-computer-system-for-comparative-analysis-Sardar/6e2fd48a1025b68951f511abe05f8451f753eb47},
+	abstract = {The computer system play a very important role using image processing for agriculture business using technological approaches for food processing \& food engineering during production in agriculture. In this research paper a updated of hassu algorithm is proposed to quality analysis and detect defects of fruits(i.e. Guava, orange, desi berry) further which can be implemted for grading and sorting of a particular fruit (i.e. same category) by its visual color of surface using the non-destructive technique to automated quality verification systems for agricultural products with the help of digital images which involve visual examination and inspection of color, size, shape, defects and texture are highlighted further for image processing. So here color is the key and unique attribute for determine the quality, where intensity value of pixel of digital image is recognize using MATLAB Keyword: fruits, color, pixel value, camera},
+	urldate = {2025-02-16},
+	journal = {International journal of engineering research and technology},
+	author = {Sardar, Hassan},
+	month = nov,
+	year = {2012},
+	annote = {[TLDR] A updated of hassu algorithm is proposed to quality analysis and detect defects of fruits and intensity value of pixel of digital image is recognize using MATLAB Keyword: fruits, color, pixel value, camera.},
+	file = {Full Text PDF:C\:\\Users\\SFI19\\Zotero\\storage\\58SULJWP\\Sardar - 2012 - A role of computer system for comparative analysis using image processing to promote agriculture bus.pdf:application/pdf},
+}
+
+@article{lai_image_2001,
+	title = {Image {Processing} {Libraries}},
+	abstract = {There are a wide variety of image process-ing library implementations. Three imple-mentations are discussed in this paper, in the hope of showing the diverse nature of im-age processing libraries. Datacube provides a hardware and vendor-specific image pro-cessing library, known as ImageFlow, devel-oped to support their pipeling image process-ing hardware card. Vector, Signal and Image Processing Library (VSIPL) is a hardware-neutral approach with a focus on portabil-ity. Finally Vision with Generic Algorithms (VIGRA) is built on the principles of generic programming and is therefore flexible without incurring large speed penalties.},
+	author = {Lai, Bing-Chang and McKerrow, Phillip},
+	month = jan,
+	year = {2001},
+	file = {Full Text PDF:C\:\\Users\\SFI19\\Zotero\\storage\\5F9V4RSE\\Lai et al. - 2001 - Image Processing Libraries.pdf:application/pdf},
+}
+
+@article{rao_comparative_2023,
+	title = {A {Comparative} {Analysis} of {Deep} {Learning} {Frameworks} and {Libraries}},
+	volume = {11},
+	copyright = {Copyright (c) 2023 M. Nagabhushana  Rao},
+	issn = {2147-6799},
+	url = {https://ijisae.org/index.php/IJISAE/article/view/2707},
+	abstract = {Deep learning has become a popular approach for solving complex problems in various fields, including image recognition, natural language processing, and speech recognition. As a result, numerous deep learning frameworks and libraries have been developed, each with its unique strengths and weaknesses. Choosing the right framework and library for a given application is essential for achieving optimal performance and accuracy. This study aims to provide a comparative analysis of deep learning frameworks and libraries based on their ease of use, computational efficiency, flexibility, and performance. The study evaluates six popular deep-learning frameworks and libraries, including TensorFlow, Keras, PyTorch, Caffe, MXNet, and Theano. The evaluation process includes the implementation of deep learning models using each framework, training, and testing on benchmark datasets, and collecting evaluation metrics. The study uses several benchmark datasets, including CIFAR-10, ImageNet, and MNIST. The study compares the evaluated deep learning frameworks and libraries in terms of their ease of use, computational efficiency, flexibility, and performance. The study also discusses the impact of the evaluated deep learning frameworks and libraries on the performance and accuracy of the developed models, highlighting the trade-offs and limitations of each framework. The results show that TensorFlow and PyTorch are the most popular and widely used frameworks due to their flexibility, ease of use, and strong community support. This study has several implications for practitioners in the field of deep learning, highlighting the importance of the selection of the appropriate framework and library for the development of successful models. The study also contributes new insights and knowledge to the field of deep learning and suggests future research directions for improving and extending the research in new directions. 
+Overall, this study provides valuable information for researchers and practitioners seeking to evaluate and select the best deep-learning framework and library for their specific needs.},
+	language = {en},
+	number = {2s},
+	urldate = {2025-02-16},
+	journal = {International Journal of Intelligent Systems and Applications in Engineering},
+	author = {Rao, M. Nagabhushana},
+	month = jan,
+	year = {2023},
+	note = {Number: 2s},
+	keywords = {TensorFlow},
+	pages = {337--342},
+	file = {Full Text PDF:C\:\\Users\\SFI19\\Zotero\\storage\\25G2NS2A\\Rao - 2023 - A Comparative Analysis of Deep Learning Frameworks and Libraries.pdf:application/pdf},
+}