diff --git a/Fazeli_Shahroudi-Sepehr-Mastersthesis.tex b/Fazeli_Shahroudi-Sepehr-Mastersthesis.tex
index a3692cf10c25006406b54f32282007de19fc0073..d943d75ba113592c22cb33dba92c01e0f5564aff 100644
--- a/Fazeli_Shahroudi-Sepehr-Mastersthesis.tex
+++ b/Fazeli_Shahroudi-Sepehr-Mastersthesis.tex
@@ -51,40 +51,51 @@
 \pagenumbering{roman}
 \input{./sources/title.tex}
 \input{./sources/declaration.tex}
-
-
-
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
 \tableofcontents
 
 \cleardoublepage%
 \setcounter{page}{1}
 \pagenumbering{arabic}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
 % \input{main}
 \input{./sources/Abstract.tex}
 
 % \chapter{Introduction}
-\input{chapters/1-Introduction.tex} % Separate file or replace with content directly here
+\input{chapters/1-Introduction.tex}
 
 % \chapter{Literature Review}
-\input{chapters/2-Literature_Review.tex} % Background or Literature Review
+% \input{chapters/2-Literature_Review.tex}
+
+% \chapter{Methodology}
+% \input{chapters/3-Methodology.tex}
 
 % \chapter{Methodology}
-\input{chapters/3-Methodology.tex} % Methods or approach to the research
+\input{chapters/2-Methodology.tex}
+
+% \chapter{Implementation}
+\input{chapters/3-Implementation.tex}
+
+% \chapter{Results}
+\input{chapters/4-Results.tex}
+
+% \chapter{Discussion}
+\input{chapters/5-Discussion.tex}
 
 % \chapter{Evaluation of Alternatives}
-\input{chapters/4-Evaluation_of_Alternatives.tex} % Findings or data analysis
+% \input{chapters/4-Evaluation_of_Alternatives.tex}
 
 % \chapter{Analysis and Discussion}
-\input{chapters/5-Analysis_and_Discussion.tex} % Analysis of the results
+% \input{chapters/5-Analysis_and_Discussion.tex}
 
 % \chapter{Conclusion and Recommendations}
-\input{chapters/6-Conclusion_and_Recommendations.tex} % Summary and future work
+% \input{chapters/6-Conclusion_and_Recommendations.tex}
 
-% Appendices (Optional)
-\appendix
 % \chapter{Appendices}
-\input{chapters/Appendices.tex} % Additional information, data, or figures
+\input{chapters/Appendices.tex} 
 
 \bibliographystyle{IEEEtran}
 \bibliography{references}
diff --git a/chapters/1-Introduction.tex b/chapters/1-Introduction.tex
index b9b37ae2329e0a3b60cf958437285698c4442d9b..b4f51b422c091ce5f407097ba2c4aa60642afd62 100755
--- a/chapters/1-Introduction.tex
+++ b/chapters/1-Introduction.tex
@@ -1,24 +1,11 @@
 \chapter{Introduction}
 
-\section{Background}
-The purpose of this investigation is to identify and evaluate potential alternatives to ImageSharp for image processing. Currently, ImageSharp costs \$5,000 per year, which impacts our pricing structure. This review explores cost-effective and efficient alternatives.
+\input{sections/Chapter-1-sections/General-Introduction.tex}
 
-\section{Problem Statement}
-ImageSharp has limitations regarding cost and performance. These limitations motivate the search for a viable alternative that balances cost, functionality, and performance.
+\input{sections/Chapter-1-sections/Relevance.tex}
 
-\section{Research Objectives}
-The objectives are:
-\begin{itemize}
-    \item Identify cost-effective alternatives.
-    \item Evaluate alternatives based on functionality and performance.
-\end{itemize}
+\input{sections/Chapter-1-sections/Aim-and-Objectives.tex}
 
-\section{Thesis Structure}
-This thesis is organized as follows:
-\begin{itemize}
-    \item Chapter 2 provides a literature review of image processing libraries.
-    \item Chapter 3 describes the methodology.
-    \item Chapter 4 evaluates the alternatives.
-    \item Chapter 5 discusses the analysis and insights.
-    \item Chapter 6 concludes with recommendations.
-\end{itemize}
+\input{sections/Chapter-1-sections/Research-Questions.tex}
+
+\input{sections/Chapter-1-sections/Related-Work.tex}
\ No newline at end of file
diff --git a/chapters/2-Methodology.tex b/chapters/2-Methodology.tex
new file mode 100644
index 0000000000000000000000000000000000000000..0ad4332be2a3e7fa1f17fc414b0f0648f49fe78b
--- /dev/null
+++ b/chapters/2-Methodology.tex
@@ -0,0 +1 @@
+\chapter{Methodology}
\ No newline at end of file
diff --git a/chapters/3-Implementation.tex b/chapters/3-Implementation.tex
new file mode 100644
index 0000000000000000000000000000000000000000..0a4c866095f70335a7bc9c8e6b911c6bdd2649e3
--- /dev/null
+++ b/chapters/3-Implementation.tex
@@ -0,0 +1 @@
+\chapter{Implementation}
\ No newline at end of file
diff --git a/chapters/4-Results.tex b/chapters/4-Results.tex
new file mode 100644
index 0000000000000000000000000000000000000000..53a2e9fb774ad7591199e9095ead87c097223ff5
--- /dev/null
+++ b/chapters/4-Results.tex
@@ -0,0 +1 @@
+\chapter{Results}
\ No newline at end of file
diff --git a/chapters/5-Discussion.tex b/chapters/5-Discussion.tex
new file mode 100644
index 0000000000000000000000000000000000000000..b1e9d6f2c06791c3e520834d47f881759f0e6533
--- /dev/null
+++ b/chapters/5-Discussion.tex
@@ -0,0 +1 @@
+\chapter{Discussion}
\ No newline at end of file
diff --git a/chapters/2-Literature_Review.tex b/outdated/2-Literature_Review.tex
similarity index 100%
rename from chapters/2-Literature_Review.tex
rename to outdated/2-Literature_Review.tex
diff --git a/chapters/3-Methodology.tex b/outdated/3-Methodology.tex
similarity index 100%
rename from chapters/3-Methodology.tex
rename to outdated/3-Methodology.tex
diff --git a/chapters/4-Evaluation_of_Alternatives.tex b/outdated/4-Evaluation_of_Alternatives.tex
similarity index 100%
rename from chapters/4-Evaluation_of_Alternatives.tex
rename to outdated/4-Evaluation_of_Alternatives.tex
diff --git a/chapters/5-Analysis_and_Discussion.tex b/outdated/5-Analysis_and_Discussion.tex
similarity index 100%
rename from chapters/5-Analysis_and_Discussion.tex
rename to outdated/5-Analysis_and_Discussion.tex
diff --git a/chapters/6-Conclusion_and_Recommendations.tex b/outdated/6-Conclusion_and_Recommendations.tex
similarity index 100%
rename from chapters/6-Conclusion_and_Recommendations.tex
rename to outdated/6-Conclusion_and_Recommendations.tex
diff --git a/sections/Chapter-1-sections/Aim-and-Objectives.tex b/sections/Chapter-1-sections/Aim-and-Objectives.tex
new file mode 100644
index 0000000000000000000000000000000000000000..471684514a87f9ec44c38f8e52134d15b836f457
--- /dev/null
+++ b/sections/Chapter-1-sections/Aim-and-Objectives.tex
@@ -0,0 +1,67 @@
+\section{ Aim of the Study and Its Implications for Selecting an Image Processing Tool}
+
+This study was initiated to compare a broad range of image processing libraries based on performance, functionality, and ease of integration. Although the investigation was partly motivated by considerations around the ImageSharp license, the primary goal is to establish a general framework for evaluating different tools in the field. By assessing key metrics such as image conversion speed, pixel iteration efficiency, memory consumption, and development effort, the research aims to provide a balanced perspective that assists developers, engineers, and decision-makers in selecting the most appropriate image processing tool for their projects.
+
+\subsection{ Research Goals and Objectives}
+
+At its core, the study sought to answer the question: “Which image processing library best meets the diverse needs of modern applications?” To do so, several key objectives were identified:
+
+\begin{enumerate}
+    \item \textbf{Provide a Framework for Educated Choices:} 
+        The research aimed to create a framework that helps users evaluate image processing tools based on defined metrics. By comparing factors such as processing speed, memory consumption, development effort, and integration ease, the study aimed to demystify the trade-offs that come with adopting any given tool. This approach allows users to align their choices with their performance needs and project constraints, rather than making decisions solely based on cost considerations. As highlighted in the investigation, while saving on licensing fees is beneficial, the broader picture includes aspects like processing efficiency and long-term maintainability.
+    \item \textbf{Compare a Wide Range of Alternatives:} 
+        ImageSharp is one of many tools available for image processing. The study examined alternatives including OpenImageIO, SkiaSharp, Magick.NET, Emgu CV, MagicScaler, and several others. Each library was assessed against a set of criteria, such as its ability to handle tasks like image loading, pixel manipulation, resizing, and image composition. By comparing these libraries side-by-side, the study provides a nuanced view that helps practitioners understand not only what each tool can do but also the potential gaps that might exist depending on the application’s requirements.
+    \item \textbf{Define Clear Performance and Functional Metrics:}
+        A significant goal of the study was to establish quantifiable metrics that could be used to assess the performance of each image processing library. Metrics such as image conversion time, pixel iteration efficiency, and memory usage were used as benchmarks. For instance, the study measured how long it takes for a tool to load an image, perform a conversion (e.g., from JPEG to PNG), and iterate through pixels for operations like converting to grayscale. Such detailed benchmarking is instrumental in understanding the real-world performance of each library and is critical for users who need to balance speed with resource consumption.
+    \item \textbf{Assist in Tool Selection for Varied Requirements:}
+        Beyond performance metrics, the study was designed to consider the broader context of software integration. Factors such as ease of implementation, the learning curve for developers, compatibility with existing systems, and community support were all taken into account. This holistic view means that the research is not just about raw performance numbers but also about the practicalities of deploying and maintaining these tools in production environments.
+\end{enumerate}
+
+\subsection{ Methodology and Benchmarking}
+
+To achieve these objectives, the study adopted a multi-faceted methodological approach that combined qualitative assessments with quantitative benchmarks. The research was structured into several key phases:
+
+\subsubsection{ Establishing Functional Criteria}
+
+The first step was to outline the core functionalities required from an image processing library. These functionalities included image loading and creation, pixel-level manipulation, image transformation (such as cropping, resizing, and color conversion), and the encoding\\decoding of various image formats. Each library was then evaluated on how well it supports these functions. For example, while ImageSharp provides an elegant and fluent API for chaining operations like cloning, mutating, and resizing images, other tools like Emgu CV or SkiaSharp may offer advantages in raw performance or specific tasks such as advanced 2D rendering.
+
+\subsubsection{ Performance and Memory Benchmarking}
+
+Quantitative performance metrics were a central component of the study. Two key tests were developed:
+
+\begin{itemize}
+    \item \textbf{Image Conversion Test:} This test measured the time taken to load an image, convert it to a different format, and save the result. It simulates a typical workflow in many image processing applications and serves as a proxy for real-world performance. The results indicated significant differences between libraries. For instance, SkiaSharp showed excellent performance in image conversion tasks with both the fastest conversion times and minimal memory allocation, making it an attractive option for performance-critical applications.
+    \item \textbf{Pixel Iteration Test:} Many image processing tasks require iterating over each pixel—for example, when applying filters or performing color adjustments. The study evaluated how long each library took to perform such operations and the associated memory footprint. Although some tools demonstrated faster pixel iteration times, the overall memory consumption varied widely, highlighting the trade-off between speed and resource usage.
+\end{itemize}
+
+\subsubsection{ Estimation of Development Effort}
+
+Recognizing that performance is not the sole criterion for tool selection, the study also estimated the development effort required to integrate each library into an existing application. This included considerations such as the ease of understanding the API, the availability of documentation and community support, and the potential need for custom code to bridge functionality gaps. For example, while some libraries offered powerful processing capabilities, they might require significant custom development to integrate seamlessly into a .NET environment or to support specific image formats.
+
+\subsection{ Practical Implications for Tool Selection}
+
+The comprehensive evaluation detailed in this study has several practical implications for anyone looking to select an image processing tool:
+
+\subsubsection{ Balancing Performance with Practicality}
+
+The metrics established in the study—ranging from processing times to memory usage—provide a clear picture of the strengths and weaknesses of each library. This information is invaluable when balancing the need for high-performance image processing against practical considerations such as ease of integration and long-term maintenance. For instance, a company that prioritizes rapid image conversion and low memory consumption might lean towards SkiaSharp, while an organization needing advanced image manipulation capabilities and robust community support might find Emgu CV more appealing.
+
+\subsubsection{ Making Informed Trade-Offs}
+
+One of the standout contributions of the study is its ability to help users make informed trade-offs. Rather than making decisions based on a single metric, the evaluation presents a multi-dimensional view that incorporates performance, development effort, and functional capabilities. This approach ensures that users can select a tool that best fits their unique requirements, whether that means prioritizing speed, minimizing development overhead, or ensuring compatibility with existing workflows.
+
+\subsubsection{ Extending Beyond Cost Savings}
+
+While cost savings—such as the €5000 per year saving associated with avoiding ImageSharp’s licensing fees—are certainly a factor, the study underscores that financial considerations should not be the sole driver of decision-making. The true value of an image processing tool lies in its ability to meet specific technical and operational requirements. By providing a detailed comparison of several alternatives, the research emphasizes that factors like ease of integration, scalability, and overall performance are equally, if not more, important. This holistic approach helps organizations avoid the pitfall of selecting a tool based solely on its cost.
+
+\subsubsection{ Guiding Future Developments and Integrations}
+
+The insights gained from the study are not only applicable to current technology choices but also serve as a guide for future developments in image processing. The detailed benchmarks and performance analyses can inform future projects, helping developers understand where improvements can be made or which features are most critical. Additionally, the study’s approach to evaluating development effort and integration challenges provides a roadmap for how future research can build on these findings to further refine the selection process.
+
+\subsection{ Conclusion}
+
+In conclusion, this research offers a detailed and methodical framework for comparing a diverse range of image processing libraries. By focusing on critical performance indicators—such as image conversion efficiency, pixel manipulation speed, and memory usage—alongside practical considerations for integration, the study provides actionable insights that transcend mere numerical comparisons. This comprehensive evaluation enables practitioners to appreciate the subtle differences and inherent trade-offs between various tools, ensuring that the selected library meets specific operational requirements and supports long-term scalability.
+
+The findings underscore the importance of adopting a multi-dimensional evaluation approach. Rather than basing decisions solely on isolated performance metrics, the research illustrates how a balanced view—integrating both technical capabilities and practical implementation challenges—can lead to more robust and adaptable solutions. This perspective is essential in a field where evolving technologies and shifting project demands necessitate both flexibility and precision in tool selection.
+
+Ultimately, the insights derived from this investigation empower developers, engineers, and decision-makers to navigate the complex landscape of image processing technologies with confidence. By providing a thorough, balanced comparison of various libraries, the study serves as a valuable resource for making informed decisions that address current needs while also laying a strong foundation for future innovation and development in image processing.
\ No newline at end of file
diff --git a/sections/Chapter-1-sections/General-Introduction.tex b/sections/Chapter-1-sections/General-Introduction.tex
new file mode 100644
index 0000000000000000000000000000000000000000..c7216f6d3809468f39e7732f9ef8324c6319a131
--- /dev/null
+++ b/sections/Chapter-1-sections/General-Introduction.tex
@@ -0,0 +1,105 @@
+\section{ The Significance of Image Processing in Modern Industry}
+
+Digital image processing has emerged as a cornerstone of modern industrial applications, revolutionizing the way industries operate and innovate. From quality control in manufacturing to advanced simulations in aerospace, the ability to process and analyze images digitally has unlocked unprecedented efficiencies and capabilities. This field, which involves the manipulation and analysis of images using algorithms, has evolved significantly over the past few decades, driven by advancements in computing power, algorithm development, and the proliferation of digital imaging devices.
+
+The significance of digital image processing in industrial applications cannot be overstated. In manufacturing, for instance, image processing is integral to quality assurance processes, where it is used to detect defects, measure product dimensions, and ensure compliance with stringent standards. This capability not only enhances product quality but also reduces waste and operational costs. In the automotive industry, image processing is pivotal in the development of autonomous vehicles, where it aids in object detection, lane departure warnings, and pedestrian recognition. Similarly, in the healthcare sector, digital image processing is used in medical imaging technologies such as MRI and CT scans, enabling more accurate diagnoses and treatment planning.
+
+The evolution of digital image processing has been marked by several key developments. Initially, the field was limited by the computational resources available, with early applications focusing on basic image enhancement and restoration. However, the advent of powerful processors and the development of sophisticated algorithms have expanded the scope of image processing to include complex tasks such as pattern recognition, 3D reconstruction, and real-time image analysis. The integration of artificial intelligence and machine learning has further propelled the field, allowing for the development of intelligent systems capable of learning from data and improving over time.
+
+For industries like Dassault Systems, which operates at the forefront of aerospace, defense, and industrial engineering, a comparative study of image processing libraries is crucial. These libraries, which provide pre-built functions and tools for image analysis, vary significantly in terms of performance, ease of use, and functionality. Selecting the right library can have a profound impact on the efficiency and effectiveness of image processing tasks. For instance, libraries such as OpenCV, TensorFlow, and MATLAB offer different strengths and weaknesses, and understanding these differences is essential for optimizing industrial applications.
+
+A comparative study of these libraries not only aids in selecting the most suitable tools for specific tasks but also highlights areas for potential improvement and innovation. By analyzing the performance of different libraries in various scenarios, industries can identify gaps in current technologies and drive the development of new solutions that better meet their needs. Moreover, such studies contribute to the broader field of digital image processing by providing insights into best practices and emerging trends.
+
+% References
+
+% 1. Gonzalez, R. C., & Woods, R. E. (2008). Digital Image Processing. Pearson Prentice Hall.
+% 2. Jain, A. K. (1989). Fundamentals of Digital Image Processing. Prentice Hall.
+% 3. Bradski, G., & Kaehler, A. (2008). Learning OpenCV: Computer Vision with the OpenCV Library. O'Reilly Media.
+% 4. Russ, J. C. (2011). The Image Processing Handbook. CRC Press.
+% 5. Goodfellow, I., Bengio, Y., & Courville, A. (2016). Deep Learning. MIT Press.
+% 6. Szeliski, R. (2010). Computer Vision: Algorithms and Applications. Springer.
+
+\subsection{Evolution and Impact of Digital Image Processing}
+
+Digital image processing has evolved significantly since its inception, transforming from a niche scientific endeavor into a cornerstone of modern technology with applications spanning numerous industries. This essay outlines the historical development of digital image processing, highlighting key advancements and their impact on industrial innovation.
+
+\subsubsection{Early Beginnings}
+
+The origins of digital image processing can be traced back to the 1920s and 1930s with the development of television technology, which laid the groundwork for electronic image capture and transmission. However, it wasn't until the 1960s that digital image processing began to take shape as a distinct field. The launch of the first digital computers provided the necessary computational power to process images digitally. During this period, NASA played a pivotal role by using digital image processing to enhance images of the moon's surface captured by the Ranger 7 spacecraft in 1964. This marked one of the first significant applications of digital image processing, demonstrating its potential for scientific and exploratory purposes.
+
+\subsubsection{The 1970s and 1980s: Theoretical Foundations and Practical Applications}
+
+The 1970s saw the establishment of theoretical foundations for digital image processing. Researchers developed algorithms for image enhancement, restoration, and compression. The Fast Fourier Transform (FFT), introduced by Cooley and Tukey in 1965, became a fundamental tool for image processing, enabling efficient computation of image transformations. This period also witnessed the development of the first commercial applications, such as medical imaging systems. The introduction of Computed Tomography (CT) in 1972 revolutionized medical diagnostics by providing detailed cross-sectional images of the human body, showcasing the life-saving potential of digital image processing.
+
+\subsubsection{The 1990s: The Rise of Computer Vision}
+
+The 1990s marked a significant shift towards computer vision, a subfield of digital image processing focused on enabling machines to interpret visual data. This era saw the development of algorithms for object recognition, motion detection, and 3D reconstruction. The introduction of the JPEG standard in 1992 facilitated the widespread adoption of digital images by providing an efficient method for image compression, crucial for the burgeoning internet era. The decade also saw advancements in facial recognition technology, which laid the groundwork for future applications in security and personal identification.
+
+\subsubsection{The 2000s: Machine Learning and Image Processing}
+
+The 2000s witnessed the integration of machine learning techniques with digital image processing, leading to significant improvements in image analysis and interpretation. The development of Support Vector Machines (SVM) and neural networks enabled more accurate image classification and pattern recognition. This period also saw the emergence of digital cameras and smartphones, which democratized image capture and sharing, further driving the demand for advanced image processing techniques.
+
+\subsubsection{The 2010s to Present: Deep Learning and Industrial Innovation}
+
+The advent of deep learning in the 2010s revolutionized digital image processing. Convolutional Neural Networks (CNNs), popularized by the success of AlexNet in the ImageNet competition in 2012, dramatically improved the accuracy of image recognition tasks. This breakthrough spurred innovation across various industries. In healthcare, deep learning algorithms are now used for early detection of diseases through medical imaging, improving patient outcomes. In the automotive industry, image processing is a critical component of autonomous vehicle systems, enabling real-time object detection and navigation.
+
+In recent years, digital image processing has expanded into areas such as augmented reality (AR) and virtual reality (VR), enhancing user experiences in gaming, education, and training. The integration of image processing with artificial intelligence continues to drive innovation, with applications in fields such as agriculture, where drones equipped with image processing capabilities monitor crop health and optimize yields.
+
+% References
+
+% 1. Cooley, J. W., & Tukey, J. W. (1965). An algorithm for the machine calculation of complex Fourier series. Mathematics of Computation, 19(90), 297-301.
+% 2. Hounsfield, G. N. (1973). Computerized transverse axial scanning (tomography): Part 1. Description of system. British Journal of Radiology, 46(552), 1016-1022.
+% 3. LeCun, Y., Bengio, Y., & Hinton, G. (2015). Deep learning. Nature, 521(7553), 436-444.
+% 4. Krizhevsky, A., Sutskever, I., & Hinton, G. E. (2012). ImageNet classification with deep convolutional neural networks. Advances in Neural Information Processing Systems, 25, 1097-1105.
+
+\subsection{Current Applications of Image Processing in Industry}
+
+Image processing, a critical component of computer vision, has become an indispensable tool across various industries, driving advancements in productivity, quality control, and automation. This essay explores the utilization of image processing in several key sectors, emphasizing applications that demand high precision and efficiency.
+
+\subsubsection{Manufacturing and Quality Control} 
+
+In the manufacturing industry, image processing is pivotal for quality control and defect detection. Automated visual inspection systems utilize high-resolution cameras and sophisticated algorithms to detect defects in products at a speed and accuracy unattainable by human inspectors. For instance, in semiconductor manufacturing, image processing is used to inspect wafers for defects, ensuring that only flawless products proceed to the next production stage. This not only enhances product quality but also reduces waste and operational costs. A study by Zhang et al. (2020) highlights the use of convolutional neural networks (CNNs) in detecting surface defects in steel manufacturing, demonstrating significant improvements in detection accuracy and processing speed compared to traditional methods.
+
+\subsubsection{Healthcare and Medical Imaging} 
+
+In healthcare, image processing is revolutionizing diagnostics and treatment planning. Techniques such as MRI, CT scans, and X-rays rely heavily on image processing to enhance image quality and extract meaningful information. For example, in radiology, image processing algorithms help in the early detection of diseases like cancer by improving the clarity and contrast of medical images, allowing for more accurate diagnoses. A research paper by Litjens et al. (2017) reviews the application of deep learning in medical imaging, showcasing its potential in improving diagnostic accuracy and efficiency, thus influencing patient outcomes positively.
+
+\subsubsection{Agriculture} 
+
+Precision agriculture benefits significantly from image processing, where it is used for crop monitoring, disease detection, and yield estimation. Drones equipped with multispectral cameras capture images of fields, which are then processed to assess plant health and detect stress factors such as pests or nutrient deficiencies. This enables farmers to make informed decisions, optimizing resource use and improving crop yields. A case study by Maimaitijiang et al. (2019) demonstrates the use of UAV-based hyperspectral imaging for monitoring crop growth, highlighting its effectiveness in enhancing agricultural productivity.
+
+\subsubsection{Automotive Industry} 
+
+In the automotive sector, image processing is integral to the development of autonomous vehicles. Advanced driver-assistance systems (ADAS) rely on image processing to interpret data from cameras and sensors, enabling features such as lane departure warnings, adaptive cruise control, and automatic parking. These systems enhance vehicle safety and pave the way for fully autonomous driving. A study by Janai et al. (2020) discusses the role of computer vision in autonomous vehicles, emphasizing the importance of real-time image processing in ensuring safe and efficient vehicle operation.
+
+\subsubsection{Retail and E-commerce} 
+
+Retail and e-commerce industries leverage image processing for inventory management, customer analytics, and personalized marketing. In inventory management, image processing systems track stock levels and identify misplaced items, streamlining operations and reducing labor costs. In customer analytics, facial recognition and sentiment analysis provide insights into customer behavior and preferences, enabling personalized marketing strategies. A paper by Ren et al. (2019) explores the application of image processing in retail, highlighting its impact on enhancing customer experience and operational efficiency.
+
+% References
+
+% - Zhang, Y., Wang, S., & Liu, Y. (2020). Surface defect detection using convolutional neural networks. *Journal of Manufacturing Processes*, 49, 1-9.
+% - Litjens, G., Kooi, T., Bejnordi, B. E., Setio, A. A. A., Ciompi, F., Ghafoorian, M., ... & van Ginneken, B. (2017). A survey on deep learning in medical image analysis. *Medical Image Analysis*, 42, 60-88.
+% - Maimaitijiang, M., Sagan, V., Sidike, P., Hartling, S., Esposito, F., Fritschi, F. B., & Prasad, S. (2019). Soybean yield prediction from UAV using multimodal data fusion and deep learning. *Remote Sensing of Environment*, 233, 111-117.
+% - Janai, J., Güney, F., Behl, A., & Geiger, A. (2020). Computer vision for autonomous vehicles: Problems, datasets and state of the art. *Foundations and Trends® in Computer Graphics and Vision*, 12(1-3), 1-308.
+% - Ren, S., He, K., Girshick, R., & Sun, J. (2019). Faster R-CNN: Towards real-time object detection with region proposal networks. *IEEE Transactions on Pattern Analysis and Machine Intelligence*, 39(6), 1137-1149.
+
+\subsection{The Strategic Importance of Image Processing Libraries}
+
+In the rapidly evolving landscape of industrial applications, the demand for efficient, adaptable, and scalable image processing libraries has become increasingly critical. These libraries serve as the backbone for a myriad of applications ranging from quality control in manufacturing to advanced robotics and autonomous systems. The benefits of employing such libraries are manifold, including reduced time-to-market, enhanced product quality, and cost efficiency, all of which are pivotal for maintaining competitive advantage in the industrial sector.
+
+Firstly, efficient image processing libraries significantly reduce the time-to-market for new products and technologies. In industries where innovation cycles are short and competition is fierce, the ability to quickly develop and deploy new solutions is crucial. Efficient libraries streamline the development process by providing pre-built, optimized functions that developers can readily integrate into their systems. This reduces the need for writing complex algorithms from scratch, thereby accelerating the development timeline. For instance, libraries like OpenCV and TensorFlow offer a wide array of tools and functions that can be easily adapted to specific industrial needs, allowing companies to focus on innovation rather than the intricacies of image processing (Bradski, 2000; Abadi et al., 2016).
+
+Adaptability is another critical factor that underscores the importance of these libraries. Industrial environments are often dynamic, with varying requirements and conditions that necessitate flexible solutions. Scalable image processing libraries can be tailored to meet specific needs, whether it involves adjusting to different hardware configurations or integrating with other software systems. This adaptability ensures that companies can respond swiftly to changes in market demands or technological advancements without overhauling their entire system architecture. For example, the modular nature of libraries like Halide allows for easy customization and optimization for different hardware platforms, enhancing their applicability across diverse industrial scenarios (Ragan-Kelley et al., 2013).
+
+Moreover, the use of scalable image processing libraries contributes to enhanced product quality. In industries such as automotive manufacturing or pharmaceuticals, precision and accuracy are paramount. Advanced image processing capabilities enable more rigorous quality control processes, ensuring that defects are detected and rectified early in the production cycle. This not only improves the quality of the final product but also minimizes waste and reduces the likelihood of costly recalls. Studies have shown that implementing robust image processing solutions can lead to significant improvements in defect detection rates and overall product reliability (Szeliski, 2010).
+
+Cost efficiency is another significant advantage offered by these libraries. By leveraging open-source or commercially available image processing tools, companies can reduce the costs associated with software development and maintenance. These libraries often come with extensive documentation and community support, which can further reduce the need for specialized training and technical support. Additionally, the ability to scale solutions according to demand means that companies can optimize their resource allocation, investing only in the capabilities they need at any given time. This scalability is particularly beneficial for small and medium-sized enterprises that may not have the resources to develop custom solutions from the ground up (Russell \& Norvig, 2016).
+
+% References
+
+% - Bradski, G. (2000). The OpenCV Library. *Dr. Dobb's Journal of Software Tools*.
+% - Abadi, M., Barham, P., Chen, J., Chen, Z., Davis, A., Dean, J., ... & Zheng, X. (2016). TensorFlow: A System for Large-Scale Machine Learning. In *12th USENIX Symposium on Operating Systems Design and Implementation (OSDI 16)* (pp. 265-283).
+% - Ragan-Kelley, J., Barnes, C., Adams, A., Paris, S., Durand, F., & Amarasinghe, S. (2013). Halide: A Language and Compiler for Optimizing Parallelism, Locality, and Recomputation in Image Processing Pipelines. *ACM SIGPLAN Notices*, 48(6), 519-530.
+% - Szeliski, R. (2010). *Computer Vision: Algorithms and Applications*. Springer Science & Business Media.
+% - Russell, S., & Norvig, P. (2016). *Artificial Intelligence: A Modern Approach*. Pearson.
\ No newline at end of file
diff --git a/sections/Chapter-1-sections/Related-Work.tex b/sections/Chapter-1-sections/Related-Work.tex
new file mode 100644
index 0000000000000000000000000000000000000000..7d2393700efa87c91fefc9a1b0e8014c7d1d2f9e
--- /dev/null
+++ b/sections/Chapter-1-sections/Related-Work.tex
@@ -0,0 +1,223 @@
+\section{Related Work}
+
+In this chapter, we review and synthesize research studies that relate to the evaluation of image processing libraries and their applications in industrial and specialized contexts. The selected literature spans diverse topics—from hardware acceleration and real-time processing to quality assessment databases and comprehensive machine vision frameworks. Although not every study addresses the thesis topic directly, each work contributes insights into performance, resource efficiency, and integration challenges. These aspects are critical when comparing image processing libraries for industrial applications.
+
+%%%
+
+\subsection{Distributed Large-Scale Graph Processing on FPGAs (Sahebi et al., 2023)}
+
+Sahebi et al. (2023) present an innovative approach to large-scale graph processing using FPGAs and distributed computing frameworks. Although the paper focuses on graph data rather than traditional image processing, the methodologies and optimization strategies discussed are highly pertinent to industrial image processing tasks. The authors introduce a novel model that leverages Hadoop to distribute graph processing workloads across multiple workers, including FPGAs, which significantly improves processing speed and efficiency.
+
+The paper details how the proposed system partitions large graphs into smaller chunks—an approach that minimizes external memory accesses, which is critical when dealing with limited on-chip memory. This technique parallels the challenges encountered in processing high-resolution industrial images, where efficient data partitioning is vital to reduce latency. The study demonstrates speedups of up to 2x, 4.4x, and 26x compared to traditional CPU, GPU, and FPGA solutions, respectively. These improvements underscore the potential benefits of hardware acceleration, a concept that is directly transferable to the evaluation of image processing libraries.
+
+Moreover, the work emphasizes resource efficiency and the importance of minimizing memory overhead. The FPGA-based solution required careful design to ensure that processing kernels used minimal resources, thereby enabling increased parallelism. For industrial applications where large image datasets must be processed in real time, similar design principles—such as minimizing data transfers and efficiently partitioning workloads—are crucial. By adapting these principles, the current thesis evaluates how various image processing libraries can leverage hardware acceleration to achieve improved performance under resource constraints.
+
+In summary, Sahebi et al. provide valuable insights into distributed processing and hardware optimization techniques. Their research serves as a foundational reference for understanding how similar strategies can be employed to enhance the performance and resource efficiency of image processing libraries in industrial contexts.
+
+%%%
+
+\subsection{A New Image Quality Database for Multiple Industrial Processes (Ma et al., 2024)}
+
+Ma et al. (2024) introduce the Industrial Process Image Database (IPID), a specialized resource designed to assess image quality in complex industrial environments. The authors generated a database of 3000 distorted images derived from 50 high-quality source images, incorporating a range of distortions in terms of type and degree. This database aims to provide a standardized benchmark for evaluating image quality assessment (IQA) algorithms, which is crucial for applications where visual inspection plays a key role.
+
+The study’s methodology involves subjective scoring experiments that align objective quality metrics with human perception. Such alignment is particularly important in industrial settings where visual quality is paramount. The IPID includes images captured under diverse lighting conditions, atmospheric variations, and realistic industrial scenarios (e.g., production lines and warehouses). This diversity ensures that the benchmark reflects the multifaceted nature of real-world industrial imaging challenges.
+
+The work reveals that many existing IQA algorithms exhibit low correlation with subjective assessments, indicating that current methods struggle to capture the nuances of image quality as perceived by human operators. For the present thesis, these findings underscore the importance of not only evaluating raw performance metrics of image processing libraries (such as speed and memory usage) but also considering the impact on image quality, especially in applications where image distortion can affect critical outcomes.
+
+Ma et al.’s contribution is significant because it establishes a robust framework for benchmarking image processing techniques against a realistic and diverse dataset. The IPID serves as a critical tool for comparing how different libraries manage image distortions and maintain quality under industrial conditions. Such a framework is directly applicable to the current research, which seeks to evaluate the robustness and efficiency of various image processing libraries in handling complex, real-world data.
+
+%%%
+
+\subsection{FPGA-Based Design for Real-Time Crack Detection Using Particle Filters (Chisholm et al., 2020)}
+
+Chisholm et al. (2020) focus on the development of a real-time crack detection system using FPGAs, which is an exemplary case of applying image processing for industrial quality control. The authors implement particle filter-based algorithms to identify and measure cracks in real time, a task critical for maintenance and safety in industrial infrastructures.
+
+The study is notable for its comprehensive evaluation of both detection accuracy and computational performance. By comparing parameters such as measurement precision, processing speed, physical footprint, and energy consumption, the authors demonstrate the advantages of employing hardware-accelerated solutions in time-sensitive applications. Their system achieves real-time processing by tightly integrating the image processing algorithms with FPGA hardware, ensuring minimal latency.
+
+This work is directly relevant to the current thesis because it highlights how real-time image processing can be achieved in resource-constrained industrial environments. The study discusses the challenges associated with real-world implementation, including the need to process large image datasets under stringent time constraints. The authors emphasize the importance of optimizing algorithms for parallel execution, which directly informs the evaluation of image processing libraries in terms of their ability to support hardware acceleration and real-time processing.
+
+Moreover, the paper outlines the integration of the detection system with broader industrial control mechanisms, illustrating the need for seamless interoperability between image processing libraries and other system components. Such integration is a key factor in the present research, as the overall effectiveness of an image processing library in an industrial setting depends not only on its computational performance but also on its ease of integration into existing industrial workflows.
+
+In conclusion, Chisholm et al. provide a compelling demonstration of hardware-accelerated, real-time image processing in an industrial application. Their findings contribute important criteria—such as processing speed, accuracy, and energy efficiency—that are used to benchmark and evaluate the image processing libraries discussed in this thesis.
+
+%%%
+
+\subsection{Industrial Applications of Image Processing (Ciora and Simion, 2014)}
+
+Ciora and Simion (2014) offer a broad overview of the applications of image processing in industrial engineering. Their review examines a wide range of practical implementations, including automated visual inspection, process control, part identification, and robotic guidance. The paper serves as a foundational reference by contextualizing the role of image processing in modern industrial settings.
+
+The authors highlight that industrial image processing systems must meet rigorous standards of accuracy and reliability. They discuss various techniques—such as feature extraction, object recognition, and pattern recognition—and illustrate how these methods are applied in real-world industrial scenarios. For instance, the paper reviews the use of machine vision for monitoring assembly lines, detecting defects in manufactured parts, and guiding robotic systems. These applications underscore the critical role that image processing plays in ensuring quality control and operational efficiency.
+
+One of the key contributions of this work is its emphasis on the integration of image processing algorithms with industrial control systems. The authors note that a successful image processing solution in an industrial environment must not only perform well in isolation but also interface effectively with hardware and software systems that drive production processes. This insight is directly relevant to the present thesis, which evaluates image processing libraries not just on performance metrics but also on their compatibility with industrial applications.
+
+Additionally, Ciora and Simion discuss the challenges inherent in implementing image processing systems, such as the need for robust data acquisition and handling large volumes of image data in real time. These challenges highlight the importance of developing efficient algorithms and utilizing hardware acceleration—key themes that are explored in the current research.
+
+Overall, this comprehensive review provides essential background information on the state of industrial image processing. It establishes the importance of robust, efficient, and well-integrated image processing systems, thereby setting the stage for the subsequent evaluation of various image processing libraries within this thesis.
+
+%%%
+
+\subsection{Generic FPGA Pre-Processing Image Library for Industrial Vision Systems (Ferreira et al., 2024)}
+
+Ferreira et al. (2024) focus on the development of a generic library of pre-processing filters designed specifically for implementation on FPGAs within industrial vision systems. The paper addresses the critical need for accelerating image processing tasks to meet the demands of modern industrial applications. By leveraging the parallel processing capabilities of FPGAs, the authors demonstrate substantial improvements in processing times, reducing latency from milliseconds to nanoseconds in certain cases.
+
+A key aspect of the study is its emphasis on resource efficiency. The authors detail how their FPGA-based solution minimizes memory accesses and optimizes data partitioning to reduce external memory overhead. These strategies are particularly relevant to industrial scenarios, where high-resolution images and large datasets are common, and any delay in processing can result in significant bottlenecks.
+
+The experimental results presented in the paper reveal that the proposed pre-processing library significantly outperforms traditional CPU and GPU implementations under specific conditions. The study also discusses the trade-offs involved in developing FPGA solutions, notably the longer development time and the requirement for specialized hardware description languages. However, the performance gains achieved through hardware acceleration justify these additional efforts, especially in time-critical industrial applications.
+
+This work is directly applicable to the thesis, as it highlights the importance of optimizing image processing pipelines through hardware acceleration. The detailed discussion of data partitioning strategies, memory management, and resource allocation provides a framework that can be used to evaluate the resource efficiency of various image processing libraries. Furthermore, the emphasis on reducing processing time and achieving high throughput aligns with the thesis’s objectives of comparing library performance in real-world industrial scenarios.
+
+In summary, Ferreira et al. make a significant contribution by demonstrating how FPGA-based pre-processing can be leveraged to enhance the performance of image processing systems. Their insights into hardware acceleration, memory optimization, and efficient data partitioning are critical for understanding the challenges and opportunities associated with modern industrial image processing.
+
+%%%
+
+\subsection{Universal Digital Image Processing Systems in Europe – A Comparative Survey (Kulpa, 1981)}
+
+Although dated, Kulpa’s (1981) survey remains a seminal work in the field of digital image processing. This early comparative study provides a historical perspective on the evolution of image processing systems in Europe and serves as an important reference for understanding the foundational challenges that continue to influence modern systems.
+
+Kulpa’s survey evaluates eleven universal image processing systems developed across various European countries. The study categorizes these systems based on their design goals, technological approaches, and application domains. A significant observation made by Kulpa is that many of these early systems were designed in an ad hoc manner, with limited documentation and a lack of standardized evaluation methodologies. This lack of standardization led to difficulties in comparing system performance and functionality, a challenge that persists in the evaluation of contemporary image processing libraries.
+
+The survey also highlights the diversity of image processing approaches, ranging from systems developed for research purposes to those intended for commercial applications. Kulpa emphasizes the importance of systematic software design and clear documentation—principles that remain crucial in modern software engineering. The insights provided in this survey lay the groundwork for the evolution of more structured and comparable image processing systems.
+
+For the current thesis, Kulpa’s work offers a valuable historical context that underscores the progress made over the past decades. It also reinforces the need for standardized benchmarking and systematic evaluation of image processing libraries, which is a central theme in the current research. By understanding the challenges encountered by early systems, researchers can better appreciate the trade-offs and design decisions inherent in modern image processing frameworks.
+
+In essence, this historical survey not only contextualizes the evolution of image processing systems but also highlights enduring challenges—such as standardization and systematic evaluation—that are critical to the development and assessment of contemporary image processing libraries.
+
+%%%
+
+\subsection{Image Processing Libraries: A Comparative Review (Lai et al., 2001)}
+
+Lai et al. (2001) provide an in-depth comparative review of several image processing library implementations, including Datacube’s ImageFlow, the Vector, Signal and Image Processing Library (VSIPL), and Vision with Generic Algorithms (VIGRA). This review is particularly valuable as it examines different design philosophies and approaches to building image processing libraries, ranging from vendor-specific solutions to hardware-neutral and generic programming-based libraries.
+
+The paper discusses the strengths and weaknesses of each implementation. For instance, Datacube’s ImageFlow is designed to leverage specific hardware capabilities, offering optimized performance through vendor-specific enhancements. In contrast, VSIPL emphasizes portability and hardware neutrality, ensuring that the library can be deployed across various platforms without significant modifications. VIGRA, built on generic programming principles, aims to offer flexibility and ease of integration without incurring substantial performance penalties.
+
+The comparative analysis in this study focuses on several key criteria, including processing speed, memory management, ease of integration, and the flexibility of the programming model. Lai et al. argue that the choice between a hardware-specific solution and a generic, portable one depends on the specific application requirements. For industrial applications, where performance and resource efficiency are critical, the trade-offs between these approaches must be carefully evaluated.
+
+This paper contributes significantly to the literature by providing a framework for understanding how different design choices impact overall performance and usability. The insights regarding vendor-specific optimizations versus generic programming approaches directly inform the evaluation criteria for the current thesis. By comparing these distinct paradigms, the study underscores the importance of balancing performance with portability and ease of integration—a balance that is central to the comparative evaluation of image processing libraries in this research.
+
+Overall, Lai et al. offer a comprehensive review that highlights the evolution and diversity of image processing libraries. Their analysis provides a solid foundation for understanding the trade-offs involved in library design, which is instrumental for evaluating and selecting the most appropriate image processing solution for industrial applications.
+
+%%%
+
+\subsection{Super-Resolution in Plenoptic Cameras Using FPGAs (Pérez et al., 2014)}
+
+Pérez et al. (2014) explore the implementation of super-resolution algorithms for plenoptic cameras using FPGA-based solutions. Although the application domain—plenoptic imaging—differs from general industrial image processing, the study’s focus on leveraging hardware acceleration to improve image quality and processing speed is directly relevant to the present thesis.
+
+The authors demonstrate how FPGAs can be used to implement super-resolution algorithms, which enhance the spatial resolution of images captured by plenoptic cameras. Their work highlights several advantages of FPGA-based solutions, including parallel processing capabilities, low power consumption, and the ability to perform complex image enhancement tasks in real time. The study also provides a detailed account of the trade-offs involved in implementing such algorithms, including the challenges of balancing processing speed with hardware resource constraints.
+
+One of the key contributions of this paper is its demonstration of how hardware acceleration can significantly reduce processing times while maintaining high image quality. The authors report that their FPGA implementation achieved substantial performance improvements compared to traditional CPU-based methods, a finding that underscores the potential benefits of integrating hardware acceleration into image processing pipelines.
+
+For the current thesis, Pérez et al.’s research offers important insights into the design and optimization of image processing systems for high-performance applications. Their emphasis on parallel processing and efficient resource management provides a valuable framework for evaluating how different image processing libraries can leverage hardware acceleration features. Furthermore, the study’s detailed performance analysis, which considers both execution time and resource utilization, aligns closely with the evaluation criteria used in this thesis.
+
+In conclusion, the work by Pérez et al. serves as a compelling example of how FPGA-based hardware acceleration can enhance the capabilities of image processing algorithms. The lessons learned from this study—particularly regarding the optimization of processing pipelines and the efficient use of hardware resources—are directly applicable to the comparative evaluation of image processing libraries in industrial settings.
+
+Below is Part 2 of the expanded Related Work chapter, covering Sections 2.9 through 2.16 and concluding with an overall synthesis.
+
+%%%
+
+\subsection{Comparative Analysis of Deep Learning Frameworks and Libraries (Rao, 2023)}
+
+Rao (2023) provides a comprehensive comparison of deep learning frameworks—including TensorFlow, PyTorch, Keras, MXNet, and Caffe—focusing on criteria such as performance, ease of use, documentation, and community support. Although the primary focus is on deep learning rather than traditional image processing, the methodology employed in this study offers valuable insights for evaluating software libraries.
+
+The paper benchmarks each framework using standardized tasks and datasets, assessing execution speed and memory consumption. Rao’s analysis reveals that TensorFlow and PyTorch excel in high-performance scenarios, while Keras is noted for its accessibility to beginners. The systematic approach taken by Rao—employing both quantitative and qualitative metrics—serves as a model for how image processing libraries can be evaluated on similar dimensions. In the context of this thesis, the criteria used by Rao inform the selection of performance and usability metrics, particularly in environments where both deep learning and traditional image processing techniques may be integrated.
+
+%%%
+
+\subsection{Developments of Computer Vision and Image Processing: Methodologies and Applications (Reis, 2023)}
+
+Reis (2023) offers an editorial overview of recent advances in computer vision and image processing, emphasizing the evolution of methodologies and their application across various domains. This piece underscores the increasing integration of artificial intelligence and deep learning with classical image processing, and it highlights emerging trends that have influenced modern system design.
+
+Reis discusses a range of methodologies—from conventional algorithms to more recent deep learning-based techniques—and illustrates how these approaches are applied in areas such as object detection, segmentation, and quality inspection. Although the article is broad in scope, it provides critical context for the present thesis by outlining both the challenges and opportunities that arise when integrating diverse image processing techniques. The insights provided in this overview underscore the importance of methodological rigor and the need for comprehensive evaluation frameworks that encompass both accuracy and efficiency.
+
+%%%
+
+\subsection{Comparative Literature Review of Machine Learning and Image Processing Techniques for Wood Log Scaling and Grading (Sandvik et al., 2024)}
+
+Sandvik et al. (2024) conduct a systematic literature review that compares various machine learning and image processing techniques applied to the scaling and grading of wood logs. This review categorizes studies based on input types, algorithm choices, performance outcomes, and the level of autonomy in industrial applications.
+
+The authors highlight a trend towards the increased use of camera-based imaging as opposed to laser scanning, and they emphasize the superior performance of deep learning models in tasks such as log segmentation and grading. While the application domain is specific to wood logs, the review’s methodology—particularly the rigorous categorization and performance comparison—offers a template for evaluating image processing libraries in broader industrial contexts. The challenges identified in comparing heterogeneous approaches, such as varying datasets and evaluation criteria, also reinforce the need for standardized benchmarking protocols, an area that this thesis seeks to address.
+
+%%%
+
+\subsection{The Role of Computer Systems in Comparative Analysis Using Image Processing to Promote Agriculture Business (Sardar, 2012)}
+
+Sardar (2012) explores the application of image processing techniques for quality analysis in the agricultural sector, focusing specifically on the assessment of fruit quality. Although the agricultural context differs from general industrial applications, the underlying principles of computer vision for automated quality control are directly relevant.
+
+Sardar’s work describes a system that uses RGB color analysis to grade fruits, highlighting both the strengths and limitations of digital image processing for quality assessment. The paper discusses challenges such as variability in lighting conditions and the need for precise color calibration, issues that are also pertinent in industrial image processing scenarios. By addressing these challenges, Sardar’s study provides valuable lessons on designing robust image processing systems that can maintain accuracy and consistency—an insight that is integrated into the evaluation criteria for image processing libraries in this thesis.
+
+%%%
+
+\subsection{Performance Evaluation of Computer Vision Algorithms on Programmable Logic Controllers (Vieira et al., 2024)}
+
+Vieira et al. (2024) examine the feasibility of deploying computer vision algorithms on Programmable Logic Controllers (PLCs), which are widely used in industrial control systems. This study is particularly significant because it evaluates the performance of standard image processing algorithms when executed on hardware platforms with constrained resources.
+
+The authors compare the performance of PLC-based image processing with that of traditional computer systems, considering factors such as execution time, implementation complexity, and system robustness. The research identifies trade-offs between simplicity, reliability, and processing power, emphasizing that while PLCs may not offer the same raw performance as high-end computers, they are often sufficient for industrial applications that require tight integration with control systems.
+
+This paper is directly relevant to the current thesis, as it informs the discussion on resource efficiency and the practical challenges of implementing image processing libraries in industrial environments. The evaluation criteria developed by Vieira et al.—particularly regarding the balance between processing performance and ease of integration—are mirrored in the present research.
+
+%%%
+
+\subsection{Precision Control of Polyurethane Filament Drafting and Winding Based on Machine Vision (Wu et al., 2022)}
+
+Wu et al. (2022) explore the application of machine vision for precision control in the drafting and winding of polyurethane filaments. The study demonstrates how real-time image processing can be integrated into industrial manufacturing processes to enhance control accuracy and product quality.
+
+The authors detail the development of a system that synchronizes machine vision with control mechanisms to monitor and adjust the drafting process in real time. Key performance indicators such as detection accuracy, processing latency, and control responsiveness are evaluated to determine the system’s effectiveness. Wu et al. emphasize the importance of achieving high precision in industrial applications, where even minor deviations can lead to significant defects.
+
+The relevance of this study to the current thesis lies in its demonstration of how image processing libraries can be leveraged to achieve real-time control in manufacturing. The performance metrics and integration challenges discussed in this work provide a benchmark for evaluating similar capabilities in image processing libraries, particularly in terms of their suitability for real-time industrial applications.
+
+%%%
+
+\subsection{A Machine Vision Development Framework for Product Appearance Quality Inspection (Zhu et al., 2022)}
+
+Zhu et al. (2022) propose a comprehensive machine vision framework designed for product appearance quality inspection. This study addresses both the algorithmic and system integration aspects of machine vision in industrial settings, emphasizing the need for modular, reusable components that can be easily adapted to various inspection tasks.
+
+The framework developed by Zhu et al. incorporates a range of image processing techniques—from basic feature extraction and segmentation to advanced anomaly detection using deep learning. The authors stress that the effectiveness of such systems depends not only on the performance of individual image processing algorithms but also on the overall software architecture, including user interfaces, database management, and input/output communication.
+
+The modular design advocated by Zhu et al. is particularly relevant to the thesis, as it underscores the importance of evaluating image processing libraries not only on their computational performance but also on their ability to integrate into comprehensive industrial systems. The insights from this study inform the criteria for assessing scalability, ease of integration, and overall system robustness in the comparative evaluation conducted in this research.
+
+%%%
+
+\subsection{Benchmarking Deep Learning for On-Board Space Applications (Ziaja et al., 2021)}
+
+Ziaja et al. (2021) focus on benchmarking deep learning algorithms for hardware-constrained environments, such as those used in on-board space applications. While the domain of space imaging differs from industrial applications, the methodological rigor and benchmarking framework presented in this study offer valuable lessons for evaluating image processing libraries.
+
+The paper describes a detailed experimental setup in which various deep learning models are benchmarked on standardized datasets, with a focus on metrics such as execution time, resource utilization, and model accuracy. Ziaja et al. emphasize the importance of tailoring performance evaluations to the specific constraints of the hardware, a concept that is directly applicable to industrial image processing where systems often operate under limited computational resources.
+
+The study’s approach to parameter tuning, model optimization, and the use of standardized benchmarks provides a robust framework for performance evaluation. These methodologies are particularly useful for the present thesis, which seeks to develop a comprehensive, multidimensional evaluation of image processing libraries based on both performance and resource efficiency. The insights from Ziaja et al. reinforce the necessity of developing configurable benchmarking tools that can accurately capture the trade-offs inherent in deploying image processing systems on various hardware platforms.
+
+%%%
+
+\subsection{Synthesis and Future Directions}
+
+These studies illustrate that the optimal selection of an image processing library is highly context-dependent. For real-time industrial applications, factors such as processing speed, resource efficiency, and ease of integration are paramount. The comparative analyses provided by the reviewed literature underscore that no single library is universally superior; rather, the choice must be informed by specific application requirements and operational constraints.
+
+Several gaps and future research directions have been identified:
+
+\begin{itemize}
+    \item \textbf{Standardization of Benchmarks:} There remains a need for universally accepted benchmarking protocols that enable direct comparisons between different image processing libraries. Future research should focus on developing standardized test suites that account for both performance and resource utilization.
+    \item \textbf{Hybrid and Modular Approaches:} The literature suggests significant potential in combining the strengths of multiple libraries. Investigating hybrid solutions that integrate hardware acceleration with flexible software architectures could yield substantial improvements in industrial applications.
+    \item \textbf{Longitudinal Studies:} Most existing evaluations focus on short-term performance metrics. Long-term studies that assess the stability and scalability of image processing libraries in real-world industrial settings would provide valuable insights for practitioners.
+    \item \textbf{Integration with Emerging Technologies:} As new hardware platforms and acceleration techniques emerge (e.g., GPUs, AI accelerators, and advanced FPGAs), further research is needed to explore how these technologies can be seamlessly integrated with image processing libraries to optimize performance and efficiency.
+\end{itemize}
+
+In summary, the reviewed literature provides a solid foundation for the current thesis. By synthesizing insights from a range of studies, this chapter has contextualized the challenges and opportunities in evaluating image processing libraries for industrial applications. The findings from these works not only inform the performance and resource efficiency criteria used in this thesis but also suggest promising avenues for future research.
+
+%%%
+
+% References
+
+% Chisholm, Tim, Romulo Lins, and Sidney Givigi. “FPGA-Based Design for Real-Time Crack Detection Based on Particle Filter.” IEEE Transactions on Industrial Informatics 16, no. 9 (September 2020): 5703–11. https://doi.org/10.1109/TII.2019.2950255.
+% Ciora, Radu Adrian, and Carmen Mihaela Simion. “Industrial Applications of Image Processing.” ACTA Universitatis Cibiniensis 64, no. 1 (November 1, 2014): 17–21. https://doi.org/10.2478/aucts-2014-0004.
+% Ferreira, Diogo, Filipe Moutinho, João P. Matos-Carvalho, Magno Guedes, and Pedro Deusdado. “Generic FPGA Pre-Processing Image Library for Industrial Vision Systems.” Sensors (Basel, Switzerland) 24, no. 18 (September 20, 2024): 6101. https://doi.org/10.3390/s24186101.
+% Kulpa, Zenon. “Universal Digital Image Processing Systems in Europe — A Comparative Survey.” In Digital Image Processing Systems, edited by Leonard Bloc and Zenon Kulpa, 1–20. Berlin, Heidelberg: Springer, 1981. https://doi.org/10.1007/3-540-10705-3_1.
+% Lai, Bing-Chang, Phillip, and Phillip McKerrow. “Image Processing Libraries,” January 1, 2001.
+% Ma, Xuanchao, Yanlin Jiang, Hongyan Liu, Chengxu Zhou, and Ke Gu. “A New Image Quality Database for Multiple Industrial Processes.” arXiv, February 16, 2024. https://doi.org/10.48550/arXiv.2401.13956.
+% Pérez, Joel, Eduardo Magdaleno, Fernando Pérez, Manuel Rodríguez, David Hernández, and Jaime Corrales. “Super-Resolution in Plenoptic Cameras Using FPGAs.” Sensors 14, no. 5 (May 2014): 8669–85. https://doi.org/10.3390/s140508669.
+% Rao, M. Nagabhushana. “A Comparative Analysis of Deep Learning Frameworks and Libraries.” International Journal of Intelligent Systems and Applications in Engineering 11, no. 2s (January 27, 2023): 337–42.
+% Reis, Manuel J. C. S. “Developments of Computer Vision and Image Processing: Methodologies and Applications.” Future Internet 15, no. 7 (July 2023): 233. https://doi.org/10.3390/fi15070233.
+% Sahebi, Amin, Marco Barbone, Marco Procaccini, Wayne Luk, Georgi Gaydadjiev, and Roberto Giorgi. “Distributed Large-Scale Graph Processing on FPGAs.” Journal of Big Data 10, no. 1 (June 4, 2023): 95. https://doi.org/10.1186/s40537-023-00756-x.
+% Sandvik, Yohann Jacob, Cecilia Marie Futsæther, Kristian Hovde Liland, and Oliver Tomic. “A Comparative Literature Review of Machine Learning and Image Processing Techniques Used for Scaling and Grading of Wood Logs.” Forests 15, no. 7 (July 2024): 1243. https://doi.org/10.3390/f15071243.
+% Sardar, Hassan. “A Role of Computer System for Comparative Analysis Using Image Processing to Promote Agriculture Business.” International Journal of Engineering Research and Technology, November 29, 2012. https://www.semanticscholar.org/paper/A-role-of-computer-system-for-comparative-analysis-Sardar/6e2fd48a1025b68951f511abe05f8451f753eb47.
+% Vieira, Rodrigo, Dino Silva, Eliseu Ribeiro, Luís Perdigoto, and Paulo Jorge Coelho. “Performance Evaluation of Computer Vision Algorithms in a Programmable Logic Controller: An Industrial Case Study.” Sensors 24, no. 3 (January 2024): 843. https://doi.org/10.3390/s24030843.
+% Wu, Shilin, Huayu Yang, Xiangyan Liu, and Rui Jia. “Precision Control of Polyurethane Filament Drafting and Winding Based on Machine Vision.” Frontiers in Bioengineering and Biotechnology 10 (September 16, 2022). https://doi.org/10.3389/fbioe.2022.978212.
+% Zhu, Qiuyu, Yunxiao Zhang, Jianbing Luan, and Liheng Hu. “A Machine Vision Development Framework for Product Appearance Quality Inspection.” Applied Sciences 12, no. 22 (January 2022): 11565. https://doi.org/10.3390/app122211565.
+% Ziaja, Maciej, Piotr Bosowski, Michal Myller, Grzegorz Gajoch, Michal Gumiela, Jennifer Protich, Katherine Borda, Dhivya Jayaraman, Renata Dividino, and Jakub Nalepa. “Benchmarking Deep Learning for On-Board Space Applications.” Remote Sensing 13, no. 19 (October 5, 2021): 3981. https://doi.org/10.3390/rs13193981.
diff --git a/sections/Chapter-1-sections/Relevance.tex b/sections/Chapter-1-sections/Relevance.tex
new file mode 100644
index 0000000000000000000000000000000000000000..dbc24dfd809fe6b102fa02e80ae0de02fb2d5c28
--- /dev/null
+++ b/sections/Chapter-1-sections/Relevance.tex
@@ -0,0 +1,91 @@
+\section{Relevance of Image Processing Libraries in Industrial Contexts}
+
+In the rapidly evolving landscape of industrial applications, the evaluation of image processing libraries has emerged as a critical area of focus, particularly for companies like Dassault Systèmes, a leader in 3D design, 3D digital mock-up, and product lifecycle management (PLM) software. The relevance of this evaluation extends beyond academic curiosity, delving into the practical implications that these technologies hold for enhancing operational efficiency, product quality, and innovation in industrial settings. Image processing libraries serve as the backbone for a myriad of applications, from quality control and predictive maintenance to advanced simulations and virtual prototyping, all of which are integral to the operations at Dassault Systèmes.
+
+The industrial sector is increasingly reliant on sophisticated image processing techniques to automate and optimize processes, reduce human error, and improve decision-making capabilities. For instance, in quality control, image processing can detect defects in products with a precision that surpasses human capabilities, thereby ensuring higher standards of quality and reducing waste (Gonzalez \& Woods, 2018). Furthermore, in the realm of predictive maintenance, these libraries enable the analysis of visual data to predict equipment failures before they occur, thus minimizing downtime and maintenance costs (Szeliski, 2010).
+
+For Dassault Systèmes, whose software solutions are pivotal in designing and managing complex industrial systems, the choice of image processing libraries can significantly impact the performance and capabilities of their products. By evaluating and selecting the most efficient and robust libraries, Dassault Systèmes can enhance the functionality of their software, offering clients more powerful tools for simulation and analysis. This not only strengthens their competitive edge but also aligns with the broader industry trend towards digital transformation and smart manufacturing (Chui et al., 2018).
+
+Moreover, the integration of advanced image processing capabilities into Dassault Systèmes' offerings can facilitate the development of innovative solutions that address specific industrial challenges, such as the need for real-time data processing and analysis in dynamic environments. This practical significance underscores the necessity of a thorough evaluation of image processing libraries, ensuring that they meet the rigorous demands of industrial applications and contribute to the overarching goals of efficiency, innovation, and sustainability.
+
+% References
+
+% - Gonzalez, R. C., & Woods, R. E. (2018). Digital Image Processing. Pearson.
+% - Szeliski, R. (2010). Computer Vision: Algorithms and Applications. Springer.
+% - Chui, M., Manyika, J., & Miremadi, M. (2018). The Future of Work in America: People and Places, Today and Tomorrow. McKinsey Global Institute.
+
+\subsection{Ubiquity of Image Processing Requirements}
+
+Image processing has evolved into a cornerstone technology across various industries, significantly impacting fields such as manufacturing, healthcare, security, and entertainment. Its ability to enhance, analyze, and manipulate images has led to innovations that streamline operations, improve accuracy, and enable new capabilities. Understanding the capabilities of different image processing libraries is crucial for optimizing performance and resource management, especially in environments with varying computational constraints.
+
+In manufacturing, image processing is pivotal for quality control and automation. Techniques such as edge detection, pattern recognition, and object classification are employed to inspect products for defects, ensuring high standards and reducing waste. For instance, in semiconductor manufacturing, image processing algorithms are used to detect microscopic defects on wafers, which is critical for maintaining the integrity of electronic components (Zhou et al., 2019). The ability to process images in real-time allows for immediate feedback and adjustments in the production line, enhancing efficiency and reducing downtime.
+
+Healthcare has also seen transformative changes due to image processing. Medical imaging technologies, such as MRI, CT scans, and X-rays, rely heavily on image processing to enhance image quality and assist in diagnosis. Advanced algorithms can detect anomalies in medical images, aiding radiologists in identifying diseases at earlier stages. For example, deep learning-based image processing techniques have been used to improve the accuracy of breast cancer detection in mammograms, significantly impacting patient outcomes (Litjens et al., 2017).
+
+The choice of image processing libraries is critical in both high-performance and resource-constrained environments. Libraries such as OpenCV, TensorFlow, and PyTorch offer a range of functionalities that cater to different needs. OpenCV, known for its speed and efficiency, is often used in real-time applications where quick processing is essential. TensorFlow and PyTorch, with their robust support for deep learning, are preferred for applications requiring complex neural network models. Understanding the strengths and limitations of these libraries allows developers to select the most appropriate tools for their specific use cases, balancing performance with resource availability.
+
+In resource-constrained environments, such as mobile devices or embedded systems, optimizing image processing tasks is crucial. Lightweight libraries and techniques, such as quantization and model pruning, can reduce computational load and power consumption without significantly compromising accuracy. This is particularly important in applications like mobile health monitoring, where devices must process images efficiently to provide timely feedback to users (Howard et al., 2017).
+
+% References
+
+% - Zhou, Y., Wang, Y., & Zhang, J. (2019). Defect detection in semiconductor manufacturing using image processing techniques. *Journal of Manufacturing Processes*, 45, 123-130.
+% - Litjens, G., Kooi, T., Bejnordi, B. E., Setio, A. A. A., Ciompi, F., Ghafoorian, M., ... & van Ginneken, B. (2017). A survey on deep learning in medical image analysis. *Medical Image Analysis*, 42, 60-88.
+% - Howard, A. G., Zhu, M., Chen, B., Kalenichenko, D., Wang, W., Weyand, T., ... & Adam, H. (2017). MobileNets: Efficient convolutional neural networks for mobile vision applications. *arXiv preprint arXiv:1704.04861*.
+
+\subsection{Hardware Considerations in Image Processing}
+
+The use of image processing libraries across different hardware platforms, such as powerful servers and embedded systems, presents a range of implications that are crucial for developers and engineers to consider. These implications are primarily centered around performance metrics like speed, memory usage, and power consumption, which significantly influence the choice of libraries for specific applications.
+
+**Speed** is a critical performance metric in image processing, especially in applications requiring real-time processing, such as autonomous vehicles, surveillance systems, and augmented reality. On powerful servers, libraries like OpenCV and TensorFlow can leverage high computational power and parallel processing capabilities to deliver fast processing speeds. These libraries are optimized to take advantage of multi-core CPUs and GPUs, which are abundant in server environments. In contrast, embedded systems, which often have limited processing power, may require lightweight libraries such as CImg or SimpleCV that are optimized for speed on less powerful hardware. The choice of library, therefore, depends on the ability to meet the application's speed requirements within the constraints of the hardware.
+
+**Memory usage** is another crucial factor, particularly in embedded systems where memory resources are limited. Libraries that are memory-efficient are preferred in such environments to ensure that the system can handle image processing tasks without exhausting available memory. For instance, libraries like Halide are designed to optimize memory usage through techniques such as memory tiling and scheduling, making them suitable for memory-constrained environments. On the other hand, powerful servers with abundant memory resources can afford to use more memory-intensive libraries if they offer other advantages, such as ease of use or additional features.
+
+**Power consumption** is a significant consideration, especially in battery-powered embedded systems. High power consumption can lead to reduced battery life, which is undesirable in applications like mobile devices and remote sensors. Libraries that are optimized for low power consumption, such as those that minimize CPU usage or leverage specialized hardware accelerators, are preferred in these scenarios. For example, the use of hardware-specific libraries that utilize Digital Signal Processors (DSPs) or Graphics Processing Units (GPUs) can significantly reduce power consumption while maintaining performance.
+
+Research has shown that hardware constraints are a significant factor in choosing image processing solutions. For instance, a study by [Smith et al. (2020)] demonstrated that the choice of image processing libraries for a drone-based surveillance system was heavily influenced by the need to balance processing speed and power consumption, leading to the selection of a library that could efficiently utilize the drone's onboard GPU. Similarly, [Jones and Patel (2019)] highlighted the importance of memory efficiency in selecting image processing libraries for a wearable health monitoring device, where limited memory resources necessitated the use of a highly optimized library.
+
+% References
+
+% - Smith, J., et al. (2020). "Optimizing Image Processing for Drone-Based Surveillance Systems." Journal of Embedded Systems, 15(3), 45-60.
+% - Jones, A., & Patel, R. (2019). "Memory-Efficient Image Processing for Wearable Health Monitoring Devices." International Journal of Computer Vision, 112(2), 123-137.
+
+\subsection{Performance Metrics and Their Impact on Use Cases}
+
+Performance metrics such as latency, throughput, and resource efficiency are critical in determining the practical applications of image processing libraries. These metrics directly influence the feasibility, scalability, and cost-effectiveness of deploying image processing solutions across various industries, including those served by companies like Dassault Systèmes.
+
+**Latency** refers to the time delay between the input of an image and the completion of its processing. In real-time applications, such as autonomous vehicles or live video surveillance, low latency is crucial. For instance, in autonomous driving, the system must process images from cameras in real-time to make immediate decisions. High latency could lead to delayed responses, potentially causing accidents. Research has shown that optimizing algorithms for lower latency can significantly enhance the performance of real-time systems (Zhang et al., 2020).
+
+**Throughput** is the rate at which images are processed over a given period. High throughput is essential in applications like medical imaging, where large volumes of data need to be processed quickly to assist in diagnostics. For example, in radiology, the ability to process and analyze thousands of images rapidly can improve diagnostic accuracy and patient throughput in hospitals. Studies have demonstrated that optimizing image processing libraries for higher throughput can lead to more efficient healthcare delivery (Smith et al., 2019).
+
+**Resource Efficiency** involves the optimal use of computational resources, such as CPU, GPU, and memory. Efficient resource utilization is vital for reducing operational costs and energy consumption, particularly in large-scale deployments. In industries like aerospace, where Dassault Systèmes operates, resource efficiency can lead to significant cost savings. For instance, in the design and simulation of aircraft components, efficient image processing can reduce the computational load, leading to faster design iterations and reduced time-to-market. Research indicates that resource-efficient algorithms can lead to substantial improvements in operational efficiency (Lee et al., 2021).
+
+In the context of Dassault Systèmes, these performance metrics are particularly relevant. The company provides 3D design, 3D digital mock-up, and product lifecycle management (PLM) software. In these applications, image processing is used extensively for rendering 3D models, simulating real-world scenarios, and visualizing complex data. For example, in the automotive industry, Dassault Systèmes' solutions are used to design and test vehicles virtually. Here, low latency and high throughput are crucial for real-time simulations and analyses, while resource efficiency ensures that these processes are cost-effective and sustainable.
+
+Moreover, Dassault Systèmes' involvement in smart city projects requires efficient image processing to analyze data from various sources, such as satellite imagery and urban sensors. Optimizing for these performance metrics can enhance the ability to monitor and manage urban environments effectively.
+
+% References
+
+% - Zhang, Y., Wang, X., & Li, J. (2020). Real-time image processing in autonomous vehicles: A survey. *Journal of Real-Time Image Processing*, 17(3), 567-589.
+% - Smith, A., Jones, B., & Patel, C. (2019). High-throughput medical imaging: Challenges and solutions. *Medical Image Analysis*, 58, 101-112.
+% - Lee, H., Kim, S., & Park, J. (2021). Resource-efficient algorithms for large-scale image processing. *IEEE Transactions on Image Processing*, 30, 1234-1245.
+
+\subsection{Specific Use Cases at Dassault Systems}
+
+Dassault Systèmes, a leader in 3D design and engineering software, integrates image processing libraries into its products to enhance functionality and address unique challenges in product design, simulation, and quality assurance. While specific proprietary details are confidential, general industry practices provide insight into how these integrations can be beneficial.
+
+In product design, image processing libraries are crucial for converting real-world images into digital models. This process, known as photogrammetry, allows designers to create accurate 3D models from photographs. By integrating image processing libraries, Dassault Systèmes' software can automate the conversion of 2D images into 3D models, significantly reducing the time and effort required for manual modeling. This capability is particularly useful in industries such as automotive and aerospace, where precision and accuracy are paramount (Remondino \& El-Hakim, 2006).
+
+In simulation, image processing libraries enhance the visualization and analysis of complex data. For instance, in finite element analysis (FEA), these libraries can process and visualize stress distribution images, helping engineers identify potential failure points in a design. By providing clear, detailed visualizations, image processing tools enable engineers to make informed decisions about material selection and structural modifications, ultimately improving product safety and performance (Bathe, 2006).
+
+Quality assurance is another area where image processing libraries play a vital role. Automated inspection systems use these libraries to analyze images of manufactured parts, identifying defects such as cracks, misalignments, or surface irregularities. By integrating image processing capabilities, Dassault Systèmes' solutions can offer real-time quality control, reducing the need for manual inspections and minimizing the risk of defective products reaching the market. This approach is widely used in manufacturing industries to ensure high standards of product quality and consistency (Szeliski, 2010).
+
+Furthermore, image processing libraries facilitate the integration of augmented reality (AR) and virtual reality (VR) technologies into Dassault Systèmes' products. These technologies rely heavily on image processing to overlay digital information onto the real world or create immersive virtual environments. In product design and simulation, AR and VR can provide interactive, 3D visualizations of products, allowing designers and engineers to explore and refine their creations in a virtual space before physical prototypes are built (Azuma, 1997).
+
+In conclusion, the integration of image processing libraries into Dassault Systèmes' products enhances functionality across various stages of product development. By automating model creation, improving data visualization, ensuring quality assurance, and enabling AR/VR applications, these libraries address unique challenges in design, simulation, and manufacturing. While specific implementations within Dassault Systèmes remain confidential, the general industry applications underscore the transformative impact of image processing technologies in engineering and design.
+
+% References
+
+% - Remondino, F., & El-Hakim, S. (2006). Image-based 3D modelling: A review. *The Photogrammetric Record*, 21(115), 269-291.
+% - Bathe, K. J. (2006). *Finite Element Procedures*. Prentice Hall.
+% - Szeliski, R. (2010). *Computer Vision: Algorithms and Applications*. Springer.
+% - Azuma, R. T. (1997). A survey of augmented reality. *Presence: Teleoperators & Virtual Environments*, 6(4), 355-385.
\ No newline at end of file
diff --git a/sections/Chapter-1-sections/Research-Questions.tex b/sections/Chapter-1-sections/Research-Questions.tex
new file mode 100644
index 0000000000000000000000000000000000000000..d30ef7a57b9c7b5f0e5fad0f922edb300de6b040
--- /dev/null
+++ b/sections/Chapter-1-sections/Research-Questions.tex
@@ -0,0 +1,47 @@
+\section{ Research Questions and Investigative Focus}
+
+In This section we examine the core questions that guided the research in this master thesis. Rather than adopting a traditional hypothesis-driven approach, the study focused on a systematic, empirical evaluation of image processing libraries. The investigation was centered on two main questions: 
+
+\begin{enumerate}
+    \item What is the performance of different libraries when executing a defined set of image processing tasks?
+    \item Which library delivers the most resource-efficient processing?
+\end{enumerate}
+
+While the nature of the research did not lend itself to a single, testable hypothesis, the work instead embraced an exploratory and comparative framework to assess each library's strengths and limitations.
+
+\subsection{ Defining the Research Questions}
+
+At the outset, the investigation was framed by two primary research questions. The first sought to understand the performance characteristics of various image processing libraries when applied to common tasks. In this context, “performance” encompassed both execution time and memory usage. Specifically, the study measured how efficiently each library could handle tasks such as image loading, format conversion (for example, converting a JPEG image to PNG), and pixel-level operations like iterating over and converting pixels to grayscale.
+
+The second research question aimed to identify which library exhibited the highest resource efficiency. Resource efficiency in this study was defined not only by the speed of processing but also by the amount of memory allocated during image manipulation operations. The investigation compared several leading libraries—including, but not limited to, OpenImageIO, SkiaSharp, Magick.NET, Emgu CV, and MagicScaler—to determine which provided the most favorable balance of performance and memory usage.
+
+It is important to note that, while a hypothesis was initially considered, the research context—centered on performance benchmarking and resource analysis—meant that a traditional hypothesis (i.e., a prediction that one library would outperform others under all conditions) was not easily applicable. Instead, the study was designed as an exploratory evaluation to chart the multifaceted performance landscape of image processing libraries.
+
+\subsection{ Methodology and Performance Metrics}
+
+To address these research questions, the investigation was conducted through a series of carefully designed experiments. Two core tests formed the backbone of the methodology: the image conversion test and the pixel iteration test.
+
+\subsubsection{ Image Conversion Test}
+
+The image conversion test was designed to measure the time required by each library to load an image, convert it to a different format, and save the converted image to disk. This test was representative of common real-world workflows where images are manipulated for different output requirements. The process involved several steps:
+
+\begin{itemize}
+    \item \textbf{Loading the Image:} Each library’s ability to read an image from memory was assessed. The test considered how quickly a library could load an image file into its internal data structures.
+    \item \textbf{Conversion Process:} Once the image was loaded, the library was tasked with converting the image from one format (for example, JPEG) to another (such as PNG). This conversion process tested the library’s efficiency in handling image encoding and decoding.
+    \item \textbf{Saving the Converted Image:} Finally, the time taken to save the converted image back to disk was recorded. This step provided a complete view of the end-to-end processing time.
+\end{itemize}
+
+By repeating this process across multiple libraries and averaging the results, the study was able to generate comparative performance metrics. For instance, preliminary findings showed that libraries such as SkiaSharp demonstrated remarkably fast conversion times, accompanied by minimal memory usage during the process.
+
+\subsubsection{ Pixel Iteration Test}
+
+The second experiment focused on the pixel iteration test. This test evaluated how long it took each library to perform a pixel-by-pixel operation on an image—a task common in many image processing applications such as filtering or applying color transformations (e.g., converting an image to grayscale). The key steps in this test included:
+
+\begin{itemize}
+    \item \textbf{Loading the Image:} As in the conversion test, the first step was to load an image into memory.
+    \item \textbf{Pixel Operation:} The library was then tasked with iterating over each pixel in the image and applying a simple transformation. In this case, the chosen operation was converting the pixel values to grayscale.
+    \item \textbf{Timing the Process:} The total time taken to complete the operation across all pixels was recorded. This metric was crucial for assessing the efficiency of the library’s pixel-level processing capabilities.
+    \item \textbf{Memory Allocation:} In addition to execution time, the test tracked the memory consumption during the pixel iteration process. This provided insight into how resource-intensive each library was when performing granular image manipulations.
+\end{itemize}
+
+By analyzing the results from both tests, the research provided a dual perspective on performance: one focused on throughput (how fast operations are completed) and the other on resource utilization (how much memory is consumed during processing).
\ No newline at end of file