(Icon legend: paper icon = Paper PDF,
slides icon = Presentation slides,
video icon = Presentation video)
Lizhi Liao; Simon Eismann; Heng Li; Cor-Paul Bezemer; Diego Costa; André van Hoorn; Weiyi Shang
Early Detection of Performance Regressions by Bridging Local Performance Data and Architectural Models Inproceedings
International Conference on Software Engineering (ICSE), 2025.
BibTeX | Tags: Performance, Performance analysis, Performance engineering, Performance evaluation, Performance regressions, Performance testing
@inproceedings{Liao_ICSE2025,
  title     = {Early Detection of Performance Regressions by Bridging Local Performance Data and Architectural Models},
  author    = {Liao, Lizhi and Eismann, Simon and Li, Heng and Bezemer, Cor-Paul and Costa, Diego and van Hoorn, Andr{\'e} and Shang, Weiyi},
  booktitle = {International Conference on Software Engineering (ICSE)},
  year      = {2025},
  date      = {2025-08-15},
  urldate   = {2025-08-15},
  keywords  = {Performance, Performance analysis, Performance engineering, Performance evaluation, Performance regressions, Performance testing},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Mikael Sabuhi
Strategies For Building Performant Containerized Applications PhD Thesis
2023.
Abstract | BibTeX | Tags: Docker, Docker Hub, Microservices, Performance, Performance analysis, Performance engineering
@phdthesis{phd_mikael,
  title         = {Strategies For Building Performant Containerized Applications},
  author        = {Sabuhi, Mikael},
  school        = {University of Alberta},
  internal-note = {school inferred from the group's affiliation; verify against the published thesis record},
  year          = {2023},
  date          = {2023-09-25},
  urldate       = {2023-09-25},
  abstract      = {The evolution of cloud computing in the last decade has offered unprecedented access to sizable, configurable computing resources with minimal management effort. Containerization of applications, particularly through Docker, has been pivotal in this progression. As modern software increasingly relies on various cloud services, designing performant cloud applications has emerged as a critical concern. Key attributes of such applications include reliability, scalability, efficiency, fault tolerance, and responsiveness. This thesis seeks to address the challenges intrinsic to creating performant cloud applications by developing strategies aimed at achieving these characteristics through: 1) the application of autoscaling techniques to enhance scalability, efficiency, and responsiveness; 2) the introduction of a methodology for assessing the impact of Docker image upgrades on containerized applications to prevent performance degradation; and 3) the utilization of microservices architecture to develop scalable, reliable, and fault-tolerant cloud applications. In our initial research, we propose a pioneering approach to optimize the performance and resource usage of containerized cloud applications using adaptive controllers grounded in control theory. Our methodology harnesses the capacity of neural networks to capture the intrinsic non-linearity of these applications, and adapts the parameters of a proportional-integral-derivative (PID) controller to accommodate environmental changes. The outcomes demonstrate significant enhancements in resource utilization and a reduction in service level agreement violations, surpassing the performance of other examined autoscaling techniques. In the subsequent study, we present a method to evaluate the performance implications of Docker image upgrades on cloud software systems and their correlation with application dependencies. 
Our case study of 90 official WordPress images underscores the need for comprehensive performance testing before upgrades, the importance of maintaining a performance repository for reporting test results, and the potential benefits of extending semantic versioning to encompass performance modifications. This investigation encourages an enlightened approach to Docker image management, promoting enhanced cloud application performance. Lastly, we introduce Micro-FL, a fault-tolerant federated learning framework crafted to enhance the reliability and scalability of cloud-based machine learning platforms. By incorporating a microservices-based architecture within Docker containers, Micro-FL overcomes challenges typically associated with federated learning, such as resource constraints, scalability, and system faults. Performance assessments demonstrate Micro-FL’s capability to efficiently manage faults and streamline federated learning processes, offering a more robust and scalable solution for federated learning. The research work presented in this thesis provides deep insights, actionable recommendations, and effective and thoroughly evaluated approaches for building performant cloud applications.
},
  keywords      = {Docker, Docker Hub, Microservices, Performance, Performance analysis, Performance engineering},
  pubstate      = {published},
  tppubtype     = {phdthesis},
}
Simon Eismann; Cor-Paul Bezemer; Weiyi Shang; Dušan Okanović; André van Hoorn
Microservices: A Performance Tester's Dream or Nightmare? Inproceedings
ACM/SPEC International Conference on Performance Engineering (ICPE), pp. 1–12, 2020.
Abstract | BibTeX | Tags: DevOps, Microservices, Performance, Regression testing
@inproceedings{Simon20,
  title     = {Microservices: A Performance Tester's Dream or Nightmare?},
  author    = {Eismann, Simon and Bezemer, Cor-Paul and Shang, Weiyi and Okanovi{\'c}, Du{\v{s}}an and van Hoorn, Andr{\'e}},
  booktitle = {ACM/SPEC International Conference on Performance Engineering (ICPE)},
  pages     = {1--12},
  year      = {2020},
  date      = {2020-01-24},
  urldate   = {2020-01-24},
  abstract  = {In recent years, there has been a shift in software development towards microservice-based architectures, which consist of small services that focus on one particular functionality. Many companies are migrating their applications to such architectures to reap the benefits of microservices, such as increased flexibility, scalability and a smaller granularity of the offered functionality by a service.
On the one hand, the benefits of microservices for functional testing are often praised, as the focus on one functionality and their smaller granularity allow for more targeted and more convenient testing. On the other hand, using microservices has their consequences (both positive and negative) on other types of testing, such as performance testing. Performance testing is traditionally done by establishing the baseline performance of a software version, which is then used to compare the performance testing results of later software versions. However, as we show in this paper, establishing such a baseline performance is challenging in microservice applications.
In this paper, we discuss the benefits and challenges of microservices from a performance tester’s point of view. Through a series of experiments on the TeaStore application, we demonstrate how microservices affect the performance testing process, and we demonstrate that it is not straightforward to achieve reliable performance testing results for a microservice application.},
  keywords  = {DevOps, Microservices, Performance, Regression testing},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
On the one hand, the benefits of microservices for functional testing are often praised, as the focus on one functionality and their smaller granularity allow for more targeted and more convenient testing. On the other hand, using microservices has their consequences (both positive and negative) on other types of testing, such as performance testing. Performance testing is traditionally done by establishing the baseline performance of a software version, which is then used to compare the performance testing results of later software versions. However, as we show in this paper, establishing such a baseline performance is challenging in microservice applications.
In this paper, we discuss the benefits and challenges of microservices from a performance tester’s point of view. Through a series of experiments on the TeaStore application, we demonstrate how microservices affect the performance testing process, and we demonstrate that it is not straightforward to achieve reliable performance testing results for a microservice application.
Cor-Paul Bezemer; Andy Zaidman
Server Overload Detection and Prediction Using Pattern Classification Inproceedings
International Conference on Autonomic Computing (ICAC), pp. 163–164, 2011.
BibTeX | Tags: Performance
@inproceedings{Bezemer2011,
  title     = {Server Overload Detection and Prediction Using Pattern Classification},
  author    = {Bezemer, Cor-Paul and Zaidman, Andy},
  booktitle = {International Conference on Autonomic Computing (ICAC)},
  pages     = {163--164},
  year      = {2011},
  date      = {2011-06-14},
  urldate   = {2011-06-14},
  keywords  = {Performance},
  pubstate  = {published},
  tppubtype = {inproceedings},
}