2022
Nicolaou, Nicolas; Cadambe, Viveck; Prakash, N; Trigeorgi, Andria; Konwar, Kishori; Medard, Muriel; Lynch, Nancy
ARES: Adaptive, Reconfigurable, Erasure coded, Atomic Storage Journal Article
In: ACM Transactions on Storage, 2022.
@article{ARESTOS22,
title = {ARES: Adaptive, Reconfigurable, Erasure coded, Atomic Storage},
author = {Nicolas Nicolaou and Viveck Cadambe and N Prakash and Andria Trigeorgi and Kishori Konwar and Muriel Medard and Nancy Lynch},
url = {https://projects.algolysis.com/collaborate/wp-content/uploads/2022/03/2020_ACM_TOS_ARES_final.pdf},
year = {2022},
date = {2022-06-30},
urldate = {2022-06-30},
journal = {ACM Transactions on Storage},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Georgiou, Chryssis; Hadjistasi, Theophanis; Nicolaou, Nicolas; Schwarzmann, Alexander A.
Implementing three exchange read operations for distributed atomic storage Journal Article
In: Journal of Parallel and Distributed Computing, vol. 163, pp. 97-113, 2022, ISSN: 0743-7315.
@article{GEORGIOU202297,
title = {Implementing three exchange read operations for distributed atomic storage},
author = {Chryssis Georgiou and Theophanis Hadjistasi and Nicolas Nicolaou and Alexander A. Schwarzmann},
url = {https://www.sciencedirect.com/science/article/pii/S0743731522000314},
doi = {10.1016/j.jpdc.2022.01.024},
issn = {0743-7315},
year = {2022},
date = {2022-01-01},
journal = {Journal of Parallel and Distributed Computing},
volume = {163},
pages = {97-113},
abstract = {Communication latency typically dominates the performance of message-passing systems, and consequently defines the efficiency of operations of algorithms implementing atomic read/write objects in asynchronous, crash-prone, message-passing systems. Here latency is measured in terms of the number of communication exchanges (or simply exchanges) involved in each operation. We present four algorithms, two for the single-writer/multiple-reader (SWMR) and two for the multi-writer/multiple-reader (MWMR) settings, that allow reads to take two or three exchanges, advancing the state of the art in this area. Writes take the same number of exchanges as in prior works (i.e., two for SWMR and four for MWMR settings). In contrast with existing efficient implementations, ours impose no constraints on reader participation in either setting, nor on the number of writers in the MWMR setting. The correctness of the algorithms is rigorously argued. We conclude with an empirical study demonstrating the practicality of the algorithms, and identifying settings in which their read performance is clearly superior to relevant algorithms.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
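The exchange-counting metric used in the abstract above can be made concrete with a small sketch. The toy Python fragment below is not one of the paper's algorithms: it walks through a classic ABD-style read, whose four exchanges (a query round trip followed by a write-back round trip) form the baseline that the paper's two- and three-exchange reads improve on. All names in it (Server, abd_read) are illustrative assumptions.

# Toy, in-process sketch of an ABD-style atomic read that counts
# communication exchanges. This is NOT the paper's two/three-exchange
# algorithm; it only illustrates the four-exchange baseline.
from dataclasses import dataclass

@dataclass
class Server:
    tag: int = 0    # logical timestamp of the stored value
    value: str = ""

def abd_read(servers, majority):
    exchanges = 0
    exchanges += 1  # Exchange 1: reader broadcasts a query to all servers.
    replies = [(s.tag, s.value) for s in servers[:majority]]
    exchanges += 1  # Exchange 2: a majority of servers reply with (tag, value).
    tag, value = max(replies)      # latest value observed
    for s in servers[:majority]:   # Exchange 3: write the latest pair back...
        if tag > s.tag:
            s.tag, s.value = tag, value
    exchanges += 1
    exchanges += 1  # Exchange 4: ...and wait for the servers' acknowledgments.
    return value, exchanges

servers = [Server(3, "v3"), Server(2, "v2"), Server(3, "v3"), Server(1, "v1"), Server(2, "v2")]
print(abd_read(servers, majority=3))  # ('v3', 4); the algorithms above need only 2 or 3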
Georgiou, Chryssis; Nicolaou, Nicolas; Trigeorgi, Andria
Fragmented ARES: Dynamic Storage for Large Objects Technical Report
2022.
@techreport{DBLP:journals/corr/abs-2201-13292,
title = {Fragmented ARES: Dynamic Storage for Large Objects},
author = {Chryssis Georgiou and Nicolas Nicolaou and Andria Trigeorgi},
url = {https://arxiv.org/abs/2201.13292},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {CoRR},
volume = {abs/2201.13292},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
2021
Krstic, Marko; Nicolaou, Nicolas; Stavrakis, Efstathios
How Detecting Soon-to-Fail Drives is Imperative in Industry 4.0? Conference
6th EAI International Conference on Management of Manufacturing Systems (MMS 21), 2021.
@conference{Krstic2021,
title = {How Detecting Soon-to-Fail Drives is Imperative in Industry 4.0?},
author = {Marko Krstic and Nicolas Nicolaou and Efstathios Stavrakis},
url = {https://projects.algolysis.com/collaborate/wp-content/uploads/2022/03/D10-1_MMS2021.pdf},
year = {2021},
date = {2021-07-01},
urldate = {2021-07-01},
booktitle = {6th EAI International Conference on Management of Manufacturing Systems (MMS 21)},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Anta, Antonio Fernández; Hadjistasi, Theophanis; Georgiou, Chryssis; Nicolaou, Nicolas; Stavrakis, Efstathios; Trigeorgi, Andria
Fragmented Objects: Boosting Concurrency of Shared Large Objects Conference
SIROCCO 2021: Structural Information and Communication Complexity, vol. 12810, SIROCCO Springer, 2021.
@conference{FGHNST21,
title = {Fragmented Objects: Boosting Concurrency of Shared Large Objects},
author = {Antonio Fernández Anta and Theophanis Hadjistasi and Chryssis Georgiou and Nicolas Nicolaou and Efstathios Stavrakis and Andria Trigeorgi},
doi = {10.1007/978-3-030-79527-6_7},
year = {2021},
date = {2021-06-28},
urldate = {2021-06-28},
booktitle = {SIROCCO 2021: Structural Information and Communication Complexity},
volume = {12810},
pages = {106-126},
publisher = {Springer},
series = {SIROCCO},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Trigeorgi, Andria
Robust and Strongly Consistent Distributed Storage Systems Proceedings Article
In: The 15th International Conference on Research Challenges in Information Science, 2021.
@inproceedings{Trigeorgi2021,
title = {Robust and Strongly Consistent Distributed Storage Systems},
author = {Andria Trigeorgi},
year = {2021},
date = {2021-05-11},
urldate = {2021-05-11},
booktitle = {The 15th International Conference on Research Challenges in Information Science},
series = {RCIS},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gramoli, Vincent; Nicolaou, Nicolas; Schwarzmann, Alexander A.
Consistent Distributed Storage Book
Morgan & Claypool Publishers, 2021.
@book{DBLP:series/synthesis/2021Gramoli,
title = {Consistent Distributed Storage},
author = {Vincent Gramoli and Nicolas Nicolaou and Alexander A. Schwarzmann},
url = {https://doi.org/10.2200/S01069ED1V01Y202012DCT017},
doi = {10.2200/S01069ED1V01Y202012DCT017},
year = {2021},
date = {2021-01-01},
publisher = {Morgan & Claypool Publishers},
series = {Synthesis Lectures on Distributed Computing Theory},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
2020
Anta, Antonio Fernández; Hadjistasi, Theophanis; Nicolaou, Nicolas; Popa, Alexandru; Schwarzmann, Alexander A
Tractable low-delay atomic memory Journal Article
In: Distributed Computing, pp. 1–26, 2020, ISSN: 0178-2770.
@article{anta_tractable_2020,
title = {Tractable low-delay atomic memory},
author = {Antonio Fernández Anta and Theophanis Hadjistasi and Nicolas Nicolaou and Alexandru Popa and Alexander A Schwarzmann},
url = {https://projects.algolysis.com/collaborate/wp-content/uploads/2020/06/main.pdf},
doi = {10.1007/s00446-020-00379-y},
issn = {0178-2770},
year = {2020},
date = {2020-05-26},
journal = {Distributed Computing},
pages = {1--26},
abstract = {Communication cost is the most commonly used metric in assessing the efficiency of operations in distributed algorithms for message-passing environments. In doing so, the standing assumption is that the cost of local computation is negligible compared to the cost of communication. However, in many cases, operation implementations rely on complex computations that should not be ignored. Therefore, a more accurate assessment of operation efficiency should account for both computation and communication costs. This paper focuses on the efficiency of read and write operations in emulations of atomic read/write shared memory in the asynchronous, message-passing, crash-prone environment. The much celebrated work by Dutta et al. presented an implementation in this setting where all read and write operations could complete in just a single communication round-trip. Such operations were characterized for the first time as fast. At its heart, the work by Dutta et al. used a predicate to achieve that performance. We show that the predicate is computationally intractable by defining an equivalent problem and reducing it to Maximum Biclique, a known NP-hard problem. We derive a new, computationally tractable predicate, and an algorithm to compute it in linear time. The proposed predicate is used to develop three algorithms: ccFast, ccHybrid, and OhFast. ccFast is similar to the algorithm of Dutta et al., with the main difference being the use of the new predicate for reduced computational complexity. All operations in ccFast are fast, and particular constraints apply to the number of participants. ccHybrid and OhFast allow some operations to be “slow”, enabling an unbounded number of participants in the service. ccHybrid is a “multi-speed” version of ccFast, where the reader determines when it is not safe to complete a read operation in a single communication round-trip. OhFast expedites algorithm OhSam of Hadjistasi et al. by placing the developed predicate at the servers instead of the clients and avoiding excessive server communication when possible. An experimental evaluation using NS3 compares algorithms ccHybrid and OhFast to the classic algorithm ABD of Attiya et al., the algorithm Sf of Georgiou et al. (the first “semifast” algorithm, allowing both fast and slow operations), and algorithm OhSam. In summary, this work gives new meaning to the term fast by assessing both the communication and the computation efficiency of each operation.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
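The fast-versus-slow distinction in this abstract can also be sketched briefly. The predicate below, a simple threshold on how many servers report the maximum tag, is a stand-in assumption and not the linear-time predicate derived in the paper; it only shows the shape of a hybrid read that returns after one round trip when the predicate holds and falls back to a second round trip otherwise.

# Minimal sketch of a "fast vs. slow" read decision. The threshold test is
# a simplified stand-in, not the predicate of ccFast/ccHybrid/OhFast.
def hybrid_read(replies, threshold):
    """replies: list of (tag, value) pairs gathered from a quorum of servers."""
    max_tag, value = max(replies)
    support = sum(1 for tag, _ in replies if tag == max_tag)
    if support >= threshold:
        # Predicate holds: safe to return after one round trip ("fast").
        return value, "fast"
    # Predicate fails: a second round trip would propagate the value ("slow").
    return value, "slow"

print(hybrid_read([(4, "v4"), (4, "v4"), (3, "v3")], threshold=2))  # ('v4', 'fast')
print(hybrid_read([(4, "v4"), (3, "v3"), (3, "v3")], threshold=2))  # ('v4', 'slow')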
2019
Nicolaou, Nicolas; Cadambe, Viveck; Prakash, N; Konwar, Kishori; Medard, Muriel; Lynch, Nancy
ARES: Adaptive, Reconfigurable, Erasure coded, Atomic Storage Proceedings Article
In: 39th IEEE International Conference on Distributed Computing Systems (ICDCS 2019), pp. 2195-2205, IEEE, 2019, ISBN: 978-1-7281-2519-0.
@inproceedings{ICDCS2019,
title = {ARES: Adaptive, Reconfigurable, Erasure coded, Atomic Storage},
author = {Nicolas Nicolaou and Viveck Cadambe and N Prakash and Kishori Konwar and Muriel Medard and Nancy Lynch},
url = {https://projects.algolysis.com/collaborate/wp-content/uploads/2020/06/ICDCS_2019_paper_403.pdf},
isbn = {978-1-7281-2519-0},
year = {2019},
date = {2019-07-07},
urldate = {2019-07-07},
booktitle = {39th IEEE International Conference on Distributed Computing Systems (ICDCS 2019)},
pages = {2195-2205},
publisher = {IEEE},
abstract = {Emulating a shared atomic, read/write storage system is a fundamental problem in distributed computing. Replicating atomic objects among a set of data hosts was the norm for traditional implementations (e.g., [6]) in order to guarantee the availability and accessibility of the data despite host failures.
As replication is highly storage demanding, recent approaches suggested the use of erasure codes to offer the same fault tolerance while optimizing storage usage at the hosts. Initial works focused on a fixed set of data hosts. To guarantee longevity and scalability, a storage service should be able to dynamically mask host failures by allowing new hosts to join, and failed hosts to be removed, without service interruption. This work presents the first erasure-code-based atomic algorithm, called ARES, which allows the set of hosts to be modified in the course of an execution. ARES is composed of three main components: (i) a reconfiguration protocol, (ii) a read/write protocol, and (iii) a set of data access primitives. The design of ARES is modular, accommodating the use of different erasure-code parameters on a per-configuration basis. We provide bounds on the latency of read/write operations, and analyze the storage and communication costs of the ARES algorithm.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
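The storage argument in this abstract can be made concrete with a toy example. The sketch below is not part of ARES, which is parameterized by general erasure codes chosen per configuration; it uses a (k+1, k) single-parity XOR code, tolerating one lost fragment, purely to illustrate why coded fragments occupy a fraction of the space that full replicas do.

# Toy (k+1, k) single-parity erasure code: k data fragments plus one XOR
# parity fragment, recovering any single lost fragment. A minimal sketch of
# the replication-vs-coding storage trade-off, not ARES's actual codes.
def encode(data: bytes, k: int):
    assert len(data) % k == 0, "pad data to a multiple of k first"
    size = len(data) // k
    frags = [bytearray(data[i*size:(i+1)*size]) for i in range(k)]
    parity = bytearray(size)
    for frag in frags:
        for i, b in enumerate(frag):
            parity[i] ^= b
    return frags + [parity]

def decode(frags, lost):
    """XOR the surviving fragments to rebuild the fragment at index `lost`."""
    size = len(next(f for f in frags if f is not None))
    rec = bytearray(size)
    for j, frag in enumerate(frags):
        if j != lost:
            for i, b in enumerate(frag):
                rec[i] ^= b
    return rec

data = b"atomic storage!!"               # 16 bytes, k = 4 -> 4-byte fragments
frags = encode(data, k=4)
survivors = [f if j != 2 else None for j, f in enumerate(frags)]
assert decode(survivors, lost=2) == frags[2]
# Five replicas would store 5 * 16 = 80 bytes; these fragments store 5 * 4 = 20.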