@inproceedings{karapetyan2019algorithm,
  title = {Algorithm Configuration: Learning Policies for the Quick Termination of Poor Performers},
  abstract = {One way to speed up the algorithm configuration task is to use short runs instead of long runs as much as possible, but without discarding the configurations that eventually do well on the long runs. We consider the problem of selecting the top-performing configurations of Conditional Markov Chain Search (CMCS), a general algorithm schema that includes, for example, VNS. We investigate how the structure of performance on short tests links with that on long tests, showing that significant differences arise between test domains. We propose a ``performance envelope'' method to exploit these links: it learns when runs should be terminated, but automatically adapts to the domain.},
  conference = {LION 12 Learning and Intelligent Optimization Conference},
  booktitle = {Learning and Intelligent Optimization},
  series = {Lecture Notes in Computer Science},
  doi = {10.1007/978-3-030-05348-2_20},
  eissn = {1611-3349},
  isbn = {9783030053475},
  pages = {220--224},
  publicationstatus = {Published},
  publisher = {Springer Verlag},
  url = {https://nottingham-repository.worktribe.com/output/1659549},
  volume = {11353},
  year = {2019},
  author = {Karapetyan, Daniel and Parkes, Andrew J. and St{\"u}tzle, Thomas}
}