%Aigaion2 BibTeX export from Knowledge Engineering Publications
%Friday 17 December 2021 11:56:17 PM

@inproceedings{rapp20boomer,
   author    = {Rapp, Michael and Loza Menc{\'{\i}}a, Eneldo and F{\"{u}}rnkranz, Johannes and Nguyen, Vu-Linh and H{\"{u}}llermeier, Eyke},
   editor    = {Hutter, Frank and Kersting, Kristian and Lijffijt, Jefrey and Valera, Isabel},
   title     = {Learning Gradient Boosted Multi-label Classification Rules},
   booktitle = {Machine Learning and Knowledge Discovery in Databases ({ECML-PKDD})},
   series    = {Lecture Notes in Computer Science},
   volume    = {12459},
   pages     = {124--140},
   publisher = {Springer},
   year      = {2020},
   doi       = {10.1007/978-3-030-67664-3_8},
   url       = {https://link.springer.com/chapter/10.1007/978-3-030-67664-3_8},
   keywords  = {gradient boosting, multi-label classification, rule learning},
   abstract  = {In multi-label classification, where the evaluation of predictions is less straightforward than in single-label classification, various meaningful, though different, loss functions have been proposed. Ideally, the learning algorithm should be customizable towards a specific choice of the performance measure. Modern implementations of boosting, most prominently gradient boosted decision trees, appear to be appealing from this point of view. However, they are mostly limited to single-label classification, and hence not amenable to multi-label losses unless these are label-wise decomposable. In this work, we develop a generalization of the gradient boosting framework to multi-output problems and propose an algorithm for learning multi-label classification rules that is able to minimize decomposable as well as non-decomposable loss functions. Using the well-known Hamming loss and subset 0/1 loss as representatives, we analyze the abilities and limitations of our approach on synthetic data and evaluate its predictive performance on multi-label benchmarks.}
}