Roy, Devjeet; Fakhoury, Sarah; Lee, John; Arnaoudova, Venera A model to detect incremental readability improvements in incremental changes Inproceedings In: Proceedings of the International Conference on Program Comprehension (ICPC), pp. 25–36, 2020. Links | BibTeX | Tags: developers' perception, empirical study, machine learning, source code readability Ma, Yuzhan; Fakhoury, Sarah; Christensen, Mike; Arnaoudova, Venera; Zogaan, Waleed; Mirakhorli, Mehdi Automatic Classification of Software Artifacts in Open-Source Applications Inproceedings In: Proceedings of the Working Conference on Mining Software Repositories (MSR), pp. 414–425, 2018. Links | BibTeX | Tags: machine learning, open-source software, software artifacts Fakhoury, Sarah; Arnaoudova, Venera; Noiseux, Cedric; Khomh, Foutse; Antoniol, Giuliano Keep it simple: is deep learning good for linguistic smell detection? Inproceedings In: Proceedings of the International Conference on Software Analysis, Evolution, and Reengineering (SANER)—REproducibility Studies and NEgative Results (RENE) Track, 2018. Links | BibTeX | Tags: deep learning, empirical study, linguistic antipatterns, machine learning, source code identifiers, source code readability Abebe, Surafel Lemma; Arnaoudova, Venera; Tonella, Paolo; Antoniol, Giuliano; Guéhéneuc, Yann-Gaël Can Lexicon Bad Smells improve fault prediction? Inproceedings In: Proceedings of the Working Conference on Reverse Engineering (WCRE), pp. 235–244, 2012. Abstract | BibTeX | Tags: fault prediction, lexicon bad smells, machine learning, source code identifiers
2020
@inproceedings{Roy:icpc20:ReadabilityModel,
  title     = {A Model to Detect Readability Improvements in Incremental Changes},
  author    = {Roy, Devjeet and Fakhoury, Sarah and Lee, John and Arnaoudova, Venera},
  url       = {http://veneraarnaoudova.com/wp-content/uploads/2020/07/2020-ICPC-PREPRINT-A-Model-to-Detect-Readability-Improvements-in-Incremental-Changes.pdf},
  year      = {2020},
  date      = {2020-05-24},
  booktitle = {Proceedings of the International Conference on Program Comprehension ({ICPC})},
  pages     = {25--36},
  keywords  = {developers' perception, empirical study, machine learning, source code readability},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2018
@inproceedings{Ma:MSR18:ArtifactsClassification,
  title     = {Automatic Classification of Software Artifacts in Open-Source Applications},
  author    = {Ma, Yuzhan and Fakhoury, Sarah and Christensen, Mike and Arnaoudova, Venera and Zogaan, Waleed and Mirakhorli, Mehdi},
  url       = {http://veneraarnaoudova.ca/wp-content/uploads/2018/03/2018-MSR-preprint-automatic-classification-software-artifacts.pdf},
  year      = {2018},
  date      = {2018-03-02},
  booktitle = {Proceedings of the Working Conference on Mining Software Repositories ({MSR})},
  pages     = {414--425},
  keywords  = {machine learning, open-source software, software artifacts},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
@inproceedings{Fakhoury:saner:CNN,
  title     = {Keep it simple: is deep learning good for linguistic smell detection?},
  author    = {Fakhoury, Sarah and Arnaoudova, Venera and Noiseux, Cedric and Khomh, Foutse and Antoniol, Giuliano},
  url       = {http://veneraarnaoudova.ca/wp-content/uploads/2018/02/2018-SANER_RENE-preprint-simple-deep-learning.pdf},
  year      = {2018},
  date      = {2018-02-22},
  booktitle = {Proceedings of the International Conference on Software Analysis, Evolution, and Reengineering ({SANER})---{RE}producibility Studies and {NE}gative Results ({RENE}) Track},
  keywords  = {deep learning, empirical study, linguistic antipatterns, machine learning, source code identifiers, source code readability},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2012
@inproceedings{2012-WCRE-Abebe-LBSfaults,
  title     = {Can {Lexicon Bad Smells} improve fault prediction?},
  author    = {Abebe, Surafel Lemma and Arnaoudova, Venera and Tonella, Paolo and Antoniol, Giuliano and Gu{\'e}h{\'e}neuc, Yann-Ga{\"e}l},
  year      = {2012},
  date      = {2012-01-01},
  booktitle = {Proceedings of the Working Conference on Reverse Engineering ({WCRE})},
  pages     = {235--244},
  abstract  = {In software development, early identification of fault-prone classes can save a considerable amount of resources. In the literature, source code structural metrics have been widely investigated as one of the factors that can be used to identify faulty classes. Structural metrics measure code complexity, one aspect of the source code quality. Complexity might affect program understanding and hence increase the likelihood of inserting errors in a class. Besides the structural metrics, we believe that the quality of the identifiers used in the code may also affect program understanding and thus increase the likelihood of error insertion.
In this study, we measure the quality of identifiers using the number of Lexicon Bad Smells (LBS) they contain. We investigate whether using LBS in addition to structural metrics improves fault prediction. To conduct the investigation, we assess the prediction capability of a model while using i) only structural metrics, and ii) structural metrics and LBS. The results on three open source systems, ArgoUML, Rhino, and Eclipse, indicate that there is an improvement in the majority of the cases.},
  keywords  = {fault prediction, lexicon bad smells, machine learning, source code identifiers},
  pubstate  = {published},
  tppubtype = {inproceedings}
}