diff --git a/docs/report/bib-rapport.bib b/docs/report/bib-rapport.bib
index 8bde4a1..cfea6a4 100644
--- a/docs/report/bib-rapport.bib
+++ b/docs/report/bib-rapport.bib
@@ -1,333 +1,381 @@
@misc{Aylien16,
Author = {Aylien},
Date-Added = {2018-02-17 20:56:11 +0000},
Date-Modified = {2018-02-17 21:00:33 +0000},
Howpublished = {\url{http://blog.aylien.com/source-code-classification-using-deep-learning/}},
Keywords = {data science, research},
Month = {August},
Title = {Source Code Classification Using Deep Learning [blog post]},
Year = {2016}}
@misc{universal-ctags,
Author = {Universal Ctags Team},
Date-Added = {2018-02-17 20:53:07 +0000},
Date-Modified = {2018-02-17 20:54:44 +0000},
Howpublished = {\url{http://ctags.io/}},
Title = {Universal Ctags},
Year = {2001--2018}}
@misc{sloccount,
Author = {David A. Wheeler},
Date-Added = {2018-02-17 20:47:15 +0000},
Date-Modified = {2018-02-17 20:51:51 +0000},
Howpublished = {\url{https://www.dwheeler.com/sloccount/}},
Title = {SLOCCount},
Year = {2004--2018}}
@misc{cloc,
Author = {Al Danial},
Date-Added = {2018-02-17 20:46:02 +0000},
Date-Modified = {2018-02-17 20:46:38 +0000},
Howpublished = {\url{https://github.com/AlDanial/cloc}},
Title = {cloc},
Year = {2006--2018}}
@misc{guesslang,
Author = {Y. Somda},
Date-Added = {2018-02-17 20:27:54 +0000},
Date-Modified = {2018-02-17 20:43:42 +0000},
Howpublished = {\url{http://guesslang.readthedocs.io/}},
Title = {Guesslang},
Year = {2017--2018}}
@misc{linguist,
Author = {Github},
Date-Added = {2018-02-17 20:21:27 +0000},
Date-Modified = {2018-02-17 20:26:46 +0000},
Howpublished = {\url{https://github.com/github/linguist}},
Title = {Linguist},
Year = {2011--2018}}
@misc{ohcount,
Author = {Black Duck Software},
Date-Added = {2018-02-17 20:11:31 +0000},
Date-Modified = {2018-02-17 21:03:52 +0000},
Title = {Ohcount},
Howpublished = {\url{https://github.com/blackducksoftware/ohcount}},
Year = {2008--2018}}
@inproceedings{vanDam16,
Author = {J. K. v. Dam and V. Zaytsev},
Booktitle = {2016 IEEE 23rd International Conference on Software Analysis, Evolution, and Reengineering (SANER)},
Doi = {10.1109/SANER.2016.92},
Keywords = {meta data;natural language processing;pattern classification;program diagnostics;software maintenance;text analysis;embedded code fragments;file extensions;grammar-based text analysis;keyword search;legacy code analysis;multinominal naïve Bayes;n-grams;natural language classifiers;natural language processing field;normalised compression distance;skip-grams;software artefact metadata;software language identification;statistical language models;universal IDE support;Cascading style sheets;HTML;Java;Natural languages;Software;Training;Training data;language identification;natural language processing;software language engineering},
Month = {March},
Pages = {624-628},
Title = {Software Language Identification with Natural Language Classifiers},
Volume = {1},
Year = {2016},
Bdsk-Url-1 = {http://dx.doi.org/10.1109/SANER.2016.92}}
@article{Klein11,
Archiveprefix = {arXiv},
Author = {David Klein and Kyle Murray and Simon Weber},
Bibsource = {dblp computer science bibliography, http://dblp.org},
Biburl = {http://dblp.org/rec/bib/journals/corr/abs-1106-4064},
Eprint = {1106.4064},
Journal = {CoRR},
Timestamp = {Wed, 07 Jun 2017 14:41:07 +0200},
Title = {Algorithmic Programming Language Identification},
Url = {http://arxiv.org/abs/1106.4064},
Volume = {abs/1106.4064},
Year = {2011},
Bdsk-Url-1 = {http://arxiv.org/abs/1106.4064}}
@inproceedings{Gilda17,
Author = {S. Gilda},
Booktitle = {2017 14th International Joint Conference on Computer Science and Software Engineering (JCSSE)},
Doi = {10.1109/JCSSE.2017.8025917},
Keywords = {feature extraction;learning (artificial intelligence);neural nets;pattern classification;programming languages;software engineering;source code (software);artificial neural network;convolutional neural network;file extension;intelligent feature extraction;multilayer neural network;neural networks;programming languages;software development industry;source code classification;supervised learning;word embedding layers;Feature extraction;HTML;Syntactics;Training;Artificial neural network;Feature extraction;Multi-layer neural network;Supervised learning},
Month = {July},
Pages = {1-6},
Title = {Source code classification using Neural Networks},
Year = {2017},
Bdsk-Url-1 = {http://dx.doi.org/10.1109/JCSSE.2017.8025917}}
@article{Zevin17,
Archiveprefix = {arXiv},
Author = {Shaul Zevin and Catherine Holzem},
Bibsource = {dblp computer science bibliography, http://dblp.org},
Biburl = {http://dblp.org/rec/bib/journals/corr/ZevinH17},
Eprint = {1703.07638},
Journal = {CoRR},
Timestamp = {Wed, 07 Jun 2017 14:41:28 +0200},
Title = {Machine Learning Based Source Code Classification Using Syntax Oriented Features},
Url = {http://arxiv.org/abs/1703.07638},
Volume = {abs/1703.07638},
Year = {2017},
Bdsk-Url-1 = {http://arxiv.org/abs/1703.07638}}
@inproceedings{Ugurel02,
Acmid = {775141},
Address = {New York, NY, USA},
Author = {Ugurel, Secil and Krovetz, Robert and Giles, C. Lee},
Booktitle = {Proceedings of the Eighth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},
Doi = {10.1145/775047.775141},
Isbn = {1-58113-567-X},
Location = {Edmonton, Alberta, Canada},
Numpages = {7},
Pages = {632--638},
Publisher = {ACM},
Series = {KDD '02},
Title = {What's the Code?: Automatic Classification of Source Code Archives},
Url = {http://doi.acm.org/10.1145/775047.775141},
Year = {2002},
Bdsk-Url-1 = {http://doi.acm.org/10.1145/775047.775141},
Bdsk-Url-2 = {http://dx.doi.org/10.1145/775047.775141}}
@inproceedings{Wang15,
author = {Peng Wang and
Jiaming Xu and
Bo Xu and
Cheng{-}Lin Liu and
Heng Zhang and
Fangyuan Wang and
Hongwei Hao},
title = {Semantic Clustering and Convolutional Neural Network for Short Text
Categorization},
booktitle = {Proceedings of the 53rd Annual Meeting of the Association for Computational
Linguistics and the 7th International Joint Conference on Natural
Language Processing of the Asian Federation of Natural Language Processing,
{ACL} 2015, July 26-31, 2015, Beijing, China, Volume 2: Short Papers},
pages = {352--357},
year = {2015},
url = {http://aclweb.org/anthology/P/P15/P15-2058.pdf},
timestamp = {Mon, 03 Aug 2015 08:13:34 +0200},
biburl = {http://dblp.org/rec/bib/conf/acl/WangXXLZWH15},
bibsource = {dblp computer science bibliography, http://dblp.org}
}
@inproceedings{Khasnabish14,
author = {Jyotiska Nath Khasnabish and
Mitali Sodhi and
Jayati Deshmukh and
G. Srinivasaraghavan},
title = {Detecting Programming Language from Source Code Using Bayesian Learning
Techniques},
booktitle = {Machine Learning and Data Mining in Pattern Recognition - 10th International
Conference, {MLDM} 2014, St. Petersburg, Russia, July 21-24, 2014.
Proceedings},
pages = {513--522},
year = {2014},
url = {https://doi.org/10.1007/978-3-319-08979-9_39},
doi = {10.1007/978-3-319-08979-9_39},
timestamp = {Wed, 17 May 2017 14:25:11 +0200},
biburl = {http://dblp.org/rec/bib/conf/mldm/KhasnabishSDS14},
bibsource = {dblp computer science bibliography, http://dblp.org}
}
@misc{Heres16,
Author = {Daniël Heres},
Howpublished = {\url{http://blog.aylien.com/source-code-classification-using-deep-learning/}},
Month = {July},
Title = {Detecting the Programming Language of Source Code Snippets using Machine Learning and Neural Networks [blog post]},
Year = {2016}}
@Inbook{Aggarwal12,
author={Aggarwal, Charu C.
and Zhai, ChengXiang},
editor={Aggarwal, Charu C.
and Zhai, ChengXiang},
title={A Survey of Text Classification Algorithms},
bookTitle={Mining Text Data},
year={2012},
publisher={Springer US},
address={Boston, MA},
pages={163--222},
abstract={The problem of classification has been widely studied in the data mining, machine learning, database, and information retrieval communities with applications in a number of diverse domains, such as target marketing, medical diagnosis, news group filtering, and document organization. In this paper we will provide a survey of a wide variety of text classification algorithms.},
isbn={978-1-4614-3223-4},
doi={10.1007/978-1-4614-3223-4_6},
url={https://doi.org/10.1007/978-1-4614-3223-4_6}
}
@article{Chen09,
title = {Feature selection for text classification with Naïve Bayes},
journal = {Expert Systems with Applications},
volume = {36},
number = {3, Part 1},
pages = {5432 - 5435},
year = {2009},
issn = {0957-4174},
doi = {https://doi.org/10.1016/j.eswa.2008.06.054},
url = {http://www.sciencedirect.com/science/article/pii/S0957417408003564},
author = {Jingnian Chen and Houkuan Huang and Shengfeng Tian and Youli Qu},
keywords = {Text classification, Feature selection, Text preprocessing, Naïve Bayes}
}
@misc{MLatB16,
Author = {Machine Learning at Berkeley},
Howpublished = {\url{https://ml.berkeley.edu/blog/2016/12/03/github/}},
Keywords = {data science, research},
Month = {December},
Title = {Github Programming Language Classification [blog post]},
Year = {2016}
}
@article{Cavnar94,
title={N-gram-based text categorization},
author={Cavnar, William B and Trenkle, John M and others},
journal={Ann arbor mi},
volume={48113},
number={2},
pages={161--175},
year={1994},
publisher={Citeseer}
}
@article{Kim15,
author = {Yoon Kim and
Yacine Jernite and
David Sontag and
Alexander M. Rush},
title = {Character-Aware Neural Language Models},
journal = {CoRR},
volume = {abs/1508.06615},
year = {2015},
url = {http://arxiv.org/abs/1508.06615},
archivePrefix = {arXiv},
eprint = {1508.06615},
timestamp = {Wed, 07 Jun 2017 14:41:17 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/KimJSR15},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{Kim14,
author = {Yoon Kim},
title = {Convolutional Neural Networks for Sentence Classification},
journal = {CoRR},
volume = {abs/1408.5882},
year = {2014},
url = {http://arxiv.org/abs/1408.5882},
archivePrefix = {arXiv},
eprint = {1408.5882},
timestamp = {Wed, 07 Jun 2017 14:40:07 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/Kim14f},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{kenlm,
author = {Kenneth Heafield},
title = {{KenLM:} Faster and Smaller Language Model Queries},
year = {2011},
month = {July},
booktitle = {Proceedings of the {EMNLP} 2011 Sixth Workshop on Statistical Machine Translation},
address = {Edinburgh, Scotland, United Kingdom},
pages = {187--197},
url = {https://kheafield.com/papers/avenue/kenlm.pdf},
}
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
@misc{keras,
title={Keras},
author={Chollet, Fran\c{c}ois and others},
year={2015},
howpublished={\url{https://keras.io}},
}
@misc{tensorflow2015-whitepaper,
title={ {TensorFlow}: Large-Scale Machine Learning on Heterogeneous Systems},
howpublished={\url{https://www.tensorflow.org/}},
author={
Mart\'{\i}n~Abadi and
Ashish~Agarwal and
Paul~Barham and
Eugene~Brevdo and
Zhifeng~Chen and
Craig~Citro and
Greg~S.~Corrado and
Andy~Davis and
Jeffrey~Dean and
Matthieu~Devin and
Sanjay~Ghemawat and
Ian~Goodfellow and
Andrew~Harp and
Geoffrey~Irving and
Michael~Isard and
Yangqing Jia and
Rafal~Jozefowicz and
Lukasz~Kaiser and
Manjunath~Kudlur and
Josh~Levenberg and
Dandelion~Man\'{e} and
Rajat~Monga and
Sherry~Moore and
Derek~Murray and
Chris~Olah and
Mike~Schuster and
Jonathon~Shlens and
Benoit~Steiner and
Ilya~Sutskever and
Kunal~Talwar and
Paul~Tucker and
Vincent~Vanhoucke and
Vijay~Vasudevan and
Fernanda~Vi\'{e}gas and
Oriol~Vinyals and
Pete~Warden and
Martin~Wattenberg and
Martin~Wicke and
Yuan~Yu and
Xiaoqiang~Zheng},
year={2015},
-}
\ No newline at end of file
+}
+
+
+@article{Gepperth16,
+ Abstract = {We present a biologically inspired architecture for incremental learning that remains resource-efficient even in the face of very high data dimensionalities (>1000) that are typically associated with perceptual problems. In particular, we investigate how a new perceptual (object) class can be added to a trained architecture without retraining, while avoiding the well-known catastrophic forgetting effects typically associated with such scenarios. At the heart of the presented architecture lies a generative description of the perceptual space by a self-organized approach which at the same time approximates the neighborhood relations in this space on a two-dimensional plane. This approximation, which closely imitates the topographic organization of the visual cortex, allows an efficient local update rule for incremental learning even in the face of very high dimensionalities, which we demonstrate by tests on the well-known MNIST benchmark. We complement the model by adding a biologically plausible short-term memory system, allowing it to retain excellent classification accuracy even under incremental learning in progress. The short-term memory is additionally used to reinforce new data statistics by replaying previously stored samples during dedicated ``sleep'' phases.},
+ Author = {Gepperth, Alexander and Karaoguz, Cem},
+ Day = {01},
+ Doi = {10.1007/s12559-016-9389-5},
+ Issn = {1866-9964},
+ Journal = {Cognitive Computation},
+ Month = {Oct},
+ Number = {5},
+ Pages = {924--934},
+ Title = {A Bio-Inspired Incremental Learning Architecture for Applied Perceptual Problems},
+ Url = {https://doi.org/10.1007/s12559-016-9389-5},
+ Volume = {8},
+ Year = {2016},
+ Bdsk-Url-1 = {https://doi.org/10.1007/s12559-016-9389-5}}
+
+@article{RebuffiKL16,
+ author = {Sylvestre{-}Alvise Rebuffi and
+ Alexander Kolesnikov and
+ Christoph H. Lampert},
+ title = {iCaRL: Incremental Classifier and Representation Learning},
+ journal = {CoRR},
+ volume = {abs/1611.07725},
+ year = {2016},
+ url = {http://arxiv.org/abs/1611.07725},
+ archivePrefix = {arXiv},
+ eprint = {1611.07725},
+ timestamp = {Wed, 07 Jun 2017 14:42:11 +0200},
+ biburl = {https://dblp.org/rec/bib/journals/corr/RebuffiKL16},
+ bibsource = {dblp computer science bibliography, https://dblp.org}
+}
+
+@article{Kemker17,
+ author = {Ronald Kemker and
+ Christopher Kanan},
+ title = {FearNet: Brain-Inspired Model for Incremental Learning},
+ journal = {CoRR},
+ volume = {abs/1711.10563},
+ year = {2017},
+ url = {http://arxiv.org/abs/1711.10563},
+ archivePrefix = {arXiv},
+ eprint = {1711.10563},
+ timestamp = {Mon, 04 Dec 2017 18:34:59 +0100},
+ biburl = {https://dblp.org/rec/bib/journals/corr/abs-1711-10563},
+ bibsource = {dblp computer science bibliography, https://dblp.org}
+}
diff --git a/docs/report/report-en.pdf b/docs/report/report-en.pdf
index 6082769..856dd40 100644
Binary files a/docs/report/report-en.pdf and b/docs/report/report-en.pdf differ
diff --git a/docs/report/report-en.tex b/docs/report/report-en.tex
index 5849bb9..6024d65 100644
--- a/docs/report/report-en.tex
+++ b/docs/report/report-en.tex
@@ -1,712 +1,757 @@
\documentclass[a4paper,12pt]{article}
\usepackage[a4paper,left=3cm,right=3cm,top=3cm,bottom=3cm]{geometry}
\usepackage[english]{babel}
\usepackage[parfill]{parskip}
\usepackage{graphicx}
\usepackage{xeCJK}
\setCJKmainfont{Songti SC Light}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{xunicode}
\usepackage[utf8]{inputenc}
\usepackage[charter]{mathdesign}
\usepackage{url}
\usepackage{hyperref}
\usepackage{multirow}
\usepackage[toc,page]{appendix}
\usepackage{tabularx}
\usepackage{longtable}
\usepackage{listings}
\lstset{basicstyle=\footnotesize\ttfamily,breaklines=true, upquote=true}
\usepackage{textcomp}
\usepackage{graphicx}
\usepackage{subfig}
\usepackage[labelfont={small,sc}, font={small}]{caption}
\DeclareTextCommand{\nobreakspace}{T1}{\leavevmode\nobreak\ }
\title{Large-scale Programming Language Detection}
\author{Yuan YIN}
\date{}
\begin{document}
\maketitle
\begin{abstract}
(to be completed)
\end{abstract}
\tableofcontents
\section{Introduction}
Programming Language Detection is the problem of identifying which programming language a piece of source code is written in. We define a piece of source code as a textual sequential representation of an artefact, normally in the form of a character sequence or, more generally, a byte sequence. More precisely, the objective is to build a model that predicts the language of a given byte sequence.
The problem is formally defined as follows: given a byte sequence $d$ and a set of $n$ candidate languages $\{l_1, ..., l_n\}$,
\[l_d = \underset{l_i\in \{l_1, ..., l_n\}}{\arg \max}\ m(d, l_i),\]
where $l_d$ is the predicted language and the model $m$ computes a value indicating the likelihood that document $d$ is written in language $l_i$; the most likely language is chosen as the recognised language of the document.
In general, Programming Language Detection can be used in several situations, for example: reporting the language composition of software projects in version control systems (the GitHub team develops the project Linguist to report which languages a project is written in); searching for code in plain text, in order to track the popularity of a language; and helping IDEs choose the language whose support functionalities, such as syntax highlighting, should be activated.
We dive into this problem in the context of \emph{Software Heritage}. \emph{Software Heritage}, initiated by Inria, is an archive in which 4 billion source code files from 80 million projects are stored.
The reason why language detection is requested by \emph{Software Heritage} is that the language of a file cannot be deduced from a filename extension. In \emph{Software Heritage}, every source code file is a blob containing the raw content of the file, that is, a sequence of bytes without any extra information such as the filename (including the filename extension), metadata, \emph{etc}. Since each blob is identified by an intrinsic identifier computed from the blob itself, the duplication of files is avoided. For this reason, all existing tools depending on filenames fail in our context, and methods recognising the language from a bare sequence of bytes are strongly needed.
(To be fixed after the redaction)
In this report, we briefly introduce the state-of-the-art methods in Section 2. In Section 3, we describe how a feasible dataset was built. In Section 4, we explain the methods that we took into account for the evaluation.
We provide the implemented methods and more detailed results on the \emph{Software Heritage} forge\footnote{\url{http://}}.
\section{Related Works}
The existing approaches can be divided into two categories: practical methods and machine learning methods.
Practical methods mostly rely on empirical rules or external information; the basic ideas are as follows:
\begin{itemize}
\item Judging from the filename extension. Ohcount \cite{ohcount} and Linguist \cite{linguist} perform the detection by hashing the filename extension. The problem with this straightforward method is that some extensions map to several languages, \emph{e.g.} \texttt{*.m} refers to a file written in Objective-C or MATLAB, and \texttt{*.pl} to Perl or Prolog.
\item Grammar-based approaches. The principle is to try parsing the file with the grammar of every language, which is complex to model and demands a heavy amount of computation time.
\item Heuristic approaches. Most of them, such as SLOCCount \cite{sloccount}, use predefined regular expressions to capture empirically discovered features, \emph{e.g.} a file starting with ``\texttt{\#include}'' is probably written in C. Others look for hints in the file, such as shebang lines, Vim modelines, Emacs modelines, \emph{etc}.
\end{itemize}
In machine learning, the problem is regarded as a sub-problem of \emph{text categorisation} or \emph{text classification}: given a piece of text, we look for a function that predicts which category the text belongs to. The state-of-the-art methods build such a function from example input-output pairs, an approach known as \emph{supervised learning}.
Ugurel \emph{et al.} \cite{Ugurel02} first select features by Expected Entropy Loss for each language, then vectorise the tested document into a vector representing the presence of the selected features. Since the Support Vector Machine (SVM) is a binary classifier, the $n$-class classification is resolved by training $n \choose 2$ SVMs arranged in a decision tree. Van Dam and Zaytsev \cite{vanDam16} test several popular and performant methods from Natural Language Processing. Multinomial Naïve Bayes (MNB), one of the variants of Naïve Bayes classifiers, uses the frequency of a word or a sequence of words in a byte sequence to decide the most likely corresponding programming language. The $n$-gram and skip-gram models calculate, for each gram, the probability of its appearance given the preceding grams. Normalised Compression Distance compares a piece of compressed code to the examples in the training set, then chooses the nearest language as the prediction. MNB and the $n$-gram model outperform the others according to the experimental results. Gilda \cite{Gilda17} adopts a Convolutional Neural Network (ConvNet) setup common in NLP and demonstrates its performance.
\section{Dataset}
We considered applying both supervised and unsupervised learning to the problem. However, the usefulness of unsupervised learning turned out to be quite limited (we will come back to it in Section 6), so we focus on supervised methods.
Supervised learning methods require a dataset containing labelled inputs to train and to evaluate the model. Since Programming Language Detection is nowadays not seriously considered as an important subject in machine learning, for the reason that it can be resolved by adopting existing ML classifiers, the articles are rarely accompanied by a publicly available dataset. Therefore, we build our own dataset for our experiments.
GitHub\footnote{\url{https://www.github.com/}} is one of the most popular web-based hosting services for the Git version control system, reporting more than 57 million repositories. We decided to build the dataset from GitHub.
\paragraph{Ground Truth Supposition}
In the context of \emph{Software Heritage}, our aim is to cover as many languages as possible for classification; the dataset we build therefore inevitably contains a large number of files, which would be unaffordable to label manually. We thus rely on automatic labelling tools.
-Linguist \cite{linguist} is the tool of language detection developed by the GitHub team for unveiling the language composition in git repository, service provided on GitHub through API. There exists a command line version Linguist producing list of files by language for repository. Given that filename extensions are visible for Linguist and such features boost enormously on accuracy of classification (we will show this claim in later experiment), we suppose that the language recognised by Linguist is the ground truth language attributed to it.
+Linguist \cite{linguist} is the language detection tool developed by the GitHub team for unveiling the language composition of git repositories, a service provided on GitHub through its API. A command-line version of Linguist produces, for a repository, the list of files per language. Given that filename extensions are visible to Linguist and that such features boost classification accuracy enormously (we support this claim in a later experiment), we take the language recognised by Linguist as the ground truth language of a file. Since the original Linguist does not report detailed results for some data description languages, \emph{e.g.} XML and JSON, we slightly modified Linguist to integrate these missing languages.
\paragraph{Source Code Recuperation and Languages Included}
The dataset is built in the context of \emph{Software Heritage}. Therefore, the list of languages we consider integrating into the system should cover as many languages as possible.
-We initially took the entire language list of Linguist (version 6.0.1) into account for repository fetching. For each language, we fetch the first 75 repositories which top on the list ordered by number of stars, manifesting the popularity of the repository. To avoid huge repositories, we ignore all repositories whose size is superior to 150~MiB.
+We initially took the entire language list of Linguist into account for repository fetching. For each language, we fetch the top 75 repositories of the list ordered by number of stars, a proxy for the popularity of a repository. To avoid huge repositories, we ignore all repositories larger than 150~MiB.
We then eliminate the languages, mostly data description languages, for which we could not fetch any repository from GitHub. We successfully fetched 3,525,897 files for the 323 valid languages shown in Table~\ref{tab:lan}.
\section{Methods for Evaluation}
In this section, we describe the NLP methods tested on our dataset:
\begin{itemize}
\item $n$-gram-based frequency distance model,
\item $n$-gram model,
\item Multinomial Naïve Bayes (MNB), and
\item Convolutional Neural Networks (ConvNet).
\end{itemize}
The first approach serves as a baseline for evaluating the accuracy and efficiency of the models.
Given that in \emph{Software Heritage} every file is only a sequence of bytes, whose encoding we cannot assert and for which we cannot even judge whether it is a binary file or not, we are particularly interested in approaches working at byte level.
\subsection{Baseline: $n$-gram-based frequency distance}
\paragraph{$n$-gram}
An $n$-gram is a slice of a larger sequence containing $n$ units. In NLP, the sequence is naturally a string. Depending on the problem, a unit represents a character or a word.
For example, the 8-character string ``\texttt{print(n)}'' generates the following character-based $n$-grams:
\begin{itemize}
\item unigrams: \texttt{p, r, ..., )}
\item bigrams: \texttt{\textvisiblespace p, pr, ri, ..., n), )\textvisiblespace}
\item trigrams: \texttt{\textvisiblespace\textvisiblespace p, \textvisiblespace pr, pri, rit, ..., n)\textvisiblespace, )\textvisiblespace\textvisiblespace}
\item ...
\end{itemize}
or word-based $n$-grams:
\begin{itemize}
\item unigrams: \texttt{, print, (, n, ), }
\item bigrams: \texttt{ print, print (, ( n, n ), ) }
\item trigrams: \texttt{ print (, print ( n, ( n ), n ) }
\item ...
\end{itemize}
Strings are often padded with a start marker and an end marker. In general, a $k$-unit sequence generates exactly $k-(n-1)$ $n$-grams.
Cavnar and Trenkle \cite{Cavnar94} introduce an early NLP method using the distance between two $n$-gram frequency profiles.
Zipf's law is an empirical observation stating that the $n$-th most common word in a human language occurs with a frequency inversely proportional to $n$. By retaining the most common words, it is therefore possible to obtain a list describing the characteristics of the language.
At the training phase, a bag of $n$-grams is generated for each document in the training set. By gathering all the bags of a language and counting the occurrences of each $n$-gram, a list of $n$-grams ordered by number of occurrences is created as the \emph{category profile} of the class. Only the 300 most frequent $n$-grams are kept, since they are highly correlated with the language.
The \emph{distance} between category profile and document profile is defined as follows:
Given trained category profiles $p_{l_1}, ..., p_{l_k}$ for $k$ languages, and document profile $p_{d}$ of test document $d$,
\[
distance(p_{l_i}, p_{d}) = \sum_{w\in p_{d}} | rankdist(w, p_d, p_{l_i})|
\]
\[
rankdist(w, p_d, p_{l_i})=
\begin{cases}
|rank(w, p_d) - rank(w, p_{l_i})| & \text{if }rank(w, p_{l_i}) \text{ exists,} \\
|p_d| & \text{else}
\end{cases}
\]
where a profile $p$ contains an ordered list of $n$-grams and $rank(w, p)$ returns the rank of $w$ in the list $p$. $rankdist(w, p_d, p_{l_i})$ returns the out-of-place distance between the two profiles if $w$ appears in $p_{l_i}$; if $w$ is an out-of-vocabulary $n$-gram, the distance is the length of the document profile $p_d$.
We then categorise the document as the language with the minimum distance.
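As an illustration, the following Python sketch summarises this baseline with byte-level unigrams to trigrams and 300-entry profiles; the helper names and the choice of $n$ values are ours and purely illustrative.
\begin{lstlisting}
from collections import Counter

def profile(documents, n_values=(1, 2, 3), size=300):
    # Category profile: the `size` most frequent n-grams over all documents.
    counts = Counter()
    for doc in documents:                      # doc is a byte string
        for n in n_values:
            counts.update(doc[i:i + n] for i in range(len(doc) - n + 1))
    return [g for g, _ in counts.most_common(size)]

def distance(doc_profile, cat_profile):
    # Out-of-place distance between a document profile and a category profile.
    rank = {g: r for r, g in enumerate(cat_profile)}
    return sum(abs(r - rank[g]) if g in rank else len(doc_profile)
               for r, g in enumerate(doc_profile))

def classify(doc, category_profiles):          # {language: category profile}
    doc_profile = profile([doc])
    return min(category_profiles,
               key=lambda l: distance(doc_profile, category_profiles[l]))
\end{lstlisting}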
\subsection{Multinomial Naïve Bayes}
This approach is introduced by van Dam and Zaytsev \cite{vanDam16}.
In the Naïve Bayes model, we assume that the words of a document are independent of each other. According to Bayes' theorem,
\begin{eqnarray*}
P(l|w_1w_2...w_n) & = & \frac{P(w_1w_2...w_n|l)P(l)}{P(w_1w_2...w_n)} \\
& = & c\cdot P(l) P(w_1w_2...w_n|l)\\
& = & c\cdot P(l) \prod_{i = 1}^n P(w_i|l)
\end{eqnarray*}
The probability of $w_i$ in language $l$ is estimated from its occurrences in the language under the bag-of-words assumption, with add-one smoothing:
\[
P(w_i|l) = \frac{C_l(w_i) + 1}{\sum_{w\in V}C_l(w) + |V|}
\]
where $C_l$ gives the frequency of a word in language $l$ and $V$ is the vocabulary over all languages.
The word-independence assumption is quite limiting for classification; in practice we replace single words by unigrams up to 5-grams in the original method, in order to take the context of words into account.
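With scikit-learn, which we use for our MNB implementation, this amounts to a short pipeline; the vectoriser settings below are an illustrative sketch, and \texttt{train\_texts}/\texttt{train\_labels} stand for the training documents and their language labels.
\begin{lstlisting}
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline

# Word n-grams from unigrams up to 5-grams; alpha=1.0 is the add-one
# smoothing of the estimate above.
model = make_pipeline(
    CountVectorizer(analyzer="word", ngram_range=(1, 5)),
    MultinomialNB(alpha=1.0),
)
model.fit(train_texts, train_labels)
predicted = model.predict(test_texts)
\end{lstlisting}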
\subsection{$n$-gram model}
The approach is introduced by van Dam and Zaytsev \cite{vanDam16}. Like the previous method, the $n$-gram model also utilises statistical properties of $n$-grams, but in another way.
Originally, the $n$-gram model aims at predicting the probability of a unit given the $n-1$ units that precede it. Given a unit $w_i$, the probability of its occurrence in a sequence is defined as:
\[
P(w_i | w_1...w_{i-1})
\]
According to Markov assumption, we omit older context in the sequence,
\[
P(w_i | w_1...w_{i-1}) \approx P(w_i | w_{i-(n-1)}...w_{i-1})
\]
In practice, the probability can be estimated by maximum likelihood estimation (MLE):
\[
P(w_i | w_{i-(n-1)}...w_{i-1}) = \frac{C(w_{i-(n-1)}...w_{i-1}w_{i})}{C(w_{i-(n-1)}...w_{i-1})}
\]
where $C$ gives the count of given $n$-gram.
By the chain rule of probability and the preceding approximation,
\[
P(w_1w_2...w_n)\approx \prod_{i = 1}^n P(w_i|w_{i-(n-1)}...w_{i-1})
\]
Now we transform such model into a classifier. Given a sequence $w_1w_2...w_n$, we assume that each language $l$ appears with the same probability and the probability of a given sequence is fixed.
According to Bayes' Theorem,
\begin{eqnarray*}
P(l|w_1w_2...w_n) & = & \frac{P(w_1w_2...w_n|l)P(l)}{P(w_1w_2...w_n)} \\
& = & c\cdot P(w_1w_2...w_n|l)\\
& = & c\cdot \prod_{i = 1}^n P(w_i|w_{i-(n-1)}...w_{i-1}, l)
\end{eqnarray*}
Rather than counting $n$-grams in the tested document, the probability of an $n$-gram is estimated from the $n$-gram frequencies of the language, obtained from the training set.
\[
P(w_i | w_{i-(n-1)}...w_{i-1}, l) = \frac{C_l(w_{i-(n-1)}...w_{i-1}w_{i})}{C_l(w_{i-(n-1)}...w_{i-1})}
\]
where $C_l$ gives the $n$-gram counts for language $l$ in the training set.
While estimating the probability of $n$-grams, smoothing techniques are required because of the possible occurrence of \emph{out-of-vocabulary (OOV)} $n$-grams. In our case, modified Kneser-Ney smoothing is applied, since it is one of the methods giving the best experimental results in \cite{vanDam16}.
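A sketch of how this can be done with KenLM, which we use for our implementation: one model is trained per language, \emph{e.g.} with \texttt{lmplz -o 5} (whose default smoothing is modified Kneser-Ney), and the language whose model assigns the highest log-probability to the tokenised document is chosen. The file names below are illustrative.
\begin{lstlisting}
import kenlm

# One 5-gram model per language, trained beforehand with e.g.:
#   lmplz -o 5 < tokenised_corpus_of_language.txt > language.arpa
models = {lang: kenlm.Model(lang + ".arpa")
          for lang in ("c", "cpp", "python")}

def classify(tokenised_document):
    # score() returns the log10 probability of the whitespace-separated
    # sequence; under a uniform prior over languages this is proportional
    # to P(l | document).
    return max(models, key=lambda l: models[l].score(tokenised_document))
\end{lstlisting}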
\subsection{Convolutional Neural Network (ConvNet)}
The Convolutional Neural Network is one of the most popular machine learning models, usually used for image classification. It is a class of deep feed-forward artificial neural networks.
The following two architectures are tested in Section 5.
-\subsubsection{Word-level Approach (Ongoing)}
+\subsubsection{Word-level Approach}
\label{sec:word-conv}
Although Gilda \cite{Gilda17} shows the performance of his own architecture, we are not able to rebuild the same network due to the lack of architecture details and hyper-parameter configuration. We therefore turn to other architectures.
Kim \cite{Kim14} introduces a ConvNet for natural language sentence classification. Figure~\ref{fig:word-convnet} illustrates the architecture of the network.
\paragraph{Word Embedding}
In this architecture, the word is the input unit. The $i$-th word $w_i$ is transformed into a vector $\mathbf{x}_i \in \mathbb{R}^k$ by a word embedding layer using \texttt{word2vec}. Word vectors are then concatenated to form the representation of the document, an $n\times k$ matrix.
The number of words $n$ of the document is fixed by the model. Therefore, a document longer than $n$ words is truncated and a shorter one is padded by concatenating zero-vectors at the beginning or the end of the matrix.
\paragraph{Feature Extraction}
In the convolutional layers, using a \emph{filter} $\mathbf{w_h} \in \mathbb{R}^{hk}$, a \emph{feature} $c_i$ is generated,
\[c_i = f(\mathbf{w_h}\cdot(\mathbf{x}_i\ ||\ \mathbf{x}_{i+1}\ ||\ ...\ ||\ \mathbf{x}_{i+h-1}) + b)\]
where $||$ is the vector concatenation operator, $b\in \mathbb{R}$ is a bias term and $f$ is an \emph{activation function} producing a feature from a set of inputs.
This procedure follows a principle similar to that of the $n$-gram model, but rather than extracting features from the original words, the ConvNet works on their vector representations.
Each filter produces a \emph{feature map}, a vector $\mathbf{c}^h\in \mathbb{R}^{n - h+1}$. A max-over-time pooling is then applied on the feature map $\mathbf{c}^h$, aiming at choosing the most important features with the highest values and avoiding overfitting at the training stage. We then obtain the final feature map of this $h\times k$ filter.
Several filters are often applied to obtain the corresponding feature map, representing a \emph{channel}. They are then concatenated vertically into a final feature map $\mathbf{c}$.
\paragraph{Classification}
A \emph{fully connected layer} is a traditional multi-layer perceptron layer whose neurons are all connected to every neuron of the preceding and following layers. The output layer uses a softmax activation function.
The feature map $\mathbf{c}$ is then fed into fully connected layers that extract higher-level features for the final classification. The output of these fully connected layers is a vector indicating the score obtained by each class: the higher the score, the more likely the document belongs to this class.
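A minimal Keras sketch of this architecture (the library we use for our implementation) is given below; the hyper-parameters, \emph{e.g.} input length 400, vocabulary of 15,000 tokens, embedding size 128, filter widths 3 to 5 with 100 filters each and a 1,024-neuron fully connected layer, are indicative values rather than a definitive configuration.
\begin{lstlisting}
from keras.layers import (Input, Embedding, Conv1D, GlobalMaxPooling1D,
                          Concatenate, Dropout, Dense)
from keras.models import Model

words = Input(shape=(400,))          # documents padded/truncated to 400 tokens
emb = Embedding(input_dim=15000, output_dim=128)(words)
pooled = []
for h in (3, 4, 5):                  # one channel per filter width
    conv = Conv1D(filters=100, kernel_size=h, activation="relu")(emb)
    pooled.append(GlobalMaxPooling1D()(conv))   # max-over-time pooling
features = Dropout(0.5)(Concatenate()(pooled))
hidden = Dense(1024, activation="relu")(features)
scores = Dense(323, activation="softmax")(hidden)   # one score per language

model = Model(words, scores)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
\end{lstlisting}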
-\begin{table}[t]
-\centering
-\begin{tabular}{|c|c|}
-\hline
-Hyperparameter & Value \\
-\hline
-input size & 400 \\
-vocabulary size & 15000 \\
-character embedding size & 128 \\
-filter sizes & [3, 4, 5] \\
-nb. of filter matrices & 100 \\
-dropout rate & 0.5 \\
-activation function & ReLU \\
-nb. of neurons in fully connected level & 1024 \\
-nb. of classes & 323 \\
-\hline
-\end{tabular}
-\caption{\label{tab:hyp-word} Details of hyperparameter configuration of word-level ConvNet architecture, referred from \cite{Kim14}.}
-\end{table}
-
-\subsubsection{Byte-level Approach (Ongoing)}
+\subsubsection{Byte-level Approach}
Kim \cite{Kim15} introduces a character-level ConvNet for language modelling. The original architecture is adapted by Chaitanya Joshi\footnote{\url{https://github.com/chaitjo/character-level-cnn}} into a classification model by replacing the recurrent layers with the same fully connected layers as in the word-level approach of Section~\ref{sec:word-conv}.
-\paragraph{Word Embedding} Instead of using word or token as feature, character-level approach could make use of character (or byte) without building a large vocabulary. Although the size of vocabulary is commonly considerably small, \emph{e.g.} 256 when we use every byte as character.
-
-\paragraph{Feature Extraction and Classification} These two parts are similar to word-level approach.
+Instead of using words or tokens as features, the character-level approach makes use of characters (or bytes) without building a large vocabulary: the vocabulary size is considerably smaller, \emph{e.g.} 256 when every byte is used as a character.
-\begin{table}[t!]
-\centering
-\
-\begin{tabular}{|c|c|}
-\hline
-Hyperparameter & Value \\
-\hline
-input size & 2,048 \\
-vocabulary size & 256 \\
-character embedding size & 32 \\
-filter sizes & [3, 5, 7, 9, 10] \\
-nb. of filter matrices & 256 \\
-activation function & ReLU \\
-nb. of neurons in fully connected level & 1,024 \\
-nb. of classes & 323 \\
-\hline
-\end{tabular}
-\caption{\label{tab:hyp-byte} Details of hyperparameter configuration of byte-level ConvNet architecture, referred from Chaitanya Joshi's adaptation.}
-\end{table}
+Feature extraction and classification are similar to the word-level approach.
\section{Experimental Results}
In this section, we present several questions that we aim to answer through experiments on our customised dataset.
\subsection{Implementation and System Setup}
We implement the methods described in Section 4 in Python 3, in order to eventually integrate one of them into \emph{Software Heritage}.
The baseline method is implemented in pure Python. We implement MNB using Scikit-learn. The $n$-gram model is implemented with KenLM \cite{kenlm}. The two ConvNets are both implemented with Keras \cite{keras} using TensorFlow \cite{tensorflow2015-whitepaper} as backend.
We execute most of the training and test phases on a laptop with a 2.7 GHz Intel Core i5 processor running macOS 10.13. The training phases of the two ConvNet methods are executed on an instance running Ubuntu 16.04 with one Intel Sandy Bridge virtual CPU, equipped with one NVIDIA Tesla K80 GPU, on Google Cloud Platform. The instance is configured to use the TensorFlow backend with GPU acceleration through the CUDA Deep Neural Network library (cuDNN).
-\subsection{Training Set And Test Set}
+\subsection{Training Set and Test Set}
-Files of the training set are randomly picked from the dataset at first time. To avoid the imbalance of the training set that impacts the performance of several methods in Section 4, we restrain the maximum number of training files to 500 for each language. The test set is then built from remaining samples, it includes up to 1000 files for testing.
+Files of the training set are randomly picked from the dataset. To avoid an imbalanced training set, which impacts the performance of several methods of Section 4, we restrain the maximum number of training files to 500 for each language. The test set is then built from the remaining samples; it includes up to 1,000 files per language.
We built 3 series of training and test sets of different sizes:
\begin{itemize}
\item \texttt{mini}: the 20 languages of \cite{vanDam16}, 10,000 training files, 20,000 test files.
\item \texttt{less}: 109 languages collecting more than 5,000 files in dataset, 54,500 training files, 109,000 test files.
\item \texttt{total}: 323 languages in Table~\ref{tab:lan}, 136,609 training files, 248,924 test files.
\end{itemize}
\subsection{Tokenisation}
In our case, tokenisation is not needed for the byte-level applications of the methods. The purpose of introducing a simple general tokeniser is to break a document into words, so as to make use of the word-based methods.
It is difficult to summarise the relationship between the alphabet of a programming language and its byte representation. We empirically assume that most programming languages share some basic characters, \emph{e.g.} the latin alphabet, parentheses, spaces, \emph{etc.}, and that most encoding standards cover these common characters.
A binary document is split on a set of characters (operators, punctuation, spaces, \emph{etc.}) and numbers (integers, floats, \emph{etc.}). All separators are kept as tokens after splitting.
For example, for the string ``\verb|print ("Hello world! 你好,世界!")|'' with UTF-8 encoding, its byte representation is
\begin{lstlisting}
"Hello world! \xe4\xbd\xa0\xe5\xa5\xbd\xef\xbc\x8c\xe4\xb8\x96\xe7\x95\x8c\xef\xbc\x81".
\end{lstlisting}
It is then tokenised into a sequence of 12 words:
\begin{lstlisting}
'print', ' ', '(', '"', 'Hello', ' ', 'world', '!', ' ', '\xe4\xbd\xa0\xe5\xa5\xbd\xef\xbc\x8c\xe4\xb8\x96\xe7\x95\x8c\xef\xbc\x81', '"', ')'
\end{lstlisting}
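A minimal sketch of such a tokeniser, working directly on bytes, could look as follows; the exact separator set is an assumption on our part.
\begin{lstlisting}
import re
import string

# Separators: ASCII punctuation/operators, whitespace, and digit runs;
# the capturing group makes re.split keep them as tokens.
SEPARATORS = re.compile(
    rb"([" + re.escape(string.punctuation).encode() + rb"\s]|\d+)")

def tokenise(data):
    return [token for token in re.split(SEPARATORS, data) if token]

print(tokenise(
    'print ("Hello world! \u4f60\u597d\uff0c\u4e16\u754c\uff01")'.encode("utf-8")))
\end{lstlisting}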
\subsection{Model Quality Metrics}
For a class $l$, the test results of documents can be grouped into 4 categories; we denote by $\hat{y_i}$ the ground truth class label and by $y_i$ the predicted label:
\begin{itemize}
\item True Positive (TP): when $\hat{y_i} = l$ and $y_i = l$, \emph{i.e.} document written in $l$ is recognised as the same language.
\item False Positive (FP): when $\hat{y_i} \neq l$ and $y_i = l$, \emph{i.e.} a document not written in language $l$ is incorrectly recognised as $l$.
\item True Negative (TN): when $\hat{y_i} \neq l$ and $y_i \neq l$, \emph{i.e.} document not written in $l$ is rejected by $l$.
\item False Negative (FN): when $\hat{y_i} = l$ and $y_i \neq l$, \emph{i.e.} document written in $l$ is incorrectly rejected by $l$.
\end{itemize}
In the context of classification, the quality of methods is measured by Precision, Recall and $F_1$ score.
Recall is also called True Positive Rate (TPR). It is the fraction of correctly classified samples over all samples that should be predicted as $l$:
\[\text{recall} = \frac{\text{\#TP}}{\text{\#TP}+\text{\#FN}}\]
Precision is also called Positive Predictive Value (PPV). It is the fraction of correctly classified samples over all samples predicted as $l$:
\[\text{precision} = \frac{\text{\#TP}}{\text{\#TP}+\text{\#FP}}\]
The harmonic mean of precision and recall is called $F_1$ score, introduced for balancing two metrics:
\[
F_1 = \left(\frac{\text{precision}^{-1} + \text{recall}^{-1}}{2}\right)^{-1} = 2\cdot\frac{\text{precision}\cdot\text{recall}}{\text{precision}+\text{recall}}
\]
In the following subsections, we use $F_1$ to measure the model quality for each class.
Global model quality is evaluated by the accuracy score:
\[
\text{accuracy}(y,\hat{y}) = \frac{1}{n}\sum_{i=0}^{n-1}1(y_i = \hat{y}_i)
\]
where $y$ denotes the predicted labels, $\hat{y}$ the ground truth labels, $n$ the number of samples and $1(\cdot)$ the indicator function. The score is the ratio of the number of samples whose predicted label matches the ground truth to the total number of samples.
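These metrics can be computed directly with scikit-learn, already used for MNB; a small sketch, assuming \texttt{y\_true} and \texttt{y\_pred} hold the ground truth and predicted labels:
\begin{lstlisting}
from sklearn.metrics import accuracy_score, f1_score

overall_accuracy = accuracy_score(y_true, y_pred)
per_class_f1 = f1_score(y_true, y_pred, average=None)  # one F1 per language
\end{lstlisting}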
\subsection{Experimental Results}
\subsubsection{Quality of Models}
The evaluation of the quality of models utilises the entire list of 323 languages.
\paragraph{Overall Quality}
Table~\ref{tab:total-comp} shows that the baseline method reaches only 46.14\% accuracy. The byte-level ConvNet marks the best accuracy at 87.26\%, much higher than the word-level ConvNet. Both MNB and the $n$-gram model reach acceptable results, at 85.10\% and 83.39\% respectively.
\begin{table}[t]
\centering
\begin{tabular}{|c|c|}
\hline
& Accuracy / \% \\ \hline
Baseline & 46.14 \\
MNB & 85.10 \\
$n$-gram model & 83.39 \\
Word-level ConvNet & 76.77 \\
Byte-level ConvNet & 87.26 \\ \hline
\end{tabular}
\caption{\label{tab:total-comp} Comparison of accuracy between evaluation methods.}
\end{table}
\paragraph{Inequality Between Classes} Although the overall score of the byte-level ConvNet reaches 87.26\%, the $F_1$ score of several classes is much lower than the average. For instance, the $F_1$ of NetLogo reaches 99.9\%, while C++ achieves only 47.8\%. Figure~\ref{fig:ineq} illustrates the huge gap between the best and worst results.
\begin{figure}[t!]
\centering
\subfloat[][25 languages with the highest $F_1$]{
\includegraphics[height=0.4\textwidth]{./comparison_cnn_f1_above}
}
\subfloat[][25 languages with the lowest $F_1$]{
\includegraphics[height=0.4\textwidth]{./comparison_cnn_f1_below}
}
\caption{\label{fig:ineq} Inequality between the best performing and the least performing classes.}
\end{figure}
\paragraph{Interclass Confusion}
Some languages are especially difficult for these methods to distinguish from each other. We visualise the confusion matrices of the methods in our repository in order to give some intuitive observations.
There are significant confusions between similar languages, \emph{e.g.} C and C++; Objective-C, Objective-C++ and Objective-J; Befunge and HyPhy; Java and Processing; NetLinx, PAWN and Ruby; JavaScript and Cycript; \emph{etc}.
\subsubsection{Benchmark and Model Sizes}
Table~\ref{tab:ben-train} shows that the first three basic NLP methods can be trained rapidly on CPU, even when a large number of classes is considered. The ConvNet methods demand more computing power in the training stage. In contrast, ConvNets classify a document over 10 times faster than the other $n$-gram based approaches.
\begin{table}[t]
\centering
\begin{tabular}{|c|c|c|c|}
\hline
& \multirow{2}{*}{Training Time} & Test Time & \multirow{2}{*}{Model Size}\\
& & (per file)&\\
\hline
Baseline & 1.8 h & 0.12 s & 3.8 MiB \\
MNB & 0.7 h & 2 s & 323.0 MiB \\
$n$-gram model & 0.8 h & 1.2 s & 663.1 MiB \\
\multirow{2}{*}{Word-level ConvNet} & 40.6 h & \multirow{2}{*}{0.01 s} & \multirow{2}{*}{313.3 MiB}\\
& (18.2 h*) & & \\
\multirow{2}{*}{Byte-level ConvNet} & 20.8 h & \multirow{2}{*}{0.01 s} & \multirow{2}{*}{32.8 MiB} \\
& (1.6 h*) & & \\
\hline
\end{tabular}
\footnotesize{*: Training time on a remote VM using a GPU.}
\caption{\label{tab:ben-train} Comparison of training time, test time (benchmarked on the same computer) and model size.}
\end{table}
\subsubsection{Filename Extension Is Important}
We know empirically that the filename extension is a critical feature for classification, but we would like to find out how important it is. Since ConvNets are good at highlighting the features that best distinguish the inputs, we test the performance of the byte-level ConvNet when the extension of the file is appended to the input.
For convenience, we test only on 20 languages of the list. Table~\ref{tab:ext} shows that adding the extension to the code improves the detection accuracy dramatically.
\begin{table}[t]
\centering
\begin{tabular}{|c|c|}
\hline
& Accuracy / \%\\
\hline
Without Extension & 93.70 \\
With Extension & \textbf{97.37} \\
\hline
\end{tabular}
\caption{\label{tab:ext} Comparison of accuracy with and without the filename extension, using byte-level ConvNet classification on 20 classes.}
\end{table}
\subsubsection{Word or Byte (Ongoing)}
We compare the byte-level application of the tested methods with their word-level counterparts. Table~\ref{tab:w-b} indicates that MNB and the ConvNet perform noticeably better at byte level, while the $n$-gram model drops slightly after switching to byte level.
\begin{table}[h]
\centering
\begin{tabular}{|c|c|c|}
\hline
&\multicolumn{2}{c|}{Accuracy / \%} \\
\cline{2-3}
& Word & Byte \\
\hline
MNB & 79.95 & 87.81 \\
$n$-gram model & 92.46 & 91.40 \\
ConvNet & 86.71 & 93.70 \\
\hline
\end{tabular}
\caption{\label{tab:w-b} Comparison of accuracy between word-level and byte-level applications of each method on 20 classes.}
\end{table}
-\section{Application in \emph{Software Heritage}}
+\section{Application in \emph{Software Heritage} (Ongoing)}
+
+We apply the byte-level ConvNet, the best performing method, to a subset of the \emph{Software Heritage} archive containing more than 17 million files (around 0.1\% of the archive). However, we are not able to check the results one by one; several directories are therefore selected for evaluation.
\subsection{Manual Verification Results}
+Since we have nothing but the content of each file to judge its language, the following results are based on the author's knowledge, with the help of search engines and other assisting tools. The results here are therefore only indicative.
+
+Table~\ref{tab:manual} reports the accuracy over more than a thousand files manually checked by the author using a graphical interface. By analysing the tested samples, the errors can generally be categorised into the following cases:
+\begin{itemize}
+  \item Short files. Files containing only a short code snippet are hard to distinguish even for a human.
+  \item Non-text files. Documentation often consists of PDF documents or PNG and JPEG images, which are inevitably misclassified.
+  \item ConvNet does not work well for many popular languages. As shown in the previous section, popular languages such as C, C++ and HTML are more often wrongly classified.
+\end{itemize}
+
+\begin{table}[t]
+\centering
+\begin{tabular}{|c|c|}
+\hline
+& Accuracy / \% \\
+\hline
+Subset 1 & 69.53 \\
+Subset 2 & 66.67 \\
+Subset 3 & 62.35 \\
+Subset 4 & 68.28 \\
+Subset 5 & 58.97 \\
+\hline
+Overall & 64.98 \\
+\hline
+\end{tabular}
+\caption{\label{tab:manual} Test results of manual checking on subsets of the archive.}
+\end{table}
+
+\subsection{Recourse (Ongoing)}
+
+Libmagic is an efficient library for differentiating plain text files from other formatted files. It is also reliable at recognising the popular languages. We therefore decide to abandon the classes which are both often misclassified by the ConvNet and covered by Libmagic.
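+
+A possible pre-filtering sketch using the \texttt{python-magic} binding (the decision rule on MIME types is our assumption) would only hand plain text blobs to the ConvNet:
+\begin{lstlisting}
+import magic
+
+def prefilter(data):
+    # MIME detection from the raw content only, no filename needed.
+    mime = magic.from_buffer(data, mime=True)
+    if not mime.startswith("text/"):
+        return "binary"      # skip the ConvNet for non-text blobs
+    return None              # fall through to the ConvNet classifier
+\end{lstlisting}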
+
\section{Challenges of Large-scale Deployment (Ongoing)}
\subsection{Imbalance Between Classes}
Imbalance between classes in the dataset can affect the performance of the different models in many ways. For the approaches essentially based on statistics, \emph{i.e.} the $n$-gram frequency baseline and the $n$-gram model, a small training set means that we may not capture enough features. For the ConvNet approaches, besides the former reason, ConvNets tend to ignore smaller classes in order to reduce errors.
Despite our efforts to balance the number of repositories for each class, a significant imbalance is eventually observed between language classes. We know from Figure~\ref{fig:distribution} that half of the dataset consists of 13 languages, while the 310 other languages share the other half. Nearly half of the languages possess fewer than 5,000 files, and two thirds of these have fewer than 1,000 files.
In future work, we intend to fetch a more balanced dataset for each language and to enrich the weaker classes during the real deployment on the archive.
\subsection{Discovering New Languages}
The real challenges come from changes over time. In order to recognise as many languages as possible, our language list should be able to grow over time. Unfortunately, the existing performing methods fix \emph{a priori} a list of languages and focus on distinguishing between them.
On the one hand, despite our efforts to fetch as many languages as possible, it is already impossible to list all existing languages. On the other hand, we have no idea how many new languages will appear in the archive.
Therefore, in this subsection, we report several attempts at discovering new classes and discuss the extensibility of the models in the following parts.
\subsubsection{Detecting New Languages}
Unsupervised learning is the machine learning task of finding a model revealing the inherent structure of unlabelled data. Clustering is one of its topics, aiming at finding potential new self-forming classes in the feature space. Since the new languages are still unknown to us, we focus here on hierarchical clustering, which does not demand \emph{a priori} a fixed number of new classes.
\paragraph{Agglomerative Hierarchical Clustering}
Agglomerative Hierarchical Clustering (AHC) is the most commonly considered hierarchical clustering approach. It is a bottom-up approach.
We call a sample without label an \emph{observation}. Given $n$ observations $\{o_1,o_2,...,o_n\}$, a distance is calculated with a \emph{pairwise metric} for each pair of documents, resulting in $O(n^2)$ distances. Initially, every single observation is a cluster. By applying a \emph{linkage criterion}, the two closest clusters are merged into a single cluster. The algorithm terminates when there is only one cluster gathering all $n$ observations.
The clustering is first tested on the 20 most popular languages. Unfortunately, it does not work as we expected. By varying the pairwise metric and the linkage criterion, we obtained a slightly better performing combination: Euclidean distance and average linkage.
However, Figure~\ref{fig:???} shows that only a few languages, such as Objective-C, are able to form a large, visible and pure agglomeration of the same language. Most of them are evenly mixed up inside the clusters. We will continue to explore methods other than AHC for this task in the future.
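For reference, a minimal scikit-learn sketch of this clustering setup; the TF-IDF vectorisation and the cut into 20 clusters are illustrative choices, not part of the method itself.
\begin{lstlisting}
from sklearn.cluster import AgglomerativeClustering
from sklearn.feature_extraction.text import TfidfVectorizer

# Observations: unlabelled documents, vectorised here with TF-IDF.
X = TfidfVectorizer(max_features=3000).fit_transform(unlabelled_docs).toarray()

# Euclidean distance and average linkage, the combination retained above;
# the dendrogram is cut into 20 clusters for the 20-language test.
clustering = AgglomerativeClustering(n_clusters=20, affinity="euclidean",
                                     linkage="average")
cluster_labels = clustering.fit_predict(X)
\end{lstlisting}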
\subsubsection{Extensibility of Existing Models (Ongoing)}
-Once discovered, new classes need to be integrated into the existing model. Since the Baseline method, $n$-gram model and MNB demand a profile stocking statistics for each language, it suffices to train the incoming supplementary training sets and simply add the profiles into the model. On the contrary, ConvNet approaches should be retrained with a new network. However, no matter how we integrate these classes into original models, the quality of the models will drop when more classes added.
+Once discovered, new classes need to be integrated into the existing model. Since the baseline method, the $n$-gram model and MNB store a profile of statistics for each language, it suffices to train on the incoming supplementary training sets and simply add the new profiles to the model. On the contrary, the ConvNet approaches have to be retrained as a new network. However, no matter how we integrate these classes into the original models, the quality of the models drops as more classes are added.
-\paragraph{Impact of Retraining}
+\paragraph{Impact of Retraining with More Classes}
The objective of \emph{Software Heritage} is to recognise as many languages as possible; it is therefore inevitable to integrate new languages into an older classifier. We test 3 series of training and test sets in order to measure the impact of the number of classes on the global results and the deterioration of $F_1$ for common languages.
\begin{table}[h]
\centering
\begin{tabular}{|c|c|c|c|}
\hline
&\multicolumn{3}{c|}{Accuracy / \%} \\
\cline{2-4}
& \texttt{mini} & \texttt{less} & \texttt{total} \\
& \footnotesize{(20 languages)} & \footnotesize{(109 languages)} & \footnotesize{(323 languages)}\\
\hline
Baseline & 63.03 & 50.09 & 46.14 \\
MNB & 87.81 & 85.34 & 85.10 \\
$n$-gram model & 91.40 & 86.36 & 83.39 \\
Word-level ConvNet & 86.71 & 76.88 & 76.77\\
Byte-level ConvNet & 93.70 & 90.87 & 89.77 \\
\hline
\end{tabular}
\caption{\label{tab:size} Comparison of accuracy score for each method on 3 series of training and test sets.}
\end{table}
Table~\ref{tab:size} compares the global accuracy scores for each series and each approach. We observe that as the number of classes grows, the accuracy drops for all methods. From 20 languages to 323 languages, the baseline method loses 16.89\%, while MNB loses only 2.71\%.
Figure~\ref{fig:size} shows that the recognition quality of earlier integrated languages drops in most cases, especially for those languages which are often the root of later introduced languages.
\begin{figure}[t!]
\centering
\subfloat[Baseline]{
\includegraphics[height=8cm]{./comparison_ngram_dist_size.pdf}
}
\subfloat[MNB]{
\includegraphics[height=8cm]{./comparison_bayes_size.pdf}
}
\subfloat[$n$-gram]{
\includegraphics[height=8cm]{./comparison_ngram_prob_size.pdf}
}
\subfloat[Word ConvNet]{
\includegraphics[height=8cm]{./comparison_cnn_word_size.pdf}
}
\subfloat[Byte ConvNet]{
\includegraphics[height=8cm]{./comparison_cnn_size.pdf}
}
\caption{\label{fig:size} Comparison of $F_1$ score for each method on 3 series of training and test sets. (Blue: \texttt{mini}, Red: \texttt{less}, Cyan: \texttt{total})}
\end{figure}
\subsubsection{Incremental Learning (Ongoing)}
+Incremental learning is another branch of supervised learning, capable of taking new classes into account as they appear in the training data flow. This online procedure means that earlier learned knowledge is conserved, reused and enriched, unlike offline retraining, which completely forgets the previously acquired knowledge. Nowadays, several deep incremental learning models exist, \emph{e.g.} Gepperth's GeppNet \cite{Gepperth16}, Rebuffi \emph{et al.}'s iCaRL \cite{RebuffiKL16}, Kemker and Kanan's FearNet \cite{Kemker17}, \emph{etc.}
+
+Although online learning is favourable for the use cases of \emph{Software Heritage}, the performance reported in \cite{Kemker17} points out that the overall accuracy of these incremental learning methods inevitably degrades after adding new classes. In addition, online learning is shown in \cite{Kemker17} to always underperform its offline counterpart.
+
+\subsubsection{Other Solutions (Ongoing)}
+
+
+
\section{Conclusion (Ongoing)}
+As part of this TRE, we investigated existing NLP methods for text categorisation and applied them to source code from Software Heritage. We tested them on an originally created dataset with 323 language classes.
+
\clearpage
\begin{appendices}
\section{Language List}
\begin{table*}[h!]
\centering
\tiny
\begin{tabularx}{\textwidth}{|X|X|X|X|X|X|}
\hline
1C Enterprise & ABAP & ActionScript & Ada & Agda & AGS Script \\
\hline
Alloy & AMPL & AngelScript & ANTLR & Apex & API Blueprint \\
\hline
APL & AppleScript & Arc & ASP & AspectJ & Assembly \\
\hline
ATS & Augeas & AutoHotkey & AutoIt & Awk & Ballerina \\
\hline
Batchfile & Befunge & BitBake & BlitzBasic & BlitzMax & Bluespec \\
\hline
Boo & Brainfuck & Brightscript & Bro & C & C\# \\
\hline
C++ & Cap'n Proto & CartoCSS & Ceylon & Chapel & Charity \\
\hline
ChucK & Cirru & Clarion & Clean & Click & CLIPS \\
\hline
Clojure & CMake & COBOL & CoffeeScript & ColdFusion & Common Lisp \\
\hline
Common Workflow Language & Component Pascal & Cool & Coq & Crystal & Csound \\
\hline
Csound Document & Csound Score & CSS & Cuda & CWeb & Cycript \\
\hline
D & Dart & DataWeave & DIGITAL Command Language & DM & Dogescript \\
\hline
DTrace & Dylan & E & eC & ECL & Eiffel \\
\hline
Elixir & Elm & Emacs Lisp & EmberScript & EQ & Erlang \\
\hline
F\# & Factor & Fancy & Fantom & Filebench WML & FLUX \\
\hline
Forth & Fortran & FreeMarker & Frege & Game Maker Language & GAMS \\
\hline
GAP & GDB & GDScript & Genie & Genshi & Gherkin \\
\hline
GLSL & Glyph & Gnuplot & Go & Golo & Gosu \\
\hline
Grace & Grammatical Framework & Groovy & Hack & Harbour & Haskell \\
\hline
Haxe & HCL & HLSL & HTML & Hy & HyPhy \\
\hline
IDL & Idris & IGOR Pro & Inform 7 & Inno Setup & Io \\
\hline
Ioke & Isabelle & J & Jasmin & Java & JavaScript \\
\hline
Jolie & JSONiq & Julia & Jupyter Notebook & Kit & Kotlin \\
\hline
KRL & LabVIEW & Lasso & Lean & Lex & LFE \\
\hline
LilyPond & Limbo & Liquid & LiveScript & LLVM & Logos \\
\hline
Logtalk & LOLCODE & LookML & LoomScript & LSL & Lua \\
\hline
M & M4 & Makefile & Mako & Markdown & Mask \\
\hline
Mathematica & Matlab & Max & MAXScript & Mercury & Meson \\
\hline
Metal & Mirah & Modelica & Modula-2 & Module Management System & Monkey \\
\hline
Moocode & MoonScript & MQL4 & MQL5 & MTML & mupad \\
\hline
NCL & Nearley & Nemerle & nesC & NetLinx & NetLinx+ERB \\
\hline
NetLogo & NewLisp & Nextflow & Nim & Nit & Nix \\
\hline
NSIS & Nu & Objective-C & Objective-C++ & Objective-J & OCaml \\
\hline
Omgrofl & ooc & Opa & Opal & OpenEdge ABL & OpenSCAD \\
\hline
Ox & Oxygene & Oz & P4 & Pan & Papyrus \\
\hline
Parrot & Pascal & PAWN & Pep8 & Perl & Perl 6 \\
\hline
PHP & PicoLisp & PigLatin & Pike & PLpgSQL & PLSQL \\
\hline
PogoScript & Pony & PostScript & POV-Ray SDL & PowerBuilder & PowerShell \\
\hline
Processing & Prolog & Propeller Spin & Puppet & PureBasic & PureScript \\
\hline
Python & QMake & QML & R & Racket & Ragel \\
\hline
RAML & Rascal & REALbasic & Rebol & Red & Redcode \\
\hline
Ren'Py & RenderScript & reStructuredText & REXX & Ring & RMarkdown \\
\hline
RobotFramework & Roff & Rouge & RPC & Ruby & Rust \\
\hline
SaltStack & SAS & Scala & Scheme & Scilab & Self \\
\hline
ShaderLab & Shell & ShellSession & Shen & Slash & Smali \\
\hline
Smalltalk & Smarty & SMT & Solidity & SourcePawn & SQF \\
\hline
SQLPL & Squirrel & SRecode Template & Stan & Standard ML & Stata \\
\hline
SuperCollider & Swift & SystemVerilog & Tcl & Tea & Terra \\
\hline
TeX & Thrift & TI Program & TLA & Turing & TXL \\
\hline
TypeScript & Uno & UnrealScript & UrWeb & Vala & VCL \\
\hline
Verilog & VHDL & Vim script & Visual Basic & Volt & Vue \\
\hline
wdl & WebAssembly & WebIDL & wisp & X10 & xBase \\
\hline
XC & XML & Xojo & XProc & XQuery & XS \\
\hline
XSLT & Xtend & Yacc & YAML & Zephir & Zimpl \\
\hline
\end{tabularx}
\caption{\label{tab:lan} Language list of the dataset (323 languages).}
\end{table*}
\section{File Distribution in Dataset of Each Language}
\begin{figure}[h]
\centering
\includegraphics[width=\textwidth]{circle}
\caption{\label{fig:distribution}File Distribution in Dataset of Each Language}
\end{figure}
+
+\section{Hyperparameters of ConvNets}
+
+\begin{table}[h!]
+\centering
+\begin{tabular}{|c|c|}
+\hline
+Hyperparameter & Value \\
+\hline
+input size & 2,048 \\
+vocabulary size & 256 \\
+character embedding size & 32 \\
+filter sizes & [3, 5, 7, 9, 10] \\
+nb. of filter matrices & 256 \\
+activation function & ReLU \\
+nb. of neurons in fully connected level & 1,024 \\
+nb. of classes & 323 \\
+\hline
+\end{tabular}
+\caption{\label{tab:hyp-byte} Hyperparameter configuration of the byte-level ConvNet architecture, following Chaitanya Joshi's adaptation.}
+\end{table}
+
+\begin{table}[h!]
+\centering
+\begin{tabular}{|c|c|}
+\hline
+Hyperparameter & Value \\
+\hline
+input size & 400 \\
+vocabulary size & 15,000 \\
+character embedding size & 128 \\
+filter sizes & [3, 4, 5] \\
+nb. of filter matrices & 100 \\
+dropout rate & 0.5 \\
+activation function & ReLU \\
+nb. of neurons in fully connected level & 1,024 \\
+nb. of classes & 323 \\
+\hline
+\end{tabular}
+\caption{\label{tab:hyp-word} Hyperparameter configuration of the word-level ConvNet architecture, following \cite{Kim14}.}
+\end{table}
+
\end{appendices}
\bibliography{bib-rapport}
\bibliographystyle{unsrt}
%Rapport
%
%Il doit faire de 15 à 30 pages et, dans la mesure du possible, doit être en grande part lisible par des non-spécialistes. Son plan peut être par exemple :
%présentation du domaine de recherche (le jury n'est pas constitué seulement de spécialistes du domaine, tenez-en compte !) ;
%énoncé et motivation du sujet ;
%résultats existants s'y rapportant (état de l'art, commentaire d'article, ...) ;
%vos résultats personnels (clairement identifiés comme tels).
%Le rapport devra être assorti d'un résumé d'une page compréhensible par quiconque.
\end{document}
\ No newline at end of file
diff --git a/scripts/dataset/build_dataset.sh b/scripts/dataset/build_dataset.sh
index 65e8290..f0b49c6 100755
--- a/scripts/dataset/build_dataset.sh
+++ b/scripts/dataset/build_dataset.sh
@@ -1,27 +1,27 @@
#!/bin/bash
# Pipeline of dataset construction
ROOT_FOLDER=$1
-if [ $# -eq 1]
+if [ $# -eq 1 ]
then
mkdir $ROOT_FOLDER;
echo "[[Start listing repositories.]]";
- python3 repo_lister.sh;
+ python3 repo_lister.py;
echo "[[Repository lists built.]]";
echo "[[Start cloning repositories to '$ROOT_FOLDER']]";
./repo_clone.sh "$ROOT_FOLDER";
echo "[[Clone completed.]]";
echo "[[Start calculating ground truth.]]";
./repo_arrange.sh "$ROOT_FOLDER";
echo "[[JSON ground truth calculated.]]";
echo "[[Start completing ground truth with text result.]]";
./repo_arrange_text.sh "$ROOT_FOLDER";
echo "[[Ground truth completed.]]";
echo "[[Start arranging files by language.]]";
- python3 ground_truth_arrange.py;
+ python3 ground_truth_arrange.py "$ROOT_FOLDER";
echo "[[Raw file dataset built in '$ROOT_FOLDER'.]]";
else
echo "Please enter root folder correctly. Only one folder is needed.";
fi
diff --git a/swh/langdetect/checker.py b/swh/langdetect/checker.py
index 01f8811..9f873c1 100644
--- a/swh/langdetect/checker.py
+++ b/swh/langdetect/checker.py
@@ -1,102 +1,160 @@
#!/usr/bin/python
from PyQt5 import QtGui, QtCore
from pyforms import BaseWidget
from pyforms.controls import ControlTextArea
from pyforms.controls import ControlDir
from pyforms.controls import ControlList
from pyforms.controls import ControlLabel
from pyforms.controls import ControlCombo
from .cnn import CNN
import pyforms, os, gzip
+from pickle import load, dump
RED = QtGui.QColor(255,0,0)
WHITE = QtGui.QColor(255,255,255)
GREEN = QtGui.QColor(0,255,0)
BLACK = QtGui.QColor(0,0,0)
class Check(BaseWidget):
def __init__(self):
super(Check, self).__init__('Software Heritage Source Code Language Manual Check Tool')
-
+
+ self._control_root = ControlDir('Choose the root of database: ')
self._control = ControlDir('Choose a directory: ')
self._list = ControlList('Files in the directory')
self._text = ControlTextArea('Content')
self._label = ControlLabel('Language: \nValue: ')
self._label_rest = ControlLabel('')
self._combo = ControlCombo('Is that correct ?')
- self.formset = ['_control', ('_list', ['_text', ('_label', '_combo')]),'_label_rest']
+ self.formset = ['_control_root', '_control', ('_list', ['_text', ('_label', '_combo')]),'_label_rest']
+ self._control_root.changed_event = self.__save_root
self._control.changed_event = self.__get_files
self._list.readonly = True
- self._list.cell_double_clicked_event=self.__show_text
+ self._list.item_selection_changed_event=self.__show_text
self._text.readonly = True
self._cnn = CNN(None, 2048, None)
+ self._root = None
+
self._dict = {}
self._combo += ('Unknown', None)
self._combo += ('No', False)
self._combo += ('Yes', True)
self._combo.activated_event = self.__checked
self._curr_row = None
self._curr_column = None
+ self._curr_dir = None
+ self._state = 0
+
+ self.before_close_event = self.__store
+
+ def __save_root(self):
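+        # Remember the dataset root and reload previously saved check results, if any.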
+ self._root = self._control_root.value
+ try:
+ with open(os.path.join(self._root, 'results'), 'rb') as f:
+ self._dicts = load(f)
+ except Exception:
+ self._dicts = {}
+ self._state = 1
+ self._control.value = self._root
+
+ def __store(self):
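+        # Persist the current decisions to '<root>/results' when the window is closed.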
+ with open(os.path.join(self._root, 'results'), 'wb') as f:
+ self._dicts[self._curr_dir] = self._dict
+ dump(self._dicts, f)
def __get_files(self):
- self._dict = {}
+ if self._state == 1:
+ self._state = 2
+ return
+ elif self._state == 0:
+ self.alert_popup('Please choose root of your database.', title='Error')
+ return
res = []
- for root, sub, files in os.walk(self._control.value):
+ if self._curr_dir != None:
+ self._dicts[self._curr_dir] = self._dict
+ self._curr_dir = self._control.value
+ self._dict = self._dicts.get(self._curr_dir, {})
+ for root, sub, files in os.walk(self._curr_dir):
if sub == []:
for file in files:
if not file.startswith('.'):
res.append((os.path.join(root, file),))
self._list.value = res
+ self._update_status()
+ self._update_cells_color()
def __checked(self, index):
path = self._list.get_value(self._curr_column, self._curr_row)
self._dict[path] = self._combo.value
- print(self._combo.value)
if self._combo.value == 'Unknown':
del self._dict[path]
self._update_color(self._combo.value, self._list.get_cell(self._curr_column, self._curr_row))
+ self._update_status()
+
+ def _update_status(self):
correct = len([x for x in self._dict.values() if x == True])
wrong = len(self._dict.keys()) - correct
remaining = len(self._list.value) - len(self._dict.keys())
- self._label_rest.value = 'Correct:\t{}\tWrong:\t{}\tRemaining:\t{}'.format(correct, wrong, remaining)
+ try:
+ accuracy = correct / (correct + wrong) * 100
+        except ZeroDivisionError:
+ accuracy = 0
+ self._label_rest.value = 'Correct:\t{}\tWrong:\t{}\tRemaining:\t{}\tAccuracy:\t{:.2f}%'.format(correct, wrong, remaining, accuracy)
+
+ def _update_cells_color(self):
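+        # Repaint every row according to the saved decision (green: correct, red: wrong, white: unchecked).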
+ n = self._list.rows_count
+ for i in range(0, n):
+ cell = self._list.get_cell(0, i)
+ value = self._list.get_value(0, i)
+ self._update_color(self._dict.get(value, None), cell)
def _update_color(self, x, cell):
if x == False:
cell.setBackground(RED)
elif x == True:
cell.setBackground(GREEN)
else:
cell.setBackground(WHITE)
- def __show_text(self, row, column):
+ def __show_text(self):
+ column = 0
+ row = self._list.selected_row_index
self._curr_row = row
self._curr_column = column
+ if row == None:
+ self._text.value = ''
+ self._label.value = 'Language: \nValue: '
+ return
+
path = self._list.get_value(column, row)
with gzip.open(path, 'rb') as f:
string = f.read()
try:
string = string.decode('utf-8')
except UnicodeDecodeError:
pass
self._text.value = string[:10240]
res = self._cnn.classify(path)
- self._label.value = 'Language: {}\nValue: {}'.format(res[0],res[1])
+ if(res[1] >= 0):
+ self._label.value = 'Language: {}\nValue: {}'.format(res[0],res[1])
+ else:
+ self._label.value = 'Language: No Reliable Result\nValue: '
h_sel = self._dict.get(path, None)
if h_sel == None:
self._combo.current_index = 0
elif h_sel == False:
self._combo.current_index = 1
elif h_sel == True:
self._combo.current_index = 2
#Execute the application
if __name__ == "__main__":
pyforms.start_app(Check)
diff --git a/swh/langdetect/cnn.py b/swh/langdetect/cnn.py
index fb8d5a0..ad6298a 100644
--- a/swh/langdetect/cnn.py
+++ b/swh/langdetect/cnn.py
@@ -1,327 +1,346 @@
import os
import sys
import subprocess
import time
import random
import csv
import numpy as np
import warnings
import gzip
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import tensorflow as tf
import json
import argparse
+import magic
from ast import literal_eval
from pickle import dump
from pickle import load
from numpy import array
from .utils.common import Tokenizer
from .utils.common import file_to_string
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import EarlyStopping
from keras.models import Model
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout, AlphaDropout
from keras.layers import ThresholdedReLU
from keras.layers import Activation
from keras.layers import Lambda
from keras.layers import Embedding
from keras.layers import Concatenate, GlobalMaxPooling1D
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.optimizers import SGD
#from pyspark import SparkContext, SparkConf
#from elephas.spark_model import SparkModel # pip install flask
#from elephas import optimizers as elephas_optimizers
#from elephas.utils.rdd_utils import to_labeled_point
csv.field_size_limit(sys.maxsize)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras import backend as K
#K.set_session(K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)))
def main():
    parser = argparse.ArgumentParser(description='Training and test tool of character-level ConvNet text categorisation.')
subparsers = parser.add_subparsers(dest='sub_command')
-
parser_train = subparsers.add_parser('train', help='Training on the dataset, dataset must be a *.csv file. A model will be created in the same directory.')
parser_train.add_argument('-s', '--spark', type=bool, help='Training on cluster.', dest='train_spark')
parser_train.add_argument('train_path', metavar='PATH', type=str, help='Path of the training dataset.')
parser_train.add_argument('-ms', '--maxsize', metavar='SIZE', dest='train_maxsize', type=int, help='Set maximum input size of ConvNet, default 1024.')
parser_train.add_argument('-e', '--epochs', metavar='N', dest='train_epochs', type=int, help='Number of training epochs (iterations), default 50.')
parser_test = subparsers.add_parser('test', help='Test on the dataset, dataset must be a directory with *.csv dataset named by corresponding language.')
parser_test.add_argument('test_root', metavar='ROOT', type=str, help='Root of the test dataset.')
parser_clf = subparsers.add_parser('clf', help='Test a file.')
parser_clf.add_argument('clf_path', metavar='PATH', type=str, help='Path of test file.')
if len(sys.argv[1:]) == 0:
parser.print_help()
parser.exit()
args = parser.parse_args()
maxsize = 2048
epochs = 15
if args.sub_command == 'train' :
if args.train_maxsize:
maxsize = args.train_maxsize
if args.train_epochs:
epochs = args.train_epochs
n = CNN(args.train_path, maxsize=maxsize, epochs=epochs)
if args.train_spark:
n.train_on_cluster()
else:
n.train()
elif args.sub_command == 'test':
n = CNN(args.test_root, maxsize=maxsize, epochs=epochs)
n.test()
elif args.sub_command == 'clf':
n = CNN(None, maxsize, None)
n.classify(args.clf_path)
else:
parser.parse_args('-h')
class CNN:
def __init__(self, path, maxsize, epochs):
if path != None:
self._path = path
# Root of model folder
self._root_model = os.path.join(os.path.dirname(path), 'model_cnn')
try:
os.mkdir(self._root_model)
except:
pass
# Path of result
self._path_result = os.path.join(os.path.dirname(path), 'result_cnn')
self._path_test_csv = path
dir_path = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(dir_path, 'static_data', 'languages.json'), 'r') as f:
self._languages = json.load(f)
self._input_size = maxsize
self._vocab_size = 256
self._num_of_classes = len(self._languages)
self._batch_size = 64
self._epochs = epochs
self._model = None
if path == None and epochs == None:
self._model = load_model(os.path.join(dir_path, 'static_data', 'model.h5'))
def file_len(self, fname):
with open(fname) as f:
count = 0
for l in f:
count += 1
return count
def train(self):
self._get_model()
earlystop = EarlyStopping(monitor='loss', min_delta=0, patience=3, verbose=0, mode='auto')
callbacks = [earlystop]
self._model.fit_generator(
self._generator(self._input_size,
self._num_of_classes,
self._batch_size),
steps_per_epoch=self.file_len(self._path) / self._batch_size,
epochs=self._epochs,
callbacks=callbacks)
self._model.save(os.path.join(self._root_model, 'model.h5'))
def _generator(self, length, total_class, batch_size=128):
counter = 0
while True:
with open(self._path, newline='') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|')
for pair in r:
if counter == 0:
X = np.empty((0, length))
Y = np.empty((0, total_class))
label, string = pair
label = int(label)
string = literal_eval(string)
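+                        # Long files: keep the head, two windows around the middle and the tail,
+                        # so the fixed-size sample spans the whole file instead of only its start.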
+ if len(string) > self._input_size:
+ len_s = len(string)
+ stop_1 = int(len_s / 3)
+ stop_2 = int(len_s * 2 / 3)
+ part = int(self._input_size / 4)
+ half_part = int(part / 2)
+ string = string[:part] + string[stop_1 - half_part:stop_1 + half_part] + string[stop_2 - half_part:stop_2 + half_part] + string[-part:]
tokens = [x + 1 for x in Tokenizer.tokenize(string, 'letter')]
X = np.append(X, pad_sequences([tokens], maxlen=length), axis=0)
label = array(np_utils.to_categorical([label], total_class))
Y = np.append(Y, label, axis=0)
counter += 1
if counter == batch_size:
counter = 0
yield(X,Y)
def _get_model_zhang(self):
input_size = self._input_size
alphabet_size = self._vocab_size
embedding_size = 128
conv_layers = [(256,7,3), (256,7,3), (256,3,-1), (256,3,-1), (256,3,-1), (256,3,3)]
threshold = 1e-6
fully_connected_layers = [1024, 1024]
dropout_p = 0.2
optimizer = 'adam'
loss = 'categorical_crossentropy'
num_of_classes = self._num_of_classes
# Input layer
inputs = Input(shape=(input_size,), name='sent_input', dtype='int64')
# Embedding layers
x = Embedding(alphabet_size + 1, embedding_size, input_length=input_size)(inputs)
# Convolution layers
for cl in conv_layers:
x = Convolution1D(cl[0], cl[1])(x)
x = ThresholdedReLU(threshold)(x)
if cl[2] != -1:
x = MaxPooling1D(cl[2])(x)
x = Flatten()(x)
# Fully connected layers
for fl in fully_connected_layers:
x = Dense(fl)(x)
x = ThresholdedReLU(threshold)(x)
x = Dropout(dropout_p)(x)
# Output layer
predictions = Dense(num_of_classes, activation='softmax')(x)
# Build and compile model
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
print(model.summary())
self._model = model
def _get_model(self):
input_size = self._input_size
alphabet_size = self._vocab_size
embedding_size = 64
conv_layers = [(256,10), (256,7), (256,5), (256,3)]
threshold = 1e-6
fully_connected_layers = [1024, 1024]
dropout_p = 0.1
optimizer = 'adam'
loss = 'categorical_crossentropy'
num_of_classes = self._num_of_classes
# Input layer
inputs = Input(shape=(input_size,), name='sent_input', dtype='int64')
# Embedding layers
x = Embedding(alphabet_size + 1, embedding_size, input_length=input_size)(inputs)
convolution_output = []
# Convolution layers
for num_filters, filter_width in conv_layers:
conv = Convolution1D(filters=num_filters,
kernel_size=filter_width,
activation='tanh',
name='Conv1D_{}_{}'.format(num_filters, filter_width))(x)
pool = GlobalMaxPooling1D(name='MaxPoolingOverTime_{}_{}'.format(num_filters, filter_width))(conv)
convolution_output.append(pool)
x = Concatenate()(convolution_output)
# Fully connected layers
for fl in fully_connected_layers:
x = Dense(fl, activation='selu', kernel_initializer='lecun_normal')(x)
x = Dropout(dropout_p)(x)
# Output layer
predictions = Dense(num_of_classes, activation='softmax')(x)
# Build and compile model
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
print(model.summary())
self._model = model
def _max_len(self, texts):
return max([len(text) for text in texts])
+ def _load_model(self):
+ self._model = load_model(os.path.join(self._root_model, 'model.h5'))
+
def test(self):
csv.field_size_limit(sys.maxsize)
try:
r = open(self._path_result, 'rb')
test_result = load(r)
r.close()
except FileNotFoundError:
test_result = {}
self._load_model()
for language in [x for x in self._languages if x not in test_result.keys()]:
test_result[language] = self.test_class(language)
with open(self._path_result, 'wb') as f:
dump(test_result, f)
def _count_size(self, files):
size = 0
for f in files:
size += os.path.getsize(f)
return size
def test_class(self, language):
ok = 0
results = []
count = 0
total_test = self.file_len(os.path.join(self._path_test_csv, language + '.csv'))
with open(os.path.join(self._path_test_csv, language + '.csv'), newline='') as csvfile:
r = csv.reader(csvfile, delimiter=' ', quotechar='|')
for pair in r:
label, string = pair
label = int(label)
string = literal_eval(string)
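+                # Same head/middle/tail sampling of long files as in _generator.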
+ if len(string) > self._input_size:
+ length = len(string)
+ stop_1 = int(length / 3)
+ stop_2 = int(length * 2 / 3)
+ part = int(self._input_size / 4)
+ half_part = int(part / 2)
+ string = string[:part] + string[stop_1 - half_part:stop_1 + half_part] + string[stop_2 - half_part:stop_2 + half_part] + string[-part:]
tokens = [x + 1 for x in Tokenizer.tokenize(string, 'letter')]
result = self._guess_file_language(tokens)
count += 1
print('[{0:4d}/{1:4d}] {2}:{3} '.format(count, total_test, result[0][1], result[0][0]),end='\r')
results.append(result[0])
if result[0][1] == language:
ok += 1
accuracy = ok / total_test
print('Tests for {} '.format(language))
print('Total test files : {}'.format(total_test))
print('Correctly classified files : {}'.format(ok))
print('Accuracy : {}%'.format(accuracy * 100))
return (ok, total_test, accuracy, results)
def speed_benchmark(self):
language = self._languages[10]
self._model = load_model(os.path.join(self._root_model, 'model.h5'))
test_set = self._get_test_set(language)
total_size = self._count_size(test_set)
print('{} kB in total'.format(total_size / 1024))
t_start = time.perf_counter()
self.test_class(language)
t_end = time.perf_counter()
print('{} seconds.'.format(t_end - t_start))
print('{} seconds per KiB'.format(((t_end - t_start) / total_size) * 1024))
def _guess_file_language(self, tokens):
X = pad_sequences([tokens], maxlen=self._input_size)
result = list(self._model.predict(X))[0]
result = [(s, self._languages[i]) for i, s in enumerate(result)]
return sorted(result, reverse=True)
def classify(self, path):
with gzip.open(path, 'rb') as f:
string = f.read()
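+        # Detect and print the MIME type of the raw content (informational only).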
+ a = magic.from_buffer(string, mime=True)
+ print(a)
tokens = [x + 1 for x in Tokenizer.tokenize(string, 'letter')]
res = self._guess_file_language(tokens)
print('Filename :\t{}\nLanguage :\t{}\nValue :\t{}'.format(path, res[0][1],res[0][0]))
return (res[0][1], res[0][0])
if __name__ == '__main__':
main()
diff --git a/swh/langdetect/static_data/languages.json b/swh/langdetect/static_data/languages.json
index 01d803f..51e8065 100644
--- a/swh/langdetect/static_data/languages.json
+++ b/swh/langdetect/static_data/languages.json
@@ -1 +1 @@
-["1C Enterprise", "ABAP", "ActionScript", "Ada", "Agda", "AGS Script", "Alloy", "AMPL", "AngelScript", "ANTLR", "Apex", "API Blueprint", "APL", "AppleScript", "Arc", "ASP", "AspectJ", "Assembly", "ATS", "Augeas", "AutoHotkey", "AutoIt", "Awk", "Ballerina", "Batchfile", "Befunge", "BitBake", "BlitzBasic", "BlitzMax", "Bluespec", "Boo", "Brainfuck", "Brightscript", "Bro", "C", "C#", "C++", "Cap'n Proto", "CartoCSS", "Ceylon", "Chapel", "Charity", "ChucK", "Cirru", "Clarion", "Clean", "Click", "CLIPS", "Clojure", "CMake", "COBOL", "CoffeeScript", "ColdFusion", "Common Lisp", "Common Workflow Language", "Component Pascal", "Cool", "Coq", "Crystal", "Csound", "Csound Document", "Csound Score", "CSS", "Cuda", "CWeb", "Cycript", "D", "Dart", "DataWeave", "DIGITAL Command Language", "DM", "Dogescript", "DTrace", "Dylan", "E", "eC", "ECL", "Eiffel", "Elixir", "Elm", "Emacs Lisp", "EmberScript", "EQ", "Erlang", "F#", "Factor", "Fancy", "Fantom", "Filebench WML", "FLUX", "Forth", "Fortran", "FreeMarker", "Frege", "Game Maker Language", "GAMS", "GAP", "GDB", "GDScript", "Genie", "Genshi", "Gherkin", "GLSL", "Glyph", "Gnuplot", "Go", "Golo", "Gosu", "Grace", "Grammatical Framework", "Groovy", "Hack", "Harbour", "Haskell", "Haxe", "HCL", "HLSL", "HTML", "Hy", "HyPhy", "IDL", "Idris", "IGOR Pro", "Inform 7", "Inno Setup", "Io", "Ioke", "Isabelle", "J", "Jasmin", "Java", "JavaScript", "Jolie", "JSONiq", "Julia", "Jupyter Notebook", "Kit", "Kotlin", "KRL", "LabVIEW", "Lasso", "Lean", "Lex", "LFE", "LilyPond", "Limbo", "Liquid", "LiveScript", "LLVM", "Logos", "Logtalk", "LOLCODE", "LookML", "LoomScript", "LSL", "Lua", "M", "M4", "Makefile", "Mako", "Markdown", "Mask", "Mathematica", "Matlab", "Max", "MAXScript", "Mercury", "Meson", "Metal", "Mirah", "Modelica", "Modula-2", "Module Management System", "Monkey", "Moocode", "MoonScript", "MQL4", "MQL5", "MTML", "mupad", "NCL", "Nearley", "Nemerle", "nesC", "NetLinx", "NetLinx+ERB", "NetLogo", "NewLisp", "Nextflow", "Nim", "Nit", "Nix", "NSIS", "Nu", "Objective-C", "Objective-C++", "Objective-J", "OCaml", "Omgrofl", "ooc", "Opa", "Opal", "OpenEdge ABL", "OpenSCAD", "Ox", "Oxygene", "Oz", "P4", "Pan", "Papyrus", "Parrot", "Pascal", "PAWN", "Pep8", "Perl", "Perl 6", "PHP", "PicoLisp", "PigLatin", "Pike", "PLpgSQL", "PLSQL", "PogoScript", "Pony", "PostScript", "POV-Ray SDL", "PowerBuilder", "PowerShell", "Processing", "Prolog", "Propeller Spin", "Puppet", "PureBasic", "PureScript", "Python", "QMake", "QML", "R", "Racket", "Ragel", "RAML", "Rascal", "REALbasic", "Rebol", "Red", "Redcode", "Ren'Py", "RenderScript", "reStructuredText", "REXX", "Ring", "RMarkdown", "RobotFramework", "Roff", "Rouge", "RPC", "Ruby", "Rust", "SaltStack", "SAS", "Scala", "Scheme", "Scilab", "Self", "ShaderLab", "Shell", "ShellSession", "Shen", "Slash", "Smali", "Smalltalk", "Smarty", "SMT", "Solidity", "SourcePawn", "SQF", "SQLPL", "Squirrel", "SRecode Template", "Stan", "Standard ML", "Stata", "SuperCollider", "Swift", "SystemVerilog", "Tcl", "Tea", "Terra", "TeX", "Thrift", "TI Program", "TLA", "Turing", "TXL", "TypeScript", "Uno", "UnrealScript", "UrWeb", "Vala", "VCL", "Verilog", "VHDL", "Vim script", "Visual Basic", "Volt", "Vue", "wdl", "WebAssembly", "WebIDL", "wisp", "X10", "xBase", "XC", "Xojo", "XProc", "XQuery", "XS", "XSLT", "Xtend", "Yacc", "YAML", "Zephir", "Zimpl"]
\ No newline at end of file
+["1C Enterprise", "ABAP", "ActionScript", "Ada", "Adobe Font Metrics", "Agda", "AGS Script", "Alloy", "AMPL", "AngelScript", "Ant Build System", "ANTLR", "ApacheConf", "Apex", "API Blueprint", "APL", "AppleScript", "Arc", "AsciiDoc", "ASP", "AspectJ", "Assembly", "ATS", "Augeas", "AutoHotkey", "AutoIt", "Awk", "Ballerina", "Batchfile", "BitBake", "BlitzBasic", "BlitzMax", "Bluespec", "Boo", "Brainfuck", "Brightscript", "Bro", "C", "C#", "C++", "Cap'n Proto", "CartoCSS", "Ceylon", "Chapel", "Charity", "ChucK", "Cirru", "Clarion", "Clean", "Click", "CLIPS", "Clojure", "CMake", "COBOL", "CoffeeScript", "ColdFusion", "COLLADA", "Common Lisp", "Common Workflow Language", "Component Pascal", "CoNLL-U", "Cool", "Coq", "Crystal", "Csound", "Csound Document", "Csound Score", "CSS", "CSV", "Cuda", "CWeb", "Cycript", "D", "Dart", "DataWeave", "desktop", "Diff", "DIGITAL Command Language", "DM", "Dockerfile", "Dogescript", "DTrace", "Dylan", "E", "Eagle", "eC", "ECL", "edn", "Eiffel", "Elixir", "Elm", "Emacs Lisp", "EmberScript", "EQ", "Erlang", "F#", "Factor", "Fancy", "Fantom", "Filebench WML", "FLUX", "Forth", "Fortran", "FreeMarker", "Frege", "G-code", "Game Maker Language", "GAMS", "GAP", "GDB", "GDScript", "Genie", "Genshi", "Gerber Image", "Gettext Catalog", "Gherkin", "GLSL", "Glyph", "Gnuplot", "Go", "Golo", "Gosu", "Grace", "Gradle", "Grammatical Framework", "Graph Modeling Language", "GraphQL", "Graphviz (DOT)", "Groovy", "Hack", "Harbour", "Haskell", "Haxe", "HCL", "HLSL", "HTML", "HXML", "Hy", "HyPhy", "IDL", "Idris", "IGOR Pro", "Inform 7", "INI", "Inno Setup", "Io", "Ioke", "Isabelle", "J", "Jasmin", "Java", "JavaScript", "Jolie", "JSON", "JSON5", "JSONiq", "Julia", "Jupyter Notebook", "KiCad Layout", "KiCad Legacy Layout", "KiCad Schematic", "Kit", "Kotlin", "KRL", "LabVIEW", "Lasso", "Lean", "Lex", "LFE", "LilyPond", "Limbo", "Linker Script", "Linux Kernel Module", "Liquid", "LiveScript", "LLVM", "Logos", "Logtalk", "LOLCODE", "LookML", "LoomScript", "LSL", "Lua", "M", "M4", "Makefile", "Mako", "Markdown", "Mask", "Mathematica", "Matlab", "Maven POM", "Max", "MAXScript", "MediaWiki", "Mercury", "Meson", "Metal", "Mirah", "Modelica", "Modula-2", "Module Management System", "Monkey", "Moocode", "MoonScript", "MQL4", "MQL5", "MTML", "mupad", "NCL", "Nemerle", "nesC", "NetLinx", "NetLogo", "NewLisp", "Nextflow", "Nginx", "Nim", "Nit", "Nix", "NSIS", "Nu", "Objective-C", "Objective-C++", "Objective-J", "OCaml", "ooc", "Opa", "OpenEdge ABL", "OpenSCAD", "OpenType Feature File", "Org", "Ox", "Oz", "P4", "Pan", "Papyrus", "Parrot", "Pascal", "PAWN", "Pep8", "Perl", "Perl 6", "PHP", "Pickle", "PicoLisp", "PigLatin", "Pike", "PLpgSQL", "PLSQL", "Pod", "PogoScript", "Pony", "PostScript", "POV-Ray SDL", "PowerBuilder", "PowerShell", "Processing", "Prolog", "Propeller Spin", "Protocol Buffer", "Public Key", "Puppet", "Pure Data", "PureBasic", "PureScript", "Python", "q", "QMake", "QML", "R", "Racket", "Ragel", "RAML", "Rascal", "Raw token data", "RDoc", "REALbasic", "Rebol", "Red", "Redcode", "Ren'Py", "RenderScript", "reStructuredText", "REXX", "Ring", "RMarkdown", "RobotFramework", "Roff", "Rouge", "RPC", "RPM Spec", "Ruby", "Rust", "SaltStack", "SAS", "Scala", "Scheme", "Scilab", "sed", "Self", "ShaderLab", "Shell", "Shen", "Slash", "Smali", "Smalltalk", "Smarty", "SMT", "Solidity", "SourcePawn", "SPARQL", "SQF", "SQL", "SQLPL", "Squirrel", "SRecode Template", "Stan", "Standard ML", "Stata", "SubRip Text", "SuperCollider", "SVG", "Swift", "SystemVerilog", "Tcl", "Tea", "Terra", "TeX", "Text", 
"Textile", "Thrift", "TI Program", "TLA", "TOML", "Turing", "Turtle", "TXL", "TypeScript", "Unity3D Asset", "Uno", "UnrealScript", "UrWeb", "Vala", "VCL", "Verilog", "VHDL", "Vim script", "Visual Basic", "Volt", "Vue", "Wavefront Material", "Wavefront Object", "wdl", "Web Ontology Language", "WebAssembly", "WebIDL", "wisp", "X10", "xBase", "XC", "XML", "Xojo", "XPages", "XProc", "XQuery", "XS", "XSLT", "Xtend", "Yacc", "YAML", "YANG", "Zephir", "Zimpl"]
diff --git a/swh/langdetect/static_data/languages_less.json b/swh/langdetect/static_data/languages_less.json
index 695c179..885951f 100644
--- a/swh/langdetect/static_data/languages_less.json
+++ b/swh/langdetect/static_data/languages_less.json
@@ -1,2 +1 @@
-["ActionScript", "Ada", "AMPL", "AngelScript", "AspectJ", "Assembly", "ATS", "AutoHotkey", "AutoIt", "Batchfile", "BitBake", "C", "C#", "C++", "Ceylon", "Clean", "Clojure", "CMake", "CoffeeScript", "ColdFusion", "Common Lisp", "Coq", "CSS", "D", "Dart", "DM", "Dylan", "E", "Eiffel", "Elixir", "Erlang", "F#", "Factor", "Forth", "Fortran", "Game Maker Language", "GLSL", "Go", "Groovy", "Haskell", "Haxe", "HTML", "IDL", "Isabelle", "Java", "JavaScript", "Kotlin", "LilyPond", "Limbo", "LLVM", "Lua", "M", "Makefile", "Mathematica", "Matlab", "Max", "Modelica", "Modula-2", "NCL", "nesC", "NewLisp", "Nim", "Objective-C", "Objective-J", "OCaml", "OpenEdge ABL", "P4", "Pan", "Pascal", "Perl", "Perl 6", "PHP", "PLpgSQL", "PowerBuilder", "Processing", "Prolog", "Python", "QML", "R", "Racket", "RAML", "Red", "Roff", "Ruby", "Rust", "Scala", "Scheme", "Scilab", "Shell", "Smali", "Smalltalk", "Smarty", "SMT", "SourcePawn", "SQF", "Standard ML", "Swift", "SystemVerilog", "Tcl", "TeX", "TypeScript", "UnrealScript", "Vala", "Verilog", "VHDL", "Vim script", "Visual Basic", "xBase", "XSLT"]
-
+["ActionScript", "Ada", "AMPL", "AngelScript", "AspectJ", "Assembly", "ATS", "AutoHotkey", "AutoIt", "Batchfile", "BitBake", "C", "C#", "C++", "Ceylon", "Clean", "Clojure", "CMake", "CoffeeScript", "ColdFusion", "Common Lisp", "Coq", "CSS", "CSV", "D", "Dart", "desktop", "Diff", "DM", "Dylan", "E", "Eiffel", "Elixir", "Erlang", "F#", "Factor", "Forth", "Fortran", "Game Maker Language", "Gettext Catalog", "GLSL", "Go", "Graphviz (DOT)", "Groovy", "Hack", "Haskell", "Haxe", "HTML", "IDL", "INI", "Isabelle", "Java", "JavaScript", "JSON", "Kotlin", "LilyPond", "Limbo", "LLVM", "Lua", "M", "Makefile", "Markdown", "Mathematica", "Matlab", "Max", "Modelica", "Modula-2", "NCL", "nesC", "NewLisp", "Nim", "Objective-C", "Objective-J", "OCaml", "OpenEdge ABL", "P4", "Pan", "Pascal", "Perl", "Perl 6", "PHP", "PLpgSQL", "PowerBuilder", "Processing", "Prolog", "Pure Data", "Python", "QML", "R", "Racket", "RAML", "Red", "reStructuredText", "Roff", "Ruby", "Rust", "Scala", "Scheme", "Shell", "Smali", "Smalltalk", "Smarty", "SMT", "SourcePawn", "SQF", "SQL", "Standard ML", "SubRip Text", "SuperCollider", "SVG", "Swift", "SystemVerilog", "Tcl", "TeX", "Text", "TypeScript", "Unity3D Asset", "UnrealScript", "Vala", "Verilog", "VHDL", "Vim script", "Visual Basic", "xBase", "XML", "XSLT", "YAML"]
diff --git a/swh/langdetect/static_data/languages_mini.json b/swh/langdetect/static_data/languages_mini.json
index 1641253..ec15931 100644
--- a/swh/langdetect/static_data/languages_mini.json
+++ b/swh/langdetect/static_data/languages_mini.json
@@ -1,2 +1,2 @@
-["C", "C#", "C++", "Clojure", "CSS", "Go", "Haskell", "HTML", "Java", "JavaScript", "Lua", "Objective-C", "Perl", "PHP", "Python", "R", "Ruby", "Scala", "Scheme", "Swift"]
+["C", "C#", "C++", "Clojure", "CSS", "Go", "Haskell", "HTML", "Java", "JavaScript", "Lua", "Objective-C", "Perl", "PHP", "Python", "R", "Ruby", "Scala", "Scheme", "XML"]