diff --git a/scripts/comparison.pdf b/scripts/comparison.pdf
index 0107d69..1865219 100644
Binary files a/scripts/comparison.pdf and b/scripts/comparison.pdf differ
diff --git a/swh/langdetect/cnn.py b/swh/langdetect/cnn.py
index a9ab20a..22b555b 100644
--- a/swh/langdetect/cnn.py
+++ b/swh/langdetect/cnn.py
@@ -1,303 +1,308 @@
import os
import sys
import subprocess
import time
import random
import csv
import numpy as np
import warnings

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    import tensorflow as tf

import json
import argparse

from ast import literal_eval
from pickle import dump
from pickle import load
from numpy import array
from .utils.common import tokenizer
from .utils.common import file_to_string
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import EarlyStopping
from keras.models import Model
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import ThresholdedReLU
from keras.layers import Activation
from keras.layers import Lambda
from keras.layers import Embedding
from keras.layers.convolutional import Convolution1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.optimizers import SGD

from pyspark import SparkContext, SparkConf
from elephas.spark_model import SparkModel # pip install flask
from elephas import optimizers as elephas_optimizers
from elephas.utils.rdd_utils import to_labeled_point

csv.field_size_limit(sys.maxsize)

-conf = SparkConf().setAppName('Elephas_App').setMaster('local[8]')
+conf = SparkConf().setAppName('Elephas_App').setMaster('local[8]') # Set up on cluster.
sc = SparkContext(conf=conf)

def main():
    parser = argparse.ArgumentParser(description='Training and test tool of charactor-level ConvNet text categorisation.')

    subparsers = parser.add_subparsers(dest='sub_command')
    parser_train = subparsers.add_parser('train', help='Training on the dataset, dataset must be a *.csv file. A model will be created in the same directory.')
    parser_train.add_argument('-s', '--spark', type=bool, help='Training on cluster.', dest='train_spark')
    parser_train.add_argument('train_path', metavar='PATH', type=str, help='Path of the training dataset.')
    parser_train.add_argument('-ms', '--maxsize', metavar='SIZE', dest='train_maxsize', type=int, help='Set maximum input size of ConvNet, default 1024.')
    parser_train.add_argument('-e', '--epochs', metavar='N', dest='train_epochs', type=int, help='Number of training epochs (iterations), default 50.')
    parser_test = subparsers.add_parser('test', help='Test on the dataset, dataset must be a directory with *.csv dataset named by corresponding language.')
    parser_test.add_argument('test_root', metavar='ROOT', type=str, help='Root of the test dataset.')

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()
    args = parser.parse_args()

    if args.sub_command == 'train' :
+        maxsize = 1024
+        epochs = 50
+
        if args.train_maxsize:
-            if args.train_epochs:
-                n = CNN(args.train_path, maxsize=args.train_maxsize, epochs=args.train_epochs)
-            else:
-                n = CNN(args.train_path, maxsize=args.train_maxsize)
-        else:
-            if args.train_epochs:
-                n = CNN(args.train_path, epochs=args.train_epochs)
-            else:
-                n = CNN(args.train_path)
-        if not args.train_spark:
-            n.train()
-        else:
+            maxsize = args.train_maxsize
+        if args.train_epochs:
+            epochs = args.train_epochs
+
+        n = CNN(args.train_path, maxsize=maxsize, epochs=epochs)
+
+        if args.train_spark:
            n.train_on_cluster()
+        elif args.train_gpu:
+            n.train()
+
    elif args.sub_command == 'test':
        n = CNN(args.test_root)
        n.test()
    else:
        parser.parse_args('-h')

class CNN:

-    def __init__(self, path, maxsize=1024, epochs=50):
+    def __init__(self, path, maxsize, epochs):

        self._path = path

        # Root of model folder
        self._root_model = os.path.join(os.path.dirname(path), 'model_cnn')
        try:
            os.mkdir(self._root_model)
        except:
            pass

        # Path of result
        self._path_result = os.path.join(os.path.dirname(path), 'result_cnn')

        dir_path = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(dir_path, 'static_data', 'languages.json'), 'r') as f:
            self._languages = json.load(f)

        self._path_test_csv = path

        self._input_size = maxsize
        self._vocab_size = 256
        self._num_of_classes = len(self._languages)
        self._batch_size = 128
        self._epochs = epochs

    def file_len(self, fname):
        with open(fname) as f:
            count = 0
            for l in f:
                count += 1
        return count

    def train(self):
        model = self._get_model()
-        earlystop = EarlyStopping(monitor='loss', min_delta=0, patience=2, verbose=0, mode='auto')
+        earlystop = EarlyStopping(monitor='loss', min_delta=0, patience=5, verbose=0, mode='auto')
        callbacks = [earlystop]
        model.fit_generator(
-            self._generator(self._input_size, self._num_of_classes, self._batch_size),
-            steps_per_epoch=self.file_len(self._path) / self._batch_size, epochs=self._epochs, callbacks=callbacks)
+            self._generator(self._input_size,
+                            self._num_of_classes,
+                            self._batch_size),
+            steps_per_epoch=self.file_len(self._path) / self._batch_size,
+            epochs=self._epochs,
+            callbacks=callbacks)
        model.save(os.path.join(self._root_model, 'model.h5'))

    def train_on_cluster(self):
        rdd = self._get_train_rdd()
        model = self._get_model()
        adagrad = elephas_optimizers.Adagrad()
        spark_model = SparkModel(sc, model, optimizer=adagrad, frequency='epoch', mode='asynchronous', num_workers=2)
        spark_model.train(rdd, nb_epoch=self._epochs, batch_size=self._batch_size, verbose=0, categorical=True, nb_classes=self._num_of_classes)
        model.save(os.path.join(self._root_model, 'model.h5'))
    def _get_train_rdd(self):
        print('Prepairing RDD for training...')
        X_train = np.empty((0, self._input_size))
        Y_train = np.empty((0, self._num_of_classes))
        with open(self._path, newline='') as csvfile:
            r = csv.reader(csvfile, delimiter=' ', quotechar='|')
            for pair in r:
                label, string = pair
                label = int(label)
                print(label, end='\r')
                string = literal_eval(string)
                tokens = [x + 1 for x in tokenizer(string, 'letter')]
                X_train = np.append(X_train, pad_sequences([tokens], maxlen=self._input_size), axis=0)
                label = array(np_utils.to_categorical([label], self._num_of_classes))
                Y_train = np.append(Y_train, label, axis=0)
        rdd = to_labeled_point(sc, X_train, Y_train, categorical=True)

    def _generator(self, length, total_class, batch_size=128):
        counter = 0
        while True:
            with open(self._path, newline='') as csvfile:
                r = csv.reader(csvfile, delimiter=' ', quotechar='|')
                for pair in r:
                    if counter == 0:
                        X = np.empty((0, length))
                        Y = np.empty((0, total_class))
                    label, string = pair
                    label = int(label)
                    string = literal_eval(string)
                    tokens = [x + 1 for x in tokenizer(string, 'letter')]
                    X = np.append(X, pad_sequences([tokens], maxlen=length), axis=0)
                    label = array(np_utils.to_categorical([label], total_class))
                    Y = np.append(Y, label, axis=0)
                    counter += 1
                    if counter == batch_size:
                        counter = 0
                        yield(X,Y)

    def _get_model(self):
        input_size = self._input_size
        alphabet_size = self._vocab_size
        embedding_size = 256
-        conv_layers = [(256,7,3), (256,7,3), (256,3,-1), (256,3,-1), (256,3,-1), (256,3,3)]
+        conv_layers = [(256,7,5), (256,7,5), (256,3,-1), (256,3,-1), (256,3,-1), (256,3,5)]
        threshold = 1e-6
        fully_connected_layers = [1024, 1024]
        dropout_p = 0.2
        optimizer = 'adam'
        loss = 'categorical_crossentropy'
        num_of_classes = self._num_of_classes

        # Input layer
        inputs = Input(shape=(input_size,), name='sent_input', dtype='int64')

        # Embedding layers
        x = Embedding(alphabet_size + 1, embedding_size, input_length=input_size)(inputs)

        # Convolution layers
        for cl in conv_layers:
            x = Convolution1D(cl[0], cl[1])(x)
            x = ThresholdedReLU(threshold)(x)
            if cl[2] != -1:
                x = MaxPooling1D(cl[2])(x)
        x = Flatten()(x)

        # Fully connected layers
        for fl in fully_connected_layers:
            x = Dense(fl)(x)
            x = ThresholdedReLU(threshold)(x)
            x = Dropout(dropout_p)(x)

        # Output layer
        predictions = Dense(num_of_classes, activation='softmax')(x)

        # Build and compile model
        model = Model(inputs=inputs, outputs=predictions)
        model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])

        print(model.summary())
        return model

    def _max_len(self, texts):
        return max([len(text) for text in texts])

    def test(self):
        csv.field_size_limit(sys.maxsize)
        try:
            r = open(self._path_result, 'rb')
            test_result = load(r)
            r.close()
        except FileNotFoundError:
            test_result = {}

        model = self._load_model()

        for language in [x for x in self._languages if x not in test_result.keys()]:
            test_result[language] = self.test_class(model, language)
            with open(self._path_result, 'wb') as f:
                dump(test_result, f)

    def _load_model(self):
        model = load_model(os.path.join(self._root_model, 'model.h5'))
        return model

    def _count_size(self, files):
        size = 0
        for f in files:
            size += os.path.getsize(f)
        return size

    def test_class(self, model, language):
        ok = 0
        results = []
        count = 0
        total_test = self.file_len(os.path.join(self._path_test_csv, language + '.csv'))

        with open(os.path.join(self._path_test_csv, language + '.csv'), newline='') as csvfile:
            r = csv.reader(csvfile, delimiter=' ', quotechar='|')
            for pair in r:
                label, string = pair
                label = int(label)
                string = literal_eval(string)
                tokens = [x + 1 for x in tokenizer(string, 'letter')]
                result = self._guess_file_language(model, tokens)
                count += 1
                print('[{0:4d}/{1:4d}] {2}:{3} '.format(count, total_test, result[0][1], result[0][0]),end='\r')
                results.append(result[0])
                if result[0][1] == language:
                    ok += 1

        accuracy = ok / total_test
        print('Tests for {} '.format(language))
        print('Total test files : {}'.format(total_test))
        print('Correctly classified files : {}'.format(ok))
        print('Accuracy : {}%'.format(accuracy * 100))
        return (ok, total_test, accuracy, results)

    def speed_benchmark(self):
        language = self._languages[10]
        model = self._load_model()

        test_set = self._get_test_set(language)
        total_size = self._count_size(test_set)
        print('{} kB in total'.format(total_size / 1024))

        t_start = time.perf_counter()
        self.test_class(model, language)
        t_end = time.perf_counter()

        print('{} seconds.'.format(t_end - t_start))
        print('{} seconds per KiB'.format(((t_end - t_start) / total_size) * 1024))

    def _guess_file_language(self, model, tokens):
        X = pad_sequences([tokens], maxlen=self._input_size)
        result = list(model.predict(X))[0]
        result = [(s, self._languages[i]) for i, s in enumerate(result)]
        return sorted(result, reverse=True)

if __name__ == '__main__':
    main()
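The reworked `main()` above replaces the nested maxsize/epochs conditionals with two defaults that the flags override, but it now branches on `args.train_gpu` even though no `--gpu` argument is added in this hunk, so that branch would raise AttributeError at runtime; note also that `--spark` is declared with `type=bool`, which argparse turns into True for any non-empty string. Below is a minimal sketch, not part of the patch, of how the two switches could be declared so both attributes exist; the `--gpu` flag and the `store_true` form are assumptions, and the subcommand plumbing is omitted.

# Sketch only: hypothetical flag wiring for the refactored option handling.
import argparse

parser = argparse.ArgumentParser(description='character-level ConvNet trainer (sketch)')
parser.add_argument('train_path', metavar='PATH', type=str, help='Path of the training dataset.')
parser.add_argument('-s', '--spark', action='store_true', dest='train_spark',
                    help='Train on a Spark cluster (store_true avoids the type=bool pitfall).')
parser.add_argument('-g', '--gpu', action='store_true', dest='train_gpu',
                    help='Hypothetical flag so that args.train_gpu exists.')
parser.add_argument('-ms', '--maxsize', metavar='SIZE', dest='train_maxsize', type=int, default=1024)
parser.add_argument('-e', '--epochs', metavar='N', dest='train_epochs', type=int, default=50)

args = parser.parse_args(['dataset.csv', '--gpu'])
maxsize, epochs = args.train_maxsize, args.train_epochs  # defaults replace the removed nested ifs
print(maxsize, epochs, args.train_spark, args.train_gpu)  # 1024 50 False True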
diff --git a/swh/langdetect/cnn_w.py b/swh/langdetect/cnn_w.py
index 6500e74..3ae2a36 100644
--- a/swh/langdetect/cnn_w.py
+++ b/swh/langdetect/cnn_w.py
@@ -1,302 +1,301 @@
import os
import sys
import subprocess
import time
import random
import csv
import numpy as np
import warnings

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    import tensorflow as tf

import json
import argparse

from ast import literal_eval
from pickle import dump
from pickle import load
from numpy import array
from .utils.common import tokenizer
from .utils.common import file_to_string
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import EarlyStopping
from keras.models import Model
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Merge
from keras.layers import Dropout
from keras.layers import ThresholdedReLU
from keras.layers import Activation
from keras.layers import Lambda
from keras.layers import Embedding
from keras.layers.convolutional import Convolution1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.normalization import BatchNormalization
from keras.layers import Concatenate
from keras.utils import np_utils
from keras.optimizers import SGD
from collections import Counter

csv.field_size_limit(sys.maxsize)

from keras import backend as K
K.set_session(K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)))

def main():
    parser = argparse.ArgumentParser(description='Training and test tool of charactor-level ConvNet text categorisation.')

    subparsers = parser.add_subparsers(dest='sub_command')

    parser_train = subparsers.add_parser('train', help='Training on the dataset, dataset must be a *.csv file. A model will be created in the same directory.')
    parser_train.add_argument('train_path', metavar='PATH', type=str, help='Path of the training dataset.')
    parser_train.add_argument('-ms', '--maxsize', metavar='SIZE', dest='train_maxsize', type=int, help='Set maximum input size of ConvNet, default 1024.')
    parser_train.add_argument('-e', '--epochs', metavar='N', dest='train_epochs', type=int, help='Number of training epochs (iterations), default 50.')
    parser_test = subparsers.add_parser('test', help='Test on the dataset, dataset must be a directory with *.csv dataset named by corresponding language.')
    parser_test.add_argument('test_root', metavar='ROOT', type=str, help='Root of the test dataset.')

    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()
    args = parser.parse_args()

    if args.sub_command == "train":
        if args.train_maxsize:
            if args.train_epochs:
                n = CNNword(args.train_path, maxsize=args.train_maxsize, epochs=args.train_epochs)
                n.train()
            else:
                n = CNNword(args.train_path, maxsize=args.train_maxsize)
                n.train()
        else:
            if args.train_epochs:
                n = CNNword(args.train_path, epochs=args.train_epochs)
                n.train()
            else:
                n = CNNword(args.train_path)
                n.train()
    elif args.sub_command == "test":
        n = CNNword(args.test_root)
        print(args.test_root)
        n.test()
    else:
        parser.parse_args('-h')

class CNNword:

    def __init__(self, path, maxsize=768, epochs=30):

        self._path = path

        # Root of model folder
        self._root_model = os.path.join(os.path.dirname(path), 'model_cnn_word')
        try:
            os.mkdir(self._root_model)
        except:
            pass

        # Path of result
        self._path_result = os.path.join(os.path.dirname(path), 'result_cnn_word')

        dir_path = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(dir_path, 'static_data', 'languages.json'), 'r') as f:
            self._languages = json.load(f)

        self._path_test_csv = path
        self._path_vocab = os.path.join(self._root_model, 'vocab')

        self._input_size = maxsize
        self._vocab_size = 20001
        self._num_of_classes = len(self._languages)
        self._batch_size = 64
        self._epochs = epochs

        if not os.path.isfile(self._path_vocab):
            self._learn_vocab(self._input_size, self._num_of_classes)
        with open(self._path_vocab, 'rb') as f:
            c = load(f)
        l = c.most_common(20000)
        print(l)
        self._indexer = dict((v[0], i + 1) for i, v in enumerate(l))
        self._oov_index = len(self._indexer) + 1

    def file_len(self, fname):
        with open(fname) as f:
            count = 0
            for l in f:
                count += 1
        return count

    def train(self):
        model = self._get_model()
        earlystop = EarlyStopping(monitor='loss', min_delta=0, patience=2, verbose=0, mode='auto')
        callbacks = [earlystop]
        model.fit_generator(
            self._generator(self._input_size, self._num_of_classes, self._batch_size),
            steps_per_epoch=self.file_len(self._path) / self._batch_size, epochs=self._epochs, callbacks=callbacks)
        model.save(os.path.join(self._root_model, 'model.h5'))

    def _learn_vocab(self, length, total_class):
        c = Counter()
        with open(self._path, newline='') as csvfile:
            r = csv.reader(csvfile, delimiter=' ', quotechar='|')
            for pair in r:
                label, string = pair
                label = int(label)
                print(label, end='\r')
                string = literal_eval(string)
                tokens = tokenizer(string, 'word')
                c.update(tokens)
        with open(self._path_vocab, 'wb') as f:
            dump(c, f)

    def _generator(self, length, total_class, batch_size=64):
        counter = 0
        while True:
            with open(self._path, newline='') as csvfile:
                r = csv.reader(csvfile, delimiter=' ', quotechar='|')
                for pair in r:
                    if counter == 0:
                        X = np.empty((0, length))
                        Y = np.empty((0, total_class))
                    label, string = pair
                    label = int(label)
                    string = literal_eval(string)
                    tokens = [self._indexer.get(x, self._oov_index) for x in tokenizer(string, 'word')]
                    X = np.append(X, pad_sequences([tokens], maxlen=length), axis=0)
                    label = array(np_utils.to_categorical([label], total_class))
                    Y = np.append(Y, label, axis=0)
                    counter += 1
                    if counter == batch_size:
                        counter = 0
                        yield(X,Y)

    def _get_model(self):
        input_size = self._input_size
        vocab_size = self._vocab_size
        embedding_size = 128
        optimizer = 'adam'
        loss = 'categorical_crossentropy'
        num_of_classes = self._num_of_classes

        embedding_layer = Embedding(vocab_size + 1,
                                    embedding_size,
                                    input_length=input_size,
-#                                   trainable=False,
        )

        # applying a more complex convolutional approach
        convs = []
        filter_sizes = [3,4,5]

        sequence_input = Input(shape=(input_size,), dtype='int64')
        embedded_sequences = embedding_layer(sequence_input)

        for fsz in filter_sizes:
            l_conv = Convolution1D(filters=10, kernel_size=fsz, activation='relu')(embedded_sequences)
            l_pool = MaxPooling1D(5)(l_conv)
            convs.append(l_pool)

        l_merge = Concatenate(axis=1)(convs)
        l_conv1= Convolution1D(128, 5, activation='relu')(l_merge)
        l_pool1 = MaxPooling1D(5)(l_conv1)
        l_conv2 = Convolution1D(128, 5, activation='relu')(l_pool1)
        l_pool2 = MaxPooling1D(5)(l_conv2)
        l_flat = Flatten()(l_pool2)
        l_dense = Dense(512, activation='relu')(l_flat)
        preds = Dense(num_of_classes, activation='softmax')(l_dense)

        model = Model(sequence_input, preds)
        model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])

        print(model.summary())
        return model

    def _max_len(self, texts):
        return max([len(text) for text in texts])

    def test(self):
        csv.field_size_limit(sys.maxsize)
        try:
            r = open(self._path_result, 'rb')
            test_result = load(r)
            r.close()
        except FileNotFoundError:
            test_result = {}

        model = self._load_model()

        for language in [x for x in self._languages if x not in test_result.keys()]:
            test_result[language] = self.test_class(model, language)
            with open(self._path_result, 'wb') as f:
                dump(test_result, f)

    def _load_model(self):
        model = load_model(os.path.join(self._root_model, 'model.h5'))
        return model

    def _count_size(self, files):
        size = 0
        for f in files:
            size += os.path.getsize(f)
        return size

    def test_class(self, model, language):
        ok = 0
        results = []
        count = 0
        total_test = self.file_len(os.path.join(self._path_test_csv, language + '.csv'))

        with open(os.path.join(self._path_test_csv, language + '.csv'), newline='') as csvfile:
            r = csv.reader(csvfile, delimiter=' ', quotechar='|')
            for pair in r:
                label, string = pair
                label = int(label)
                string = literal_eval(string)
                tokens = [self._indexer.get(x, self._oov_index) for x in tokenizer(string, 'word')]
                result = self._guess_file_language(model, tokens)
                count += 1
                print('[{0:4d}/{1:4d}] {2}:{3} '.format(count, total_test, result[0][1], result[0][0]),end='\r')
                results.append(result[0])
                if result[0][1] == language:
                    ok += 1

        accuracy = ok / total_test
        print('Tests for {} '.format(language))
        print('Total test files : {}'.format(total_test))
        print('Correctly classified files : {}'.format(ok))
        print('Accuracy : {}%'.format(accuracy * 100))
        return (ok, total_test, accuracy, results)

    def speed_benchmark(self):
        language = self._languages[10]
        model = self._load_model()

        test_set = self._get_test_set(language)
        total_size = self._count_size(test_set)
        print('{} kB in total'.format(total_size / 1024))

        t_start = time.perf_counter()
        self.test_class(model, language)
        t_end = time.perf_counter()

        print('{} seconds.'.format(t_end - t_start))
        print('{} seconds per KiB'.format(((t_end - t_start) / total_size) * 1024))

    def _guess_file_language(self, model, tokens):
        X = pad_sequences([tokens], maxlen=self._input_size)
        result = list(model.predict(X))[0]
        result = [(s, self._languages[i]) for i, s in enumerate(result)]
        return sorted(result, reverse=True)

if __name__ == '__main__':
    main()
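In cnn_w.py the classifier works on word tokens: the vocabulary is a pickled Counter, the 20,000 most common tokens get indices starting at 1, and everything else maps to a single out-of-vocabulary index. A small self-contained sketch of that mapping follows; the toy corpus and sample are invented, while the cutoff and the +1 offsets mirror the patch.

# Sketch only: word-to-index mapping as used by CNNword's _indexer/_oov_index.
from collections import Counter

corpus_tokens = ['def', 'return', 'if', 'def', 'import', 'def', 'if']
c = Counter(corpus_tokens)

vocab = c.most_common(20000)                                # [(token, count), ...]
indexer = {tok: i + 1 for i, (tok, _) in enumerate(vocab)}  # 0 stays free for padding
oov_index = len(indexer) + 1                                # bucket for unseen tokens

sample = ['def', 'while', 'return']                         # 'while' was never seen
encoded = [indexer.get(tok, oov_index) for tok in sample]
print(encoded)                                              # [1, 5, 3] for this toy corpus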
diff --git a/swh/langdetect/ngramdist.py b/swh/langdetect/ngramdist.py
index e67df63..3dc4e9b 100644
--- a/swh/langdetect/ngramdist.py
+++ b/swh/langdetect/ngramdist.py
@@ -1,249 +1,234 @@
-"""
-Baseline approach
-"""
-
-import os, sys, operator, nltk, random, time
-
+import os
+import sys
+import time
+import random
+import csv
+import json
+import argparse
+import nltk
+import operator
+
+from ast import literal_eval
from itertools import islice
from pickle import dump, load
from nltk.util import ngrams
from .utils.common import tokenizer, file_to_string, find_file, count_files

-class NGramDist:
-
-    def __init__(self, root):
-        # Root of dataset
-        self._root = root
+csv.field_size_limit(sys.maxsize)

-        # Root of training set
-        self._root_training_set = os.path.join(self._root, '..', 'training_set')
+def main():
+    parser = argparse.ArgumentParser(description='Training and test tool of frequency distance of n-grams.')

-        # Root of model folder
-        self._root_model = os.path.join(self._root, '..', 'model_ngram_dist')
+    subparsers = parser.add_subparsers(dest='sub_command')
+
+    parser_train = subparsers.add_parser('train', help='Training on the dataset, dataset must be a *.csv file. A model will be created in the same directory.')
+    parser_train.add_argument('train_path', metavar='PATH', type=str, help='Path of the training dataset.')
+    # parser_train.add_argument('-n', '--ngrams', metavar='N', dest='train_maxsize', type=int, help='Set maximum input size of ConvNet, default 5.')
+    parser_test = subparsers.add_parser('test', help='Test on the dataset, dataset must be a directory with *.csv dataset named by corresponding language.')
+    parser_test.add_argument('test_root', metavar='ROOT', type=str, help='Root of the test dataset.')
+
+    if len(sys.argv[1:]) == 0:
+        parser.print_help()
+        parser.exit()
+    args = parser.parse_args()
+
+    if args.sub_command == 'train' :
+        n = NGramDist(args.train_path)
+        n.train()
+    elif args.sub_command == 'test':
+        n = NGramDist(args.test_root)
+        n.test()
+    else:
+        parser.parse_args('-h')

-        # Root of arranged dataset
-        self._root_language_dataset = os.path.join(self._root, '..', 'code_by_language')
+class NGramDist:

-        # Path of result
-        self._path_result = os.path.join(self._root, '..', 'result_freq')
+    def __init__(self, path):

-    def train(self):
-        '''
-        train () generates and stores counted n-grams in '_root_model' folder
-        '''
+        self._path = path
+        # Root of model folder
+        self._root_model = os.path.join(os.path.dirname(path), 'model_ngram_dist')
        try:
            os.mkdir(self._root_model)
-        except FileExistsError:
+        except:
            pass

-        '''
-        Calculate frequencies of generated n-grams then store
-        them into a sorted list of (ngram, count)
-        '''
-        for language in os.listdir(self._root_training_set):
-            if not language.startswith('.'):
-                root_training_set_language = os.path.join(self._root_training_set, language)
-                root_stat_language = os.path.join(self._root_model, language)
-                if os.path.isfile(root_stat_language):
-                    continue
-                statistics = {}
-                for f in os.listdir(root_training_set_language):
-                    print(f)
-                    if not f.startswith('.'):
-                        filename = os.path.join(root_training_set_language, f)
-                        tokens = tokenizer(file_to_string(filename), 'letter')
-                        generated_ngrams = self._generate_ngrams([chr(token) for token in tokens], 3)
-                        self._count_ngrams(statistics, generated_ngrams)
-                with open(root_stat_language, 'wb') as f:
-                    dump(self._sort_by_value(statistics), f)
-
+        # Path of result
+        self._path_result = os.path.join(os.path.dirname(path), 'result_ngram_dist')
+
+        dir_path = os.path.dirname(os.path.abspath(__file__))
+        with open(os.path.join(dir_path, 'static_data', 'languages.json'), 'r') as f:
+            self._languages = json.load(f)
+
+        self._path_test_csv = path
+
+        self._num_of_classes = len(self._languages)
+
+    def file_len(self, fname):
+        with open(fname) as f:
+            count = 0
+            for l in f:
+                count += 1
+        return count
+
+    def train(self):
+        statistics = {}
+        with open(self._path, newline='') as csvfile:
+            r = csv.reader(csvfile, delimiter=' ', quotechar='|')
+            for pair in r:
+                label, string = pair
+                label = int(label)
+                language = self._languages[label]
+                print(language, end='\r')
+                statistics_lang = statistics.get(language, {})
+
+                string = literal_eval(string)
+                tokens = tokenizer(string, 'letter')
+                generated_ngrams = self._generate_ngrams([chr(token) for token in tokens], 3)
+                self._count_ngrams(statistics_lang, generated_ngrams)
+
+        for language in self._languages:
+            with open(os.path.join(self._root_model, language), 'wb') as f:
+                dump(self._sort_by_value(statistics[language], f))
+
    def _generate_ngrams(self, tokens, n):
-        '''
-        :param tokens: generated tokens from a string.
-        :param n: maximum n of n-grams
-        :type tokens: list
-        :type n: int
-        :return: generated 1-grams, ... , n-grams
-        :rtype: list
-        '''
        generated_ngrams = []
        for i in range(1, n+1):
            igrams = ngrams(tokens, i, pad_left=True, pad_right=True, left_pad_symbol = '$BOF$', right_pad_symbol = '$EOF$')
            for igram in igrams:
                generated_ngrams.append(''.join(igram))
        return generated_ngrams
-
+
    def _count_ngrams(self, statistics, ngrams):
-        '''
-        :param statistics: shared dictionary for statistics
-        :param ngrams: n-grams to be accumulated into statistics
-        '''
        for ngram in ngrams:
            statistics[ngram] = statistics.get(ngram, 0) + 1
-
+
    def test(self):
        try:
            r = open(self._path_result, 'rb')
            test_result = load(r)
            r.close()
        except FileNotFoundError:
            test_result = {}
-        models = self._load_models()
+
+        model = self._load_models()

-        for language in [x for x in os.listdir(self._root_model) if not x.startswith('.') and x not in test_result.keys()]:
-            test_result[language] = self.test_class(models, language)
+        for language in [x for x in self._languages if x not in test_result.keys()]:
+            test_result[language] = self.test_class(model, language)
            with open(self._path_result, 'wb') as f:
                dump(test_result, f)

-    def speed_benchmark(self):
-        language = [x for x in os.listdir(self._root_model) if not x.startswith('.')][10]
-        models = self._load_models()
-
-        test_set = self._get_test_set(language)
-        total_size = self._count_size(test_set)
-        print('{} kB in total'.format(total_size / 1024))
-
-        t_start = time.perf_counter()
-        self.test_class(models, language)
-        t_end = time.perf_counter()
-
-        print('{} seconds.'.format(t_end - t_start))
-        print('{} seconds per kB'.format(((t_end - t_start) / total_size) * 1024))
-
-
    def _load_models(self):
        models = {}
        for model in [model for model in os.listdir(self._root_model) if not model.startswith('.')]:
            root_model = os.path.join(self._root_model, model)
            with open(root_model, 'rb') as sorted_file:
                models[model] = self._list_to_dict(load(sorted_file))
-
+
        return models

    def _list_to_dict(self, model):
        model_ngrams = [x[0] for x in model]
        model_dict = {}
        index = 0
        for ngram in model_ngrams:
            index += 1
            model_dict[ngram] = index
        return model_dict
-
-    def _get_test_set(self, language):
-        root_training_language = os.path.join(self._root_training_set, language)
-        root_language = os.path.join(self._root_language_dataset, language)
-        total = count_files(root_language)
-        training_set = [int(os.path.splitext(x)[0]) for x in os.listdir(root_training_language) if not x.startswith('.')]
-        it = (find_file(root_language, x) for x in range(1, total + 1) if x not in training_set and os.path.getsize(find_file(root_language, x)) <= 1048576)
-        test_set = list(islice(it, 1000))
-        if len(test_set) == 0:
-            it = (find_file(root_language, x) for x in range(1, total + 1) if x not in training_set)
-            test_set = list(islice(it, 1000))
-        return test_set
-
+
    def _count_size(self, files):
        size = 0
        for f in files:
            size += os.path.getsize(f)
        return size
-
-    def test_class(self, models, language):
-        test_set = self._get_test_set(language)
-
+
+    def test_class(self, model, language):
        ok = 0
        results = []
        count = 0
-        length = len(test_set)
-        for test in test_set:
-            result = self._guess_file_language(models, test)
-            count += 1
-            print('[{0:4d}/{1:4d}] {2}:{3} '.format(count, length, result[0][1], result[0][0]),end='\r')
-            results.append(result[0])
-            if result[0][1] == language:
-                ok += 1
-
-        total_test = len(test_set)
-        accuracy = ok / len(test_set)
+        total_test = self.file_len(os.path.join(self._path_test_csv, language + '.csv'))
+
+        with open(os.path.join(self._path_test_csv, language + '.csv'), newline='') as csvfile:
+            r = csv.reader(csvfile, delimiter=' ', quotechar='|')
+            for pair in r:
+                label, string = pair
+                label = int(label)
+                string = literal_eval(string)
+                result = self._guess_file_language(model, string)
+                count += 1
+                print('[{0:4d}/{1:4d}] {2}:{3} '.format(count, total_test, result[0][1], result[0][0]),end='\r')
+                results.append(result[0])
+                if result[0][1] == language:
+                    ok += 1
+
+        accuracy = ok / total_test
        print('Tests for {} '.format(language))
        print('Total test files : {}'.format(total_test))
        print('Correctly classified files : {}'.format(ok))
        print('Accuracy : {}%'.format(accuracy * 100))
-        return (ok, len(test_set), accuracy, results)
+        return (ok, total_test, accuracy, results)
+
+    def speed_benchmark(self):
+        language = self._languages[10]
+        model = self._load_model()

-    def test_single(self, models, filename):
-        self._guess_file_language(models, filename)
+        test_set = self._get_test_set(language)
+        total_size = self._count_size(test_set)
+        print('{} kB in total'.format(total_size / 1024))

-    def _guess_file_language(self, models, filename):
+        t_start = time.perf_counter()
+        self.test_class(model, language)
+        t_end = time.perf_counter()

-        tokens = tokenizer(file_to_string(filename), 'letter')
+        print('{} seconds.'.format(t_end - t_start))
+        print('{} seconds per KiB'.format(((t_end - t_start) / total_size) * 1024))
+
+    def _guess_file_language(self, models, string):
+        tokens = tokenizer(string, 'letter')
        generated_ngrams = self._generate_ngrams([chr(token) for token in tokens], 3)
        statistics = {}
        self._count_ngrams(statistics, generated_ngrams)
        test_profile = self._list_to_dict(self._sort_by_value(statistics))
        result = []
        for model in models.keys():
            root_model = os.path.join(self._root_model, model)
            model_profile = models[model]
            distance = self._distance(model_profile, test_profile)
            result.append((distance, model))
-
+
        return sorted(result)

    def _sort_by_value(self, statistics):
        statistics_sorted = sorted(statistics.items(), key = operator.itemgetter(1), reverse = True)[:500]
        return statistics_sorted
-
+
    def _distance(self, model_profile, test_profile):
        distance = 0
        maximum = len(test_profile)
        for test_ngram in test_profile.keys():
            test_rank = test_profile.get(test_ngram)
            model_rank = model_profile.get(test_ngram, maximum)
            d = abs(test_rank - model_rank)
            distance += d
        return distance
-    '''
-    def _prob(model, trigrams):
-        print('Checking {} model ...'.format(model))
-        with open(model, 'rb') as f:
-            kneser_ney = load(f)
-        result = 1
-        for trigram in trigrams:
-            prob = kneser_ney.prob(trigram)
-            result = result * prob
-        return result
-    '''

if __name__ == '__main__':
-    if len(sys.argv) == 3 and sys.argv[1] == '--train':
-        n = NGramDist(sys.argv[2])
-        n.train()
-    elif len(sys.argv) == 3 and sys.argv[1] == '--test':
-        n = NGramDist(sys.argv[2])
-        n.test()
-    elif len(sys.argv) == 3 and sys.argv[1] == '--benchmark':
-        n = NGramDist(sys.argv[2])
-        n.speed_benchmark()
-    elif len(sys.argv) == 4 and sys.argv[1] == '--test':
-        n = NGramDist(sys.argv[2])
-        n.test_class(n.load_models(), sys.argv[3])
-    else:
-        print('Wrong arguments, please check your input.')
+    main()
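The rewritten NGramDist keeps the rank-order ("out-of-place") distance of the baseline: the most frequent n-grams of a file are ranked, and the distance to a language profile is the sum of rank differences, with n-grams absent from the model penalised by the maximum rank. A standalone sketch of that measure on invented toy profiles, not code from the patch:

# Sketch only: the out-of-place distance that _distance() computes.
def out_of_place_distance(model_profile, test_profile):
    # Both profiles map n-gram -> rank (1 = most frequent).
    maximum = len(test_profile)              # penalty for n-grams the model lacks
    distance = 0
    for ngram, test_rank in test_profile.items():
        model_rank = model_profile.get(ngram, maximum)
        distance += abs(test_rank - model_rank)
    return distance

model = {'def': 1, 'ret': 2, 'imp': 3}       # invented language profile
test = {'def': 1, 'imp': 2, 'fn(': 3}        # invented test profile
print(out_of_place_distance(model, test))    # |1-1| + |2-3| + |3-3| = 1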