diff --git a/swh/langdetect/cnn.py b/swh/langdetect/cnn.py
index 46b60db..fb8d5a0 100644
--- a/swh/langdetect/cnn.py
+++ b/swh/langdetect/cnn.py
@@ -1,350 +1,327 @@
 import os
 import sys
 import subprocess
 import time
 import random
 import csv
 import numpy as np
 import warnings
+import gzip

 with warnings.catch_warnings():
     warnings.simplefilter("ignore")
     import tensorflow as tf

 import json
 import argparse

 from ast import literal_eval
 from pickle import dump
 from pickle import load
 from numpy import array
 from .utils.common import Tokenizer
 from .utils.common import file_to_string
 from keras.preprocessing.sequence import pad_sequences
 from keras.callbacks import EarlyStopping
 from keras.models import Model
 from keras.models import Sequential
 from keras.models import load_model
 from keras.layers import Input
 from keras.layers import Dense
 from keras.layers import Flatten
 from keras.layers import Dropout, AlphaDropout
 from keras.layers import ThresholdedReLU
 from keras.layers import Activation
 from keras.layers import Lambda
 from keras.layers import Embedding
 from keras.layers import Concatenate, GlobalMaxPooling1D
 from keras.layers.convolutional import Convolution1D, MaxPooling1D
 from keras.layers.normalization import BatchNormalization
 from keras.utils import np_utils
 from keras.optimizers import SGD
-from pyspark import SparkContext, SparkConf
-from elephas.spark_model import SparkModel # pip install flask
-from elephas import optimizers as elephas_optimizers
-from elephas.utils.rdd_utils import to_labeled_point
+#from pyspark import SparkContext, SparkConf
+#from elephas.spark_model import SparkModel # pip install flask
+#from elephas import optimizers as elephas_optimizers
+#from elephas.utils.rdd_utils import to_labeled_point

 csv.field_size_limit(sys.maxsize)
-
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

 from keras import backend as K
-K.set_session(K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)))
+#K.set_session(K.tf.Session(config=K.tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)))


 def main():
     parser = argparse.ArgumentParser(description='Training and test tool of charactor-level ConvNet text categorisation.')
     subparsers = parser.add_subparsers(dest='sub_command')

     parser_train = subparsers.add_parser('train', help='Training on the dataset, dataset must be a *.csv file. A model will be created in the same directory.')
     parser_train.add_argument('-s', '--spark', type=bool, help='Training on cluster.', dest='train_spark')
     parser_train.add_argument('train_path', metavar='PATH', type=str, help='Path of the training dataset.')
     parser_train.add_argument('-ms', '--maxsize', metavar='SIZE', dest='train_maxsize', type=int, help='Set maximum input size of ConvNet, default 1024.')
     parser_train.add_argument('-e', '--epochs', metavar='N', dest='train_epochs', type=int, help='Number of training epochs (iterations), default 50.')
     parser_test = subparsers.add_parser('test', help='Test on the dataset, dataset must be a directory with *.csv dataset named by corresponding language.')
     parser_test.add_argument('test_root', metavar='ROOT', type=str, help='Root of the test dataset.')
+    parser_clf = subparsers.add_parser('clf', help='Test a file.')
+    parser_clf.add_argument('clf_path', metavar='PATH', type=str, help='Path of test file.')

     if len(sys.argv[1:]) == 0:
         parser.print_help()
         parser.exit()
     args = parser.parse_args()
-
+
+    maxsize = 2048
+    epochs = 15
     if args.sub_command == 'train' :
-        maxsize = 1024
-        epochs = 15
-
         if args.train_maxsize:
             maxsize = args.train_maxsize
         if args.train_epochs:
             epochs = args.train_epochs
         n = CNN(args.train_path, maxsize=maxsize, epochs=epochs)
         if args.train_spark:
             n.train_on_cluster()
         else:
             n.train()
-
-
     elif args.sub_command == 'test':
-        n = CNN(args.test_root)
+        n = CNN(args.test_root, maxsize=maxsize, epochs=epochs)
         n.test()
+    elif args.sub_command == 'clf':
+        n = CNN(None, maxsize, None)
+        n.classify(args.clf_path)
     else:
         parser.parse_args('-h')


 class CNN:

     def __init__(self, path, maxsize, epochs):
-
-        self._path = path
-        # Root of model folder
-        self._root_model = os.path.join(os.path.dirname(path), 'model_cnn')
-        try:
-            os.mkdir(self._root_model)
-        except:
-            pass
+        if path != None:
+            self._path = path

-        # Path of result
-        self._path_result = os.path.join(os.path.dirname(path), 'result_cnn')
+            # Root of model folder
+            self._root_model = os.path.join(os.path.dirname(path), 'model_cnn')
+            try:
+                os.mkdir(self._root_model)
+            except:
+                pass

+            # Path of result
+            self._path_result = os.path.join(os.path.dirname(path), 'result_cnn')
+            self._path_test_csv = path

         dir_path = os.path.dirname(os.path.abspath(__file__))
-        with open(os.path.join(dir_path, 'static_data', 'languages_less.json'), 'r') as f:
+        with open(os.path.join(dir_path, 'static_data', 'languages.json'), 'r') as f:
             self._languages = json.load(f)
-
-        self._path_test_csv = path
-
         self._input_size = maxsize
         self._vocab_size = 256
         self._num_of_classes = len(self._languages)
         self._batch_size = 64
         self._epochs = epochs
+        self._model = None
+        if path == None and epochs == None:
+            self._model = load_model(os.path.join(dir_path, 'static_data', 'model.h5'))

     def file_len(self, fname):
         with open(fname) as f:
             count = 0
             for l in f:
                 count += 1
         return count

     def train(self):
-        model = self._get_model()
+        self._get_model()
         earlystop = EarlyStopping(monitor='loss', min_delta=0, patience=3, verbose=0, mode='auto')
         callbacks = [earlystop]
-        model.fit_generator(
+        self._model.fit_generator(
             self._generator(self._input_size, self._num_of_classes, self._batch_size),
             steps_per_epoch=self.file_len(self._path) / self._batch_size,
             epochs=self._epochs,
             callbacks=callbacks)
-        model.save(os.path.join(self._root_model, 'model.h5'))
-
-    def train_on_cluster(self):
-
-        rdd = self._get_train_rdd()
-        model = self._get_model()
-        adagrad = elephas_optimizers.Adagrad()
-
-        spark_model = SparkModel(sc, model,
-                                 optimizer=adagrad, frequency='epoch', mode='asynchronous', num_workers=2)
-        spark_model.train(rdd, nb_epoch=self._epochs, batch_size=self._batch_size, verbose=0, categorical=True, nb_classes=self._num_of_classes)
-
-        model.save(os.path.join(self._root_model, 'model.h5'))
-
-    def _get_train_rdd(self):
-        print('Prepairing RDD for training...')
-        X_train = np.empty((0, self._input_size))
-        Y_train = np.empty((0, self._num_of_classes))
-        with open(self._path, newline='') as csvfile:
-            r = csv.reader(csvfile, delimiter=' ', quotechar='|')
-            for pair in r:
-                label, string = pair
-                label = int(label)
-                print(label, end='\r')
-                string = literal_eval(string)
-                tokens = [x + 1 for x in Tokenizer.tokenize(string, 'letter')]
-                X_train = np.append(X_train,
-                                    pad_sequences([tokens], maxlen=self._input_size),
-                                    axis=0)
-                label = array(np_utils.to_categorical([label], self._num_of_classes))
-                Y_train = np.append(Y_train, label, axis=0)
-        rdd = to_labeled_point(sc, X_train, Y_train, categorical=True)
+        self._model.save(os.path.join(self._root_model, 'model.h5'))

     def _generator(self, length, total_class, batch_size=128):
         counter = 0
         while True:
             with open(self._path, newline='') as csvfile:
                 r = csv.reader(csvfile, delimiter=' ', quotechar='|')
                 for pair in r:
                     if counter == 0:
                         X = np.empty((0, length))
                         Y = np.empty((0, total_class))
                     label, string = pair
                     label = int(label)
                     string = literal_eval(string)
                     tokens = [x + 1 for x in Tokenizer.tokenize(string, 'letter')]
                     X = np.append(X, pad_sequences([tokens], maxlen=length), axis=0)
                     label = array(np_utils.to_categorical([label], total_class))
                     Y = np.append(Y, label, axis=0)
                     counter += 1
                     if counter == batch_size:
                         counter = 0
                         yield(X,Y)

     def _get_model_zhang(self):
         input_size = self._input_size
         alphabet_size = self._vocab_size
         embedding_size = 128
         conv_layers = [(256,7,3), (256,7,3), (256,3,-1), (256,3,-1), (256,3,-1), (256,3,3)]
         threshold = 1e-6
         fully_connected_layers = [1024, 1024]
         dropout_p = 0.2
         optimizer = 'adam'
         loss = 'categorical_crossentropy'
         num_of_classes = self._num_of_classes

         # Input layer
         inputs = Input(shape=(input_size,), name='sent_input', dtype='int64')

         # Embedding layers
         x = Embedding(alphabet_size + 1, embedding_size, input_length=input_size)(inputs)

         # Convolution layers
         for cl in conv_layers:
             x = Convolution1D(cl[0], cl[1])(x)
             x = ThresholdedReLU(threshold)(x)
             if cl[2] != -1:
                 x = MaxPooling1D(cl[2])(x)
         x = Flatten()(x)

         # Fully connected layers
         for fl in fully_connected_layers:
             x = Dense(fl)(x)
             x = ThresholdedReLU(threshold)(x)
             x = Dropout(dropout_p)(x)

         # Output layer
         predictions = Dense(num_of_classes, activation='softmax')(x)

         # Build and compile model
         model = Model(inputs=inputs, outputs=predictions)
         model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
         print(model.summary())
-        return model
+        self._model = model

     def _get_model(self):
         input_size = self._input_size
         alphabet_size = self._vocab_size
-        embedding_size = 32
+        embedding_size = 64
         conv_layers = [(256,10), (256,7), (256,5), (256,3)]
         threshold = 1e-6
         fully_connected_layers = [1024, 1024]
         dropout_p = 0.1
         optimizer = 'adam'
         loss = 'categorical_crossentropy'
         num_of_classes = self._num_of_classes

         # Input layer
         inputs = Input(shape=(input_size,), name='sent_input', dtype='int64')

         # Embedding layers
         x = Embedding(alphabet_size + 1, embedding_size, input_length=input_size)(inputs)

         convolution_output = []
         # Convolution layers
         for num_filters, filter_width in conv_layers:
             conv = Convolution1D(filters=num_filters, kernel_size=filter_width, activation='tanh',
                                  name='Conv1D_{}_{}'.format(num_filters, filter_width))(x)
             pool = GlobalMaxPooling1D(name='MaxPoolingOverTime_{}_{}'.format(num_filters, filter_width))(conv)
             convolution_output.append(pool)
         x = Concatenate()(convolution_output)

         # Fully connected layers
         for fl in fully_connected_layers:
             x = Dense(fl, activation='selu', kernel_initializer='lecun_normal')(x)
             x = Dropout(dropout_p)(x)

         # Output layer
         predictions = Dense(num_of_classes, activation='softmax')(x)

         # Build and compile model
         model = Model(inputs=inputs, outputs=predictions)
         model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
         print(model.summary())
-        return model
+        self._model = model

     def _max_len(self, texts):
         return max([len(text) for text in texts])

     def test(self):
         csv.field_size_limit(sys.maxsize)

         try:
             r = open(self._path_result, 'rb')
             test_result = load(r)
             r.close()
         except FileNotFoundError:
             test_result = {}

-        model = self._load_model()
+        self._load_model()

         for language in [x for x in self._languages if x not in test_result.keys()]:
-            test_result[language] = self.test_class(model, language)
+            test_result[language] = self.test_class(language)
             with open(self._path_result, 'wb') as f:
                 dump(test_result, f)
-
-    def _load_model(self):
-        model = load_model(os.path.join(self._root_model, 'model.h5'))
-
-        return model

     def _count_size(self, files):
         size = 0
         for f in files:
             size += os.path.getsize(f)
         return size

-    def test_class(self, model, language):
+    def test_class(self, language):
         ok = 0
         results = []
         count = 0
         total_test = self.file_len(os.path.join(self._path_test_csv, language + '.csv'))

         with open(os.path.join(self._path_test_csv, language + '.csv'), newline='') as csvfile:
             r = csv.reader(csvfile, delimiter=' ', quotechar='|')
             for pair in r:
                 label, string = pair
                 label = int(label)
                 string = literal_eval(string)
                 tokens = [x + 1 for x in Tokenizer.tokenize(string, 'letter')]
-                result = self._guess_file_language(model, tokens)
+                result = self._guess_file_language(tokens)
                 count += 1
                 print('[{0:4d}/{1:4d}] {2}:{3} '.format(count, total_test, result[0][1], result[0][0]),end='\r')
                 results.append(result[0])
                 if result[0][1] == language:
                     ok += 1

         accuracy = ok / total_test
         print('Tests for {} '.format(language))
         print('Total test files : {}'.format(total_test))
         print('Correctly classified files : {}'.format(ok))
         print('Accuracy : {}%'.format(accuracy * 100))
         return (ok, total_test, accuracy, results)

     def speed_benchmark(self):
         language = self._languages[10]
-        model = self._load_model()
+        self._model = load_model(os.path.join(self._root_model, 'model.h5'))

         test_set = self._get_test_set(language)
         total_size = self._count_size(test_set)
         print('{} kB in total'.format(total_size / 1024))

         t_start = time.perf_counter()
-        self.test_class(model, language)
+        self.test_class(language)
         t_end = time.perf_counter()

         print('{} seconds.'.format(t_end - t_start))
         print('{} seconds per KiB'.format(((t_end - t_start) / total_size) * 1024))

-    def _guess_file_language(self, model, tokens):
+    def _guess_file_language(self, tokens):
         X = pad_sequences([tokens], maxlen=self._input_size)
-        result = list(model.predict(X))[0]
+        result = list(self._model.predict(X))[0]
         result = [(s, self._languages[i]) for i, s in enumerate(result)]
         return sorted(result, reverse=True)

+    def classify(self, path):
+        with gzip.open(path, 'rb') as f:
+            string = f.read()
+        tokens = [x + 1 for x in Tokenizer.tokenize(string, 'letter')]
+        res = self._guess_file_language(tokens)
+        print('Filename :\t{}\nLanguage :\t{}\nValue :\t{}'.format(path, res[0][1], res[0][0]))
+        return (res[0][1], res[0][0])
+
 if __name__ == '__main__':
     main()
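
Usage sketch (not part of the patch): a minimal example of driving the new `clf` path that this diff adds. It assumes the package is importable as `swh.langdetect.cnn`, that a pretrained `static_data/model.h5` sits next to `cnn.py` (which is what the patched `__init__` loads when both `path` and `epochs` are `None`), and that the input is gzip-compressed, since `classify()` reads it with `gzip.open()`; the file path used below is hypothetical.

    from swh.langdetect.cnn import CNN

    # path=None and epochs=None make __init__ load the bundled static_data/model.h5;
    # maxsize should match the input size the model was trained with (2048 in main() above).
    cnn = CNN(None, 2048, None)

    # classify() reads a gzip-compressed source file and returns (language, score).
    language, score = cnn.classify('/tmp/example.py.gz')  # hypothetical path
    print(language, score)

The command-line equivalent, assuming the module is on the import path, would be `python -m swh.langdetect.cnn clf /tmp/example.py.gz`.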