import argparse

from keras.preprocessing.sequence import pad_sequences
from keras.models import Model
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import ThresholdedReLU
from keras.layers import Activation
from keras.layers import Lambda
from keras.layers import Embedding
from keras.layers.convolutional import Convolution1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import concatenate
from keras.utils import np_utils
from keras.optimizers import SGD
def main():
parser = argparse.ArgumentParser(description='Training and test tool of charactor-level ConvNet text categorisation.')
subparsers = parser.add_subparsers()
parser_train = subparsers.add_parser('train', help='Training on the dataset, dataset must be a *.csv file. A model will be created in the same directory.')
parser_train.add_argument('train_path', metavar='PATH', type=str, help='Path of the training dataset.')
- parser_train.add_argument('-ms', '--maxsize', metavar='SIZE', dest='train_maxsize', type=int, help='Set maximum input size of ConvNet.')
+ parser_train.add_argument('-ms', '--maxsize', metavar='SIZE', dest='train_maxsize', type=int, help='Set maximum input size of ConvNet, default 1024.')
+ parser_train.add_argument('-e', '--epochs', metavar='N', dest='train_epochs', type=int, help='Number of training epoches (iteration), default 50.')
parser_test = subparsers.add_parser('test', help='Test on the dataset, dataset must be a directory with *.csv dataset named by corresponding language.')
parser_test.add_argument('test_root', metavar='ROOT', type=str, help='Root of the test dataset.')
args = parser.parse_args()
print(args)
if args.train_path:
if args.train_maxsize:
- n = CNN(args.train_path, args.train_maxsize)
- n.train()
+ if args.train_epochs:
+ n = CNN(args.train_path, maxsize=args.train_maxsize, epochs=args.train_epochs)
+ n.train()
+ else:
+ n = CNN(args.train_path, maxsize=args.train_maxsize)
+ n.train()
else:
- n = CNN(args.train_path)
- n.train()
+ if args.train_epochs:
+ n = CNN(args.train_path, epochs=args.train_epochs)