parser_train = subparsers.add_parser('train', help='Train on a dataset; the dataset must be a *.csv file. A model will be created in the same directory.')
parser_train.add_argument('-s', '--spark', action='store_true', help='Train on a Spark cluster.', dest='train_spark')
parser_train.add_argument('train_path', metavar='PATH', type=str, help='Path of the training dataset.')
parser_train.add_argument('-ms', '--maxsize', metavar='SIZE', dest='train_maxsize', type=int, help='Maximum input size of the ConvNet, default 4096.')
parser_train.add_argument('-e', '--epochs', metavar='N', dest='train_epochs', type=int, help='Number of training epochs (iterations), default 15.')
parser_test = subparsers.add_parser('test', help='Test on a dataset; the dataset must be a directory of *.csv files, each named after the corresponding language.')
parser_test.add_argument('test_root', metavar='ROOT', type=str, help='Root of the test dataset.')
parser_clf = subparsers.add_parser('clf', help='Classify a single file.')
parser_clf.add_argument('clf_path', metavar='PATH', type=str, help='Path of the file to classify.')
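# Example invocations (the script name cli.py is illustrative, not taken
# from the source):
#   python cli.py train data/train.csv --maxsize 4096 --epochs 15
#   python cli.py train data/train.csv --spark
#   python cli.py test data/test_root
#   python cli.py clf path/to/file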
# With no sub-command given, print usage and exit.
if len(sys.argv[1:]) == 0:
    parser.print_help()
    parser.exit()
args = parser.parse_args()
# Defaults; overridden by --maxsize and --epochs when given.
maxsize = 4096
epochs = 15
if args.sub_command == 'train':
    if args.train_maxsize:
        maxsize = args.train_maxsize
    if args.train_epochs:
        epochs = args.train_epochs
    n = CNN(args.train_path, maxsize=maxsize, epochs=epochs)
    if args.train_spark:
        n.train_on_cluster()
    else:
        n.train()
elif args.sub_command == 'test':
    n = CNN(args.test_root, maxsize=maxsize, epochs=epochs)
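# A hedged sketch of the remaining dispatch, assuming CNN exposes test()
# and clf() methods (both hypothetical, not confirmed by the source):
#
#     n.test()
# elif args.sub_command == 'clf':
#     n = CNN(args.clf_path, maxsize=maxsize, epochs=epochs)
#     n.clf()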
parser_train = subparsers.add_parser('train', help='Train on a dataset; the dataset must be a *.csv file. A model will be created in the same directory.')
parser_train.add_argument('train_path', metavar='PATH', type=str, help='Path of the training dataset.')
parser_train.add_argument('-ms', '--maxsize', metavar='SIZE', dest='train_maxsize', type=int, help='Maximum input size of the ConvNet, default 1024.')
parser_train.add_argument('-e', '--epochs', metavar='N', dest='train_epochs', type=int, help='Number of training epochs (iterations), default 50.')
parser_test = subparsers.add_parser('test', help='Test on a dataset; the dataset must be a directory of *.csv files, each named after the corresponding language.')
parser_test.add_argument('test_root', metavar='ROOT', type=str, help='Root of the test dataset.')
if len(sys.argv[1:]) == 0:
    parser.print_help()
    parser.exit()
args = parser.parse_args()
if args.sub_command == "train":
    if args.train_maxsize:
        if args.train_epochs:
            n = CNNword(args.train_path, maxsize=args.train_maxsize, epochs=args.train_epochs)
            n.train()
        else:
            n = CNNword(args.train_path, maxsize=args.train_maxsize)
            n.train()
    else:
        if args.train_epochs:
            n = CNNword(args.train_path, epochs=args.train_epochs)
            n.train()
        else:
            # Neither flag given: fall back to CNNword's own defaults.
            n = CNNword(args.train_path)
            n.train()
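# The nested branching above can be collapsed by forwarding only the flags
# that were actually given, letting CNNword fall back to its own defaults.
# A sketch of the equivalent logic (not the author's code):
#
#     kwargs = {}
#     if args.train_maxsize:
#         kwargs['maxsize'] = args.train_maxsize
#     if args.train_epochs:
#         kwargs['epochs'] = args.train_epochs
#     n = CNNword(args.train_path, **kwargs)
#     n.train()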
parser_train = subparsers.add_parser('train', help='Train on a dataset; the dataset must be a *.csv file. A model will be created in the same directory.')
parser_train.add_argument('train_path', metavar='PATH', type=str, help='Path of the training dataset.')
# parser_train.add_argument('-n', '--ngrams', metavar='N', dest='train_maxsize', type=int, help='Set maximum input size of ConvNet, default 5.')
parser_test = subparsers.add_parser('test', help='Test on a dataset; the dataset must be a directory of *.csv files, each named after the corresponding language.')
parser_test.add_argument('test_root', metavar='ROOT', type=str, help='Root of the test dataset.')
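# Example invocations for this n-gram variant (the script name is
# illustrative, not taken from the source):
#   python cli_ngram.py train data/train.csv
#   python cli_ngram.py test data/test_root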