diff --git a/code/DBN.py b/code/DBN.py
index 8f9715f2..d1e37534 100644
--- a/code/DBN.py
+++ b/code/DBN.py
@@ -158,6 +158,7 @@ def pretraining_functions(self, train_set_x, batch_size, k):
         # number of batches
-        n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
+        n_batches = train_set_x.get_value(borrow=True).shape[0]
+        n_batches = int(numpy.ceil(n_batches / float(batch_size)))
         # begining of a batch, given `index`
         batch_begin = index * batch_size
         # ending of a batch given `index`
         batch_end = batch_begin + batch_size
@@ -211,9 +212,9 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):
 
         # compute number of minibatches for training, validation and testing
         n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
-        n_valid_batches /= batch_size
         n_test_batches = test_set_x.get_value(borrow=True).shape[0]
-        n_test_batches /= batch_size
+        n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+        n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))
 
         index = T.lscalar('index')  # index to a [mini]batch
 
diff --git a/code/SdA.py b/code/SdA.py
index fafa73b5..aca62d16 100644
--- a/code/SdA.py
+++ b/code/SdA.py
@@ -254,9 +254,9 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):
 
         # compute number of minibatches for training, validation and testing
         n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
-        n_valid_batches /= batch_size
         n_test_batches = test_set_x.get_value(borrow=True).shape[0]
-        n_test_batches /= batch_size
+        n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+        n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))
 
         index = T.lscalar('index')  # index to a [mini]batch
 
@@ -357,7 +357,7 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,
 
     # compute number of minibatches for training, validation and testing
     n_train_batches = train_set_x.get_value(borrow=True).shape[0]
-    n_train_batches /= batch_size
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
 
     # numpy random generator
     # start-snippet-3
diff --git a/code/cA.py b/code/cA.py
index c7ccd2b0..b0079223 100644
--- a/code/cA.py
+++ b/code/cA.py
@@ -246,7 +246,8 @@ def test_cA(learning_rate=0.01, training_epochs=20,
     train_set_x, train_set_y = datasets[0]
 
     # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
 
     # allocate symbolic variables for the data
     index = T.lscalar()    # index to a [mini]batch
diff --git a/code/convolutional_mlp.py b/code/convolutional_mlp.py
index 0d88240d..195bb565 100644
--- a/code/convolutional_mlp.py
+++ b/code/convolutional_mlp.py
@@ -142,9 +142,9 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
     n_train_batches = train_set_x.get_value(borrow=True).shape[0]
     n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
     n_test_batches = test_set_x.get_value(borrow=True).shape[0]
-    n_train_batches /= batch_size
-    n_valid_batches /= batch_size
-    n_test_batches /= batch_size
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
+    n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+    n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))
 
     # allocate symbolic variables for the data
     index = T.lscalar()  # index to a [mini]batch
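Every hunk in this patch makes the same change: the old `/` floor-divides two ints under Python 2 (silently dropping any trailing partial minibatch) and returns a float under Python 3, while `int(numpy.ceil(... / float(batch_size)))` always yields an int and counts the final short batch. A minimal sketch of the difference, with illustrative sizes not taken from the tutorials:

    import numpy

    n_samples = 1000   # hypothetical dataset size
    batch_size = 600   # hypothetical minibatch size

    # Old pattern: int / int floor-divides on Python 2, so the 400
    # leftover samples are never visited; on Python 3 it yields 1.666...
    n_batches_old = n_samples // batch_size                           # -> 1

    # New pattern from the patch: ceiling division, always an int,
    # and the final partial minibatch is counted.
    n_batches_new = int(numpy.ceil(n_samples / float(batch_size)))    # -> 2
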
diff --git a/code/dA.py b/code/dA.py
index 19457aac..ab64a003 100644
--- a/code/dA.py
+++ b/code/dA.py
@@ -280,7 +280,8 @@ def test_dA(learning_rate=0.1, training_epochs=15,
     train_set_x, train_set_y = datasets[0]
 
     # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
 
     # start-snippet-2
     # allocate symbolic variables for the data
diff --git a/code/logistic_cg.py b/code/logistic_cg.py
index 05f562a1..06682a83 100644
--- a/code/logistic_cg.py
+++ b/code/logistic_cg.py
@@ -162,9 +162,12 @@ def cg_optimization_mnist(n_epochs=50, mnist_pkl_gz='mnist.pkl.gz'):
 
     batch_size = 600    # size of the minibatch
 
-    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
-    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
-    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
+    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
+    n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+    n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))
 
     n_in = 28 * 28  # number of input units
     n_out = 10  # number of output units
diff --git a/code/logistic_sgd.py b/code/logistic_sgd.py
index 599f5658..5dcc6c66 100644
--- a/code/logistic_sgd.py
+++ b/code/logistic_sgd.py
@@ -273,9 +273,12 @@ def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
     test_set_x, test_set_y = datasets[2]
 
     # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
-    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
-    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
+    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
+    n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+    n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))
 
     ######################
     # BUILD ACTUAL MODEL #
diff --git a/code/mlp.py b/code/mlp.py
index e4b95ea8..bb4b2f62 100644
--- a/code/mlp.py
+++ b/code/mlp.py
@@ -228,9 +228,12 @@ def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
     test_set_x, test_set_y = datasets[2]
 
     # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
-    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
-    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
+    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
+    n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+    n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))
 
     ######################
     # BUILD ACTUAL MODEL #
diff --git a/code/rbm.py b/code/rbm.py
index 2c821fc9..fdeb6e59 100644
--- a/code/rbm.py
+++ b/code/rbm.py
@@ -384,7 +384,8 @@ def test_rbm(learning_rate=0.1, training_epochs=15,
     test_set_x, test_set_y = datasets[2]
 
     # compute number of minibatches for training, validation and testing
-    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
+    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+    n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
 
     # allocate symbolic variables for the data
     index = T.lscalar()    # index to a [mini]batch
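One consequence of the ceiling-based count, common to all of these scripts: the last minibatch may hold fewer than `batch_size` examples. The `index * batch_size : (index + 1) * batch_size` slicing used throughout copes with this as-is, since NumPy clamps a slice end that runs past the array (and Theano's symbolic slicing follows the same Python slice semantics). A small sketch with made-up sizes:

    import numpy

    data = numpy.zeros((1000, 3))   # made-up dataset: 1000 rows
    batch_size = 600
    n_batches = int(numpy.ceil(data.shape[0] / float(batch_size)))

    for index in range(n_batches):
        # The slice end of the last batch (1200) exceeds 1000; NumPy
        # clamps it, so the batch simply comes out short.
        batch = data[index * batch_size:(index + 1) * batch_size]
        print(batch.shape)   # (600, 3), then (400, 3)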