Keep partial end batch #88

Open · wants to merge 1 commit into base: master
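The fix is the same in every file: the minibatch count is currently computed with integer (floor) division, so any examples left over after the last full batch are silently dropped; rounding up with numpy.ceil keeps that trailing partial batch. A minimal sketch of the difference, outside the PR and with a hypothetical dataset size (batch_size = 600 is the default used in logistic_cg.py):

import numpy

n_examples = 1000   # hypothetical dataset size, not taken from the PR
batch_size = 600    # default minibatch size in logistic_cg.py

# Old behaviour: floor division, the 400 leftover examples are never visited.
n_batches_floor = n_examples // batch_size                          # -> 1

# New behaviour: round up, so a final smaller batch is kept.
n_batches_ceil = int(numpy.ceil(n_examples / float(batch_size)))    # -> 2

print(n_batches_floor, n_batches_ceil)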
code/DBN.py (3 additions & 2 deletions)

@@ -158,6 +158,7 @@ def pretraining_functions(self, train_set_x, batch_size, k):

 # number of batches
 n_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
+n_batches = int(numpy.ceil(n_batches / float(batch_size)))
 # begining of a batch, given `index`
 batch_begin = index * batch_size
 # ending of a batch given `index`

@@ -211,9 +212,9 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):

 # compute number of minibatches for training, validation and testing
 n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
-n_valid_batches /= batch_size
 n_test_batches = test_set_x.get_value(borrow=True).shape[0]
-n_test_batches /= batch_size
+n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))

 index = T.lscalar('index') # index to a [mini]batch
code/SdA.py (3 additions & 3 deletions)

@@ -254,9 +254,9 @@ def build_finetune_functions(self, datasets, batch_size, learning_rate):

 # compute number of minibatches for training, validation and testing
 n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
-n_valid_batches /= batch_size
 n_test_batches = test_set_x.get_value(borrow=True).shape[0]
-n_test_batches /= batch_size
+n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))

 index = T.lscalar('index') # index to a [mini]batch

@@ -357,7 +357,7 @@ def test_SdA(finetune_lr=0.1, pretraining_epochs=15,

 # compute number of minibatches for training, validation and testing
 n_train_batches = train_set_x.get_value(borrow=True).shape[0]
-n_train_batches /= batch_size
+n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))

 # numpy random generator
 # start-snippet-3
code/cA.py (2 additions & 1 deletion)

@@ -246,7 +246,8 @@ def test_cA(learning_rate=0.01, training_epochs=20,
 train_set_x, train_set_y = datasets[0]

 # compute number of minibatches for training, validation and testing
-n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
+n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))

 # allocate symbolic variables for the data
 index = T.lscalar() # index to a [mini]batch
code/convolutional_mlp.py (3 additions & 3 deletions)

@@ -142,9 +142,9 @@ def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
 n_train_batches = train_set_x.get_value(borrow=True).shape[0]
 n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
 n_test_batches = test_set_x.get_value(borrow=True).shape[0]
-n_train_batches /= batch_size
-n_valid_batches /= batch_size
-n_test_batches /= batch_size
+n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
+n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))

 # allocate symbolic variables for the data
 index = T.lscalar() # index to a [mini]batch
code/dA.py (2 additions & 1 deletion)

@@ -280,7 +280,8 @@ def test_dA(learning_rate=0.1, training_epochs=15,
 train_set_x, train_set_y = datasets[0]

 # compute number of minibatches for training, validation and testing
-n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
+n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))

 # start-snippet-2
 # allocate symbolic variables for the data
code/logistic_cg.py (6 additions & 3 deletions)

@@ -162,9 +162,12 @@ def cg_optimization_mnist(n_epochs=50, mnist_pkl_gz='mnist.pkl.gz'):

 batch_size = 600 # size of the minibatch

-n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
-n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
-n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
+n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
+n_test_batches = test_set_x.get_value(borrow=True).shape[0]
+n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
+n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))

 n_in = 28 * 28 # number of input units
 n_out = 10 # number of output units
code/logistic_sgd.py (6 additions & 3 deletions)

@@ -273,9 +273,12 @@ def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
 test_set_x, test_set_y = datasets[2]

 # compute number of minibatches for training, validation and testing
-n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
-n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
-n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
+n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
+n_test_batches = test_set_x.get_value(borrow=True).shape[0]
+n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
+n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))

 ######################
 # BUILD ACTUAL MODEL #
code/mlp.py (6 additions & 3 deletions)

@@ -228,9 +228,12 @@ def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,
 test_set_x, test_set_y = datasets[2]

 # compute number of minibatches for training, validation and testing
-n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
-n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
-n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
+n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
+n_test_batches = test_set_x.get_value(borrow=True).shape[0]
+n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))
+n_valid_batches = int(numpy.ceil(n_valid_batches / float(batch_size)))
+n_test_batches = int(numpy.ceil(n_test_batches / float(batch_size)))

 ######################
 # BUILD ACTUAL MODEL #
code/rbm.py (2 additions & 1 deletion)

@@ -384,7 +384,8 @@ def test_rbm(learning_rate=0.1, training_epochs=15,
 test_set_x, test_set_y = datasets[2]

 # compute number of minibatches for training, validation and testing
-n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
+n_train_batches = train_set_x.get_value(borrow=True).shape[0]
+n_train_batches = int(numpy.ceil(n_train_batches / float(batch_size)))

 # allocate symbolic variables for the data
 index = T.lscalar() # index to a [mini]batch
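With the batch count rounded up, the last batch index now addresses a slice that runs past the end of the data. Assuming the scripts keep their usual givens slicing of the shared datasets (the batch_begin / batch_end bounds of index * batch_size and (index + 1) * batch_size visible in the DBN.py hunk), that out-of-range stop follows NumPy/Theano slice semantics, which truncate instead of raising, so the final batch simply contains fewer examples. A small sketch of that behaviour, independent of the PR:

import numpy

data = numpy.arange(1000).reshape(1000, 1)   # hypothetical dataset
batch_size = 600

# The ceil-based count gives 2 batches; the second nominally covers rows 600:1200.
last_batch = data[1 * batch_size: 2 * batch_size]

# The slice stops at the end of the array instead of raising,
# so the partial end batch holds the remaining 400 rows.
print(last_batch.shape)   # (400, 1)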