-
Notifications
You must be signed in to change notification settings - Fork 0
Sourcery refactored master branch #1
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -21,7 +21,7 @@ def svm_baseline(): | |
| predictions = [int(a) for a in clf.predict(test_data[0])] | ||
| num_correct = sum(int(a == y) for a, y in zip(predictions, test_data[1])) | ||
| print("Baseline classifier using an SVM.") | ||
| print(str(num_correct) + " of " + str(len(test_data[1])) + " values correct.") | ||
| print(f'{str(num_correct)} of {len(test_data[1])} values correct.') | ||
|
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Function
|
||
|
|
||
| if __name__ == "__main__": | ||
| svm_baseline() | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -70,9 +70,9 @@ def SGD(self, training_data, epochs, mini_batch_size, eta, | |
| for mini_batch in mini_batches: | ||
| self.update_mini_batch(mini_batch, eta) | ||
| if test_data: | ||
| print("Epoch {} : {} / {}".format(j,self.evaluate(test_data),n_test)) | ||
| print(f"Epoch {j} : {self.evaluate(test_data)} / {n_test}") | ||
| else: | ||
| print("Epoch {} complete".format(j)) | ||
| print(f"Epoch {j} complete") | ||
|
Comment on lines -73 to +75
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Function
|
||
|
|
||
| def update_mini_batch(self, mini_batch, eta): | ||
| """Update the network's weights and biases by applying | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -179,24 +179,27 @@ def SGD(self, training_data, epochs, mini_batch_size, eta, | |
| self.update_mini_batch( | ||
| mini_batch, eta, lmbda, len(training_data)) | ||
|
|
||
| print("Epoch %s training complete" % j) | ||
| print(f"Epoch {j} training complete") | ||
|
|
||
| if monitor_training_cost: | ||
| cost = self.total_cost(training_data, lmbda) | ||
| training_cost.append(cost) | ||
| print("Cost on training data: {}".format(cost)) | ||
| print(f"Cost on training data: {cost}") | ||
| if monitor_training_accuracy: | ||
| accuracy = self.accuracy(training_data, convert=True) | ||
| training_accuracy.append(accuracy) | ||
| print("Accuracy on training data: {} / {}".format(accuracy, n)) | ||
| print(f"Accuracy on training data: {accuracy} / {n}") | ||
| if monitor_evaluation_cost: | ||
| cost = self.total_cost(evaluation_data, lmbda, convert=True) | ||
| evaluation_cost.append(cost) | ||
| print("Cost on evaluation data: {}".format(cost)) | ||
| print(f"Cost on evaluation data: {cost}") | ||
| if monitor_evaluation_accuracy: | ||
| accuracy = self.accuracy(evaluation_data) | ||
| evaluation_accuracy.append(accuracy) | ||
| print("Accuracy on evaluation data: {} / {}".format(self.accuracy(evaluation_data), n_data)) | ||
| print( | ||
| f"Accuracy on evaluation data: {self.accuracy(evaluation_data)} / {n_data}" | ||
| ) | ||
|
|
||
|
Comment on lines -182 to +202
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Function
|
||
|
|
||
| # Early stopping: | ||
| if early_stopping_n > 0: | ||
|
|
@@ -297,8 +300,7 @@ def accuracy(self, data, convert=False): | |
| results = [(np.argmax(self.feedforward(x)), y) | ||
| for (x, y) in data] | ||
|
|
||
| result_accuracy = sum(int(x == y) for (x, y) in results) | ||
| return result_accuracy | ||
| return sum(int(x == y) for (x, y) in results) | ||
|
Comment on lines -300 to +303
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Function
|
||
|
|
||
| def total_cost(self, data, lmbda, convert=False): | ||
| """Return the total cost for the data set ``data``. The flag | ||
|
|
@@ -321,19 +323,17 @@ def save(self, filename): | |
| "weights": [w.tolist() for w in self.weights], | ||
| "biases": [b.tolist() for b in self.biases], | ||
| "cost": str(self.cost.__name__)} | ||
| f = open(filename, "w") | ||
| json.dump(data, f) | ||
| f.close() | ||
| with open(filename, "w") as f: | ||
| json.dump(data, f) | ||
|
Comment on lines -324 to +327
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Function
|
||
|
|
||
| #### Loading a Network | ||
| def load(filename): | ||
| """Load a neural network from the file ``filename``. Returns an | ||
| instance of Network. | ||
|
|
||
| """ | ||
| f = open(filename, "r") | ||
| data = json.load(f) | ||
| f.close() | ||
| with open(filename, "r") as f: | ||
| data = json.load(f) | ||
|
Comment on lines -334 to +336
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Function
|
||
| cost = getattr(sys.modules[__name__], data["cost"]) | ||
| net = Network(data["sizes"], cost=cost) | ||
| net.weights = [np.array(w) for w in data["weights"]] | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -113,7 +113,7 @@ def SGD(self, training_data, epochs, mini_batch_size, eta, | |
| num_test_batches = int(size(test_data)/mini_batch_size) | ||
|
|
||
| # define the (regularized) cost function, symbolic gradients, and updates | ||
| l2_norm_squared = sum([(layer.w**2).sum() for layer in self.layers]) | ||
| l2_norm_squared = sum((layer.w**2).sum() for layer in self.layers) | ||
|
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Function
|
||
| cost = self.layers[-1].cost(self)+\ | ||
| 0.5*lmbda*l2_norm_squared/num_training_batches | ||
| grads = T.grad(cost, self.params) | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -138,11 +138,11 @@ def testTheano(): | |
| f = function([], T.exp(x)) | ||
| print(f.maker.fgraph.toposort()) | ||
| t0 = time.time() | ||
| for i in range(iters): | ||
| for _ in range(iters): | ||
| r = f() | ||
| t1 = time.time() | ||
| print("Looping %d times took %f seconds" % (iters, t1 - t0)) | ||
| print("Result is %s" % (r,)) | ||
| print(f"Result is {r}") | ||
|
Comment on lines -141 to +145
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Function
|
||
| if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]): | ||
| print('Used the cpu') | ||
| else: | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Lines 36-40 refactored with the following changes: (convert-to-enumerate). This removes the following comments (why?):