diff --git a/10 trials.png b/10 trials.png
new file mode 100644
index 0000000..a18420c
Binary files /dev/null and b/10 trials.png differ
diff --git a/50 trials.png b/50 trials.png
new file mode 100644
index 0000000..ab2f7d0
Binary files /dev/null and b/50 trials.png differ
diff --git a/learning_curve.py b/learning_curve.py
index 2364f2c..13a18f9 100644
--- a/learning_curve.py
+++ b/learning_curve.py
@@ -1,5 +1,3 @@
-""" Exploring learning curves for classification of handwritten digits """
-
 import matplotlib.pyplot as plt
 import numpy
 from sklearn.datasets import *
@@ -8,8 +6,8 @@
 data = load_digits()
 print data.DESCR
 
-num_trials = 10
-train_percentages = range(5,95,5)
+num_trials = 50
+train_percentages = range(1,99,1)
 test_accuracies = numpy.zeros(len(train_percentages))
 
 # train a model with training percentages between 5 and 90 (see train_percentages) and evaluate
@@ -17,10 +15,19 @@
 # You should repeat each training percentage num_trials times to smooth out variability
 # for consistency with the previous example use model = LogisticRegression(C=10**-10) for your learner
-# TODO: your code here
+for i, percent in enumerate(train_percentages):
+    for j in range(num_trials):
+        X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, train_size=percent/100.0)
+        model = LogisticRegression(C=10**-10)
+        model.fit(X_train, y_train)
+        print "Train accuracy %f" % model.score(X_train, y_train)
+        print "Test accuracy %f" % model.score(X_test, y_test)
+        test_accuracies[i] += model.score(X_test, y_test)
+    test_accuracies[i] /= num_trials
 
 fig = plt.figure()
-plt.plot(train_percentages, test_accuracies)
+plt.plot(train_percentages, test_accuracies*100)
 plt.xlabel('Percentage of Data Used for Training')
 plt.ylabel('Accuracy on Test Set')
-plt.show()
+plt.axis([0,100,0,100])
+plt.show()
\ No newline at end of file
diff --git a/questions.txt b/questions.txt
new file mode 100644
index 0000000..a822237
--- /dev/null
+++ b/questions.txt
@@ -0,0 +1,7 @@
+1. The general trend is upward, as one would expect; the more data you give the model to learn from, the better it learns.
+
+2. No, they all seem to have the same noise level. I would expect the right side to have more noise, since there is less data left to test the model with, but it comes out just as smooth as the rest of the graph.
+
+3. The graph converges to a nice shape around 20 trials or so, although the noise is still easy to see if you take a data point at every percentage, even with 50 trials per percentage.
+
+4. Lower values of C yield slower learning, i.e. a steeper learning curve in the colloquial sense. Higher values of C do the opposite. By turning C way up (I went as far as 10**-1), the model can read handwriting effectively with less than 10% of the data to learn from. However, it also slows the program down considerably.
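
Note on answers 2 and 3: one way to check the claim that the noise level is roughly constant across training percentages is to keep the individual per-trial scores and look at their spread, not just the averaged curve. The sketch below is illustrative only and is not part of this commit; it assumes Python 3 and a current scikit-learn where train_test_split lives in sklearn.model_selection (older releases used sklearn.cross_validation), and the helper name noise_by_percentage is made up for this example.

# Illustrative sketch (not part of this commit): measure the spread of test
# accuracy across trials at each training percentage.
# Assumes Python 3 and sklearn.model_selection (older sklearn: sklearn.cross_validation).
import numpy
from sklearn.datasets import load_digits
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

def noise_by_percentage(train_percentages, num_trials=20):
    data = load_digits()
    stats = []
    for percent in train_percentages:
        scores = []
        for _ in range(num_trials):
            X_train, X_test, y_train, y_test = train_test_split(
                data.data, data.target, train_size=percent / 100.0)
            model = LogisticRegression(C=10**-10)
            model.fit(X_train, y_train)
            scores.append(model.score(X_test, y_test))
        scores = numpy.array(scores)
        # The mean gives the learning-curve point; the std quantifies trial-to-trial noise.
        stats.append((percent, scores.mean(), scores.std()))
    return stats

if __name__ == '__main__':
    for percent, mean, std in noise_by_percentage(range(5, 95, 10)):
        print("train=%2d%%  mean=%.3f  std=%.3f" % (percent, mean, std))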
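
Note on answer 4: the effect described there (a larger C, i.e. weaker regularization, reaching high accuracy with far less training data, at the cost of longer fit times) can be spot-checked with a small sweep over C at a fixed training fraction. This is a hedged sketch, not part of the commit; it assumes the same digits data, the current scikit-learn import locations, and the 10% training fraction and the particular C values are just example choices.

# Illustrative sketch (not part of this commit): compare a few values of C at a
# fixed 10% training fraction to see how regularization strength changes accuracy.
# Assumes Python 3 and sklearn.model_selection (older sklearn: sklearn.cross_validation).
import time
from sklearn.datasets import load_digits
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

data = load_digits()
X_train, X_test, y_train, y_test = train_test_split(
    data.data, data.target, train_size=0.1, random_state=0)

for C in (10**-10, 10**-5, 10**-1):
    start = time.time()
    model = LogisticRegression(C=C)
    model.fit(X_train, y_train)
    elapsed = time.time() - start
    # Larger C (weaker regularization) should score higher here but take longer to fit.
    print("C=%g  test accuracy=%.3f  fit time=%.2fs" % (C, model.score(X_test, y_test), elapsed))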