diff --git a/SoftDes Questions.docx b/SoftDes Questions.docx new file mode 100644 index 0000000..f0cc49d Binary files /dev/null and b/SoftDes Questions.docx differ diff --git a/learning_curve.py b/learning_curve.py index 2364f2c..c78c12a 100644 --- a/learning_curve.py +++ b/learning_curve.py @@ -1,23 +1,33 @@ """ Exploring learning curves for classification of handwritten digits """ import matplotlib.pyplot as plt -import numpy +import numpy as np from sklearn.datasets import * from sklearn.cross_validation import train_test_split from sklearn.linear_model import LogisticRegression data = load_digits() -print data.DESCR -num_trials = 10 -train_percentages = range(5,95,5) -test_accuracies = numpy.zeros(len(train_percentages)) +# print data.DESCR +num_trials = 25 +train_percentages = range(5,90,5) -# train a model with training percentages between 5 and 90 (see train_percentages) and evaluate -# the resultant accuracy. -# You should repeat each training percentage num_trials times to smooth out variability -# for consistency with the previous example use model = LogisticRegression(C=10**-10) for your learner +test_accuracies = [] -# TODO: your code here +for num in train_percentages: + trainaccuracy = [] + testaccuracy = [] + + for num1 in range(0,num_trials): + + x_train, x_test, y_train, y_test = train_test_split(data.data, data.target, train_size=(float(num)/100.0)) + model = LogisticRegression(C=10**-10) + model.fit(x_train, y_train) + trainaccuracy.append(model.score(x_train,y_train)) + testaccuracy.append(model.score(x_test,y_test)) + print num + trainaccuracyavg = np.mean(trainaccuracy) + testaccuracyavg = np.mean(testaccuracy) + test_accuracies.append(np.mean(testaccuracy)) fig = plt.figure() plt.plot(train_percentages, test_accuracies) diff --git a/questions.txt b/questions.txt new file mode 100644 index 0000000..794655b --- /dev/null +++ b/questions.txt @@ -0,0 +1,9 @@ +1. In general, the equation curve is convex down and increasing. 
It starts at a minimum test set accuracy of about 0.2. This means the algorithm has about a 1/5 chance of correctly guessing the answer when 5% of the total data set is used to train it, and the remaining 95% of the data set is used to test it. It maxes out at a test set accuracy of about 0.8. This means the algorithm has about a 4/5 chance of correctly guessing the answer when 95% of the total data set is used to train it, and the remaining 5% of the data set is used to test it. +2. I generally had bigger variance in accuracy when smaller percentages of data were used to train the algorithm. This is likely because when the algorithm has less data to work with, its results fluctuate much more due to chance. +3. You can get a relatively smooth curve with as little as 20 trials. + + However, I found I got the smoothest results with upwards of 140 trials. + +4. When c = 10^-20: There is a large initial spike, and then test set accuracy levels out to about 0.1. + + When c = 100: The graph is noticeably rounder and more convex. \ No newline at end of file