diff --git a/100trials.png b/100trials.png new file mode 100644 index 0000000..2fdabd4 Binary files /dev/null and b/100trials.png differ diff --git a/10Trials.png b/10Trials.png new file mode 100644 index 0000000..318adef Binary files /dev/null and b/10Trials.png differ diff --git a/LargeInverseRegularization.png b/LargeInverseRegularization.png new file mode 100644 index 0000000..3e0182a Binary files /dev/null and b/LargeInverseRegularization.png differ diff --git a/learning_curve.py b/learning_curve.py index 2364f2c..a2954e0 100644 --- a/learning_curve.py +++ b/learning_curve.py @@ -7,20 +7,24 @@ from sklearn.linear_model import LogisticRegression data = load_digits() -print data.DESCR -num_trials = 10 +#print data.DESCR +num_trials = 100 + train_percentages = range(5,95,5) test_accuracies = numpy.zeros(len(train_percentages)) -# train a model with training percentages between 5 and 90 (see train_percentages) and evaluate -# the resultant accuracy. -# You should repeat each training percentage num_trials times to smooth out variability -# for consistency with the previous example use model = LogisticRegression(C=10**-10) for your learner - -# TODO: your code here +def trainer(percent, num_trials): + results = [] + model = LogisticRegression(C=10**-4) + for i in range(num_trials): + X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, train_size=(percent/float(100))) + model.fit(X_train, y_train) + results.append(model.score(X_test,y_test)) + return sum(results)/float(num_trials) +results = [trainer(percent, num_trials) for percent in train_percentages] fig = plt.figure() -plt.plot(train_percentages, test_accuracies) +plt.plot(train_percentages, results) plt.xlabel('Percentage of Data Used for Training') plt.ylabel('Accuracy on Test Set') plt.show() diff --git a/questions.txt b/questions.txt new file mode 100644 index 0000000..55fea64 --- /dev/null +++ b/questions.txt @@ -0,0 +1,4 @@ +1) The bigger the training set, the more accurate the 
model is on the test set. +2) The lower end of the curve is noisy because, with a small training set, accuracy depends heavily on how representative that particular sample happens to be of the test set. Larger training sets are inherently more "stable" in their evaluation, since sampling variability averages out as the sample grows (the law of large numbers).