diff --git a/univariate-linear-regression/.vscode/settings.json b/univariate-linear-regression/.vscode/settings.json
new file mode 100644
index 0000000000000000000000000000000000000000..0c0d9f6b0dfac50cb20465c28d62cc4873e17a31
--- /dev/null
+++ b/univariate-linear-regression/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+    "python.pythonPath": "/home/lettuce/anaconda3/envs/dnn/bin/python"
+}
\ No newline at end of file
diff --git a/univariate-linear-regression/linear-regression.py b/univariate-linear-regression/linear-regression.py
index 84ce17a808530ae2287e8af2617652c0b5faa6ce..c76102952f8dd4ba4100af4b748275003fa987e3 100755
--- a/univariate-linear-regression/linear-regression.py
+++ b/univariate-linear-regression/linear-regression.py
@@ -7,7 +7,7 @@ data = np.loadtxt("dataset.csv", delimiter=",")
 alpha = 0.01
 t0 = 0
 t1 = 0
-iterations = 70000
+iterations = 100000
 m = len(data)
 
 # normalize data between 0 and 1
@@ -35,21 +35,21 @@ def squared_error():
 
 # train data
 i = 0
 while i < iterations:
-    gradient_t0 = (1 / m) * \
-        sum([(hypothesis(x[i]) - y[i]) for i in range(m)])
-    gradient_t1 = (1 / m) * \
-        sum([((hypothesis(x[i]) - y[i]) * x[i]) for i in range(m)])
-
-    temp_t0 = t0 - alpha * gradient_t0
-    temp_t1 = t1 - alpha * gradient_t1
-
-    t0 = temp_t0
-    t1 = temp_t1
-    # line1.set_ydata([hypothesis(x[i]) for i in range(m)])
-    # plt.draw()
-    i += 1
-    print(f"Error: {squared_error()} | I: {i}")
-    # plt.pause(0.00001)
+
+    for d in range(m):
+        gradient_t0 = hypothesis(x[d]) - y[d]
+        gradient_t1 = (hypothesis(x[d]) - y[d]) * x[d]
+
+        temp_t0 = t0 - alpha * gradient_t0
+        temp_t1 = t1 - alpha * gradient_t1
+
+        t0 = temp_t0
+        t1 = temp_t1
+        # line1.set_ydata([hypothesis(x[i]) for i in range(m)])
+        # plt.draw()
+        i += 1
+        print(f"Error: {squared_error()} | I: {i}")
+        # plt.pause(0.00001)
 
 print(f"\nSlope: {t0}\nIntercept(y): {t1}")