Andrew Ng Machine Learning Assignment 1: Python Implementation

Required part (mainly based on the companion notes by Huang Haiguang (黄海广)):
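For reference, the code below implements the standard pieces of univariate linear regression: the hypothesis, the squared-error cost, and the batch gradient-descent update (with learning rate $\alpha$ and $m$ training examples):

\[
h_\theta(x) = \theta^T x, \qquad
J(\theta) = \frac{1}{2m} \sum_{i=1}^{m} \bigl( h_\theta(x^{(i)}) - y^{(i)} \bigr)^2, \qquad
\theta := \theta - \frac{\alpha}{m} X^T (X\theta - y)
\]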

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('ex1data1.txt', names=['population', 'profit'])
data = df

# def normalize_feature(df):
#     return df.apply(lambda column: (column - column.mean()) / column.std())  # feature scaling (not needed for this part)

def get_X(df):  # build the design matrix
    ones = pd.DataFrame({'ones': np.ones(len(df))})  # ones is an m-by-1 DataFrame (intercept column)
    data = pd.concat([ones, df], axis=1)  # concatenate along columns (axis=0 is rows, axis=1 is columns)
    return data.iloc[:, :-1]  # drop the last column (the target)

def linear_cost(theta, X, y):
    m = X.shape[0]  # number of training examples
    inner = X @ theta - y  # residuals h_theta(x) - y, an m-vector
    square_sum = inner.T @ inner  # sum of squared residuals
    cost = square_sum / (2 * m)
    return cost

def gradient(theta, X, y):
    m = X.shape[0]
    inner = X.T @ (X @ theta - y)  # gradient of the cost: X^T (X theta - y) / m, one entry per parameter
    return inner / m

def batch_gradient_descent(theta, X, y, epoch, alpha=0.02):
    cost_data = [linear_cost(theta, X, y)]
    for _ in range(epoch):  # _ is only a loop counter and is not used in the body
        theta = theta - alpha * gradient(theta, X, y)
        cost_data.append(linear_cost(theta, X, y))
    return theta, cost_data

X = get_X(df)
y = df.values[:, 1]
theta = np.zeros(X.shape[1])  # one parameter per column of X (intercept + population)
epoch = 6000
final_theta, cost_data = batch_gradient_descent(theta, X, y, epoch)

b = final_theta[0]  # intercept
k = final_theta[1]  # slope

plt.scatter(data.population, data.profit, label="Training data")
plt.plot(data.population, data.population * k + b, label="Prediction")
plt.xlabel('population')
plt.ylabel('profit')
plt.legend(loc=2)

forecast = float(input('population: '))
predict_profit = forecast * k + b
print(predict_profit)
plt.scatter(forecast, predict_profit, marker='+', c='red')
plt.show()


For the prediction I entered a population (forecast) of 23; the predicted point is marked in red.
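The script collects cost_data at every iteration but never visualizes it. A minimal sketch (reusing cost_data from the script above) to check that gradient descent has converged:

import matplotlib.pyplot as plt

# the cost should decrease monotonically and flatten out near the end
plt.plot(range(len(cost_data)), cost_data)
plt.xlabel('epoch')
plt.ylabel('cost J(theta)')
plt.show()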


Optional part
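Because the two features are on very different scales (square footage in the thousands, bedroom counts in single digits), every column, including the target price, is z-score normalized before gradient descent, and the final prediction is mapped back to the original price scale:

\[
z = \frac{x - \mu}{\sigma}, \qquad
\text{price} = p_{\text{norm}} \, \sigma_{\text{price}} + \mu_{\text{price}}
\]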

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('ex1data2.txt', names=['square', 'bedrooms', 'price'])
ax = plt.axes(projection='3d')

def normalize_feature(df):
    return df.apply(lambda column: (column - column.mean()) / column.std())  # z-score normalization

def get_X(df):  # build the design matrix
    ones = pd.DataFrame({'ones': np.ones(len(df))})  # ones is an m-by-1 DataFrame (intercept column)
    data = pd.concat([ones, df], axis=1)  # concatenate along columns (axis=0 is rows, axis=1 is columns)
    return data.iloc[:, :-1]  # drop the last column (the target)

def lr_cost(theta, X, y):
    m = X.shape[0]  # number of training examples
    inner = X @ theta - y  # residuals, shape (m,); X @ theta is equivalent to X.dot(theta)
    square_sum = inner.T @ inner
    cost = square_sum / (2 * m)
    return cost

def gradient(theta, X, y):
    m = X.shape[0]  # number of training examples
    inner = X.T @ (X @ theta - y)  # (n, m) @ (m,) -> (n,); X @ theta is equivalent to X.dot(theta)
    return inner / m

def batch_gradient_descent(theta, X, y, epoch, alpha=0.01):
    cost_data = [lr_cost(theta, X, y)]
    for _ in range(epoch):
        theta = theta - alpha * gradient(theta, X, y)
        cost_data.append(lr_cost(theta, X, y))
    return theta, cost_data

def normalEqn(X, y):  # normal equation: closed-form least squares
    theta = np.linalg.inv(X.T @ X) @ X.T @ y  # X.T @ X is equivalent to X.T.dot(X)
    return theta

data = normalize_feature(df)  # feature scaling
y = data.values[:, 2]
X = get_X(data)

ax.scatter(X['square'], X['bedrooms'], y, alpha=0.3)
plt.xlabel('square')
plt.ylabel('bedrooms')
ax.set_zlabel('price')

epoch = 500
alpha = 0.01
theta = np.zeros(X.shape[1])  # X has three columns (ones, square, bedrooms), so theta starts as three zeros
final_theta, cost_data = batch_gradient_descent(theta, X, y, epoch, alpha=alpha)

D = final_theta[0]
A = final_theta[1]
B = final_theta[2]
Z = A * X['square'] + B * X['bedrooms'] + D
ax.plot_trisurf(X['square'], X['bedrooms'], Z, linewidth=0, antialiased=False)

predict_square = float(input('square: '))
predict_square = (predict_square - df.square.mean()) / df.square.std()

predict_bedrooms = float(input('bedrooms: '))
predict_bedrooms = (predict_bedrooms - df.bedrooms.mean()) / df.bedrooms.std()

p = A * predict_square + B * predict_bedrooms + D  # prediction in normalized units
ax.scatter(predict_square, predict_bedrooms, p, marker='+', c='red')  # pass p as z so the point lies on the plane
p = p * df.price.std() + df.price.mean()  # map back to the original price scale
print('The predicted price is:')
print(p)
plt.show()


Input:
square = 1635
bedrooms = 3

Output:
The predicted price is:
292611.913236568

In theory the predicted point should lie on the fitted plane. In my original code it did not, because the ax.scatter call left out the z coordinate, so matplotlib drew the point at z = 0; passing the normalized prediction p as the third argument, as in the listing above, places the point on the plane. The numeric prediction itself is unaffected either way.
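Incidentally, normalEqn is defined in the script but never called. A minimal sketch (reusing X, y, and final_theta from the script above) of using it to cross-check the gradient-descent solution:

# closed-form least squares on the same normalized design matrix
theta_ne = normalEqn(X.values, y)
print(theta_ne)     # should be close to final_theta; they agree better as epoch grows
print(final_theta)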

Copyright notice: this is an original article by the CSDN blogger 「挂科难」, released under the CC 4.0 BY-SA license. Please include the original source link and this notice when reposting.
Original link: https://blog.csdn.net/qq_45882032/article/details/116500250

