Numpy.dot issue: Am I the idiot, or is there a real issue with the runtime? /u/NormalEarth5293 Python Education

Cell 1:

# UNQ_C2
# GRADED FUNCTION: compute_gradient
def compute_gradient(x, y, w, b):
    """
    Computes the gradient for linear regression
    Args:
      x (ndarray): Shape (m,) Input to the model (Population of cities)
      y (ndarray): Shape (m,) Label (Actual profits for the cities)
      w, b (scalar): Parameters of the model
    Returns
      dj_dw (scalar): The gradient of the cost w.r.t. the parameters w
      dj_db (scalar): The gradient of the cost w.r.t. the parameter b
    """
    # Number of training examples
    m = x.shape[0]

    # You need to return the following variables correctly
    dj_dw = 0
    dj_db = 0

    ### START CODE HERE ###
    f_w_b = np.dot(w, x) + b
    print(f_w_b[:5])
    dj_dw_temp = f_w_b - y
    print(dj_dw_temp[:5])
    temp = f_w_b - y
    print(np.shape(temp), np.shape(x))
    dj_db_temp = temp * x
    print("hi", np.dot(temp, x))
    dj_dw = np.sum(dj_dw_temp) / m
    dj_db = np.sum(dj_db_temp) / m
    ### END CODE HERE ###

    return dj_dw, dj_db
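For reference, the lecture formulas are dj_dw = (1/m) * sum((f_wb(x_i) - y_i) * x_i) and dj_db = (1/m) * sum(f_wb(x_i) - y_i). Here is a standalone sketch of just those formulas, separate from my graded cell (the function name is made up, not part of the assignment):

import numpy as np

# Sketch of the standard vectorized gradients for univariate linear regression:
# dj_dw averages the residual weighted by x; dj_db averages the plain residual.
def gradient_reference(x, y, w, b):   # hypothetical name, not from the notebook
    m = x.shape[0]
    err = (w * x + b) - y             # residual f_wb(x) - y, shape (m,)
    dj_dw = np.dot(err, x) / m        # same value as np.sum(err * x) / m
    dj_db = np.sum(err) / m
    return dj_dw, dj_db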

Cell 2:

# Compute and display gradient with w initialized to zeroes
initial_w = 0
initial_b = 0

tmp_dj_dw, tmp_dj_db = compute_gradient(x_train, y_train, initial_w, initial_b)
print('Gradient at initial w, b (zeros):', tmp_dj_dw, tmp_dj_db)

compute_gradient_test(compute_gradient)

Results:

[0. 0. 0. 0. 0.]

[-17.592 -9.1302 -13.662 -11.854 -6.8233]

(97,) (97,)

hi -6336.898425318999

Gradient at initial w, b (zeros): -5.839135051546393 -65.32884974555671

Using X with shape (4, 1)

[ 4.5 8.5 12.5 16.5]

[0. 0. 0. 0.]

(4,) (4,)

hi 0.0

[ 4. 7. 10. 13.]

[-2. -2. -2. -2.]

(4,) (4,)

hi -40.0

---------------------------------------------------------------------------
AssertionError                            Traceback (most recent call last)
<ipython-input-98-dd854b16287c> in <module>
      6 print('Gradient at initial w, b (zeros):', tmp_dj_dw, tmp_dj_db)
      7
----> 8 compute_gradient_test(compute_gradient)

~/work/public_tests.py in compute_gradient_test(target)
     60     dj_dw, dj_db = target(x, y, initial_w, initial_b)
     61     #assert dj_dw.shape == initial_w.shape, f"Wrong shape for dj_dw. {dj_dw} != {initial_w.shape}"
---> 62     assert dj_db == -2, f"Case 2: dj_db is wrong: {dj_db} != -2"
     63     assert np.allclose(dj_dw, -10.0), f"Case 1: dj_dw is wrong: {dj_dw} != -10.0"
     64

AssertionError: Case 2: dj_db is wrong: -10.0 != -2

Can someone please explain why numpy.dot() gives me the element-wise products as a vector the first time, but the second time it gives me the products summed up into a single number?
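To make the comparison concrete, here is a small standalone snippet with made-up values (nothing from the assignment) showing the two behaviours I mean, a scalar first argument versus two 1-D arrays:

import numpy as np

x = np.array([1.0, 2.0, 3.0])
t = np.array([4.0, 5.0, 6.0])

print(np.dot(0.5, x))  # scalar with a 1-D array: element-wise scaling -> [0.5 1.  1.5]
print(np.dot(t, x))    # two 1-D arrays: inner product, a single number -> 32.0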

Thank you

submitted by /u/NormalEarth5293
