Compute a numerical derivative
Since I could not get numpy.gradient() to compute a derivative successfully, I wrote a script to compute it manually. Running the script below will output a plot of the two functions f(x) = sin(x) and f'(x) = cos(x) over the interval 0 ≤ x ≤ 2π.
import numpy as np
import matplotlib.pyplot as plt

def compute_numerical_derivative(func, x, method='custom'):
    y = func(x)
    if method == 'custom':
        size = len(y0)
        res = np.zeros(size, 'd')  # 'd' for double
        # centered differences
        for idx in range(1, size-1):
            res[idx] = (y[idx+1] - y[idx-1]) / (x[idx+1] - x[idx-1])
        # one-sided differences
        res[0] = (y[1] - y[0]) / (x[1] - x[0])
        res[-1] = (y[size-1] - y[size-2]) / (x[size-1] - x[size-2])
    # elif method == 'numpy':
    #     res = np.gradient(y)
    return res

x = np.linspace(0, 2*np.pi, 100)
y0 = np.sin(x)
y1_true = np.cos(x)  # exactly d/dx (y0)
y1_cust = compute_numerical_derivative(np.sin, x, method='custom')
# y1_nump = compute_numerical_derivative(np.sin, x, method='numpy')

plt.plot(x, y1_true, 'r', marker='o', linestyle=None, alpha=0.5)
plt.plot(x, y1_cust, 'b', marker='^', linestyle=None, alpha=0.5)
# plt.plot(x, y1_nump, 'k', marker='*', linestyle='-', alpha=0.5)
plt.show()
Can the speed and/or accuracy of this algorithm be improved? Is it proper to use centered differences at interior points and one-sided differences at the boundaries?
python python-3.x numpy numerical-methods vectorization
asked Apr 6 at 6:23 by MPath
Did you consider passing x as the second parameter of np.gradient? – Mathias Ettinger, Apr 6 at 8:39

Ahhh, that is varargs... I can try that in a little bit. – MPath, Apr 6 at 8:41

Yes, by default numpy considers each point on the x axis to be spaced by one unit, unless you tell it otherwise. – Mathias Ettinger, Apr 6 at 8:43

Out of curiosity, is there an advantage to using *varargs as an argument including x, rather than having x as an argument by itself? – MPath, Apr 6 at 11:37

*varargs is just the Python notation for a variable number of arguments; just call np.gradient(y, x) and Python will figure it out just fine. – Mathias Ettinger, Apr 6 at 11:42
2 Answers
Accepted answer:

You can use np.roll to compute the centered differences as a vectorised operation rather than in a for loop:

res = (np.roll(y, -1) - np.roll(y, 1)) / (np.roll(x, -1) - np.roll(x, 1))

You then still need to account for the incorrect values at the boundaries.

Also note that you have a bug at the line

size = len(y0)

It should be len(y). But you don't need it anymore, as you can access the last values as y[-1] or x[-2].

Lastly, consider putting your test code under an if __name__ == '__main__' clause.
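For illustration, a complete version along those lines might look like the sketch below (combining the answer's points, not the answerer's exact code; the function name is hypothetical). np.roll wraps around at the array ends, so the two boundary values must be overwritten:

import numpy as np

def numerical_derivative_roll(func, x):
    y = func(x)
    # centered differences for every index at once: np.roll(y, -1)[i]
    # is y[i+1] and np.roll(y, 1)[i] is y[i-1], wrapping at the ends
    res = (np.roll(y, -1) - np.roll(y, 1)) / (np.roll(x, -1) - np.roll(x, 1))
    # the wrapped-around end values are wrong, so replace them
    # with one-sided differences
    res[0] = (y[1] - y[0]) / (x[1] - x[0])
    res[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
    return res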
answered Apr 6 at 9:10 by Mathias Ettinger
You can vectorize the calculation:

def compute_numerical_derivative3(func, x, method='custom'):
    y = func(x)
    if method == 'custom':
        res = np.zeros_like(y)  # zeros with the shape and dtype of y
        # centered differences
        x_dif = x[2:] - x[:-2]
        y_dif = y[2:] - y[:-2]
        res[1:-1] = y_dif / x_dif
        # one-sided differences at the boundaries
        res[0] = (y[1] - y[0]) / (x[1] - x[0])
        res[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
    # elif method == 'numpy':
    #     res = np.gradient(y)
    return res
answered Apr 6 at 9:46 by Maarten Fabré
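One way to sanity-check either vectorised version against the analytic derivative (a hypothetical test snippet, assuming compute_numerical_derivative3 from the answer above is defined):

import numpy as np

x = np.linspace(0, 2 * np.pi, 100)
approx = compute_numerical_derivative3(np.sin, x)
exact = np.cos(x)

# interior points use centered differences (second-order accurate
# in the spacing), so the error should be small on this grid
print(np.abs(approx - exact).max())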