Compute a numerical derivative

The name of the pictureThe name of the pictureThe name of the pictureClash Royale CLAN TAG#URR8PPP





.everyoneloves__top-leaderboard:empty,.everyoneloves__mid-leaderboard:empty margin-bottom:0;







up vote
5
down vote

favorite












Since I could not get numpy.gradient() to compute a derivative successfully, I wrote a script to compute it manually. Running the script below will output a plot of two functions f(x) = sin(x) and f'(x) = cos(x) over the interval 0 ≤ x ≤ 2 pi.



import numpy as np
import matplotlib.pyplot as plt

def compute_numerical_derivative(func, x, method='custom'):
    """Differentiate ``func`` numerically at the sample points ``x``.

    Uses second-order centered differences at interior points and
    first-order one-sided differences at the two boundary points.

    Parameters
    ----------
    func : callable
        Vectorised function; ``func(x)`` must return an array the same
        length as ``x``.
    x : numpy.ndarray
        1-D array of sample points; spacing need not be uniform.
    method : str, optional
        Only ``'custom'`` is supported.

    Returns
    -------
    numpy.ndarray
        Approximation of d(func)/dx at each point of ``x``.

    Raises
    ------
    ValueError
        If ``method`` is not ``'custom'`` (the original code fell
        through to ``return res`` with ``res`` undefined — NameError).
    """
    y = func(x)
    if method != 'custom':
        raise ValueError('unsupported method: %r' % (method,))
    # Bug fix: the original computed len(y0) — a global that only exists
    # in the calling script; the local sample length is len(y).
    res = np.empty(len(y), 'd')  # 'd' for double
    # centered differences (vectorised; replaces the original for loop)
    res[1:-1] = (y[2:] - y[:-2]) / (x[2:] - x[:-2])
    # one-sided differences at the boundaries
    res[0] = (y[1] - y[0]) / (x[1] - x[0])
    res[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
    return res

# Sample the interval [0, 2*pi] at 100 evenly spaced points.
x = np.linspace(0, 2*np.pi, 100)
y0 = np.sin(x)
y1_true = np.cos(x) # exactly d/dx (y0)

# Derivative computed with the hand-rolled centered/one-sided scheme.
y1_cust = compute_numerical_derivative(np.sin, x, method='custom')
# y1_nump = compute_numerical_derivative(np.sin, x, method='numpy')

# NOTE(review): linestyle=None falls back to the rcParams default (a solid
# line); to draw markers only, the string 'None' would be needed — confirm.
plt.plot(x, y1_true, 'r', marker='o', linestyle=None, alpha=0.5)
plt.plot(x, y1_cust, 'b', marker='^', linestyle=None, alpha=0.5)
# plt.plot(x, y1_nump, 'k', marker='*', linestyle='-', alpha=0.5)
plt.show()


Can the speed and/or accuracy of this algorithm be improved? Is it proper to use centered differences at interior points and one-sided differences at the boundaries?







share|improve this question















  • 3




    Did you consider passing x as the second parameter of np.gradient?
    – Mathias Ettinger
    Apr 6 at 8:39










  • Ahhh, that is varargs... I can try that in a little bit.
    – MPath
    Apr 6 at 8:41










  • Yes, by default numpy considers each point on the x axis to be spaced by one unit, unless you tell it otherwise.
    – Mathias Ettinger
    Apr 6 at 8:43










  • Out of curiosity, is there an advantage to using *varargs as an argument including x rather than having x as an argument by itself?
    – MPath
    Apr 6 at 11:37






  • 1




    *varargs is just the Python notation to mean variable number of arguments, just call np.gradient(y, x) Python will figure it out just fine.
    – Mathias Ettinger
    Apr 6 at 11:42
















up vote
5
down vote

favorite












Since I could not get numpy.gradient() to compute a derivative successfully, I wrote a script to compute it manually. Running the script below will output a plot of two functions f(x) = sin(x) and f'(x) = cos(x) over the interval 0 ≤ x ≤ 2 pi.



import numpy as np
import matplotlib.pyplot as plt

def compute_numerical_derivative(func, x, method='custom'):
    """Differentiate ``func`` numerically at the sample points ``x``.

    Second-order centered differences at interior points, first-order
    one-sided differences at the two boundary points.

    Parameters
    ----------
    func : callable
        Vectorised function; ``func(x)`` must return an array the same
        length as ``x``.
    x : numpy.ndarray
        1-D array of sample points; spacing need not be uniform.
    method : str, optional
        Only ``'custom'`` is supported.

    Returns
    -------
    numpy.ndarray
        Approximation of d(func)/dx at each point of ``x``.

    Raises
    ------
    ValueError
        If ``method`` is not ``'custom'`` (the original code fell
        through to ``return res`` with ``res`` undefined — NameError).
    """
    y = func(x)
    if method != 'custom':
        raise ValueError('unsupported method: %r' % (method,))
    # Bug fix: the original computed len(y0) — a global that only exists
    # in the calling script; the local sample length is len(y).
    res = np.empty(len(y), 'd')  # 'd' for double
    # centered differences (vectorised; replaces the original for loop)
    res[1:-1] = (y[2:] - y[:-2]) / (x[2:] - x[:-2])
    # one-sided differences at the boundaries
    res[0] = (y[1] - y[0]) / (x[1] - x[0])
    res[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
    return res

# Sample the interval [0, 2*pi] at 100 evenly spaced points.
x = np.linspace(0, 2*np.pi, 100)
y0 = np.sin(x)
y1_true = np.cos(x) # exactly d/dx (y0)

# Derivative computed with the hand-rolled centered/one-sided scheme.
y1_cust = compute_numerical_derivative(np.sin, x, method='custom')
# y1_nump = compute_numerical_derivative(np.sin, x, method='numpy')

# NOTE(review): linestyle=None falls back to the rcParams default (a solid
# line); to draw markers only, the string 'None' would be needed — confirm.
plt.plot(x, y1_true, 'r', marker='o', linestyle=None, alpha=0.5)
plt.plot(x, y1_cust, 'b', marker='^', linestyle=None, alpha=0.5)
# plt.plot(x, y1_nump, 'k', marker='*', linestyle='-', alpha=0.5)
plt.show()


Can the speed and/or accuracy of this algorithm be improved? Is it proper to use centered differences at interior points and one-sided differences at the boundaries?







share|improve this question















  • 3




    Did you consider passing x as the second parameter of np.gradient?
    – Mathias Ettinger
    Apr 6 at 8:39










  • Ahhh, that is varargs... I can try that in a little bit.
    – MPath
    Apr 6 at 8:41










  • yes, by default numpy consider each point on the x axis to be spaced by one unit, unless you tell it otherwise.
    – Mathias Ettinger
    Apr 6 at 8:43










  • Out of curiosity, is there an advantage to using *varargs as an argument including x rather than having x as an argument by itself?
    – MPath
    Apr 6 at 11:37






  • 1




    *varargs is just the Python notation to mean variable number of arguments, just call np.gradient(y, x) Python will figure it out just fine.
    – Mathias Ettinger
    Apr 6 at 11:42












up vote
5
down vote

favorite









up vote
5
down vote

favorite











Since I could not get numpy.gradient() to compute a derivative successfully, I wrote a script to compute it manually. Running the script below will output a plot of two functions f(x) = sin(x) and f'(x) = cos(x) over the interval 0 ≤ x ≤ 2 pi.



import numpy as np
import matplotlib.pyplot as plt

def compute_numerical_derivative(func, x, method='custom'):
    """Differentiate ``func`` numerically at the sample points ``x``.

    Second-order centered differences at interior points, first-order
    one-sided differences at the two boundary points.

    Parameters
    ----------
    func : callable
        Vectorised function; ``func(x)`` must return an array the same
        length as ``x``.
    x : numpy.ndarray
        1-D array of sample points; spacing need not be uniform.
    method : str, optional
        Only ``'custom'`` is supported.

    Raises
    ------
    ValueError
        If ``method`` is not ``'custom'`` (the original code fell
        through to ``return res`` with ``res`` undefined — NameError).
    """
    y = func(x)
    if method != 'custom':
        raise ValueError('unsupported method: %r' % (method,))
    # Bug fix: the original computed len(y0) — a global that only exists
    # in the calling script; the local sample length is len(y).
    res = np.empty(len(y), 'd')  # 'd' for double
    # centered differences (vectorised; replaces the original for loop)
    res[1:-1] = (y[2:] - y[:-2]) / (x[2:] - x[:-2])
    # one-sided differences at the boundaries
    res[0] = (y[1] - y[0]) / (x[1] - x[0])
    res[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
    return res

# Sample the interval [0, 2*pi] at 100 evenly spaced points.
x = np.linspace(0, 2*np.pi, 100)
y0 = np.sin(x)
y1_true = np.cos(x) # exactly d/dx (y0)

# Derivative computed with the hand-rolled centered/one-sided scheme.
y1_cust = compute_numerical_derivative(np.sin, x, method='custom')
# y1_nump = compute_numerical_derivative(np.sin, x, method='numpy')

# NOTE(review): linestyle=None falls back to the rcParams default (a solid
# line); to draw markers only, the string 'None' would be needed — confirm.
plt.plot(x, y1_true, 'r', marker='o', linestyle=None, alpha=0.5)
plt.plot(x, y1_cust, 'b', marker='^', linestyle=None, alpha=0.5)
# plt.plot(x, y1_nump, 'k', marker='*', linestyle='-', alpha=0.5)
plt.show()


Can the speed and/or accuracy of this algorithm be improved? Is it proper to use centered differences at interior points and one-sided differences at the boundaries?







share|improve this question











Since I could not get numpy.gradient() to compute a derivative successfully, I wrote a script to compute it manually. Running the script below will output a plot of two functions f(x) = sin(x) and f'(x) = cos(x) over the interval 0 ≤ x ≤ 2 pi.



import numpy as np
import matplotlib.pyplot as plt

def compute_numerical_derivative(func, x, method='custom'):
    """Differentiate ``func`` numerically at the sample points ``x``.

    Second-order centered differences at interior points, first-order
    one-sided differences at the two boundary points.

    Parameters
    ----------
    func : callable
        Vectorised function; ``func(x)`` must return an array the same
        length as ``x``.
    x : numpy.ndarray
        1-D array of sample points; spacing need not be uniform.
    method : str, optional
        Only ``'custom'`` is supported.

    Raises
    ------
    ValueError
        If ``method`` is not ``'custom'`` (the original code fell
        through to ``return res`` with ``res`` undefined — NameError).
    """
    y = func(x)
    if method != 'custom':
        raise ValueError('unsupported method: %r' % (method,))
    # Bug fix: the original computed len(y0) — a global that only exists
    # in the calling script; the local sample length is len(y).
    res = np.empty(len(y), 'd')  # 'd' for double
    # centered differences (vectorised; replaces the original for loop)
    res[1:-1] = (y[2:] - y[:-2]) / (x[2:] - x[:-2])
    # one-sided differences at the boundaries
    res[0] = (y[1] - y[0]) / (x[1] - x[0])
    res[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
    return res

# Sample the interval [0, 2*pi] at 100 evenly spaced points.
x = np.linspace(0, 2*np.pi, 100)
y0 = np.sin(x)
y1_true = np.cos(x) # exactly d/dx (y0)

# Derivative computed with the hand-rolled centered/one-sided scheme.
y1_cust = compute_numerical_derivative(np.sin, x, method='custom')
# y1_nump = compute_numerical_derivative(np.sin, x, method='numpy')

# NOTE(review): linestyle=None falls back to the rcParams default (a solid
# line); to draw markers only, the string 'None' would be needed — confirm.
plt.plot(x, y1_true, 'r', marker='o', linestyle=None, alpha=0.5)
plt.plot(x, y1_cust, 'b', marker='^', linestyle=None, alpha=0.5)
# plt.plot(x, y1_nump, 'k', marker='*', linestyle='-', alpha=0.5)
plt.show()


Can the speed and/or accuracy of this algorithm be improved? Is it proper to use centered differences at interior points and one-sided differences at the boundaries?









share|improve this question










share|improve this question




share|improve this question









asked Apr 6 at 6:23









MPath

27529




27529







  • 3




    Did you consider passing x as the second parameter of np.gradient?
    – Mathias Ettinger
    Apr 6 at 8:39










  • Ahhh, that is varargs... I can try that in a little bit.
    – MPath
    Apr 6 at 8:41










  • yes, by default numpy consider each point on the x axis to be spaced by one unit, unless you tell it otherwise.
    – Mathias Ettinger
    Apr 6 at 8:43










  • Out of curiosity, is there an advantage to using *varargs as an argument including x rather than having x as an argument by itself?
    – MPath
    Apr 6 at 11:37






  • 1




    *varargs is just the Python notation to mean variable number of arguments, just call np.gradient(y, x) Python will figure it out just fine.
    – Mathias Ettinger
    Apr 6 at 11:42












  • 3




    Did you consider passing x as the second parameter of np.gradient?
    – Mathias Ettinger
    Apr 6 at 8:39










  • Ahhh, that is varargs... I can try that in a little bit.
    – MPath
    Apr 6 at 8:41










  • yes, by default numpy consider each point on the x axis to be spaced by one unit, unless you tell it otherwise.
    – Mathias Ettinger
    Apr 6 at 8:43










  • Out of curiosity, is there an advantage to using *varargs as an argument including x rather than having x as an argument by itself?
    – MPath
    Apr 6 at 11:37






  • 1




    *varargs is just the Python notation to mean variable number of arguments, just call np.gradient(y, x) Python will figure it out just fine.
    – Mathias Ettinger
    Apr 6 at 11:42







3




3




Did you consider passing x as the second parameter of np.gradient?
– Mathias Ettinger
Apr 6 at 8:39




Did you consider passing x as the second parameter of np.gradient?
– Mathias Ettinger
Apr 6 at 8:39












Ahhh, that is varargs... I can try that in a little bit.
– MPath
Apr 6 at 8:41




Ahhh, that is varargs... I can try that in a little bit.
– MPath
Apr 6 at 8:41












yes, by default numpy consider each point on the x axis to be spaced by one unit, unless you tell it otherwise.
– Mathias Ettinger
Apr 6 at 8:43




yes, by default numpy consider each point on the x axis to be spaced by one unit, unless you tell it otherwise.
– Mathias Ettinger
Apr 6 at 8:43












Out of curiosity, is there an advantage to using *varargs as an argument including x rather than having x as an argument by itself?
– MPath
Apr 6 at 11:37




Out of curiosity, is there an advantage to using *varargs as an argument including x rather than having x as an argument by itself?
– MPath
Apr 6 at 11:37




1




1




*varargs is just the Python notation to mean variable number of arguments, just call np.gradient(y, x) Python will figure it out just fine.
– Mathias Ettinger
Apr 6 at 11:42




*varargs is just the Python notation to mean variable number of arguments, just call np.gradient(y, x) Python will figure it out just fine.
– Mathias Ettinger
Apr 6 at 11:42










2 Answers
2






active

oldest

votes

















up vote
7
down vote



accepted










You can use np.roll to compute the centered differences as a vectorised operation rather than in a for loop:



res = (np.roll(y, -1) - np.roll(y, 1)) / (np.roll(x, -1) - np.roll(x, 1))


You then still need to account for incorrect values at the boundaries.



Also note that you have a bug at the line



size = len(y0)


It should be len(y). But you don't need it anymore as you can access the last values as y[-1] or x[-2].



Lastly, consider putting your test code under an if __name__ == '__main__' clause.






share|improve this answer




























    up vote
    5
    down vote













    You can vectorize the calculation



    def compute_numerical_derivative3(func, x, method='custom'):
    y = func(x)
    if method == 'custom':
    res = np.zeros_like(y) # 'd' for double
    # centered differences
    x_dif = x[2:] - x[:-2]
    y_dif = y[2:] - y[:-2]
    res[1:-1] = y_dif / x_dif
    res[0] = (y[1] - y[0]) / (x[1] - x[0])
    res[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
    # elif method == 'numpy':
    # res = np.gradient(y)
    return res





    share|improve this answer





















      Your Answer




      StackExchange.ifUsing("editor", function ()
      return StackExchange.using("mathjaxEditing", function ()
      StackExchange.MarkdownEditor.creationCallbacks.add(function (editor, postfix)
      StackExchange.mathjaxEditing.prepareWmdForMathJax(editor, postfix, [["\$", "\$"]]);
      );
      );
      , "mathjax-editing");

      StackExchange.ifUsing("editor", function ()
      StackExchange.using("externalEditor", function ()
      StackExchange.using("snippets", function ()
      StackExchange.snippets.init();
      );
      );
      , "code-snippets");

      StackExchange.ready(function()
      var channelOptions =
      tags: "".split(" "),
      id: "196"
      ;
      initTagRenderer("".split(" "), "".split(" "), channelOptions);

      StackExchange.using("externalEditor", function()
      // Have to fire editor after snippets, if snippets enabled
      if (StackExchange.settings.snippets.snippetsEnabled)
      StackExchange.using("snippets", function()
      createEditor();
      );

      else
      createEditor();

      );

      function createEditor()
      StackExchange.prepareEditor(
      heartbeatType: 'answer',
      convertImagesToLinks: false,
      noModals: false,
      showLowRepImageUploadWarning: true,
      reputationToPostImages: null,
      bindNavPrevention: true,
      postfix: "",
      onDemand: true,
      discardSelector: ".discard-answer"
      ,immediatelyShowMarkdownHelp:true
      );



      );








       

      draft saved


      draft discarded


















      StackExchange.ready(
      function ()
      StackExchange.openid.initPostLogin('.new-post-login', 'https%3a%2f%2fcodereview.stackexchange.com%2fquestions%2f191379%2fcompute-a-numerical-derivative%23new-answer', 'question_page');

      );

      Post as a guest






























      2 Answers
      2






      active

      oldest

      votes








      2 Answers
      2






      active

      oldest

      votes









      active

      oldest

      votes






      active

      oldest

      votes








      up vote
      7
      down vote



      accepted










      You can use np.roll to compute the centered differences as a vectorised operation rather than in a for loop:



      res = (np.roll(y, -1) - np.roll(y, 1)) / (np.roll(x, -1) - np.roll(x, 1))


      You then still need to account for incorrect values at the boundaries.



      Also note that you have a bug at the line



      size = len(y0)


      It should be len(y). But you don't need it anymore as you can access the last values as y[-1] or x[-2].



      Lastly, consider putting your test code under an if __name__ == '__main__' clause.






      share|improve this answer

























        up vote
        7
        down vote



        accepted










        You can use np.roll to compute the centered differences as a vectorised operation rather than in a for loop:



        res = (np.roll(y, -1) - np.roll(y, 1)) / (np.roll(x, -1) - np.roll(x, 1))


        You then still need to account for incorrect values at the boundaries.



        Also note that you have a bug at the line



        size = len(y0)


        It should be len(y). But you don't need it anymore as you can access the last values as y[-1] or x[-2].



        Lastly, consider putting your test code under an if __name__ == '__main__' clause.






        share|improve this answer























          up vote
          7
          down vote



          accepted







          up vote
          7
          down vote



          accepted






          You can use np.roll to compute the centered differences as a vectorised operation rather than in a for loop:



          res = (np.roll(y, -1) - np.roll(y, 1)) / (np.roll(x, -1) - np.roll(x, 1))


          You then still need to account for incorrect values at the boundaries.



          Also note that you have a bug at the line



          size = len(y0)


          It should be len(y). But you don't need it anymore as you can access the last values as y[-1] or x[-2].



          Lastly, consider putting your test code under an if __name__ == '__main__' clause.






          share|improve this answer













          You can use np.roll to compute the centered differences as a vectorised operation rather than in a for loop:



          res = (np.roll(y, -1) - np.roll(y, 1)) / (np.roll(x, -1) - np.roll(x, 1))


          You then still need to account for incorrect values at the boundaries.



          Also note that you have a bug at the line



          size = len(y0)


          It should be len(y). But you don't need it anymore as you can access the last values as y[-1] or x[-2].



          Lastly, consider putting your test code under an if __name__ == '__main__' clause.







          share|improve this answer













          share|improve this answer



          share|improve this answer











          answered Apr 6 at 9:10









          Mathias Ettinger

          21.9k32876




          21.9k32876






















              up vote
              5
              down vote













              You can vectorize the calculation



              def compute_numerical_derivative3(func, x, method='custom'):
              y = func(x)
              if method == 'custom':
              res = np.zeros_like(y) # 'd' for double
              # centered differences
              x_dif = x[2:] - x[:-2]
              y_dif = y[2:] - y[:-2]
              res[1:-1] = y_dif / x_dif
              res[0] = (y[1] - y[0]) / (x[1] - x[0])
              res[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
              # elif method == 'numpy':
              # res = np.gradient(y)
              return res





              share|improve this answer

























                up vote
                5
                down vote













                You can vectorize the calculation



                def compute_numerical_derivative3(func, x, method='custom'):
                y = func(x)
                if method == 'custom':
                res = np.zeros_like(y) # 'd' for double
                # centered differences
                x_dif = x[2:] - x[:-2]
                y_dif = y[2:] - y[:-2]
                res[1:-1] = y_dif / x_dif
                res[0] = (y[1] - y[0]) / (x[1] - x[0])
                res[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
                # elif method == 'numpy':
                # res = np.gradient(y)
                return res





                share|improve this answer























                  up vote
                  5
                  down vote










                  up vote
                  5
                  down vote









                  You can vectorize the calculation



                  def compute_numerical_derivative3(func, x, method='custom'):
                  y = func(x)
                  if method == 'custom':
                  res = np.zeros_like(y) # 'd' for double
                  # centered differences
                  x_dif = x[2:] - x[:-2]
                  y_dif = y[2:] - y[:-2]
                  res[1:-1] = y_dif / x_dif
                  res[0] = (y[1] - y[0]) / (x[1] - x[0])
                  res[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
                  # elif method == 'numpy':
                  # res = np.gradient(y)
                  return res





                  share|improve this answer













                  You can vectorize the calculation



                  def compute_numerical_derivative3(func, x, method='custom'):
                  y = func(x)
                  if method == 'custom':
                  res = np.zeros_like(y) # 'd' for double
                  # centered differences
                  x_dif = x[2:] - x[:-2]
                  y_dif = y[2:] - y[:-2]
                  res[1:-1] = y_dif / x_dif
                  res[0] = (y[1] - y[0]) / (x[1] - x[0])
                  res[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
                  # elif method == 'numpy':
                  # res = np.gradient(y)
                  return res






                  share|improve this answer













                  share|improve this answer



                  share|improve this answer











                  answered Apr 6 at 9:46









                  Maarten Fabré

                  3,204214




                  3,204214






















                       

                      draft saved


                      draft discarded


























                       


                      draft saved


                      draft discarded














                      StackExchange.ready(
                      function ()
                      StackExchange.openid.initPostLogin('.new-post-login', 'https%3a%2f%2fcodereview.stackexchange.com%2fquestions%2f191379%2fcompute-a-numerical-derivative%23new-answer', 'question_page');

                      );

                      Post as a guest













































































                      Popular posts from this blog

                      Chat program with C++ and SFML

                      Function to Return a JSON Like Objects Using VBA Collections and Arrays

                      Will my employers contract hold up in court?