Middleware Django statistics
Clash Royale CLAN TAG#URR8PPP
.everyoneloves__top-leaderboard:empty,.everyoneloves__mid-leaderboard:empty margin-bottom:0;
up vote
1
down vote
favorite
I have this middleware to log the number of queries, the time spent on each view and some errors and tracebacks.
This will be called on each request, so I had to put a 75ms timeout. I don't like it, but I feel like using async request for this is unnecessary, I don't care about the host response.
What do you think? Can I improve this in any way?
import json
import os
import sys
import time
import traceback

import requests

from django.conf import settings
from django.db import connection
# Default endpoint of the log-collection agent; each value below can be
# overridden in Django settings (see the getattr() lookups in the middleware).
CUSTOM_LOG_AGENT_HOST = 'http://localhost:9875'
# Extra HTTP headers sent with every log POST (none by default). The original
# `{}` literal was stripped in transcription and is restored here.
CUSTOM_LOG_AGENT_HEADERS = {}
# requests' timeout is in SECONDS, so 0.0075 is 7.5 ms.
# NOTE(review): the accompanying text says "75ms"; 0.075 would match that —
# confirm which value was intended.
CUSTOM_LOG_AGENT_TIMEOUT = 0.0075
class SimpleMiddleware(object):
    """Middleware that times each request, records the executed SQL queries,
    the handling view, and any unhandled exception, then POSTs the payload as
    JSON to a log-agent host (best effort, with a short timeout).

    Fixes vs. the original:
    - per-request state is stored on ``request`` instead of ``self``: a single
      middleware instance serves many concurrent requests (and threads), so
      instance attributes would be clobbered under load;
    - the ``{}`` dict literals lost in transcription are restored;
    - ``connection`` must come from ``django.db`` (it was used unimported).
    """

    def __init__(self, get_response):
        # One-time configuration and initialization.
        self.get_response = get_response

    def __call__(self, request):
        # Per-request payload lives on the request object, never on self.
        request.custom_log_agent = {}
        request.start_time = time.time()

        # Code executed for each request before the view (and later
        # middleware) are called.
        response = self.get_response(request)

        # Code executed for each request/response after the view is called.
        total = time.time() - request.start_time
        request.custom_log_agent['queries'] = connection.queries
        request.custom_log_agent['time'] = int(total * 1000)  # milliseconds

        try:
            url = getattr(settings, 'CUSTOM_LOG_AGENT_HOST', CUSTOM_LOG_AGENT_HOST)
            headers = getattr(settings, 'CUSTOM_LOG_AGENT_HEADERS', CUSTOM_LOG_AGENT_HEADERS)
            timeout = getattr(settings, 'CUSTOM_LOG_AGENT_TIMEOUT', CUSTOM_LOG_AGENT_TIMEOUT)
            requests.post(url, headers=headers, json=request.custom_log_agent, timeout=timeout)
        except requests.exceptions.RequestException as e:
            # Best effort: a logging failure must never break the response.
            # Fall back to dumping the payload locally.
            print(e)
            print(json.dumps(request.custom_log_agent, indent=4))
        return response

    def process_view(self, request, view_func, view_args, view_kwargs):
        """Record which view handles the request, its kwargs, and the URL."""
        request.custom_log_agent['view_func'] = view_func.__name__
        request.custom_log_agent['view_kwargs'] = view_kwargs
        request.custom_log_agent['url'] = request.get_full_path()

    def process_exception(self, request, exception):
        """Capture type, message, line number and traceback of an unhandled
        view exception.  Returning None lets Django's normal exception
        handling continue."""
        ltype, lvalue, ltb = sys.exc_info()
        error = {
            'type': ltype.__name__,
            'message': str(lvalue),
            'line': ltb.tb_lineno,
            'traceback': ''.join(traceback.format_tb(ltb)),
        }
        request.custom_log_agent['error'] = error
        return None
python logging django benchmarking
add a comment |
up vote
1
down vote
favorite
I have this middleware to log the number of queries, the time spent on each view and some errors and tracebacks.
This will be called on each request, so I had to put a 75ms timeout. I don't like it, but I feel like using async request for this is unnecessary, I don't care about the host response.
What do you think? Can I improve this in any way?
import sys
import traceback
import os
import json
import time
import requests
from django.conf import settings
CUSTOM_LOG_AGENT_HOST = 'http://localhost:9875'
CUSTOM_LOG_AGENT_HEADERS =
CUSTOM_LOG_AGENT_TIMEOUT = 0.0075
class SimpleMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
self.customLogAgent =
request.start_time = time.time()
# Code to be executed for each request before
# the view (and later middleware) are called.
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
total = time.time() - request.start_time
self.customLogAgent['queries'] = connection.queries
self.customLogAgent['time'] = int(total * 1000)
try:
url = getattr(settings, 'CUSTOM_LOG_AGENT_HOST', CUSTOM_LOG_AGENT_HOST)
headers = getattr(settings, 'CUSTOM_LOG_AGENT_HEADERS', CUSTOM_LOG_AGENT_HEADERS)
timeout = getattr(settings, 'CUSTOM_LOG_AGENT_TIMEOUT', CUSTOM_LOG_AGENT_TIMEOUT)
requests.post(url, headers=headers, json=self.customLogAgent, timeout=timeout)
except requests.exceptions.RequestException as e:
print(e)
print(json.dumps(self.customLogAgent, indent=4))
return response
def process_view(self, request, view_func, view_args, view_kwargs):
self.customLogAgent['view_func'] = view_func.__name__
self.customLogAgent['view_kwargs'] = view_kwargs
self.customLogAgent['url'] = request.get_full_path()
def process_exception(self, request, exception):
ltype, lvalue, ltb = sys.exc_info()
error =
error['type'] = ltype.__name__
error['message'] = str(lvalue)
error['line'] = ltb.tb_lineno
error['traceback'] = ''.join(traceback.format_tb(ltb))
self.customLogAgent['error'] = error
return None
python logging django benchmarking
add a comment |Â
up vote
1
down vote
favorite
up vote
1
down vote
favorite
I have this middleware to log the number of queries, the time spent on each view and some errors and tracebacks.
This will be called on each request, so I had to put a 75ms timeout. I don't like it, but I feel like using async request for this is unnecessary, I don't care about the host response.
What do you think? Can I improve this in any way?
import sys
import traceback
import os
import json
import time
import requests
from django.conf import settings
CUSTOM_LOG_AGENT_HOST = 'http://localhost:9875'
CUSTOM_LOG_AGENT_HEADERS =
CUSTOM_LOG_AGENT_TIMEOUT = 0.0075
class SimpleMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
self.customLogAgent =
request.start_time = time.time()
# Code to be executed for each request before
# the view (and later middleware) are called.
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
total = time.time() - request.start_time
self.customLogAgent['queries'] = connection.queries
self.customLogAgent['time'] = int(total * 1000)
try:
url = getattr(settings, 'CUSTOM_LOG_AGENT_HOST', CUSTOM_LOG_AGENT_HOST)
headers = getattr(settings, 'CUSTOM_LOG_AGENT_HEADERS', CUSTOM_LOG_AGENT_HEADERS)
timeout = getattr(settings, 'CUSTOM_LOG_AGENT_TIMEOUT', CUSTOM_LOG_AGENT_TIMEOUT)
requests.post(url, headers=headers, json=self.customLogAgent, timeout=timeout)
except requests.exceptions.RequestException as e:
print(e)
print(json.dumps(self.customLogAgent, indent=4))
return response
def process_view(self, request, view_func, view_args, view_kwargs):
self.customLogAgent['view_func'] = view_func.__name__
self.customLogAgent['view_kwargs'] = view_kwargs
self.customLogAgent['url'] = request.get_full_path()
def process_exception(self, request, exception):
ltype, lvalue, ltb = sys.exc_info()
error =
error['type'] = ltype.__name__
error['message'] = str(lvalue)
error['line'] = ltb.tb_lineno
error['traceback'] = ''.join(traceback.format_tb(ltb))
self.customLogAgent['error'] = error
return None
python logging django benchmarking
I have this middleware to log the number of queries, the time spent on each view and some errors and tracebacks.
This will be called on each request, so I had to put a 75ms timeout. I don't like it, but I feel like using async request for this is unnecessary, I don't care about the host response.
What do you think? Can I improve this in any way?
import sys
import traceback
import os
import json
import time
import requests
from django.conf import settings
CUSTOM_LOG_AGENT_HOST = 'http://localhost:9875'
CUSTOM_LOG_AGENT_HEADERS =
CUSTOM_LOG_AGENT_TIMEOUT = 0.0075
class SimpleMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
self.customLogAgent =
request.start_time = time.time()
# Code to be executed for each request before
# the view (and later middleware) are called.
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
total = time.time() - request.start_time
self.customLogAgent['queries'] = connection.queries
self.customLogAgent['time'] = int(total * 1000)
try:
url = getattr(settings, 'CUSTOM_LOG_AGENT_HOST', CUSTOM_LOG_AGENT_HOST)
headers = getattr(settings, 'CUSTOM_LOG_AGENT_HEADERS', CUSTOM_LOG_AGENT_HEADERS)
timeout = getattr(settings, 'CUSTOM_LOG_AGENT_TIMEOUT', CUSTOM_LOG_AGENT_TIMEOUT)
requests.post(url, headers=headers, json=self.customLogAgent, timeout=timeout)
except requests.exceptions.RequestException as e:
print(e)
print(json.dumps(self.customLogAgent, indent=4))
return response
def process_view(self, request, view_func, view_args, view_kwargs):
self.customLogAgent['view_func'] = view_func.__name__
self.customLogAgent['view_kwargs'] = view_kwargs
self.customLogAgent['url'] = request.get_full_path()
def process_exception(self, request, exception):
ltype, lvalue, ltb = sys.exc_info()
error =
error['type'] = ltype.__name__
error['message'] = str(lvalue)
error['line'] = ltb.tb_lineno
error['traceback'] = ''.join(traceback.format_tb(ltb))
self.customLogAgent['error'] = error
return None
python logging django benchmarking
edited May 16 at 6:20
200_success
123k14143399
123k14143399
asked May 11 at 9:09
Marcos Aguayo
1506
1506
add a comment |Â
add a comment |Â
2 Answers
2
active
oldest
votes
up vote
2
down vote
I believe that performance is extremely important on websites.
I would consider any delay at all due to requests to a logging server unacceptable.
I think you should move the logging call to a background thread,
and return the response to the user as quickly as possible.
As usual, it's probably a good idea to put a cap on the maximum number of background threads. It could be even just one thread doing all the logging tasks in sequence.
Other than this, the code looks fine to me.
It's simple and not easy to pick on.
1
Agreed. Probably keep an in-memory queue of logging messages, then dedicate a separate thread to pushing those messages to the logging server.
– scnerd
May 11 at 17:04
Great feedback guys! Thanks
â Marcos Aguayo
May 12 at 5:30
add a comment |Â
up vote
1
down vote
When using Django, using Django timezone is pretty good practice. If your log agent is a custom build, put it inside the same VPC to decrease latency. Removing unused libraries is also good practice.
add a comment |Â
2 Answers
2
active
oldest
votes
2 Answers
2
active
oldest
votes
active
oldest
votes
active
oldest
votes
up vote
2
down vote
I believe that performance is extremely important on websites.
I would consider any delay at all due to requests to a logging server unacceptable.
I think you should move the logging call to a background thread,
and return the response to the user as quickly as possible.
As usual, it's probably a good idea to put a cap on the maximum number of background threads. It could be even just one thread doing all the logging tasks in sequence.
Other than this, the code looks fine to me.
It's simple and not easy to pick on.
1
Agreed. Probably keep an in-memory queue of logging messages, then dedicate a separate thread to pushing those messages to the logging server.
â scnerd
May 11 at 17:04
Great feedback guys! Thanks
â Marcos Aguayo
May 12 at 5:30
add a comment |Â
up vote
2
down vote
I believe that performance is extremely important on websites.
I would consider any delay at all due to requests to a logging server unacceptable.
I think you should move the logging call to a background thread,
and return the response to the user as quickly as possible.
As usual, it's probably a good idea to put a cap on the maximum number of background threads. It could be even just one thread doing all the logging tasks in sequence.
Other than this, the code looks fine to me.
It's simple and not easy to pick on.
1
Agreed. Probably keep an in-memory queue of logging messages, then dedicate a separate thread to pushing those messages to the logging server.
â scnerd
May 11 at 17:04
Great feedback guys! Thanks
â Marcos Aguayo
May 12 at 5:30
add a comment |Â
up vote
2
down vote
up vote
2
down vote
I believe that performance is extremely important on websites.
I would consider any delay at all due to requests to a logging server unacceptable.
I think you should move the logging call to a background thread,
and return the response to the user as quickly as possible.
As usual, it's probably a good idea to put a cap on the maximum number of background threads. It could be even just one thread doing all the logging tasks in sequence.
Other than this, the code looks fine to me.
It's simple and not easy to pick on.
I believe that performance is extremely important on websites.
I would consider any delay at all due to requests to a logging server unacceptable.
I think you should move the logging call to a background thread,
and return the response to the user as quickly as possible.
As usual, it's probably a good idea to put a cap on the maximum number of background threads. It could be even just one thread doing all the logging tasks in sequence.
Other than this, the code looks fine to me.
It's simple and not easy to pick on.
answered May 11 at 16:46
janos
95.4k12119342
95.4k12119342
1
Agreed. Probably keep an in-memory queue of logging messages, then dedicate a separate thread to pushing those messages to the logging server.
â scnerd
May 11 at 17:04
Great feedback guys! Thanks
â Marcos Aguayo
May 12 at 5:30
add a comment |Â
1
Agreed. Probably keep an in-memory queue of logging messages, then dedicate a separate thread to pushing those messages to the logging server.
â scnerd
May 11 at 17:04
Great feedback guys! Thanks
â Marcos Aguayo
May 12 at 5:30
1
1
Agreed. Probably keep an in-memory queue of logging messages, then dedicate a separate thread to pushing those messages to the logging server.
â scnerd
May 11 at 17:04
Agreed. Probably keep an in-memory queue of logging messages, then dedicate a separate thread to pushing those messages to the logging server.
â scnerd
May 11 at 17:04
Great feedback guys! Thanks
â Marcos Aguayo
May 12 at 5:30
Great feedback guys! Thanks
â Marcos Aguayo
May 12 at 5:30
add a comment |Â
up vote
1
down vote
When using Django, using Django timezone is pretty good practice. If your log agent is a custom build, put it inside the same VPC to decrease latency. Removing unused libraries is also good practice.
add a comment |Â
up vote
1
down vote
When using Django, using Django timezone is pretty good practice. If your log agent is a custom build, put it inside the same VPC to decrease latency. Removing unused libraries is also good practice.
add a comment |Â
up vote
1
down vote
up vote
1
down vote
When using Django, using Django timezone is pretty good practice. If your log agent is a custom build, put it inside the same VPC to decrease latency. Removing unused libraries is also good practice.
When using Django, using Django timezone is pretty good practice. If your log agent is a custom build, put it inside the same VPC to decrease latency. Removing unused libraries is also good practice.
edited May 16 at 1:52
Jamal♦
30.1k11114225
30.1k11114225
answered May 16 at 1:42
ShrAwan Poudel
212
212
add a comment |Â
add a comment |Â
Sign up or log in
StackExchange.ready(function ()
StackExchange.helpers.onClickDraftSave('#login-link');
);
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Post as a guest
StackExchange.ready(
function ()
StackExchange.openid.initPostLogin('.new-post-login', 'https%3a%2f%2fcodereview.stackexchange.com%2fquestions%2f194182%2fmiddleware-django-statistics%23new-answer', 'question_page');
);
Post as a guest
Sign up or log in
StackExchange.ready(function ()
StackExchange.helpers.onClickDraftSave('#login-link');
);
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Post as a guest
Sign up or log in
StackExchange.ready(function ()
StackExchange.helpers.onClickDraftSave('#login-link');
);
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Post as a guest
Sign up or log in
StackExchange.ready(function ()
StackExchange.helpers.onClickDraftSave('#login-link');
);
Sign up using Google
Sign up using Facebook
Sign up using Email and Password
Sign up using Google
Sign up using Facebook
Sign up using Email and Password