Merge two chunkify functions

Optional aggregation is added to the function.
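As a quick usage sketch of the merged helper (the `scale` and `operations` names are assumptions inferred from the call sites in the diff below, not verbatim code), chunkify_qs can now be called with or without aggregation:

    from django.db.models import Sum

    # Without aggregation: one filtered queryset per chunk of the scale.
    chunks = scale.chunkify_qs(operations, field="group__at")

    # With an aggregate: one number per chunk, 0 for empty chunks.
    nb_ventes = scale.chunkify_qs(
        operations, field="group__at", aggregate=Sum("article_nb")
    )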
Ludovic Stephan 2020-03-09 16:11:08 +01:00
parent 26bcd729bb
commit ef35f45ad2
2 changed files with 18 additions and 116 deletions


@@ -2,7 +2,6 @@ from datetime import date, datetime, time, timedelta
from dateutil.parser import parse as dateutil_parse
from dateutil.relativedelta import relativedelta
from django.db.models import Sum
from django.utils import timezone
KFET_WAKES_UP_AT = time(5, 0)  # The K-Fêt opens at 5 a.m. (UTC)
@@ -99,97 +98,18 @@ class Scale(object):
for i, (begin, end) in enumerate(self)
]
def chunkify_qs(self, qs, field=None):
if field is None:
field = "at"
def chunkify_qs(self, qs, field="at", aggregate=None):
"""
Split a queryset into chunks, with optional aggregation of the results
NB: this could be done in a single query, at the cost of readability...
"""
begin_f = "{}__gte".format(field)
end_f = "{}__lte".format(field)
return [qs.filter(**{begin_f: begin, end_f: end}) for begin, end in self]
def get_by_chunks(self, qs, field_callback=None, field_db="at"):
Objects of the queryset, grouped according to the scale.
Returns a generator in which each item, corresponding to a scale chunk,
is itself a generator of the objects of qs belonging to this chunk.
Args:
qs: Queryset of source objects, must be ordered *first* on the
same field returned by `field_callback`.
field_callback: Callable returning, for an object, the value used
to compare against the limits of the scale chunks.
Defaults to: lambda obj: getattr(obj, field_db)
field_db: Field used to filter against the `scale` limits.
Defaults to 'at'.
Examples:
If the queryset `qs` uses `values()`, `field_callback` must be set and
could be: `lambda d: d['at']`
If `field_db` uses related attributes (e.g. with `__`), it should be
something like: `lambda obj: obj.group.at`.
"""
if field_callback is None:
def field_callback(obj):
return getattr(obj, field_db)
begin_f = "{}__gte".format(field_db)
end_f = "{}__lte".format(field_db)
qs = qs.filter(**{begin_f: self.begin, end_f: self.end})
obj_iter = iter(qs)
last_obj = None
def _objects_until(obj_iter, field_callback, end):
"""Generator of objects until `end`.
Stops when the objects source is exhausted or when an object for which
field_callback(obj) <= end does not hold is met.
If such an object exists, it is stored in `last_obj`, which comes
from the outer scope.
Also, if this same variable is non-empty when the function is
called, its content is yielded first.
Args:
obj_iter: Source used to get objects.
field_callback: Its return value, when called on an object,
is used to test ordering against `end`.
end: Upper bound of the current chunk.
"""
nonlocal last_obj
if last_obj is not None:
yield last_obj
last_obj = None
for obj in obj_iter:
if field_callback(obj) <= end:
yield obj
else:
last_obj = obj
return
for begin, end in self:
# forward the last seen object, if any, to the right chunk,
# and fill intermediate chunks of the scale with empty generators
if last_obj is not None:
if field_callback(last_obj) > end:
yield iter(())
continue
# yield the generator for this chunk
# this sets last_obj to None if obj_iter reaches its end; otherwise
# it is set to the first object from obj_iter which doesn't
# belong to this chunk
yield _objects_until(obj_iter, field_callback, end)
chunks = [qs.filter(**{begin_f: begin, end_f: end}) for begin, end in self]
if aggregate is None:
return chunks
else:
return [chunk.aggregate(agg=aggregate)["agg"] or 0 for chunk in chunks]
class DayScale(Scale):
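For context, the removed get_by_chunks walked a single ordered queryset lazily, whereas the merged chunkify_qs issues one query per chunk (as the "NB" in its docstring acknowledges). A minimal sketch of the aggregation branch's empty-chunk behaviour, assuming a hypothetical Operation model not shown in this diff:

    from django.db.models import Sum

    # Sum over an empty queryset yields {"agg": None}; the `or 0` fallback
    # in chunkify_qs converts that missing value into a 0 count.
    empty_total = Operation.objects.none().aggregate(agg=Sum("article_nb"))["agg"] or 0
    assert empty_total == 0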


@@ -2465,7 +2465,7 @@ class AccountStatOperation(UserAccountMixin, ScaleMixin, JSONDetailView):
context_object_name = "account"
id_prefix = ""
def get_operations(self, scale, types=None):
def get_operations(self, types=None):
# Select the operations that correspond
# to the article in question and are not cancelled,
# then, for each interval, pick the operations
@@ -2477,28 +2477,20 @@ class AccountStatOperation(UserAccountMixin, ScaleMixin, JSONDetailView):
)
if types is not None:
all_operations = all_operations.filter(type__in=types)
chunks = scale.get_by_chunks(
all_operations,
field_db="group__at",
field_callback=(lambda d: d["group__at"]),
)
return chunks
return all_operations
def get_context_data(self, *args, **kwargs):
old_ctx = super().get_context_data(*args, **kwargs)
context = {"labels": old_ctx["labels"]}
scale = self.scale
context = super().get_context_data(*args, **kwargs)
types = self.request.GET.get("types", None)
if types is not None:
types = ast.literal_eval(types)
operations = self.get_operations(types=types, scale=scale)
operations = self.get_operations(types=types)
# Count the operations
nb_ventes = []
for chunk in operations:
ventes = sum(ope["article_nb"] for ope in chunk)
nb_ventes.append(ventes)
nb_ventes = self.scale.chunkify_qs(
operations, field="group__at", aggregate=Sum("article_nb")
)
context["charts"] = [
{
@@ -2558,23 +2550,13 @@ class ArticleStatSales(ScaleMixin, JSONDetailView):
liq_only = all_purchases.filter(group__on_acc__trigramme="LIQ")
liq_exclude = all_purchases.exclude(group__on_acc__trigramme="LIQ")
chunks_liq = scale.get_by_chunks(
liq_only, field_db="group__at", field_callback=lambda d: d["group__at"]
nb_liq = scale.chunkify_qs(
liq_only, field="group__at", aggregate=Sum("article_nb")
)
chunks_no_liq = scale.get_by_chunks(
liq_exclude, field_db="group__at", field_callback=lambda d: d["group__at"]
nb_accounts = scale.chunkify_qs(
liq_exclude, field="group__at", aggregate=Sum("article_nb")
)
# Count the operations
nb_ventes = []
nb_accounts = []
nb_liq = []
for chunk_liq, chunk_no_liq in zip(chunks_liq, chunks_no_liq):
sum_accounts = sum(ope["article_nb"] for ope in chunk_no_liq)
sum_liq = sum(ope["article_nb"] for ope in chunk_liq)
nb_ventes.append(sum_accounts + sum_liq)
nb_accounts.append(sum_accounts)
nb_liq.append(sum_liq)
nb_ventes = [n1 + n2 for n1, n2 in zip(nb_liq, nb_accounts)]
context["charts"] = [
{