diff --git a/my_limiter/algos/__init__.py b/my_limiter/algos/__init__.py
index ffac238..36e1425 100644
--- a/my_limiter/algos/__init__.py
+++ b/my_limiter/algos/__init__.py
@@ -4,7 +4,8 @@ These are implementations of different (in-application) rate limiting algorithms
 `identifier` is used as the first (usually only) argument for each implementation
 because it might refer to IP address, a session ID, or perhaps an API key or token.
 """
-from .token_bucket_in_memory import token_bucket_in_memory_lazy_refill, TooManyRequests
+from .exceptions import TooManyRequests
+from .token_bucket_in_memory import token_bucket_in_memory_lazy_refill
 from .leaky_bucket import (
     leaking_bucket_dequeue,
     leaking_bucket_enqueue,
diff --git a/my_limiter/algos/exceptions.py b/my_limiter/algos/exceptions.py
new file mode 100644
index 0000000..623dde7
--- /dev/null
+++ b/my_limiter/algos/exceptions.py
@@ -0,0 +1,3 @@
+
+class TooManyRequests(Exception):
+    pass
diff --git a/my_limiter/algos/leaky_bucket.py b/my_limiter/algos/leaky_bucket.py
index 9fdfabc..fc4c1e4 100644
--- a/my_limiter/algos/leaky_bucket.py
+++ b/my_limiter/algos/leaky_bucket.py
@@ -2,13 +2,11 @@
 import datetime as dt
 
 import redis
+from .exceptions import TooManyRequests
 
 r = redis.Redis()
 
 
-class TooManyRequests(Exception):
-    pass
-
 
 MAX_CAPACITY = 8
 STORE_NAME_PREFIX_LEAKING_BUCKET = "leaking_bucket:queue:tasks"
@@ -17,7 +15,7 @@ RUN_LEAKING_BUCKET_TASKS_EVERY_X_SECONDS = 15
 NUM_TASKS_TO_RUN_FOR_EACH_USER_AT_INTERVAL = 2
 
 
-def leaking_bucket_enqueue(identifier: str, data: str) -> None:
+def leaking_bucket_enqueue(identifier: str, data: str = "") -> None:
     """
     When a request arrives, the system checks if the queue for this particular
     `identifier` is full. If it is not full, the request is added to the queue.
diff --git a/my_limiter/algos/token_bucket_in_memory.py b/my_limiter/algos/token_bucket_in_memory.py
index 8e5a981..49a9cc9 100644
--- a/my_limiter/algos/token_bucket_in_memory.py
+++ b/my_limiter/algos/token_bucket_in_memory.py
@@ -2,13 +2,11 @@
 import datetime as dt
 
 import redis
+from .exceptions import TooManyRequests
 
 r = redis.Redis()
 
 
-class TooManyRequests(Exception):
-    pass
-
 
 TOKEN_BUCKET = {}
 MAX_CAPACITY = 8
@@ -20,7 +18,7 @@ def get_entry_from_token_bucket_in_memory(identifier: str) -> dict | None:
     """
     This is implemented independently in order to decouple it from its caller.
     Here it is initially implemented in-memory, but for scalability we'd
-    want to use something more long-lived.
+    want to use something more durable.
     """
     return TOKEN_BUCKET.get(identifier)
 
@@ -34,7 +32,7 @@
     To be explicit, there is a token bucket for every `identifier`, aka every
     user/IP
     """
-    entry = get_entry_from_token_bucket(identifier)
+    entry = get_entry_from_token_bucket_in_memory(identifier)
 
     if entry is None:
         TOKEN_BUCKET[identifier] = {
diff --git a/my_limiter/wsgi.py b/my_limiter/wsgi.py
index f4f7465..306a5e0 100644
--- a/my_limiter/wsgi.py
+++ b/my_limiter/wsgi.py
@@ -6,7 +6,7 @@
 from . import algos
 
 application = f.Flask(__name__)
-increment_requests_func = algos.token_bucket_in_memory_lazy_refill
+increment_requests_func = algos.leaking_bucket_enqueue
 
 
 @application.before_request