Skip to content

Improve readability by adding whitespace between code paragraphs #138090

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Aug 23, 2025
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions Lib/functools.py
Original file line number Diff line number Diff line change
Expand Up @@ -580,12 +580,14 @@ def lru_cache(maxsize=128, typed=False):
# Negative maxsize is treated as 0
if maxsize < 0:
maxsize = 0

elif callable(maxsize) and isinstance(typed, bool):
# The user_function was passed in directly via the maxsize argument
user_function, maxsize = maxsize, 128
wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
wrapper.cache_parameters = lambda : {'maxsize': maxsize, 'typed': typed}
return update_wrapper(wrapper, user_function)

elif maxsize is not None:
raise TypeError(
'Expected first argument to be an integer, a callable, or None')
Expand Down Expand Up @@ -617,6 +619,7 @@ def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
def wrapper(*args, **kwds):
    """Call user_function directly, recording only the miss count.

    NOTE(review): presumably this is the maxsize == 0 branch of
    _lru_cache_wrapper (nothing is ever cached, so every call is a
    miss) -- the enclosing dispatch is outside this diff hunk; confirm.
    """
    # No caching -- just a statistics update
    # `misses` lives in the enclosing _lru_cache_wrapper scope and is
    # reported later by cache_info().
    nonlocal misses

    misses += 1
    # Always delegate; the result is returned but never stored.
    result = user_function(*args, **kwds)
    return result
Expand All @@ -626,6 +629,7 @@ def wrapper(*args, **kwds):
def wrapper(*args, **kwds):
# Simple caching without ordering or size limit
nonlocal hits, misses

key = make_key(args, kwds, typed)
result = cache_get(key, sentinel)
if result is not sentinel:
Expand All @@ -641,7 +645,9 @@ def wrapper(*args, **kwds):
def wrapper(*args, **kwds):
# Size limited caching that tracks accesses by recency
nonlocal root, hits, misses, full

key = make_key(args, kwds, typed)

with lock:
link = cache_get(key)
if link is not None:
Expand All @@ -656,19 +662,23 @@ def wrapper(*args, **kwds):
hits += 1
return result
misses += 1

result = user_function(*args, **kwds)

with lock:
if key in cache:
# Getting here means that this same key was added to the
# cache while the lock was released. Since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass

elif full:
# Use the old root to store the new key and result.
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result

# Empty the oldest link and make it the new root.
# Keep a reference to the old key and old result to
# prevent their ref counts from going to zero during the
Expand All @@ -679,20 +689,25 @@ def wrapper(*args, **kwds):
oldkey = root[KEY]
oldresult = root[RESULT]
root[KEY] = root[RESULT] = None

# Now update the cache dictionary.
del cache[oldkey]

# Save the potentially reentrant cache[key] assignment
# for last, after the root and links have been put in
# a consistent state.
cache[key] = oldroot

else:
# Put result in a new link at the front of the queue.
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link

# Use the cache_len bound method instead of the len() function
# which could potentially be wrapped in an lru_cache itself.
full = (cache_len() >= maxsize)

return result

def cache_info():
Expand All @@ -703,6 +718,7 @@ def cache_info():
def cache_clear():
"""Clear the cache and cache statistics"""
nonlocal hits, misses, full

with lock:
cache.clear()
root[:] = [root, root, None, None]
Expand Down
Loading