pax_global_header 0000666 0000000 0000000 00000000064 14775671154 0014533 g ustar 00root root 0000000 0000000 52 comment=6f8db9763bf6ff100287d9c920159938ce534f9d
drf-extensions-0.8.0/ 0000775 0000000 0000000 00000000000 14775671154 0014510 5 ustar 00root root 0000000 0000000 drf-extensions-0.8.0/.github/ 0000775 0000000 0000000 00000000000 14775671154 0016050 5 ustar 00root root 0000000 0000000 drf-extensions-0.8.0/.github/workflows/ 0000775 0000000 0000000 00000000000 14775671154 0020105 5 ustar 00root root 0000000 0000000 drf-extensions-0.8.0/.github/workflows/ci.yml 0000664 0000000 0000000 00000001331 14775671154 0021221 0 ustar 00root root 0000000 0000000 name: Django CI
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
strategy:
max-parallel: 4
matrix:
python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
django-version: ["2.2","3.2", "4.2", "5.2"]
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install Dependencies
run: |
python -m pip install --upgrade pip tox
pip install -r tests_app/requirements.txt
- name: Run Tests
run: |
tox -- tests_app
drf-extensions-0.8.0/.github/workflows/codeql-analysis.yml 0000664 0000000 0000000 00000004374 14775671154 0023730 0 ustar 00root root 0000000 0000000 # For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ master ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master ]
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://git.io/codeql-language-support
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# âšī¸ Command-line programs to run using the OS shell.
# đ https://git.io/JvXDl
# âī¸ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
drf-extensions-0.8.0/.gitignore 0000664 0000000 0000000 00000000143 14775671154 0016476 0 ustar 00root root 0000000 0000000 __pycache__/
*.pyc
*.egg-info
.tox
*.egg
.idea
env
build
dist
.DS_Store
venv
tests_app/tests/files
drf-extensions-0.8.0/.travis.yml 0000664 0000000 0000000 00000000260 14775671154 0016617 0 ustar 00root root 0000000 0000000 language: python
cache: pip
dist: bionic
sudo: false
arch:
- amd64
- ppc64le
python:
- 3.6
- 3.7
- 3.8
install:
- pip install tox tox-travis
script:
- tox -r
drf-extensions-0.8.0/AUTHORS.md 0000664 0000000 0000000 00000000346 14775671154 0016162 0 ustar 00root root 0000000 0000000 ## Original Author
---------------
Gennady Chibisov https://github.com/chibisov
## Core maintainer
Asif Saif Uddin https://github.com/auvipy
## Contributors
------------
Luke Murphy https://github.com/lwm
drf-extensions-0.8.0/GNUmakefile 0000664 0000000 0000000 00000000365 14775671154 0016566 0 ustar 00root root 0000000 0000000 build_docs:
PYTHONIOENCODING=utf-8 python docs/backdoc.py --title "Django Rest Framework extensions documentation" < docs/index.md > docs/index.html
watch_docs:
make build_docs
watchmedo shell-command -p "*.md" -R -c "make build_docs" docs/
drf-extensions-0.8.0/LICENSE 0000664 0000000 0000000 00000002074 14775671154 0015520 0 ustar 00root root 0000000 0000000 The MIT License (MIT)
Copyright (c) 2013 Gennady Chibisov.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
drf-extensions-0.8.0/MANIFEST.in 0000664 0000000 0000000 00000000306 14775671154 0016245 0 ustar 00root root 0000000 0000000 include LICENSE
include README.md
include tox.ini
recursive-include docs *.md *.html *.txt *.py
recursive-include tests_app requirements.txt *.py
recursive-exclude * __pycache__
global-exclude *pyc
drf-extensions-0.8.0/README.md 0000664 0000000 0000000 00000014124 14775671154 0015771 0 ustar 00root root 0000000 0000000 ## Django REST Framework extensions
DRF-extensions is a collection of custom extensions for [Django REST Framework](https://github.com/tomchristie/django-rest-framework)
Full documentation for project is available at [http://chibisov.github.io/drf-extensions/docs](http://chibisov.github.io/drf-extensions/docs)
[](#backers) [](#sponsors) [](https://pypi.python.org/pypi/drf-extensions)
### Sponsor
[Tidelift gives software development teams a single source for purchasing and maintaining their software, with professional grade assurances from the experts who know it best, while seamlessly integrating with existing tools.](https://tidelift.com/subscription/pkg/pypi-drf-extensions?utm_source=pypi-drf-extensions&utm_medium=referral&utm_campaign=readme)
## Requirements
* Tested for Python 3.8, 3.9, 3.10, 3.11 and 3.12
* Tested for Django Rest Framework 3.12, 3.13, 3.14 and 3.15
* Tested for Django 2.2 to 5.2
* Tested for django-filter 2.1.0+
## Installation:
pip3 install drf-extensions
or from github
pip3 install https://github.com/chibisov/drf-extensions/archive/master.zip
## Some features
* DetailSerializerMixin
* Caching
* Conditional requests
* Customizable key construction for caching and conditional requests
* Nested routes
* Bulk operations
Read more in [documentation](http://chibisov.github.io/drf-extensions/docs)
## Development
Running the tests:
$ pip3 install tox
$ tox -- tests_app
Running test for exact environment:
$ tox -e py38 -- tests_app
Recreate envs before running tests:
$ tox --recreate -- tests_app
Pass custom arguments:
$ tox -- tests_app --verbosity=3
Run with pdb support:
$ tox -- tests_app --processes=0 --nocapture
Run exact TestCase:
$ tox -- tests_app.tests.unit.mixins.tests:DetailSerializerMixinTest_serializer_detail_class
Run tests from exact module:
$ tox -- tests_app.tests.unit.mixins.tests
Build docs:
$ make build_docs
Automatically build docs by watching changes:
$ pip install watchdog
$ make watch_docs
## Developing new features
Every new feature should be:
* Documented
* Tested
* Implemented
* Pushed to main repository
### How to write documentation
When new feature implementation starts you should place it into `development version` pull. Add `Development version`
section to `Release notes` and describe every new feature in it. Use `#anchors` to facilitate navigation.
Every feature should have title and information that it was implemented in current development version.
For example if we've just implemented `Usage of the specific cache`:
...
#### Usage of the specific cache
*New in DRF-extensions development version*
`@cache_response` can also take...
...
### Release notes
...
#### Development version
* Added ability to [use a specific cache](#usage-of-the-specific-cache) for `@cache_response` decorator
## Publishing new releases
Increment version in `rest_framework_extensions/__init__.py`. For example:
__version__ = '0.2.2' # from 0.2.1
Move to new version section all release notes in documentation.
Add date for release note section.
Replace in documentation all `New in DRF-extensions development version` notes to `New in DRF-extensions 0.2.2`.
Rebuild documentation.
Run tests.
Commit changes with message "Version 0.2.2"
Add new tag version for commit:
$ git tag 0.2.2
Push to master with tags:
$ git push origin master --tags
Don't forget to merge `master` to `gh-pages` branch and push to origin:
$ git co gh-pages
$ git merge --no-ff master
$ git push origin gh-pages
Publish to pypi:
$ python setup.py publish
## Contributors
This project exists thanks to all the people who contribute.
## Backers
Thank you to all our backers! đ [[Become a backer](https://opencollective.com/drf-extensions#backer)]
## Sponsors
Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/drf-extensions#sponsor)]
drf-extensions-0.8.0/SECURITY.md 0000664 0000000 0000000 00000000452 14775671154 0016302 0 ustar 00root root 0000000 0000000 # Security Policy
## Supported Versions
| Version | Supported |
| ------- | ------------------ |
| 0.7.x | :white_check_mark: |
| 0.6.x | :x: |
| < 0.7 | :x: |
## Reporting a Vulnerability
Please report Vulnerability to auvipy@gmail.com via email.
drf-extensions-0.8.0/docs/ 0000775 0000000 0000000 00000000000 14775671154 0015440 5 ustar 00root root 0000000 0000000 drf-extensions-0.8.0/docs/backdoc.py 0000664 0000000 0000000 00000616015 14775671154 0017411 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
"""
Backdoc is a tool for backbone-like documentation generation.
Backdoc main goal is to help to generate one page documentation from one markdown source file.
https://github.com/chibisov/backdoc
"""
import sys
import argparse
# Copyright (c) 2012 Trent Mick.
# Copyright (c) 2007-2008 ActiveState Corp.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
r"""A fast and complete Python implementation of Markdown.
[from http://daringfireball.net/projects/markdown/]
> Markdown is a text-to-HTML filter; it translates an easy-to-read /
> easy-to-write structured text format into HTML. Markdown's text
> format is most similar to that of plain text email, and supports
> features such as headers, *emphasis*, code blocks, blockquotes, and
> links.
>
> Markdown's syntax is designed not as a generic markup language, but
> specifically to serve as a front-end to (X)HTML. You can use span-level
> HTML tags anywhere in a Markdown document, and you can use block level
> HTML tags (like
tags.
"""
yield 0, ""
for tup in inner:
yield tup
yield 0, ""
def wrap(self, source, outfile):
"""Return the source with a code, pre, and div."""
return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
formatter_opts.setdefault("cssclass", "codehilite")
formatter = HtmlCodeFormatter(**formatter_opts)
return pygments.highlight(codeblock, lexer, formatter)
def _code_block_sub(self, match, is_fenced_code_block=False):
lexer_name = None
if is_fenced_code_block:
lexer_name = match.group(1)
if lexer_name:
formatter_opts = self.extras['fenced-code-blocks'] or {}
codeblock = match.group(2)
codeblock = codeblock[:-1] # drop one trailing newline
else:
codeblock = match.group(1)
codeblock = self._outdent(codeblock)
codeblock = self._detab(codeblock)
codeblock = codeblock.lstrip('\n') # trim leading newlines
codeblock = codeblock.rstrip() # trim trailing whitespace
# Note: "code-color" extra is DEPRECATED.
if "code-color" in self.extras and codeblock.startswith(":::"):
lexer_name, rest = codeblock.split('\n', 1)
lexer_name = lexer_name[3:].strip()
codeblock = rest.lstrip("\n") # Remove lexer declaration line.
formatter_opts = self.extras['code-color'] or {}
if lexer_name:
lexer = self._get_pygments_lexer(lexer_name)
if lexer:
colored = self._color_with_pygments(codeblock, lexer,
**formatter_opts)
return "\n\n%s\n\n" % colored
codeblock = self._encode_code(codeblock)
pre_class_str = self._html_class_str_from_tag("pre")
code_class_str = self._html_class_str_from_tag("code")
return "\n\n%s\n
\n\n" % (
pre_class_str, code_class_str, codeblock)
def _html_class_str_from_tag(self, tag):
"""Get the appropriate ' class="..."' string (note the leading
space), if any, for the given tag.
"""
if "html-classes" not in self.extras:
return ""
try:
html_classes_from_tag = self.extras["html-classes"]
except TypeError:
return ""
else:
if tag in html_classes_from_tag:
return ' class="%s"' % html_classes_from_tag[tag]
return ""
def _do_code_blocks(self, text):
"""Process Markdown `` blocks."""
code_block_re = re.compile(r'''
(?:\n\n|\A\n?)
( # $1 = the code block -- one or more lines, starting with a space/tab
(?:
(?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces
.*\n+
)+
)
((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
''' % (self.tab_width, self.tab_width),
re.M | re.X)
return code_block_re.sub(self._code_block_sub, text)
_fenced_code_block_re = re.compile(r'''
(?:\n\n|\A\n?)
^```([\w+-]+)?[ \t]*\n # opening fence, $1 = optional lang
(.*?) # $2 = code block content
^```[ \t]*\n # closing fence
''', re.M | re.X | re.S)
def _fenced_code_block_sub(self, match):
return self._code_block_sub(match, is_fenced_code_block=True);
def _do_fenced_code_blocks(self, text):
"""Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)
# Rules for a code span:
# - backslash escapes are not interpreted in a code span
# - to include one or or a run of more backticks the delimiters must
# be a longer run of backticks
# - cannot start or end a code span with a backtick; pad with a
# space and that space will be removed in the emitted HTML
# See `test/tm-cases/escapes.text` for a number of edge-case
# examples.
_code_span_re = re.compile(r'''
(?%s" % c
def _do_code_spans(self, text):
# * Backtick quotes are used for spans.
#
# * You can use multiple backticks as the delimiters if you want to
# include literal backticks in the code span. So, this input:
#
# Just type ``foo `bar` baz`` at the prompt.
#
# Will translate to:
#
# Just type foo `bar` baz at the prompt.
#
# There's no arbitrary limit to the number of backticks you
# can use as delimiter. If you need three consecutive backticks
# in your code, use four for delimiters, etc.
#
# * You can use spaces to get literal backticks at the edges:
#
# ... type `` `bar` `` ...
#
# Turns to:
#
# ... type `bar` ...
return self._code_span_re.sub(self._code_span_sub, text)
def _encode_code(self, text):
"""Encode/escape certain characters inside Markdown code runs.
The point is that in code, these characters are literals,
and lose their special Markdown meanings.
"""
replacements = [
# Encode all ampersands; HTML entities are not
# entities within a Markdown code span.
('&', '&'),
# Do the angle bracket song and dance:
('<', '<'),
('>', '>'),
]
for before, after in replacements:
text = text.replace(before, after)
hashed = _hash_text(text)
self._escape_table[text] = hashed
return hashed
_strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
_em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
_code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
_code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
def _do_italics_and_bold(self, text):
# must go first:
if "code-friendly" in self.extras:
text = self._code_friendly_strong_re.sub(r"\1", text)
text = self._code_friendly_em_re.sub(r"\1", text)
else:
text = self._strong_re.sub(r"\2", text)
text = self._em_re.sub(r"\2", text)
return text
# "smarty-pants" extra: Very liberal in interpreting a single prime as an
# apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
# "twixt" can be written without an initial apostrophe. This is fine because
# using scare quotes (single quotation marks) is rare.
_apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
_contractions = ["tis", "twas", "twer", "neath", "o", "n",
"round", "bout", "twixt", "nuff", "fraid", "sup"]
def _do_smart_contractions(self, text):
text = self._apostrophe_year_re.sub(r"’\1", text)
for c in self._contractions:
text = text.replace("'%s" % c, "’%s" % c)
text = text.replace("'%s" % c.capitalize(),
"’%s" % c.capitalize())
return text
# Substitute double-quotes before single-quotes.
_opening_single_quote_re = re.compile(r"(?
See "test/tm-cases/smarty_pants.text" for a full discussion of the
support here and
for a
discussion of some diversion from the original SmartyPants.
"""
if "'" in text: # guard for perf
text = self._do_smart_contractions(text)
text = self._opening_single_quote_re.sub("‘", text)
text = self._closing_single_quote_re.sub("’", text)
if '"' in text: # guard for perf
text = self._opening_double_quote_re.sub("“", text)
text = self._closing_double_quote_re.sub("”", text)
text = text.replace("---", "—")
text = text.replace("--", "–")
text = text.replace("...", "…")
text = text.replace(" . . . ", "…")
text = text.replace(". . .", "…")
return text
_block_quote_re = re.compile(r'''
( # Wrap whole match in \1
(
^[ \t]*>[ \t]? # '>' at the start of a line
.+\n # rest of the first line
(.+\n)* # subsequent consecutive lines
\n* # blanks
)+
)
''', re.M | re.X)
_bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M);
_html_pre_block_re = re.compile(r'(\s*.+?
)', re.S)
def _dedent_two_spaces_sub(self, match):
return re.sub(r'(?m)^ ', '', match.group(1))
def _block_quote_sub(self, match):
bq = match.group(1)
bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting
bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines
bq = self._run_block_gamut(bq) # recurse
bq = re.sub('(?m)^', ' ', bq)
# These leading spaces screw with content, so we need to fix that:
bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
return "\n%s\n
\n\n" % bq
def _do_block_quotes(self, text):
if '>' not in text:
return text
return self._block_quote_re.sub(self._block_quote_sub, text)
def _form_paragraphs(self, text):
# Strip leading and trailing lines:
text = text.strip('\n')
# Wrap tags.
grafs = []
for i, graf in enumerate(re.split(r"\n{2,}", text)):
if graf in self.html_blocks:
# Unhashify HTML blocks
grafs.append(self.html_blocks[graf])
else:
cuddled_list = None
if "cuddled-lists" in self.extras:
# Need to put back trailing '\n' for `_list_item_re`
# match at the end of the paragraph.
li = self._list_item_re.search(graf + '\n')
# Two of the same list marker in this paragraph: a likely
# candidate for a list cuddled to preceding paragraph
# text (issue 33). Note the `[-1]` is a quick way to
# consider numeric bullets (e.g. "1." and "2.") to be
# equal.
if (li and len(li.group(2)) <= 3 and li.group("next_marker")
and li.group("marker")[-1] == li.group("next_marker")[-1]):
start = li.start()
cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
assert cuddled_list.startswith("
") or cuddled_list.startswith("")
graf = graf[:start]
# Wrap tags.
graf = self._run_span_gamut(graf)
grafs.append("
" + graf.lstrip(" \t") + "
")
if cuddled_list:
grafs.append(cuddled_list)
return "\n\n".join(grafs)
def _add_footnotes(self, text):
if self.footnotes:
footer = [
'',
'
',
]
for i, id in enumerate(self.footnote_ids):
if i != 0:
footer.append('')
footer.append('- ' % id)
footer.append(self._run_block_gamut(self.footnotes[id]))
backlink = (''
'↩' % (id, i+1))
if footer[-1].endswith(""):
footer[-1] = footer[-1][:-len("")] \
+ ' ' + backlink + ""
else:
footer.append("\n
%s
" % backlink)
footer.append(' ')
footer.append('')
footer.append('')
return text + '\n\n' + '\n'.join(footer)
else:
return text
# Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
# http://bumppo.net/projects/amputator/
_ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
_naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
_naked_gt_re = re.compile(r'''(?''', re.I)
def _encode_amps_and_angles(self, text):
# Smart processing for ampersands and angle brackets that need
# to be encoded.
text = self._ampersand_re.sub('&', text)
# Encode naked <'s
text = self._naked_lt_re.sub('<', text)
# Encode naked >'s
# Note: Other markdown implementations (e.g. Markdown.pl, PHP
# Markdown) don't do this.
text = self._naked_gt_re.sub('>', text)
return text
def _encode_backslash_escapes(self, text):
for ch, escape in list(self._escape_table.items()):
text = text.replace("\\"+ch, escape)
return text
_auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
def _auto_link_sub(self, match):
g1 = match.group(1)
return '%s' % (g1, g1)
_auto_email_link_re = re.compile(r"""
<
(?:mailto:)?
(
[-.\w]+
\@
[-\w]+(\.[-\w]+)*\.[a-z]+
)
>
""", re.I | re.X | re.U)
def _auto_email_link_sub(self, match):
return self._encode_email_address(
self._unescape_special_chars(match.group(1)))
def _do_auto_links(self, text):
text = self._auto_link_re.sub(self._auto_link_sub, text)
text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
return text
def _encode_email_address(self, addr):
# Input: an email address, e.g. "foo@example.com"
#
# Output: the email address as a mailto link, with each character
# of the address encoded as either a decimal or hex entity, in
# the hopes of foiling most address harvesting spam bots. E.g.:
#
# foo
# @example.com
#
# Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
# mailing list:
chars = [_xml_encode_email_char_at_random(ch)
for ch in "mailto:" + addr]
# Strip the mailto: from the visible part.
addr = '%s' \
% (''.join(chars), ''.join(chars[7:]))
return addr
def _do_link_patterns(self, text):
"""Caveat emptor: there isn't much guarding against link
patterns being formed inside other standard Markdown links, e.g.
inside a [link def][like this].
Dev Notes: *Could* consider prefixing regexes with a negative
lookbehind assertion to attempt to guard against this.
"""
link_from_hash = {}
for regex, repl in self.link_patterns:
replacements = []
for match in regex.finditer(text):
if hasattr(repl, "__call__"):
href = repl(match)
else:
href = match.expand(repl)
replacements.append((match.span(), href))
for (start, end), href in reversed(replacements):
escaped_href = (
href.replace('"', '"') # b/c of attr quote
# To avoid markdown and :
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
link = '%s' % (escaped_href, text[start:end])
hash = _hash_text(link)
link_from_hash[hash] = link
text = text[:start] + hash + text[end:]
for hash, link in list(link_from_hash.items()):
text = text.replace(hash, link)
return text
def _unescape_special_chars(self, text):
# Swap back in all the special characters we've hidden.
for ch, hash in list(self._escape_table.items()):
text = text.replace(hash, ch)
return text
def _outdent(self, text):
# Remove one level of line-leading tabs or spaces
return self._outdent_re.sub('', text)
class MarkdownWithExtras(Markdown):
"""A markdowner class that enables most extras:
- footnotes
- code-color (only has effect if 'pygments' Python module on path)
These are not included:
- pyshell (specific to Python-related documenting)
- code-friendly (because it *disables* part of the syntax)
- link-patterns (because you need to specify some actual
link-patterns anyway)
"""
extras = ["footnotes", "code-color"]
#---- internal support functions
class UnicodeWithAttrs(unicode):
"""A subclass of unicode used for the return value of conversion to
possibly attach some attributes. E.g. the "toc_html" attribute when
the "toc" extra is used.
"""
metadata = None
_toc = None
def toc_html(self):
"""Return the HTML for the current TOC.
This expects the `_toc` attribute to have been set on this instance.
"""
if self._toc is None:
return None
def indent():
return ' ' * (len(h_stack) - 1)
lines = []
h_stack = [0] # stack of header-level numbers
for level, id, name in self._toc:
if level > h_stack[-1]:
lines.append("%s" % indent())
h_stack.append(level)
elif level == h_stack[-1]:
lines[-1] += ""
else:
while level < h_stack[-1]:
h_stack.pop()
if not lines[-1].endswith(""):
lines[-1] += ""
lines.append("%s
" % indent())
lines.append('%s- %s' % (
indent(), id, name))
while len(h_stack) > 1:
h_stack.pop()
if not lines[-1].endswith("
"):
lines[-1] += ""
lines.append("%s
" % indent())
return '\n'.join(lines) + '\n'
toc_html = property(toc_html)
## {{{ http://code.activestate.com/recipes/577257/ (r1)
import re
char_map = {u'Ã': 'A', u'Ã': 'A', u'Ã': 'A', u'Ã': 'A', u'Ã': 'Ae', u'Ã
': 'A', u'Ã': 'A', u'Ä': 'A', u'Ä': 'A', u'Ä': 'A', u'Ã': 'C', u'Ä': 'C', u'Ä': 'C', u'Ä': 'C', u'Ä': 'C', u'Ä': 'D', u'Ä': 'D', u'Ã': 'E', u'Ã': 'E', u'Ã': 'E', u'Ã': 'E', u'Ä': 'E', u'Ä': 'E', u'Ä': 'E', u'Ä': 'E', u'Ä': 'E', u'Ä': 'G', u'Ä': 'G', u'Ä ': 'G', u'Äĸ': 'G', u'Ĥ': 'H', u'ÄĻ': 'H', u'Ã': 'I', u'Ã': 'I', u'Ã': 'I', u'Ã': 'I', u'ÄĒ': 'I', u'Ĩ': 'I', u'ÄŦ': 'I', u'ÄŽ': 'I', u'İ': 'I', u'IJ': 'IJ', u'Ä´': 'J', u'Äļ': 'K', u'ÄŊ': 'K', u'Äš': 'K', u'Äģ': 'K', u'Äŋ': 'K', u'Å': 'L', u'Ã': 'N', u'Å': 'N', u'Å': 'N', u'Å
': 'N', u'Å': 'N', u'Ã': 'O', u'Ã': 'O', u'Ã': 'O', u'Ã': 'O', u'Ã': 'Oe', u'Ã': 'O', u'Å': 'O', u'Å': 'O', u'Å': 'O', u'Å': 'OE', u'Å': 'R', u'Å': 'R', u'Å': 'R', u'Å': 'S', u'Å': 'S', u'Å': 'S', u'Č': 'S', u'Å ': 'S', u'Ť': 'T', u'Åĸ': 'T', u'ÅĻ': 'T', u'Č': 'T', u'Ã': 'U', u'Ã': 'U', u'Ã': 'U', u'Ã': 'Ue', u'ÅĒ': 'U', u'ÅŽ': 'U', u'Ű': 'U', u'ÅŦ': 'U', u'Ũ': 'U', u'Ş': 'U', u'Å´': 'W', u'Åļ': 'Y', u'Ÿ': 'Y', u'Ã': 'Y', u'Åš': 'Z', u'Åģ': 'Z', u'ÅŊ': 'Z', u'à ': 'a', u'ÃĄ': 'a', u'Ãĸ': 'a', u'ÃŖ': 'a', u'ä': 'ae', u'Ä': 'a', u'Ä
': 'a', u'Ä': 'a', u'ÃĨ': 'a', u'ÃĻ': 'ae', u'ç': 'c', u'Ä': 'c', u'Ä': 'c', u'Ä': 'c', u'Ä': 'c', u'Ä': 'd', u'Ä': 'd', u'è': 'e', u'Ê': 'e', u'ÃĒ': 'e', u'ÃĢ': 'e', u'Ä': 'e', u'Ä': 'e', u'Ä': 'e', u'Ä': 'e', u'Ä': 'e', u'Æ': 'f', u'Ä': 'g', u'Ä': 'g', u'ÄĄ': 'g', u'ÄŖ': 'g', u'ÄĨ': 'h', u'ħ': 'h', u'ÃŦ': 'i', u'Ã': 'i', u'ÃŽ': 'i', u'ï': 'i', u'ÄĢ': 'i', u'ÄŠ': 'i', u'Ä': 'i', u'į': 'i', u'Äą': 'i', u'Äŗ': 'ij', u'Äĩ': 'j', u'ġ': 'k', u'ĸ': 'k', u'Å': 'l', u'Äž': 'l', u'Äē': 'l', u'Äŧ': 'l', u'Å': 'l', u'Ãą': 'n', u'Å': 'n', u'Å': 'n', u'Å': 'n', u'Å': 'n', u'Å': 'n', u'Ã˛': 'o', u'Ãŗ': 'o', u'ô': 'o', u'Ãĩ': 'o', u'Ãļ': 'oe', u'ø': 'o', u'Å': 'o', u'Å': 'o', u'Å': 'o', u'Å': 'oe', u'Å': 'r', u'Å': 'r', u'Å': 'r', u'Å': 's', u'ÅĄ': 's', u'ÅĨ': 't', u'Ú': 'u', u'Ãē': 'u', u'Ãģ': 'u', u'Ãŧ': 'ue', u'ÅĢ': 'u', u'ů': 'u', u'Åą': 'u', u'Å': 'u', u'ÅŠ': 'u', u'Åŗ': 'u', u'Åĩ': 'w', u'Ãŋ': 'y', u'ÃŊ': 'y', u'Ŏ': 'y', u'Åŧ': 'z', u'Åē': 'z', u'Åž': 'z', u'Ã': 'ss', u'Åŋ': 'ss', u'Î': 'A', u'Î': 'A', u'áŧ': 'A', u'áŧ': 'A', u'áŧ': 'A', u'áŧ': 'A', u'áŧ': 'A', u'áŧ': 'A', u'áŧ': 'A', u'áŧ': 'A', u'áž': 'A', u'áž': 'A', u'áž': 'A', u'áž': 'A', u'áž': 'A', u'áž': 'A', u'áž': 'A', u'áž': 'A', u'ី': 'A', u'ážš': 'A', u'ážē': 'A', u'Î': 'A', u'ážŧ': 'A', u'Î': 'B', u'Î': 'G', u'Î': 'D', u'Î': 'E', u'Î': 'E', u'áŧ': 'E', u'áŧ': 'E', u'áŧ': 'E', u'áŧ': 'E', u'áŧ': 'E', u'áŧ': 'E', u'Î': 'E', u'áŋ': 'E', u'Î': 'Z', u'Î': 'I', u'Î': 'I', u'áŧ¨': 'I', u'áŧŠ': 'I', u'áŧĒ': 'I', u'áŧĢ': 'I', u'áŧŦ': 'I', u'áŧ': 'I', u'áŧŽ': 'I', u'áŧ¯': 'I', u'áž': 'I', u'áž': 'I', u'áž': 'I', u'áž': 'I', u'áž': 'I', u'áž': 'I', u'áž': 'I', u'áž': 'I', u'áŋ': 'I', u'Î': 'I', u'áŋ': 'I', u'Î': 'TH', u'Î': 'I', u'Î': 'I', u'ÎĒ': 'I', u'áŧ¸': 'I', u'áŧš': 'I', u'áŧē': 'I', u'áŧģ': 'I', u'áŧŧ': 'I', u'áŧŊ': 'I', u'áŧž': 'I', u'áŧŋ': 'I', u'áŋ': 'I', u'áŋ': 'I', u'áŋ': 'I', u'Î': 'I', u'Î': 'K', u'Î': 'L', u'Î': 'M', u'Î': 'N', u'Î': 'KS', u'Î': 'O', u'Î': 'O', u'áŊ': 'O', u'áŊ': 'O', u'áŊ': 'O', u'áŊ': 
'O', u'áŊ': 'O', u'áŊ': 'O', u'áŋ¸': 'O', u'Î': 'O', u'Î ': 'P', u'ÎĄ': 'R', u'áŋŦ': 'R', u'ÎŖ': 'S', u'Τ': 'T', u'ÎĨ': 'Y', u'Î': 'Y', u'ÎĢ': 'Y', u'áŊ': 'Y', u'áŊ': 'Y', u'áŊ': 'Y', u'áŊ': 'Y', u'áŋ¨': 'Y', u'áŋŠ': 'Y', u'áŋĒ': 'Y', u'Î': 'Y', u'ÎĻ': 'F', u'Χ': 'X', u'Ψ': 'PS', u'Ί': 'O', u'Î': 'O', u'áŊ¨': 'O', u'áŊŠ': 'O', u'áŊĒ': 'O', u'áŊĢ': 'O', u'áŊŦ': 'O', u'áŊ': 'O', u'áŊŽ': 'O', u'áŊ¯': 'O', u'ឨ': 'O', u'ដ': 'O', u'ážĒ': 'O', u'ážĢ': 'O', u'ážŦ': 'O', u'áž': 'O', u'ណ': 'O', u'ឯ': 'O', u'áŋē': 'O', u'Î': 'O', u'áŋŧ': 'O', u'Îą': 'a', u'ÎŦ': 'a', u'áŧ': 'a', u'áŧ': 'a', u'áŧ': 'a', u'áŧ': 'a', u'áŧ': 'a', u'áŧ
': 'a', u'áŧ': 'a', u'áŧ': 'a', u'áž': 'a', u'áž': 'a', u'áž': 'a', u'áž': 'a', u'áž': 'a', u'áž
': 'a', u'áž': 'a', u'áž': 'a', u'áŊ°': 'a', u'ÎŦ': 'a', u'áž°': 'a', u'ážą': 'a', u'ឲ': 'a', u'ážŗ': 'a', u'áž´': 'a', u'ážļ': 'a', u'ឡ': 'a', u'β': 'b', u'Îŗ': 'g', u'δ': 'd', u'Îĩ': 'e', u'Î': 'e', u'áŧ': 'e', u'áŧ': 'e', u'áŧ': 'e', u'áŧ': 'e', u'áŧ': 'e', u'áŧ': 'e', u'áŊ˛': 'e', u'Î': 'e', u'Îļ': 'z', u'Ρ': 'i', u'ÎŽ': 'i', u'áŧ ': 'i', u'áŧĄ': 'i', u'áŧĸ': 'i', u'áŧŖ': 'i', u'áŧ¤': 'i', u'áŧĨ': 'i', u'áŧĻ': 'i', u'áŧ§': 'i', u'áž': 'i', u'áž': 'i', u'áž': 'i', u'áž': 'i', u'áž': 'i', u'áž': 'i', u'áž': 'i', u'áž': 'i', u'áŊ´': 'i', u'ÎŽ': 'i', u'áŋ': 'i', u'áŋ': 'i', u'áŋ': 'i', u'áŋ': 'i', u'áŋ': 'i', u'θ': 'th', u'Κ': 'i', u'ί': 'i', u'Ī': 'i', u'Î': 'i', u'áŧ°': 'i', u'áŧą': 'i', u'áŧ˛': 'i', u'áŧŗ': 'i', u'áŧ´': 'i', u'áŧĩ': 'i', u'áŧļ': 'i', u'áŧˇ': 'i', u'áŊļ': 'i', u'ί': 'i', u'áŋ': 'i', u'áŋ': 'i', u'áŋ': 'i', u'Î': 'i', u'áŋ': 'i', u'áŋ': 'i', u'Îē': 'k', u'Îģ': 'l', u'Îŧ': 'm', u'ÎŊ': 'n', u'Ξ': 'ks', u'Îŋ': 'o', u'Ī': 'o', u'áŊ': 'o', u'áŊ': 'o', u'áŊ': 'o', u'áŊ': 'o', u'áŊ': 'o', u'áŊ
': 'o', u'áŊ¸': 'o', u'Ī': 'o', u'Ī': 'p', u'Ī': 'r', u'áŋ¤': 'r', u'áŋĨ': 'r', u'Ī': 's', u'Ī': 's', u'Ī': 't', u'Ī
': 'y', u'Ī': 'y', u'Ī': 'y', u'ΰ': 'y', u'áŊ': 'y', u'áŊ': 'y', u'áŊ': 'y', u'áŊ': 'y', u'áŊ': 'y', u'áŊ': 'y', u'áŊ': 'y', u'áŊ': 'y', u'áŊē': 'y', u'Ī': 'y', u'áŋ ': 'y', u'áŋĄ': 'y', u'áŋĸ': 'y', u'ΰ': 'y', u'áŋĻ': 'y', u'áŋ§': 'y', u'Ī': 'f', u'Ī': 'x', u'Ī': 'ps', u'Ī': 'o', u'Ī': 'o', u'áŊ ': 'o', u'áŊĄ': 'o', u'áŊĸ': 'o', u'áŊŖ': 'o', u'áŊ¤': 'o', u'áŊĨ': 'o', u'áŊĻ': 'o', u'áŊ§': 'o', u'áž ': 'o', u'ឥ': 'o', u'ážĸ': 'o', u'ážŖ': 'o', u'ឤ': 'o', u'ážĨ': 'o', u'ážĻ': 'o', u'áž§': 'o', u'áŊŧ': 'o', u'Ī': 'o', u'áŋ˛': 'o', u'áŋŗ': 'o', u'áŋ´': 'o', u'áŋļ': 'o', u'áŋˇ': 'o', u'¨': '', u'Î
': '', u'ážŋ': '', u'áŋž': '', u'áŋ': '', u'áŋ': '', u'áŋ': '', u'áŋ': '', u'áŋ': '', u'áŋ': '', u'áŋ': '', u'áŋ': '', u'Î': '', u'Î
': '', u'`': '', u'áŋ': '', u'Íē': '', u'ážŊ': '', u'Đ': 'A', u'Đ': 'B', u'Đ': 'V', u'Đ': 'G', u'Đ': 'D', u'Đ': 'E', u'Đ': 'YO', u'Đ': 'ZH', u'Đ': 'Z', u'Đ': 'I', u'Đ': 'J', u'Đ': 'K', u'Đ': 'L', u'Đ': 'M', u'Đ': 'N', u'Đ': 'O', u'Đ': 'P', u'Đ ': 'R', u'ĐĄ': 'S', u'Đĸ': 'T', u'ĐŖ': 'U', u'Ф': 'F', u'ĐĨ': 'H', u'ĐĻ': 'TS', u'Ч': 'CH', u'Ш': 'SH', u'ĐŠ': 'SCH', u'ĐĢ': 'YI', u'Đ': 'E', u'ĐŽ': 'YU', u'Đ¯': 'YA', u'а': 'A', u'Đą': 'B', u'в': 'V', u'Đŗ': 'G', u'Đ´': 'D', u'Đĩ': 'E', u'Ņ': 'YO', u'Đļ': 'ZH', u'С': 'Z', u'и': 'I', u'Đš': 'J', u'Đē': 'K', u'Đģ': 'L', u'Đŧ': 'M', u'ĐŊ': 'N', u'Đž': 'O', u'Đŋ': 'P', u'Ņ': 'R', u'Ņ': 'S', u'Ņ': 'T', u'Ņ': 'U', u'Ņ': 'F', u'Ņ
': 'H', u'Ņ': 'TS', u'Ņ': 'CH', u'Ņ': 'SH', u'Ņ': 'SCH', u'Ņ': 'YI', u'Ņ': 'E', u'Ņ': 'YU', u'Ņ': 'YA', u'ĐĒ': '', u'Ņ': '', u'ĐŦ': '', u'Ņ': '', u'ð': 'd', u'Ã': 'D', u'Þ': 'th', u'Ã': 'TH',u'á': 'a', u'á': 'b', u'á': 'g', u'á': 'd', u'á': 'e', u'á': 'v', u'á': 'z', u'á': 't', u'á': 'i', u'á': 'k', u'á': 'l', u'á': 'm', u'á': 'n', u'á': 'o', u'á': 'p', u'á': 'zh', u'á ': 'r', u'áĄ': 's', u'áĸ': 't', u'áŖ': 'u', u'á¤': 'p', u'áĨ': 'k', u'áĻ': 'gh', u'á§': 'q', u'á¨': 'sh', u'áŠ': 'ch', u'áĒ': 'ts', u'áĢ': 'dz', u'áŦ': 'ts', u'á': 'ch', u'áŽ': 'kh', u'á¯': 'j', u'á°': 'h' }
def replace_char(m):
    """Regex-substitution callback: transliterate one matched character.

    Looks the character up in the module-level ``char_map`` table and
    returns the mapped ASCII replacement, or the character unchanged when
    no mapping exists.
    """
    # Bug fix: dict.has_key() was removed in Python 3; dict.get() with a
    # default expresses the same lookup-or-fallback in one step.
    char = m.group()
    return char_map.get(char, char)
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def _slugify(text, delim=u'-'):
    """Generates an ASCII-only slug.

    ``text`` is lower-cased, split on punctuation/whitespace
    (``_punct_re``), re-joined with ``delim``, and any remaining
    non-ASCII characters are transliterated via ``replace_char``.
    """
    # The original round-tripped every word through a UTF-8 encode/decode,
    # a Python 2 leftover that is a pure no-op on Python 3 str objects;
    # plain truthiness filtering is equivalent.
    words = [word for word in _punct_re.split(text.lower()) if word]
    slugified = delim.join(words)
    # '{1}' after a character class was redundant -- a class already
    # matches exactly one character.
    return re.sub('[^a-zA-Z0-9\\s\\-]', replace_char, slugified).lower()
## end of http://code.activestate.com/recipes/577257/ }}}
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
def _curry(*args, **kwargs):
function, args = args[0], args[1:]
def result(*rest, **kwrest):
combined = kwargs.copy()
combined.update(kwrest)
return function(*args + rest, **combined)
return result
# Recipe: regex_from_encoded_pattern (1.0)
def _regex_from_encoded_pattern(s):
"""'foo' -> re.compile(re.escape('foo'))
'/foo/' -> re.compile('foo')
'/foo/i' -> re.compile('foo', re.I)
"""
if s.startswith('/') and s.rfind('/') != 0:
# Parse it: /PATTERN/FLAGS
idx = s.rfind('/')
pattern, flags_str = s[1:idx], s[idx+1:]
flag_from_char = {
"i": re.IGNORECASE,
"l": re.LOCALE,
"s": re.DOTALL,
"m": re.MULTILINE,
"u": re.UNICODE,
}
flags = 0
for char in flags_str:
try:
flags |= flag_from_char[char]
except KeyError:
raise ValueError("unsupported regex flag: '%s' in '%s' "
"(must be one of '%s')"
% (char, s, ''.join(list(flag_from_char.keys()))))
return re.compile(s[1:idx], flags)
else: # not an encoded regex
return re.compile(re.escape(s))
# Recipe: dedent (0.1.2)
def _dedentlines(lines, tabsize=8, skip_first_line=False):
    """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines

    "lines" is a list of lines to dedent.
    "tabsize" is the tab width to use for indent width calculations.
    "skip_first_line" is a boolean indicating if the first line should
        be skipped for calculating the indent width and for dedenting.
        This is sometimes useful for docstrings and similar.

    Same as dedent() except operates on a sequence of lines. Note: the
    lines list is modified **in-place**.
    """
    DEBUG = False
    if DEBUG:
        print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
              % (tabsize, skip_first_line))
    indents = []
    margin = None
    # Pass 1: find the smallest indent width (the "margin") over all
    # non-blank lines, expanding tabs to the next tab stop.
    for i, line in enumerate(lines):
        if i == 0 and skip_first_line: continue
        indent = 0
        for ch in line:
            if ch == ' ':
                indent += 1
            elif ch == '\t':
                indent += tabsize - (indent % tabsize)
            elif ch in '\r\n':
                continue  # skip all-whitespace lines
            else:
                break
        else:
            # for/else: the loop ran off the end without hitting a
            # non-whitespace char -- an all-whitespace line.
            continue  # skip all-whitespace lines
        if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
        if margin is None:
            margin = indent
        else:
            margin = min(margin, indent)
    if DEBUG: print("dedent: margin=%r" % margin)
    # Pass 2: strip the common margin from every line, in place.
    if margin is not None and margin > 0:
        for i, line in enumerate(lines):
            if i == 0 and skip_first_line: continue
            removed = 0
            for j, ch in enumerate(line):
                if ch == ' ':
                    removed += 1
                elif ch == '\t':
                    removed += tabsize - (removed % tabsize)
                elif ch in '\r\n':
                    # Whitespace-only line: drop everything up to the EOL.
                    if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
                    lines[i] = lines[i][j:]
                    break
                else:
                    raise ValueError("unexpected non-whitespace char %r in "
                                     "line %r while removing %d-space margin"
                                     % (ch, line, margin))
                if DEBUG:
                    print("dedent: %r: %r -> removed %d/%d"\
                          % (line, ch, removed, margin))
                if removed == margin:
                    lines[i] = lines[i][j+1:]
                    break
                elif removed > margin:
                    # A tab expanded past the margin: pad back with spaces.
                    lines[i] = ' '*(removed-margin) + lines[i][j+1:]
                    break
            else:
                if removed:
                    lines[i] = lines[i][removed:]
    return lines
def _dedent(text, tabsize=8, skip_first_line=False):
    """Remove the common leading indentation from ``text``.

    Like textwrap.dedent(s), except tabs are measured against ``tabsize``
    rather than expanded to spaces, and the first line can optionally be
    excluded from the calculation via ``skip_first_line``.
    """
    split = text.splitlines(True)
    _dedentlines(split, tabsize=tabsize, skip_first_line=skip_first_line)
    return ''.join(split)
class _memoized:
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
http://wiki.python.org/moin/PythonDecoratorLibrary
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
self.cache[args] = value = self.func(*args)
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def _xml_oneliner_re_from_tab_width(tab_width):
"""Standalone XML processing instruction regex."""
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in $1
[ ]{0,%d}
(?:
<\?\w+\b\s+.*?\?> # XML processing instruction
|
<\w+:\w+\b\s+.*?/> # namespaced single tag
)
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
def _hr_tag_re_from_tab_width(tab_width):
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in \1
[ ]{0,%d}
<(hr) # start tag = \2
\b # word break
([^<>])*? #
/?> # the matching end tag
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
def _xml_escape_attr(attr, skip_single_quote=True):
"""Escape the given string for use in an HTML/XML tag attribute.
By default this doesn't bother with escaping `'` to `'`, presuming that
the tag attribute is surrounded by double quotes.
"""
escaped = (attr
.replace('&', '&')
.replace('"', '"')
.replace('<', '<')
.replace('>', '>'))
if not skip_single_quote:
escaped = escaped.replace("'", "'")
return escaped
def _xml_encode_email_char_at_random(ch):
r = random()
# Roughly 10% raw, 45% hex, 45% dec.
# '@' *must* be encoded. I [John Gruber] insist.
# Issue 26: '_' must be encoded.
if r > 0.9 and ch not in "@_":
return ch
elif r < 0.45:
# The [1:] is to drop leading '0': 0x63 -> x63
return '%s;' % hex(ord(ch))[1:]
else:
return '%s;' % ord(ch)
#---- mainline
class _NoReflowFormatter(optparse.IndentedHelpFormatter):
"""An optparse formatter that does NOT reflow the description."""
def format_description(self, description):
return description or ""
def _test():
    # Run this module's doctests (invoked via the --self-test CLI option).
    import doctest
    doctest.testmod()
def main(argv=None):
    """Command-line entry point: convert the given paths (or stdin) to HTML."""
    if argv is None:
        argv = sys.argv
    if not logging.root.handlers:
        logging.basicConfig()

    # ---- option parsing ----
    usage = "usage: %prog [PATHS...]"
    version = "%prog "+__version__
    parser = optparse.OptionParser(prog="markdown2", usage=usage,
        version=version, description=cmdln_desc,
        formatter=_NoReflowFormatter())
    parser.add_option("-v", "--verbose", dest="log_level",
                      action="store_const", const=logging.DEBUG,
                      help="more verbose output")
    parser.add_option("--encoding",
                      help="specify encoding of text content")
    parser.add_option("--html4tags", action="store_true", default=False,
                      help="use HTML 4 style for empty element tags")
    parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
                      help="sanitize literal HTML: 'escape' escapes "
                           "HTML meta chars, 'replace' replaces with an "
                           "[HTML_REMOVED] note")
    parser.add_option("-x", "--extras", action="append",
                      help="Turn on specific extra features (not part of "
                           "the core Markdown spec). See above.")
    # NOTE(review): this help string ends with an empty literal -- a URL
    # appears to have been stripped from this copy of the source.
    parser.add_option("--use-file-vars",
                      help="Look for and use Emacs-style 'markdown-extras' "
                           "file var to turn on extras. See "
                           "")
    parser.add_option("--link-patterns-file",
                      help="path to a link pattern file")
    parser.add_option("--self-test", action="store_true",
                      help="run internal self-tests (some doctests)")
    parser.add_option("--compare", action="store_true",
                      help="run against Markdown.pl as well (for testing)")
    parser.set_defaults(log_level=logging.INFO, compare=False,
                        encoding="utf-8", safe_mode=None, use_file_vars=False)
    opts, paths = parser.parse_args()
    log.setLevel(opts.log_level)

    if opts.self_test:
        return _test()

    # Parse "-x name[=value]" extras into a {name: value} dict.  Values are
    # coerced to int when possible, otherwise kept as strings; a bare name
    # maps to None.
    if opts.extras:
        extras = {}
        for s in opts.extras:
            splitter = re.compile("[,;: ]+")
            for e in splitter.split(s):
                if '=' in e:
                    ename, earg = e.split('=', 1)
                    try:
                        earg = int(earg)
                    except ValueError:
                        pass
                else:
                    ename, earg = e, None
                extras[ename] = earg
    else:
        extras = None

    # Read link patterns: one "REGEX HREF" pair per line; blank lines and
    # '#'-comment lines are skipped.
    if opts.link_patterns_file:
        link_patterns = []
        f = open(opts.link_patterns_file)
        try:
            for i, line in enumerate(f.readlines()):
                if not line.strip(): continue
                if line.lstrip().startswith("#"): continue
                try:
                    pat, href = line.rstrip().rsplit(None, 1)
                except ValueError:
                    raise MarkdownError("%s:%d: invalid link pattern line: %r"
                                        % (opts.link_patterns_file, i+1, line))
                link_patterns.append(
                    (_regex_from_encoded_pattern(pat), href))
        finally:
            f.close()
    else:
        link_patterns = None

    from os.path import join, dirname, abspath, exists
    markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
                       "Markdown.pl")
    if not paths:
        paths = ['-']  # default to reading Markdown from stdin
    for path in paths:
        if path == '-':
            text = sys.stdin.read()
        else:
            fp = codecs.open(path, 'r', opts.encoding)
            text = fp.read()
            fp.close()
        if opts.compare:
            # Run the reference Perl implementation for side-by-side output.
            from subprocess import Popen, PIPE
            print("==== Markdown.pl ====")
            p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
            p.stdin.write(text.encode('utf-8'))
            p.stdin.close()
            perl_html = p.stdout.read().decode('utf-8')
            if py3:
                sys.stdout.write(perl_html)
            else:
                sys.stdout.write(perl_html.encode(
                    sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
            print("==== markdown2.py ====")
        html = markdown(text,
            html4tags=opts.html4tags,
            safe_mode=opts.safe_mode,
            extras=extras, link_patterns=link_patterns,
            use_file_vars=opts.use_file_vars)
        if py3:
            sys.stdout.write(html)
        else:
            sys.stdout.write(html.encode(
                sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
        if extras and "toc" in extras:
            log.debug("toc_html: " +
                html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
        if opts.compare:
            # Normalize both outputs with the test helpers when available,
            # then report whether markdown2 matches Markdown.pl.
            test_dir = join(dirname(dirname(abspath(__file__))), "test")
            if exists(join(test_dir, "test_markdown2.py")):
                sys.path.insert(0, test_dir)
                from test_markdown2 import norm_html_from_html
                norm_html = norm_html_from_html(html)
                norm_perl_html = norm_html_from_html(perl_html)
            else:
                norm_html = html
                norm_perl_html = perl_html
            print("==== match? %r ====" % (norm_perl_html == norm_html))
# Page template consumed by BackDoc.get_result_html().
# NOTE(review): the template markup (and the placeholder tokens that
# get_result_html() substitutes) appear to have been stripped from this
# copy of the source -- only a newline remains; confirm against upstream.
template_html = u'''
'''
def force_text(text):
    """Return ``text`` as a unicode string, decoding UTF-8 bytes if needed.

    Bug fix: the original tested ``isinstance(text, unicode)``; the
    ``unicode`` builtin does not exist on Python 3, so the function raised
    NameError there.  Checking for ``bytes`` preserves the behaviour on
    both Python 2 (where str is bytes) and Python 3.
    """
    if isinstance(text, bytes):
        return text.decode('utf-8')
    return text
class BackDoc:
    """Small CLI helper that renders Markdown into an HTML page.

    Reads Markdown from ``--source`` (or stdin), converts it with the
    supplied ``markdown_converter`` and writes ``template_html`` -- with
    the title, table of contents and body substituted -- to ``stdout``.
    """
    def __init__(self, markdown_converter, template_html, stdin, stdout):
        self.markdown_converter = markdown_converter
        self.template_html = force_text(template_html)
        self.stdin = stdin
        self.stdout = stdout
        self.parser = self.get_parser()

    def run(self, argv):
        # Parse CLI arguments and write the rendered page to stdout.
        kwargs = self.get_kwargs(argv)
        self.stdout.write(self.get_result_html(**kwargs))

    def get_kwargs(self, argv):
        # Turn the argparse namespace into a plain dict of keyword args.
        parsed = dict(self.parser.parse_args(argv)._get_kwargs())
        return self.prepare_kwargs_from_parsed_data(parsed)

    def prepare_kwargs_from_parsed_data(self, parsed):
        kwargs = {}
        kwargs['title'] = force_text(parsed.get('title') or 'Documentation')
        if parsed.get('source'):
            # Bug fix: the file handle was previously opened and never
            # closed; a context manager guarantees it is released.
            with open(parsed['source'], 'r') as source_file:
                kwargs['markdown_src'] = source_file.read()
        else:
            kwargs['markdown_src'] = self.stdin.read()
        kwargs['markdown_src'] = force_text(kwargs['markdown_src'] or '')
        return kwargs

    def get_result_html(self, title, markdown_src):
        # NOTE(review): the .replace() targets below are empty strings in
        # this copy of the source -- the placeholder tokens appear to have
        # been stripped along with the template markup; confirm upstream
        # before relying on this substitution.
        response = self.get_converted_to_html_response(markdown_src)
        return (
            self.template_html.replace('', title)
                .replace('', response.toc_html and force_text(response.toc_html) or '')
                .replace('', force_text(response))
        )

    def get_converted_to_html_response(self, markdown_src):
        # ``response`` is whatever the converter returns (markdown2 returns
        # a str subclass carrying a ``toc_html`` attribute).
        return self.markdown_converter.convert(markdown_src)

    def get_parser(self):
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '-t',
            '--title',
            help='Documentation title header',
            required=False,
        )
        parser.add_argument(
            '-s',
            '--source',
            help='Markdown source file path',
            required=False,
        )
        return parser
# Script entry point: render Markdown from the command line using the
# BackDoc helper, with the table-of-contents extra enabled.
if __name__ == '__main__':
    BackDoc(
        markdown_converter=Markdown(extras=['toc']),
        template_html=template_html,
        stdin=sys.stdin,
        stdout=sys.stdout
    ).run(argv=sys.argv[1:])
drf-extensions-0.8.0/docs/index.html 0000664 0000000 0000000 00000631367 14775671154 0017455 0 ustar 00root root 0000000 0000000
Django Rest Framework extensions documentation
DRF-extensions
DRF-extensions is a collection of custom extensions for Django REST Framework.
Source repository is available at https://github.com/chibisov/drf-extensions.
Viewsets
Extensions for viewsets.
DetailSerializerMixin
This mixin lets add custom serializer for detail view. Just add mixin and specify serializer_detail_class attribute:
from django.contrib.auth.models import User
from myapps.serializers import UserSerializer, UserDetailSerializer
from rest_framework_extensions.mixins import DetailSerializerMixin
class UserViewSet(DetailSerializerMixin, viewsets.ReadOnlyModelViewSet):
serializer_class = UserSerializer
serializer_detail_class = UserDetailSerializer
queryset = User.objects.all()
Sometimes you need to set custom QuerySet for detail view. For example, in detail view you want to show user groups and permissions for these groups. You can make it by specifying queryset_detail attribute:
from django.contrib.auth.models import User
from myapps.serializers import UserSerializer, UserDetailSerializer
from rest_framework_extensions.mixins import DetailSerializerMixin
class UserViewSet(DetailSerializerMixin, viewsets.ReadOnlyModelViewSet):
serializer_class = UserSerializer
serializer_detail_class = UserDetailSerializer
queryset = User.objects.all()
queryset_detail = queryset.prefetch_related('groups__permissions')
If you use DetailSerializerMixin and don't specify serializer_detail_class attribute, then serializer_class will be used.
If you use DetailSerializerMixin and don't specify queryset_detail attribute, then queryset will be used.
PaginateByMaxMixin
New in DRF-extensions 0.2.2
This mixin allows to paginate results by max_paginate_by
value. This approach is useful when clients want to take as much paginated data as possible,
but don't want to bother about backend limitations.
from myapps.serializers import UserSerializer
from rest_framework_extensions.mixins import PaginateByMaxMixin
class UserViewSet(PaginateByMaxMixin,
viewsets.ReadOnlyModelViewSet):
max_paginate_by = 100
serializer_class = UserSerializer
And now you can send requests with ?page_size=max argument:
# Request
GET /users/?page_size=max HTTP/1.1
Accept: application/json
# Response
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
{
count: 1000,
next: "https://localhost:8000/v1/users/?page=2&page_size=max",
previous: null,
results: [
...100 items...
]
}
This mixin could be used only with Django Rest Framework >= 2.3.8, because
max_paginate_by
was introduced in 2.3.8 version.
Cache/ETAG mixins
The etag functionality is pending an overhaul has been temporarily removed since 0.4.0.
ReadOnlyCacheResponseAndETAGMixin and CacheResponseAndETAGMixin are no longer available to use.
See discussion in Issue #177
Routers
Extensions for routers.
You will need to use the custom ExtendedDefaultRouter or ExtendedSimpleRouter for routing if you want to take advantage of the described extensions. For example, you have a standard implementation:
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
You should replace DefaultRouter with ExtendedDefaultRouter:
from rest_framework_extensions.routers import (
ExtendedDefaultRouter as DefaultRouter
)
router = DefaultRouter()
Or SimpleRouter with ExtendedSimpleRouter:
from rest_framework_extensions.routers import (
ExtendedSimpleRouter as SimpleRouter
)
router = SimpleRouter()
Pluggable router mixins
New in DRF-extensions 0.2.4
Every feature in the extended routers has its own mixin. That means you can use only the features you need in your custom
routers. ExtendedRouterMixin has the full set of drf-extensions features. For example you can use it with third-party routers:
from rest_framework_extensions.routers import ExtendedRouterMixin
from third_party_app.routers import SomeRouter
class ExtendedSomeRouter(ExtendedRouterMixin, SomeRouter):
pass
Nested routes
New in DRF-extensions 0.2.4
Nested routes allows you create nested resources with viewsets.
For example:
from rest_framework_extensions.routers import ExtendedSimpleRouter
from yourapp.views import (
UserViewSet,
GroupViewSet,
PermissionViewSet,
)
router = ExtendedSimpleRouter()
(
router.register(r'users', UserViewSet, basename='user')
.register(r'groups',
GroupViewSet,
basename='users-group',
parents_query_lookups=['user_groups'])
.register(r'permissions',
PermissionViewSet,
basename='users-groups-permission',
parents_query_lookups=['group__user', 'group'])
)
urlpatterns = router.urls
There is one requirement for viewsets which used in nested routers. They should add mixin NestedViewSetMixin. That mixin
adds automatic filtering by parent lookups:
# yourapp.views
from rest_framework_extensions.mixins import NestedViewSetMixin
class UserViewSet(NestedViewSetMixin, ModelViewSet):
model = UserModel
class GroupViewSet(NestedViewSetMixin, ModelViewSet):
model = GroupModel
class PermissionViewSet(NestedViewSetMixin, ModelViewSet):
model = PermissionModel
With this kind of router we have the following resources:
/users/ - list of all users. Resolve name is user-list
/users/<pk>/ - user detail. Resolve name is user-detail
/users/<parent_lookup_user_groups>/groups/ - list of groups for exact user.
Resolve name is users-group-list
/users/<parent_lookup_user_groups>/groups/<pk>/ - user group detail. If user doesn't have group then resource will
be not found. Resolve name is users-group-detail
/users/<parent_lookup_group__user>/groups/<parent_lookup_group>/permissions/ - list of permissions for user group.
Resolve name is users-groups-permission-list
/users/<parent_lookup_group__user>/groups/<parent_lookup_group>/permissions/<pk>/ - user group permission detail.
If user doesn't have group or group doesn't have permission then resource will be not found.
Resolve name is users-groups-permission-detail
Every resource is automatically filtered by parent lookups.
# Request
GET /users/1/groups/2/permissions/ HTTP/1.1
Accept: application/json
# Response
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
[
{
id: 3,
name: "read"
},
{
id: 4,
name: "update"
},
{
id: 5,
name: "delete"
}
]
For request above permissions will be filtered by user with pk 1 and group with pk 2:
Permission.objects.filter(group__user=1, group=2)
Example of registering more than one nested resource at the same depth:
permissions_routes = router.register(
r'permissions',
PermissionViewSet,
basename='permission'
)
permissions_routes.register(
r'groups',
GroupViewSet,
basename='permissions-group',
parents_query_lookups=['permissions']
)
permissions_routes.register(
r'users',
UserViewSet,
basename='permissions-user',
parents_query_lookups=['groups__permissions']
)
With this kind of router we have the following resources:
/permissions/ - list of all permissions. Resolve name is permission-list
/permissions/<pk>/ - permission detail. Resolve name is permission-detail
/permissions/<parent_lookup_permissions>/groups/ - list of groups for exact permission.
Resolve name is permissions-group-list
/permissions/<parent_lookup_permissions>/groups/<pk>/ - permission group detail. If group doesn't have
permission then resource will be not found. Resolve name is permissions-group-detail
/permissions/<parent_lookup_groups__permissions>/users/ - list of users for exact permission.
Resolve name is permissions-user-list
/permissions/<parent_lookup_groups__permissions>/user/<pk>/ - permission user detail. If user doesn't have
permission then resource will be not found. Resolve name is permissions-user-detail
Nested router mixin
You can use rest_framework_extensions.routers.NestedRouterMixin for adding nesting feature into your routers:
from rest_framework_extensions.routers import NestedRouterMixin
from rest_framework.routers import SimpleRouter
class SimpleRouterWithNesting(NestedRouterMixin, SimpleRouter):
pass
Usage with generic relations
If you want to use nested router for generic relation
fields, you should explicitly filter QuerySet by content type.
For example if you have such kind of models:
class Task(models.Model):
title = models.CharField(max_length=30)
class Book(models.Model):
title = models.CharField(max_length=30)
class Comment(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
text = models.CharField(max_length=30)
Lets create viewsets for that models:
class TaskViewSet(NestedViewSetMixin, ModelViewSet):
model = TaskModel
class BookViewSet(NestedViewSetMixin, ModelViewSet):
model = BookModel
class CommentViewSet(NestedViewSetMixin, ModelViewSet):
queryset = CommentModel.objects.all()
And router like this:
router = ExtendedSimpleRouter()
# tasks route
(
router.register(r'tasks', TaskViewSet)
.register(r'comments',
CommentViewSet,
'tasks-comment',
parents_query_lookups=['object_id'])
)
# books route
(
router.register(r'books', BookViewSet)
.register(r'comments',
CommentViewSet,
'books-comment',
parents_query_lookups=['object_id'])
)
As you can see we've added to parents_query_lookups only one object_id value. But when you make requests to comments
endpoint for both tasks and books routes there is no context for current content type.
# Request
GET /tasks/123/comments/ HTTP/1.1
Accept: application/json
# Response
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
[
{
id: 1,
content_type: 1,
object_id: 123,
text: "Good task!"
},
{
id: 2,
content_type: 2, // oops. Wrong content type (for book)
object_id: 123, // task and book has the same id
text: "Good book!"
},
]
For such kind of cases you should explicitly filter QuerySets of nested viewsets by content type:
from django.contrib.contenttypes.models import ContentType
class CommentViewSet(NestedViewSetMixin, ModelViewSet):
queryset = CommentModel.objects.all()
class TaskCommentViewSet(CommentViewSet):
def get_queryset(self):
return super(TaskCommentViewSet, self).get_queryset().filter(
content_type=ContentType.objects.get_for_model(TaskModel)
)
class BookCommentViewSet(CommentViewSet):
def get_queryset(self):
return super(BookCommentViewSet, self).get_queryset().filter(
content_type=ContentType.objects.get_for_model(BookModel)
)
Lets use new viewsets in router:
router = ExtendedSimpleRouter()
# tasks route
(
router.register(r'tasks', TaskViewSet)
.register(r'comments',
TaskCommentViewSet,
'tasks-comment',
parents_query_lookups=['object_id'])
)
# books route
(
router.register(r'books', BookViewSet)
.register(r'comments',
BookCommentViewSet,
'books-comment',
parents_query_lookups=['object_id'])
)
Serializers
Extensions for serializers functionality.
PartialUpdateSerializerMixin
New in DRF-extensions 0.2.3
By default every saving of ModelSerializer
saves the whole object. Even partial update just patches model instance. For example:
from myapps.models import City
from myapps.serializers import CitySerializer
moscow = City.objects.get(pk=10)
city_serializer = CitySerializer(
instance=moscow,
data={'country': 'USA'},
partial=True
)
if city_serializer.is_valid():
city_serializer.save()
# equivalent to
moscow.country = 'USA'
moscow.save()
SQL representation for previous example will be:
UPDATE city SET name='Moscow', country='USA' WHERE id=1;
Django's save method has keyword argument update_fields.
Only the fields named in that list will be updated:
moscow.country = 'USA'
moscow.save(update_fields=['country'])
SQL representation for example with update_fields usage will be:
UPDATE city SET country='USA' WHERE id=1;
To use update_fields for every partial update you should mixin PartialUpdateSerializerMixin to your serializer:
from rest_framework_extensions.serializers import (
PartialUpdateSerializerMixin
)
class CitySerializer(PartialUpdateSerializerMixin,
serializers.ModelSerializer):
class Meta:
model = City
Fields
Set of serializer fields that extends default fields functionality.
ResourceUriField
Represents a hyperlinking uri that points to the detail view for that object.
from rest_framework_extensions.fields import ResourceUriField
class CitySerializer(serializers.ModelSerializer):
resource_uri = ResourceUriField(view_name='city-detail')
class Meta:
model = City
Request example:
# Request
GET /cities/268/ HTTP/1.1
Accept: application/json
# Response
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
{
id: 268,
resource_uri: "http://localhost:8000/v1/cities/268/",
name: "Serpuhov"
}
Permissions
Extensions for permissions.
Object permissions
New in DRF-extensions 0.2.2
Django Rest Framework allows you to use DjangoObjectPermissions out of the box. But it has one limitation - if a user has no permission to view a resource, they will get a 404 response code. In most cases that is a good approach because it solves security issues by default. But what if you wanted to return 401 or 403? What if you wanted to tell the user - "You need to be logged in to view the current resource" or "You don't have permission to view the current resource"?
ExtendedDjangoObjectPermissions will help you to be more flexible. By default it behaves as the standard DjangoObjectPermissions. For example, it is safe to replace DjangoObjectPermissions with the extended permissions class:
from rest_framework_extensions.permissions import (
ExtendedDjangoObjectPermissions as DjangoObjectPermissions
)
class CommentView(viewsets.ModelViewSet):
permission_classes = (DjangoObjectPermissions,)
Now every request from an unauthorized user will get a 404 response:
# Request
GET /comments/1/ HTTP/1.1
Accept: application/json
# Response
HTTP/1.1 404 NOT FOUND
Content-Type: application/json; charset=UTF-8
{"detail": "Not found"}
With ExtendedDjangoObjectPermissions you can disable hiding forbidden-for-read objects by changing the hide_forbidden_for_read_objects attribute:
from rest_framework_extensions.permissions import (
ExtendedDjangoObjectPermissions
)
class CommentViewObjectPermissions(ExtendedDjangoObjectPermissions):
hide_forbidden_for_read_objects = False
class CommentView(viewsets.ModelViewSet):
permission_classes = (CommentViewObjectPermissions,)
Now lets see request response for user that has no permissions for viewing CommentView object:
# Request
GET /comments/1/ HTTP/1.1
Accept: application/json
# Response
HTTP/1.1 403 FORBIDDEN
Content-Type: application/json; charset=UTF-8
{u'detail': u'You do not have permission to perform this action.'}
ExtendedDjangoObjectPermissions can be used only with Django Rest Framework >= 2.3.8, because DjangoObjectPermissions was introduced in version 2.3.8.
Caching
To cache something is to save the result of an expensive calculation so that you don't have to perform the calculation next time. Here's some pseudocode explaining how this would work for a dynamically generated api response:
given a URL, try finding that API response in the cache
if the response is in the cache:
return the cached response
else:
generate the response
save the generated response in the cache (for next time)
return the generated response
Cache response
DRF-extensions allows you to cache api responses with simple @cache_response decorator.
There are two requirements for decorated method:
- It should be method of class which is inherited from
rest_framework.views.APIView
- It should return
rest_framework.response.Response instance.
Usage example:
from rest_framework.response import Response
from rest_framework import views
from rest_framework_extensions.cache.decorators import (
cache_response
)
from myapp.models import City
class CityView(views.APIView):
@cache_response()
def get(self, request, *args, **kwargs):
cities = City.objects.all().values_list('name', flat=True)
return Response(cities)
If you request view first time you'll get it from processed SQL query. (~60ms response time):
# Request
GET /cities/ HTTP/1.1
Accept: application/json
# Response
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
['Moscow', 'London', 'Paris']
Second request will hit the cache. No sql evaluation, no database query. (~30 ms response time):
# Request
GET /cities/ HTTP/1.1
Accept: application/json
# Response
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
['Moscow', 'London', 'Paris']
Reduction in response time depends on calculation complexity inside your API method. Sometimes it reduces from 1 second to 10ms, sometimes you win just 10ms.
New in DRF-extensions 0.4.0
The decorator will render and discard the original DRF response in favor of Django's HttpResponse. This allows the cache to retain a smaller memory footprint and eliminates the need to re-render responses on each request. Furthermore it eliminates the risk for users to unknowingly cache whole Serializers and QuerySets.
You can disable this behavior in your test suite by using dummy caching for the DRF-extensions cache (set via DEFAULT_USE_CACHE).
Timeout
You can specify cache timeout in seconds, providing first argument:
class CityView(views.APIView):
@cache_response(60 * 15)
def get(self, request, *args, **kwargs):
...
In the above example, the result of the get() view will be cached for 15 minutes.
If you don't specify timeout argument then value from REST_FRAMEWORK_EXTENSIONS settings will be used. By default it's None, which means "cache forever". You can change this default in settings:
REST_FRAMEWORK_EXTENSIONS = {
'DEFAULT_CACHE_RESPONSE_TIMEOUT': 60 * 15
}
Usage of the specific cache
New in DRF-extensions 0.2.3
@cache_response can also take an optional keyword argument, cache, which directs the decorator
to use a specific cache (from your CACHES setting) when caching results.
By default, the default cache will be used, but you can specify any cache you want:
class CityView(views.APIView):
@cache_response(60 * 15, cache='special_cache')
def get(self, request, *args, **kwargs):
...
You can specify what cache to use by default in settings:
REST_FRAMEWORK_EXTENSIONS = {
'DEFAULT_USE_CACHE': 'special_cache'
}
Cache key
By default every cached data from @cache_response decorator stored by key, which calculated
with DefaultKeyConstructor.
You can change cache key by providing key_func argument, which must be callable:
def calculate_cache_key(view_instance, view_method,
request, args, kwargs):
return '.'.join([
len(args),
len(kwargs)
])
class CityView(views.APIView):
@cache_response(60 * 15, key_func=calculate_cache_key)
def get(self, request, *args, **kwargs):
...
You can implement view method and use it for cache key calculation by specifying key_func argument as string:
class CityView(views.APIView):
@cache_response(60 * 15, key_func='calculate_cache_key')
def get(self, request, *args, **kwargs):
...
def calculate_cache_key(self, view_instance, view_method,
request, args, kwargs):
return '.'.join([
len(args),
len(kwargs)
])
Key calculation function will be called with next parameters:
- view_instance - view instance of decorated method
- view_method - decorated method
- request - decorated method request
- args - decorated method positional arguments
- kwargs - decorated method keyword arguments
Default key function
If @cache_response decorator used without key argument then default key function will be used. You can change this function in
settings:
REST_FRAMEWORK_EXTENSIONS = {
'DEFAULT_CACHE_KEY_FUNC':
'rest_framework_extensions.utils.default_cache_key_func'
}
default_cache_key_func uses DefaultKeyConstructor as a base for key calculation.
Caching errors
New in DRF-extensions 0.2.7
By default every response is cached, even failed. For example:
class CityView(views.APIView):
@cache_response()
def get(self, request, *args, **kwargs):
raise Exception("500 error comes from here")
The first request to CityView.get will fail with a 500 status code, and subsequent requests to this endpoint will
return the 500 error from the cache.
You can change this behaviour by turning off caching error responses:
class CityView(views.APIView):
@cache_response(cache_errors=False)
def get(self, request, *args, **kwargs):
raise Exception("500 error comes from here")
You can change default behaviour by changing DEFAULT_CACHE_ERRORS setting:
REST_FRAMEWORK_EXTENSIONS = {
'DEFAULT_CACHE_ERRORS': False
}
CacheResponseMixin
It is common to cache standard viewset retrieve and list
methods. That is why CacheResponseMixin exists. Just mix it into viewset implementation and those methods will
use functions, defined in REST_FRAMEWORK_EXTENSIONS settings:
- "DEFAULT_OBJECT_CACHE_KEY_FUNC" for
retrieve method
- "DEFAULT_LIST_CACHE_KEY_FUNC" for
list method
By default those functions are using DefaultKeyConstructor and extends it:
- With
RetrieveSqlQueryKeyBit for "DEFAULT_OBJECT_CACHE_KEY_FUNC"
- With
ListSqlQueryKeyBit and PaginationKeyBit for "DEFAULT_LIST_CACHE_KEY_FUNC"
You can change those settings for custom cache key generation:
REST_FRAMEWORK_EXTENSIONS = {
'DEFAULT_OBJECT_CACHE_KEY_FUNC':
'rest_framework_extensions.utils.default_object_cache_key_func',
'DEFAULT_LIST_CACHE_KEY_FUNC':
'rest_framework_extensions.utils.default_list_cache_key_func',
'DEFAULT_CACHE_RESPONSE_TIMEOUT': None,
}
Mixin example usage:
from myapps.serializers import UserSerializer
from rest_framework_extensions.cache.mixins import CacheResponseMixin
class UserViewSet(CacheResponseMixin, viewsets.ModelViewSet):
serializer_class = UserSerializer
You can change cache key function by providing object_cache_key_func or
list_cache_key_func methods in view class:
class UserViewSet(CacheResponseMixin, viewsets.ModelViewSet):
serializer_class = UserSerializer
def object_cache_key_func(self, **kwargs):
return 'some key for object'
def list_cache_key_func(self, **kwargs):
return 'some key for list'
Of course you can use custom key constructor:
from yourapp.key_constructors import (
CustomObjectKeyConstructor,
CustomListKeyConstructor,
)
class UserViewSet(CacheResponseMixin, viewsets.ModelViewSet):
serializer_class = UserSerializer
object_cache_key_func = CustomObjectKeyConstructor()
list_cache_key_func = CustomListKeyConstructor()
New in DRF-extensions development
You can change cache timeout by providing object_cache_timeout or
list_cache_timeout properties in view class:
class UserViewSet(CacheResponseMixin, viewsets.ModelViewSet):
serializer_class = UserSerializer
    object_cache_timeout = 3600  # one hour (in seconds)
list_cache_timeout = 60 # one minute (in seconds)
If you want to cache only retrieve method then you could use rest_framework_extensions.cache.mixins.RetrieveCacheResponseMixin.
If you want to cache only list method then you could use rest_framework_extensions.cache.mixins.ListCacheResponseMixin.
Key constructors
As you could see from the previous section, cache key calculation might seem like a fairly simple operation. But let's look at the next example. We make an ordinary HTTP request to the cities resource:
# Request
GET /cities/ HTTP/1.1
Accept: application/json
# Response
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
['Moscow', 'London', 'Paris']
By the moment all goes fine - response returned and cached. Let's make the same request requiring XML response:
# Request
GET /cities/ HTTP/1.1
Accept: application/xml
# Response
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
['Moscow', 'London', 'Paris']
What is that? Oh, we forgot about format negotiations. We can add format to key bits:
def calculate_cache_key(view_instance, view_method,
request, args, kwargs):
    return '.'.join([
        str(len(args)),
        str(len(kwargs)),
        request.accepted_renderer.format  # here it is
    ])
# Request
GET /cities/ HTTP/1.1
Accept: application/xml
# Response
HTTP/1.1 200 OK
Content-Type: application/xml; charset=UTF-8
<?xml version="1.0" encoding="utf-8"?>
<root>
<list-item>Moscow</list-item>
<list-item>London</list-item>
<list-item>Paris</list-item>
</root>
That's cool now - we have different responses for different formats with different cache keys. But there are many cases, where key should be different for different requests:
- Response format (json, xml);
- User (exact authorized user or anonymous);
- Different request meta data (request.META['REMOTE_ADDR']);
- Language (ru, en);
- Headers;
- Query params. For example,
jsonp resources need callback param, which rendered in response;
- Pagination. We should show different data for different pages;
- Etc...
Of course we can use custom calculate_cache_key methods and reuse them for different API methods, but we can't reuse just parts of them. For example, one method depends on user id and language, but another only on user id. How to be more DRYish? Let's see some magic:
from rest_framework_extensions.key_constructor.constructors import (
KeyConstructor
)
from rest_framework_extensions.key_constructor import bits
from your_app.utils import get_city_by_ip
class CityGetKeyConstructor(KeyConstructor):
unique_method_id = bits.UniqueMethodIdKeyBit()
format = bits.FormatKeyBit()
language = bits.LanguageKeyBit()
class CityHeadKeyConstructor(CityGetKeyConstructor):
user = bits.UserKeyBit()
request_meta = bits.RequestMetaKeyBit(params=['REMOTE_ADDR'])
class CityView(views.APIView):
@cache_response(key_func=CityGetKeyConstructor())
def get(self, request, *args, **kwargs):
cities = City.objects.all().values_list('name', flat=True)
return Response(cities)
@cache_response(key_func=CityHeadKeyConstructor())
def head(self, request, *args, **kwargs):
city = ''
user = self.request.user
if user.is_authenticated and user.city:
            city = user.city.name
if not city:
city = get_city_by_ip(request.META['REMOTE_ADDR'])
return Response(city)
Firstly, let's revise the CityView.get method's cache key calculation. It is constructed from 3 bits:
- unique_method_id - remember our default key calculation? Here it is. Just one of the cache key bits.
head method has different set of bits and they can't collide with get method bits. But there could be another view class with the same bits.
- format - key would be different for different formats.
- language - key would be different for different languages.
The second method head has the same unique_method_id, format and language bits, but extends them with 2 more:
- user - key would be different for different users. As you can see in response calculation we use
request.user instance. For different users we need different responses.
- request_meta - key would be different for different ip addresses. As you can see in response calculation we are falling back to getting city from ip address if couldn't get it from authorized user model.
All default key bits are listed in this section.
Default key constructor
DefaultKeyConstructor is located in rest_framework_extensions.key_constructor.constructors module and constructs a key
from unique method id, request format and request language. It has the following implementation:
class DefaultKeyConstructor(KeyConstructor):
unique_method_id = bits.UniqueMethodIdKeyBit()
format = bits.FormatKeyBit()
language = bits.LanguageKeyBit()
How key constructor works
Key constructor class works in the same manner as the standard django forms and
key bits used like form fields. Lets go through key construction steps for DefaultKeyConstructor.
Firstly, constructor starts iteration over every key bit:
- unique_method_id
- format
- language
Then constructor gets data from every key bit calling method get_data:
- unique_method_id -
u'your_app.views.SometView.get'
- format -
u'json'
- language -
u'en'
Every key bit get_data method is called with next arguments:
- view_instance - view instance of decorated method
- view_method - decorated method
- request - decorated method request
- args - decorated method positional arguments
- kwargs - decorated method keyword arguments
After this it combines every key bit data to one dict, which keys are a key bits names in constructor, and values are returned data:
{
'unique_method_id': u'your_app.views.SometView.get',
'format': u'json',
'language': u'en'
}
Then constructor dumps resulting dict to json:
'{"unique_method_id": "your_app.views.SometView.get", "language": "en", "format": "json"}'
And finally compresses json with md5 and returns hash value:
'b04f8f03c89df824e0ecd25230a90f0e0ebe184cf8c0114342e9471dd2275baa'
Custom key bit
We are going to create a simple key bit which could be used in real applications with next properties:
- High read rate
- Low write rate
The task is - cache every read request and invalidate all cache data after write to any model, which used in API. This approach
lets us avoid thinking about granular cache invalidation - we just flush the cache after any model instance change/creation/deletion.
Lets create models:
# models.py
from django.db import models
class Group(models.Model):
title = models.CharField()
class Profile(models.Model):
name = models.CharField()
group = models.ForeignKey(Group)
Define serializers:
# serializers.py
from yourapp.models import Group, Profile
from rest_framework import serializers
class GroupSerializer(serializers.ModelSerializer):
class Meta:
model = Group
class ProfileSerializer(serializers.ModelSerializer):
group = GroupSerializer()
class Meta:
model = Profile
Create views:
# views.py
from yourapp.serializers import GroupSerializer, ProfileSerializer
from yourapp.models import Group, Profile
class GroupViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = GroupSerializer
queryset = Group.objects.all()
class ProfileViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = ProfileSerializer
queryset = Profile.objects.all()
And finally register views in router:
# urls.py
from yourapp.views import GroupViewSet,ProfileViewSet
router = DefaultRouter()
router.register(r'groups', GroupViewSet)
router.register(r'profiles', ProfileViewSet)
urlpatterns = router.urls
At the moment we have API, but it's not cached. Lets cache it and create our custom key bit:
# views.py
import datetime
from django.core.cache import cache
from django.utils.encoding import force_str
from yourapp.serializers import GroupSerializer, ProfileSerializer
from rest_framework_extensions.cache.decorators import cache_response
from rest_framework_extensions.key_constructor.constructors import (
DefaultKeyConstructor
)
from rest_framework_extensions.key_constructor.bits import (
KeyBitBase,
RetrieveSqlQueryKeyBit,
ListSqlQueryKeyBit,
PaginationKeyBit
)
class UpdatedAtKeyBit(KeyBitBase):
def get_data(self, **kwargs):
key = 'api_updated_at_timestamp'
value = cache.get(key, None)
if not value:
value = datetime.datetime.utcnow()
cache.set(key, value=value)
return force_str(value)
class CustomObjectKeyConstructor(DefaultKeyConstructor):
retrieve_sql = RetrieveSqlQueryKeyBit()
updated_at = UpdatedAtKeyBit()
class CustomListKeyConstructor(DefaultKeyConstructor):
list_sql = ListSqlQueryKeyBit()
pagination = PaginationKeyBit()
updated_at = UpdatedAtKeyBit()
class GroupViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = GroupSerializer
@cache_response(key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(GroupViewSet, self).retrieve(*args, **kwargs)
@cache_response(key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(GroupViewSet, self).list(*args, **kwargs)
class ProfileViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = ProfileSerializer
@cache_response(key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(ProfileViewSet, self).retrieve(*args, **kwargs)
@cache_response(key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(ProfileViewSet, self).list(*args, **kwargs)
As you can see, UpdatedAtKeyBit just adds to the key the time when the API models were last updated. If there is no
information about it then a new datetime will be used for the key bit data.
Lets write cache invalidation. We just connect models to standard signals and change value in cache by key api_updated_at_timestamp:
# models.py
import datetime
from django.db import models
from django.db.models.signals import post_save, post_delete
def change_api_updated_at(sender=None, instance=None, *args, **kwargs):
cache.set('api_updated_at_timestamp', datetime.datetime.utcnow())
class Group(models.Model):
title = models.CharField()
class Profile(models.Model):
name = models.CharField()
group = models.ForeignKey(Group)
for model in [Group, Profile]:
post_save.connect(receiver=change_api_updated_at, sender=model)
post_delete.connect(receiver=change_api_updated_at, sender=model)
And that's it. When any model changes then value in cache by key api_updated_at_timestamp will be changed too. After this every
key constructor, that used UpdatedAtKeyBit, will construct new keys and @cache_response decorator will
cache data in new places.
Key constructor params
New in DRF-extensions 0.2.3
You can change params attribute for specific key bit by providing params dict for key constructor initialization
function. For example, here is custom key constructor, which inherits from DefaultKeyConstructor
and adds geoip key bit:
class CityKeyConstructor(DefaultKeyConstructor):
geoip = bits.RequestMetaKeyBit(params=['GEOIP_CITY'])
If you wanted to use GEOIP_COUNTRY, you could create new key constructor:
class CountryKeyConstructor(DefaultKeyConstructor):
geoip = bits.RequestMetaKeyBit(params=['GEOIP_COUNTRY'])
But there is another way. You can send params in key constructor initialization method. This is the dict attribute, where
keys are bit names and values are bit params attribute value (look at CountryView):
class CityKeyConstructor(DefaultKeyConstructor):
geoip = bits.RequestMetaKeyBit(params=['GEOIP_CITY'])
class CityView(views.APIView):
@cache_response(key_func=CityKeyConstructor())
def get(self, request, *args, **kwargs):
...
class CountryView(views.APIView):
@cache_response(key_func=CityKeyConstructor(
params={'geoip': ['GEOIP_COUNTRY']}
))
def get(self, request, *args, **kwargs):
...
If there is no item provided for key bit then default key bit params value will be used.
Constructor's bits list
You can dynamically change key constructor's bits list in initialization method by altering bits attribute:
class CustomKeyConstructor(DefaultKeyConstructor):
def __init__(self, *args, **kwargs):
super(CustomKeyConstructor, self).__init__(*args, **kwargs)
self.bits['geoip'] = bits.RequestMetaKeyBit(
params=['GEOIP_CITY']
)
Default key bits
Out of the box DRF-extensions has some basic key bits. They are all located in rest_framework_extensions.key_constructor.bits module.
FormatKeyBit
Retrieves format info from request. Usage example:
class MyKeyConstructor(KeyConstructor):
format = FormatKeyBit()
LanguageKeyBit
Retrieves active language for request. Usage example:
class MyKeyConstructor(KeyConstructor):
language = LanguageKeyBit()
UserKeyBit
Retrieves user id from request. If it is anonymous then returns "anonymous" string. Usage example:
class MyKeyConstructor(KeyConstructor):
user = UserKeyBit()
RequestMetaKeyBit
Retrieves data from request.META dict.
Usage example:
class MyKeyConstructor(KeyConstructor):
ip_address_and_user_agent = bits.RequestMetaKeyBit(
['REMOTE_ADDR', 'HTTP_USER_AGENT']
)
You can use * for retrieving all meta data to key bit:
New in DRF-extensions 0.2.7
class MyKeyConstructor(KeyConstructor):
all_request_meta = bits.RequestMetaKeyBit('*')
HeadersKeyBit
Same as RequestMetaKeyBit retrieves data from request.META dict.
The difference is that HeadersKeyBit allows you to use normal header names:
class MyKeyConstructor(KeyConstructor):
user_agent_and_geobase_id = bits.HeadersKeyBit(
['user-agent', 'x-geobase-id']
)
# will process request.META['HTTP_USER_AGENT'] and
# request.META['HTTP_X_GEOBASE_ID']
You can use * for retrieving all headers to key bit:
New in DRF-extensions 0.2.7
class MyKeyConstructor(KeyConstructor):
all_headers = bits.HeadersKeyBit('*')
ArgsKeyBit
New in DRF-extensions 0.2.7
Retrieves data from the view's positional arguments.
A list of position indices can be passed to indicate which arguments to use. For retrieving all arguments you can use * which is also the default value:
class MyKeyConstructor(KeyConstructor):
args = bits.ArgsKeyBit() # will use all positional arguments
class MyKeyConstructor(KeyConstructor):
args = bits.ArgsKeyBit('*') # same as above
class MyKeyConstructor(KeyConstructor):
args = bits.ArgsKeyBit([0, 2])
KwargsKeyBit
New in DRF-extensions 0.2.7
Retrieves data from the view's keyword arguments.
A list of keyword argument names can be passed to indicate which kwargs to use. For retrieving all kwargs you can use * which is also the default value:
class MyKeyConstructor(KeyConstructor):
kwargs = bits.KwargsKeyBit() # will use all keyword arguments
class MyKeyConstructor(KeyConstructor):
kwargs = bits.KwargsKeyBit('*') # same as above
class MyKeyConstructor(KeyConstructor):
kwargs = bits.KwargsKeyBit(['user_id', 'city'])
QueryParamsKeyBit
Retrieves data from request.GET dict.
Usage example:
class MyKeyConstructor(KeyConstructor):
part_and_callback = bits.QueryParamsKeyBit(
['part', 'callback']
)
You can use * for retrieving all query params to key bit which is also the default value:
New in DRF-extensions 0.2.7
class MyKeyConstructor(KeyConstructor):
all_query_params = bits.QueryParamsKeyBit('*') # all qs parameters
class MyKeyConstructor(KeyConstructor):
all_query_params = bits.QueryParamsKeyBit() # same as above
PaginationKeyBit
Inherits from QueryParamsKeyBit and returns data from used pagination params.
class MyKeyConstructor(KeyConstructor):
pagination = bits.PaginationKeyBit()
ListSqlQueryKeyBit
Retrieves sql query for view.filter_queryset(view.get_queryset()) filtering.
class MyKeyConstructor(KeyConstructor):
list_sql_query = bits.ListSqlQueryKeyBit()
RetrieveSqlQueryKeyBit
Retrieves sql query for retrieving exact object.
class MyKeyConstructor(KeyConstructor):
retrieve_sql_query = bits.RetrieveSqlQueryKeyBit()
UniqueViewIdKeyBit
Combines data about view module and view class name.
class MyKeyConstructor(KeyConstructor):
unique_view_id = bits.UniqueViewIdKeyBit()
UniqueMethodIdKeyBit
Combines data about view module, view class name and view method name.
class MyKeyConstructor(KeyConstructor):
unique_view_id = bits.UniqueMethodIdKeyBit()
ListModelKeyBit
New in DRF-extensions 0.3.2
Computes the semantic fingerprint of a list of objects returned by view.filter_queryset(view.get_queryset())
using a flat representation of all objects' values.
class MyKeyConstructor(KeyConstructor):
list_model_values = bits.ListModelKeyBit()
RetrieveModelKeyBit
New in DRF-extensions 0.3.2
Computes the semantic fingerprint of a particular object returned by view.get_object().
class MyKeyConstructor(KeyConstructor):
retrieve_model_values = bits.RetrieveModelKeyBit()
Conditional requests
The etag functionality is pending an overhaul and has been temporarily removed since 0.4.0.
See discussion in Issue #177
Bulk operations
New in DRF-extensions 0.2.4
Bulk operations allow you to perform operations over a set of objects with one request. There is a third-party package
django-rest-framework-bulk with support for all CRUD methods, but it iterates over every
instance in bulk operation, serializes it and only after that executes operation.
It plays nice with create or update
operations, but becomes unacceptable with partial update and delete methods over the QuerySet. Such kind of
QuerySet could contain thousands of objects and should be performed as database query over the set at once.
Please note - DRF-extensions bulk operations applies over QuerySet, not over instances. It means that:
- No serializer's
save or delete methods would be called
- No viewset's
pre_save, post_save, pre_delete and post_delete would be called
- No model signals would be called
Safety
Bulk operations are very dangerous in case of making stupid mistakes. For example you wanted to delete user instance
with DELETE request from your client application.
# Request
DELETE /users/1/ HTTP/1.1
Accept: application/json
# Response
HTTP/1.1 204 NO CONTENT
Content-Type: application/json; charset=UTF-8
That was an example of a successful deletion. But there is a common situation where the client could not get the instance id and sends a
request to the endpoint without it:
# Request
DELETE /users/ HTTP/1.1
Accept: application/json
# Response
HTTP/1.1 204 NO CONTENT
Content-Type: application/json; charset=UTF-8
If you used bulk destroy mixin for /users/ endpoint, then all your user objects would be deleted.
To protect from such confusions DRF-extensions asks you to send X-BULK-OPERATION header
for every bulk operation request. With this protection previous example would not delete any user instances:
# Request
DELETE /users/ HTTP/1.1
Accept: application/json
# Response
HTTP/1.1 400 BAD REQUEST
Content-Type: application/json; charset=UTF-8
{
"detail": "Header 'X-BULK-OPERATION' should be provided for bulk operation."
}
With X-BULK-OPERATION header it works as expected - deletes all user instances:
# Request
DELETE /users/ HTTP/1.1
Accept: application/json
X-BULK-OPERATION: true
# Response
HTTP/1.1 204 NO CONTENT
Content-Type: application/json; charset=UTF-8
You can change bulk operation header name in settings:
REST_FRAMEWORK_EXTENSIONS = {
'DEFAULT_BULK_OPERATION_HEADER_NAME': 'X-CUSTOM-BULK-OPERATION'
}
To turn off protection you can set DEFAULT_BULK_OPERATION_HEADER_NAME as None.
Bulk destroy
This mixin allows you to delete many instances with one DELETE request.
from rest_framework_extensions.bulk_operations.mixins import ListDestroyModelMixin
class UserViewSet(ListDestroyModelMixin, viewsets.ModelViewSet):
serializer_class = UserSerializer
Bulk destroy example - delete all users whose emails end with gmail.com:
# Request
DELETE /users/?email__endswith=gmail.com HTTP/1.1
Accept: application/json
X-BULK-OPERATION: true
# Response
HTTP/1.1 204 NO CONTENT
Content-Type: application/json; charset=UTF-8
Bulk update
This mixin allows you to update many instances with one PATCH request. Note, that this mixin works only with partial update.
from rest_framework_extensions.mixins import ListUpdateModelMixin
class UserViewSet(ListUpdateModelMixin, viewsets.ModelViewSet):
serializer_class = UserSerializer
Bulk partial update example - set the email_provider of every user to google, if its email ends with gmail.com:
# Request
PATCH /users/?email__endswith=gmail.com HTTP/1.1
Accept: application/json
X-BULK-OPERATION: true
{"email_provider": "google"}
# Response
HTTP/1.1 204 NO CONTENT
Content-Type: application/json; charset=UTF-8
Settings
DRF-extensions follows Django Rest Framework approach in settings implementation.
In Django Rest Framework you specify custom settings by changing REST_FRAMEWORK variable in settings file:
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.YAMLRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.YAMLParser',
)
}
In DRF-extensions there is a magic variable too called REST_FRAMEWORK_EXTENSIONS:
REST_FRAMEWORK_EXTENSIONS = {
'DEFAULT_CACHE_RESPONSE_TIMEOUT': 60 * 15
}
Accessing settings
If you need to access the values of DRF-extensions API settings in your project, you should use the extensions_api_settings object. For example:
from rest_framework_extensions.settings import extensions_api_settings
print(extensions_api_settings.DEFAULT_CACHE_RESPONSE_TIMEOUT)
Release notes
You can read about versioning, deprecation policy and upgrading from
Django REST framework documentation.
0.8.0
- Added support for Django 4.2 to 5.2
- DRF 3.15 support added
- Python 3.10 to 3.12 support added
- Removed deprecated libs
- Added handling for ValidationError while filtering queryset in NestedViewSetMixin
- Modified lookup value to use lookup_url_kwarg as well
- Migrated CI to Github actions
0.7.1
- Added support for Django 3.2
- Dropped drf 3.11
0.7.0
- Added support for Django 3.1
- Dropped support below Django 2.2
- Added support for DRF 3.12
- fix(PartialUpdateSerializerMixin): support nesting on same instance
0.6.0
Jan 27, 2020
- Added support for Django 3.0 (#276)
- Dropped support for Django 2.0
- Added support for DRF 3.10 and 3.11 (#261, #279)
- Added support for Python 3.8 (#282)
- Added paginate decorator (#266)
- Added limit/offset and cursor pagination to PaginationKeyBit (#204)
0.5.0
May 10, 2019
- Dropped python 2.7 and 3.4
- Fix possible header mutation issue
- Added ability to use a specific cache timeouts for
CacheResponseMixin
- Test against Django 2.1, DRF 3.9 and django-filter 2.0.0
- Dropped support of older DRF version lower than 3.9
- Django 2.2 support added
0.4.0
Sep 5, 2018
- Added support for django 1.11 and 2.0
- Dropped support for django versions lower than 1.11
- Nested routes with over 2 levels now respect
lookup_value_regex
- Added support for DRF 3.8
- Dropped support of older DRF versions lower than 3.8
- Cache only the rendered response instead of the whole response object
- The etag functionalities are not enabled by default; they have to be enabled manually
0.3.2
Jan 4, 2017
- Added
rest_framework_extensions.exceptions.PreconditionRequiredException as subclass of rest_framework.exceptions.APIException
- Added
@api_etag decorator function and APIETAGProcessor that uses semantic ETags per API resource, decoupled from views, such that it can be used in optimistic concurrency control
- Added new default key bits
RetrieveModelKeyBit and ListModelKeyBit for computing the semantic fingerprint of a django model instance
- Added
APIETAGMixin to be used in DRF viewsets and views
- Added new settings for default implementation of the API ETag functions:
DEFAULT_API_OBJECT_ETAG_FUNC, DEFAULT_API_LIST_ETAG_FUNC
- Added test application for functional tests and demo as
tests_app/tests/functional/concurrency/conditional_request
- Added unit tests for the
@api_etag decorator
- DRF 3.5.x, Django pre-1.10 compatibility of the key bit construction
- (Test-)Code cleanup
0.3.1
Sep 29, 2016
- Fix
schema_urls ExtendedDefaultRouter compatibility issue introduced by DRF 3.4.0
- Removed deprecated @action() and @link() decorators
- DRF 3.4.x compatibility
- Django 1.9 and 1.10 compatibility
0.2.8
Sep 21, 2015
- Fixed
ListSqlQueryKeyBit and RetrieveSqlQueryKeyBit problems with EmptyResultSet exception (pull).
- All items are now by default in ArgsKeyBit, KwargsKeyBit and QueryParamsKeyBit
- Respect parent lookup regex value for Nested routes (issue).
0.2.7
Feb 2, 2015
- DRF 3.x compatibility
- DetailSerializerMixin is now compatible with DRF 3.0
- Added ArgsKeyBit
- Added KwargsKeyBit
- Fixed PartialUpdateSerializerMixin compatibility issue with DRF 3.x
- Added cache_errors attribute for switching caching for error responses
- Added ability to specify usage of all items for RequestMetaKeyBit, HeadersKeyBit
and QueryParamsKeyBit providing
params='*'
- Collection level controllers is in pending deprecation
- Controller endpoint name is in pending deprecation
0.2.6
Sep 9, 2014
- Usage of django.core.cache.caches for
django >= 1.7
- Documented ETag usage with GZipMiddleware
- Fixed
ListSqlQueryKeyBit and RetrieveSqlQueryKeyBit problems
with EmptyResultSet.
- Fixed cache response compatibility issue
with DRF 2.4.x
0.2.5
July 9, 2014
0.2.4
July 7, 2014
- Added tests for Django REST Framework 2.3.14
- Added Bulk operations
- Fixed extended routers compatibility issue with default controller decorators
- Documented pluggable router mixins
- Added nested routes
0.2.3
Apr. 25, 2014
- Added PartialUpdateSerializerMixin
- Added Key constructor params
- Documented dynamically constructor's bits list altering
- Added ability to use a specific cache for
@cache_response decorator
0.2.2
Mar. 23, 2014
- Added PaginateByMaxMixin
- Added ExtendedDjangoObjectPermissions
- Added tests for django 1.7
0.2.1
Feb. 1, 2014
- Rewritten tests to nose and tox
- New tests directory structure
- Rewritten HTTP documentation requests examples into more raw manner
- Added trailing_slash on extended routers for Django Rest Framework versions
>=2.3.6 (which supports this feature)
- Added caching
- Added key constructor
- Added conditional requests with Etag calculation
- Added Cache/ETAG mixins
- Added CacheResponseMixin
- Added ETAGMixin
- Documented ResourceUriField
- Documented settings customization
0.2
Nov. 5, 2013
- Moved docs from readme to github pages
- Docs generation with Backdoc