beetbox-beets-c1877b7/.git-blame-ignore-revs
# 2014
# flake8-cleanliness in missing
e21c04e9125a28ae0452374acf03d93315eb4381
# 2016
# Removed unicode_literals from library, logging and mediafile
43572f50b0eb3522239d94149d91223e67d9a009
# Removed unicode_literals from plugins
53d2c8d9db87be4d4750ad879bf46176537be73f
# reformat flake8 errors
1db46dfeb6607c164afb247d8da82443677795c1
# 2021
# pyupgrade root
e26276658052947e9464d9726b703335304c7c13
# pyupgrade beets dir
6d1316f463cb7c9390f85bf35b220e250a35004a
# pyupgrade autotag dir
f8b8938fd8bbe91898d0982552bc75d35703d3ef
# pyupgrade dbcore dir
d288f872903c79a7ee7c5a7c9cc690809441196e
# pyupgrade ui directory
432fa557258d9ff01e23ed750f9a86a96239599e
# pyupgrade util dir
af102c3e2f1c7a49e99839e2825906fe01780eec
# fix unused import and flake8
910354a6c617ed5aa643cff666205b43e1557373
# pyupgrade beetsplug and tests
1ec87a3bdd737abe46c6e614051bf9e314db4619
# 2022
# Reformat flake8 config comments
abc3dfbf429b179fac25bd1dff72d577cd4d04c7
# 2023
# Apply formatting tools to all files
a6e5201ff3fad4c69bf24d17bace2ef744b9f51b
# 2024
# Reformat the codebase
85a17ee5039628a6f3cdcb7a03d7d1bd530fbe89
# Fix lint issues
f36bc497c8c8f89004f3f6879908d3f0b25123e1
# Remove some lint exclusions and fix the issues
5f78d1b82b2292d5ce0c99623ba0ec444b80d24c
# 2025
# Fix formatting
c490ac5810b70f3cf5fd8649669838e8fdb19f4d
# Importer restructure
9147577b2b19f43ca827e9650261a86fb0450cef
# Copy paste query, types from library to dbcore
1a045c91668c771686f4c871c84f1680af2e944b
# Library restructure (split library.py into multiple modules)
0ad4e19d4f870db757373f44d12ff3be2441363a
# Docs: fix linting issues
769dcdc88a1263638ae25944ba6b2be3e8933666
# Reformat all docs using docstrfmt
ab5acaabb3cd24c482adb7fa4800c89fd6a2f08d
# Replace format calls with f-strings
4a361bd501e85de12c91c2474c423559ca672852
# Replace percent formatting
9352a79e4108bd67f7e40b1e944c01e0a7353272
# Replace string concatenation (' + ')
1c16b2b3087e9c3635d68d41c9541c4319d0bdbe
# Do not use backslashes to deal with long strings
2fccf64efe82851861e195b521b14680b480a42a
# Do not use explicit indices for logging args when not needed
d93ddf8dd43e4f9ed072a03829e287c78d2570a2
# Moved dev docs
07549ed896d9649562d40b75cd30702e6fa6e975
# Moved plugin docs Further Reading chapter
33f1a5d0bef8ca08be79ee7a0d02a018d502680d
# Moved art.py utility module from beets into beetsplug
28aee0fde463f1e18dfdba1994e2bdb80833722f
beetbox-beets-c1877b7/.github/CODEOWNERS
# assign the entire repo to the maintainers team
* @beetbox/maintainers
# Specific ownerships:
/beets/metadata_plugins.py @semohr
beetbox-beets-c1877b7/.github/ISSUE_TEMPLATE/bug-report.md
---
name: "\U0001F41B Bug report"
about: Report a problem with beets
---
### Problem
Running this command in verbose (`-vv`) mode:
```sh
$ beet -vv (... paste here ...)
```
Led to this problem:
```
(paste here)
```
Here's a link to the music files that trigger the bug (if relevant):
### Setup
* OS:
* Python version:
* beets version:
* Turning off plugins made the problem go away (yes/no):
My configuration (output of `beet config`) is:
```yaml
(paste here)
```
beetbox-beets-c1877b7/.github/ISSUE_TEMPLATE/config.yml
blank_issues_enabled: false
contact_links:
- name: đź’ˇ Have an idea for a new feature?
url: https://github.com/beetbox/beets/discussions
about: Create a new idea discussion!
- name: 🙇 Need help with beets?
url: https://github.com/beetbox/beets/discussions
about: Create a new help discussion if it hasn't been asked before!
beetbox-beets-c1877b7/.github/ISSUE_TEMPLATE/feature-request.md
---
name: "\U0001F680 Feature request"
about: "Formalize a feature request from GitHub Discussions"
---
### Proposed solution
### Objective
#### Goals
#### Non-goals
#### Anti-goals
beetbox-beets-c1877b7/.github/problem-matchers/sphinx-build.json
{
"problemMatcher": [
{
"owner": "sphinx-build",
"severity": "error",
"pattern": [
{
"regexp": "^(/[^:]+):((\\d+):)?(\\sWARNING:)?\\s*(.+)$",
"file": 1,
"line": 3,
"message": 5
}
]
}
]
}
beetbox-beets-c1877b7/.github/problem-matchers/sphinx-lint.json
{
"problemMatcher": [
{
"owner": "sphinx-lint",
"severity": "error",
"pattern": [
{
"regexp": "^([^:]+):(\\d+):\\s+(.*)\\s\\(([a-z-]+)\\)$",
"file": 1,
"line": 2,
"message": 3,
"code": 4
}
]
}
]
}
beetbox-beets-c1877b7/.github/pull_request_template.md
## Description
Fixes #X.
(...)
## To Do
- [ ] Documentation. (If you've added a new command-line flag, for example, find the appropriate page under `docs/` to describe it.)
- [ ] Changelog. (Add an entry to `docs/changelog.rst` to the bottom of one of the lists near the top of the document.)
- [ ] Tests. (Very much encouraged but not strictly required.)
beetbox-beets-c1877b7/.github/stale.yml
# Configuration for probot-stale - https://github.com/probot/stale
daysUntilClose: 7
staleLabel: stale
issues:
daysUntilStale: 60
onlyLabels:
- needinfo
markComment: >
Is this still relevant? If so, what is blocking it?
Is there anything you can do to help move it forward?
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
pulls:
daysUntilStale: 120
markComment: >
Is this still relevant? If so, what is blocking it?
Is there anything you can do to help move it forward?
This pull request has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
beetbox-beets-c1877b7/.github/workflows/changelog_reminder.yaml
name: Verify changelog updated
on:
pull_request_target:
types:
- opened
- ready_for_review
jobs:
check_changes:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Get all updated Python files
id: changed-python-files
uses: tj-actions/changed-files@v46
with:
files: |
**.py
- name: Check for the changelog update
id: changelog-update
uses: tj-actions/changed-files@v46
with:
files: docs/changelog.rst
- name: Comment under the PR with a reminder
if: steps.changed-python-files.outputs.any_changed == 'true' && steps.changelog-update.outputs.any_changed == 'false'
uses: thollander/actions-comment-pull-request@v2
with:
message: 'Thank you for the PR! The changelog has not been updated, so here is a friendly reminder to check if you need to add an entry.'
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
beetbox-beets-c1877b7/.github/workflows/ci.yaml
name: Test
on:
pull_request:
push:
branches:
- master
concurrency:
# Cancel previous workflow run when a new commit is pushed to a feature branch
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
env:
PY_COLORS: 1
jobs:
test:
name: Run tests
strategy:
fail-fast: false
matrix:
platform: [ubuntu-latest, windows-latest]
python-version: ["3.9", "3.10", "3.11", "3.12"]
runs-on: ${{ matrix.platform }}
env:
IS_MAIN_PYTHON: ${{ matrix.python-version == '3.9' && matrix.platform == 'ubuntu-latest' }}
steps:
- uses: actions/checkout@v4
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- name: Setup Python with poetry caching
# poetry cache requires poetry to already be installed, weirdly
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: poetry
- name: Install PyGobject and release script dependencies on Ubuntu
if: matrix.platform == 'ubuntu-latest'
run: |
sudo apt update
sudo apt install --yes --no-install-recommends ffmpeg gobject-introspection gstreamer1.0-plugins-base python3-gst-1.0 libcairo2-dev libgirepository-2.0-dev pandoc imagemagick
- name: Get changed lyrics files
id: lyrics-update
uses: tj-actions/changed-files@v46
with:
files: |
beetsplug/lyrics.py
test/plugins/test_lyrics.py
- name: Add pytest annotator
uses: liskin/gh-problem-matcher-wrap@v3
with:
linters: pytest
action: add
- if: ${{ env.IS_MAIN_PYTHON != 'true' }}
name: Test without coverage
run: |
poetry install --without=lint --extras=autobpm --extras=lyrics --extras=replaygain --extras=reflink --extras=fetchart --extras=chroma --extras=sonosupdate
poe test
- if: ${{ env.IS_MAIN_PYTHON == 'true' }}
name: Test with coverage
env:
LYRICS_UPDATED: ${{ steps.lyrics-update.outputs.any_changed }}
run: |
poetry install --extras=autobpm --extras=lyrics --extras=docs --extras=replaygain --extras=reflink --extras=fetchart --extras=chroma --extras=sonosupdate
poe docs
poe test-with-coverage
- if: ${{ !cancelled() }}
name: Upload test results to Codecov
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
- if: ${{ env.IS_MAIN_PYTHON == 'true' }}
name: Store the coverage report
uses: actions/upload-artifact@v4
with:
name: coverage-report
path: .reports/coverage.xml
upload-coverage:
name: Upload coverage report
needs: test
runs-on: ubuntu-latest
permissions:
id-token: write
steps:
- uses: actions/checkout@v4
- name: Get the coverage report
uses: actions/download-artifact@v4
with:
name: coverage-report
- name: Upload code coverage
uses: codecov/codecov-action@v5
with:
files: ./coverage.xml
use_oidc: ${{ !(github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork) }}
beetbox-beets-c1877b7/.github/workflows/integration_test.yaml
name: integration tests
on:
workflow_dispatch:
schedule:
- cron: "0 0 * * SUN" # run every Sunday at midnight
jobs:
test_integration:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- uses: actions/setup-python@v5
with:
python-version: 3.9
cache: poetry
- name: Install dependencies
run: poetry install
- name: Test
env:
INTEGRATION_TEST: 1
run: poe test
- name: Check external links in docs
run: poe check-docs-links
- name: Notify on failure
if: ${{ failure() }}
env:
ZULIP_BOT_CREDENTIALS: ${{ secrets.ZULIP_BOT_CREDENTIALS }}
run: |
if [ -z "${ZULIP_BOT_CREDENTIALS}" ]; then
echo "Skipping notify, ZULIP_BOT_CREDENTIALS is unset"
exit 0
fi
curl -X POST https://beets.zulipchat.com/api/v1/messages \
-u "${ZULIP_BOT_CREDENTIALS}" \
-d "type=stream" \
-d "to=github" \
-d "subject=${GITHUB_WORKFLOW} - $(date -u +%Y-%m-%d)" \
-d "content=[${GITHUB_WORKFLOW}#${GITHUB_RUN_NUMBER}](${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}) failed."
beetbox-beets-c1877b7/.github/workflows/lint.yml
name: Lint check
run-name: Lint code
on:
pull_request:
push:
branches:
- master
concurrency:
# Cancel previous workflow run when a new commit is pushed to a feature branch
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
env:
PYTHON_VERSION: 3.9
jobs:
changed-files:
runs-on: ubuntu-latest
name: Get changed files
outputs:
any_docs_changed: ${{ steps.changed-doc-files.outputs.any_changed }}
any_python_changed: ${{ steps.raw-changed-python-files.outputs.any_changed }}
changed_doc_files: ${{ steps.changed-doc-files.outputs.all_changed_files }}
changed_python_files: ${{ steps.changed-python-files.outputs.all_changed_files }}
steps:
- uses: actions/checkout@v4
- name: Get changed docs files
id: changed-doc-files
uses: tj-actions/changed-files@v46
with:
files: |
docs/**
- name: Get changed python files
id: raw-changed-python-files
uses: tj-actions/changed-files@v46
with:
files: |
**.py
poetry.lock
- name: Check changed python files
id: changed-python-files
env:
CHANGED_PYTHON_FILES: ${{ steps.raw-changed-python-files.outputs.all_changed_files }}
run: |
if [[ " $CHANGED_PYTHON_FILES " == *" poetry.lock "* ]]; then
# if poetry.lock is changed, we need to check everything
CHANGED_PYTHON_FILES="."
fi
echo "all_changed_files=$CHANGED_PYTHON_FILES" >> "$GITHUB_OUTPUT"
format:
if: needs.changed-files.outputs.any_python_changed == 'true'
runs-on: ubuntu-latest
name: Check formatting
needs: changed-files
steps:
- uses: actions/checkout@v4
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: poetry
- name: Install dependencies
run: poetry install --only=lint
- name: Check code formatting
# the job output will contain colored diffs with what needs adjusting
run: poe check-format
lint:
if: needs.changed-files.outputs.any_python_changed == 'true'
runs-on: ubuntu-latest
name: Check linting
needs: changed-files
steps:
- uses: actions/checkout@v4
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: poetry
- name: Install dependencies
run: poetry install --only=lint
- name: Lint code
run: poe lint --output-format=github ${{ needs.changed-files.outputs.changed_python_files }}
mypy:
if: needs.changed-files.outputs.any_python_changed == 'true'
runs-on: ubuntu-latest
name: Check types with mypy
needs: changed-files
steps:
- uses: actions/checkout@v4
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: poetry
- name: Install dependencies
run: poetry install --only=typing
- name: Type check code
uses: liskin/gh-problem-matcher-wrap@v3
with:
linters: mypy
run: poe check-types --show-column-numbers --no-error-summary .
docs:
if: needs.changed-files.outputs.any_docs_changed == 'true'
runs-on: ubuntu-latest
name: Check docs
needs: changed-files
steps:
- uses: actions/checkout@v4
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: poetry
- name: Install dependencies
run: poetry install --extras=docs
- name: Add Sphinx problem matchers
run: |
echo "::add-matcher::.github/problem-matchers/sphinx-build.json"
echo "::add-matcher::.github/problem-matchers/sphinx-lint.json"
- name: Check docs formatting
run: poe format-docs --check
- name: Lint docs
run: poe lint-docs
- name: Build docs
run: poe docs -- -e 'SPHINXOPTS=--fail-on-warning --keep-going'
beetbox-beets-c1877b7/.github/workflows/make_release.yaml
name: Make a Beets Release
on:
workflow_dispatch:
inputs:
version:
description: 'Version of the new release, just as a number with no prepended "v"'
required: true
env:
PYTHON_VERSION: 3.9
NEW_VERSION: ${{ inputs.version }}
NEW_TAG: v${{ inputs.version }}
jobs:
increment-version:
name: Bump version, commit and create tag
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: poetry
- name: Install dependencies
run: poetry install --with=release --extras=docs
- name: Bump project version
run: poe bump "${{ env.NEW_VERSION }}"
- uses: EndBug/add-and-commit@v9
id: commit_and_tag
name: Commit the changes and create tag
with:
message: "Increment version to ${{ env.NEW_VERSION }}"
tag: "${{ env.NEW_TAG }} --force"
build:
name: Get changelog and build the distribution package
runs-on: ubuntu-latest
needs: increment-version
outputs:
changelog: ${{ steps.generate_changelog.outputs.changelog }}
steps:
- uses: actions/checkout@v4
with:
ref: ${{ env.NEW_TAG }}
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: poetry
- name: Install dependencies
run: poetry install --with=release --extras=docs
- name: Install pandoc
run: sudo apt update && sudo apt install pandoc -y
- name: Obtain the changelog
id: generate_changelog
run: |
poe docs
{
echo 'changelog<<EOF'
# (changelog-generating command elided in the source)
echo EOF
} >> "$GITHUB_OUTPUT"
- name: Build a binary wheel and a source tarball
run: poe build
- name: Store the distribution packages
uses: actions/upload-artifact@v4
with:
name: python-package-distributions
path: dist/
publish-to-pypi:
name: Publish distribution 📦 to PyPI
runs-on: ubuntu-latest
needs: build
environment:
name: pypi
url: https://pypi.org/p/beets
permissions:
id-token: write
steps:
- name: Download all the dists
uses: actions/download-artifact@v4
with:
name: python-package-distributions
path: dist/
- name: Publish distribution 📦 to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
make-github-release:
name: Create GitHub release
runs-on: ubuntu-latest
needs: [build, publish-to-pypi]
env:
CHANGELOG: ${{ needs.build.outputs.changelog }}
steps:
- name: Download all the dists
uses: actions/download-artifact@v4
with:
name: python-package-distributions
path: dist/
- name: Create a GitHub release
id: make_release
uses: ncipollo/release-action@v1
with:
tag: ${{ env.NEW_TAG }}
name: Release ${{ env.NEW_TAG }}
body: ${{ env.CHANGELOG }}
artifacts: dist/*
- name: Send release toot to Fosstodon
uses: cbrgm/mastodon-github-action@v2
continue-on-error: true
with:
access-token: ${{ secrets.MASTODON_ACCESS_TOKEN }}
url: ${{ secrets.MASTODON_URL }}
message: "Version ${{ env.NEW_TAG }} of beets has been released! Check out all of the new changes at ${{ steps.make_release.outputs.html_url }}"
beetbox-beets-c1877b7/.gitignore
# general hidden files/directories
.DS_Store
.idea
# file patterns
*~
# Project Specific patterns
man
# The rest is from https://www.gitignore.io/api/python
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
coverage.xml
*,cover
.hypothesis/
.reports
# Flask stuff:
instance/
.webassets-cache
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# pyenv
.python-version
# dotenv
.env
# virtualenv
env/
venv/
.venv/
ENV/
# Spyder project settings
.spyderproject
# Rope project settings
.ropeproject
# PyDev and Eclipse project settings
/.project
/.pydevproject
/.settings
.vscode
# pyright
pyrightconfig.json
beetbox-beets-c1877b7/.pre-commit-config.yaml
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: local
hooks:
- id: format
name: Format Python files
entry: poe format
language: system
files: '.*.py'
pass_filenames: true
- id: format-docs
name: Format docs
entry: poe format-docs
language: system
files: '.*.rst'
pass_filenames: true
beetbox-beets-c1877b7/.readthedocs.yaml
version: 2
build:
os: ubuntu-22.04
tools:
python: "3.11"
sphinx:
configuration: docs/conf.py
python:
install:
- method: pip
path: .
extra_requirements:
- docs
beetbox-beets-c1877b7/CODE_OF_CONDUCT.rst
Contributor Covenant Code of Conduct
====================================
Our Pledge
----------
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
Our Standards
-------------
Examples of behavior that contributes to a positive environment for our
community include:
- Demonstrating empathy and kindness toward other people
- Being respectful of differing opinions, viewpoints, and experiences
- Giving and gracefully accepting constructive feedback
- Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
- Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
- The use of sexualized language or imagery, and sexual attention or advances of
any kind
- Trolling, insulting or derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or email address,
without their explicit permission
- Other conduct which could reasonably be considered inappropriate in a
professional setting
Enforcement Responsibilities
----------------------------
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
Scope
-----
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
Enforcement
-----------
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement here on GitHub.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
Enforcement Guidelines
----------------------
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
1. Correction
~~~~~~~~~~~~~
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
2. Warning
~~~~~~~~~~
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
3. Temporary Ban
~~~~~~~~~~~~~~~~
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
4. Permanent Ban
~~~~~~~~~~~~~~~~
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
Attribution
-----------
This Code of Conduct is adapted from the `Contributor Covenant
<https://www.contributor-covenant.org>`_, version 2.1, available `here
<https://www.contributor-covenant.org/version/2/1/code_of_conduct.html>`_.
Community Impact Guidelines were inspired by Mozilla's code of conduct
enforcement ladder.
For answers to common questions about this code of conduct, see the `FAQ
<https://www.contributor-covenant.org/faq>`_. Translations are available at
`translations <https://www.contributor-covenant.org/translations>`_.
beetbox-beets-c1877b7/CONTRIBUTING.rst
Contributing
============
.. contents::
:depth: 3
Thank you!
----------
First off, thank you for considering contributing to beets! It’s people like you
that make beets continue to succeed.
These guidelines describe how you can help most effectively. By following these
guidelines, you can make life easier for the development team as it indicates
you respect the maintainers’ time; in return, the maintainers will reciprocate
by helping to address your issue, review changes, and finalize pull requests.
Types of Contributions
----------------------
We love to get contributions from our community—you! There are many ways to
contribute, whether you’re a programmer or not.
The first thing to do, regardless of how you'd like to contribute to the
project, is to check out our :doc:`Code of Conduct <code_of_conduct>` and to
keep that in mind while interacting with other contributors and users.
Non-Programming
~~~~~~~~~~~~~~~
- Promote beets! Help get the word out by telling your friends, writing a blog
post, or discussing it on a forum you frequent.
- Improve the documentation_. It’s incredibly easy to contribute here: just find
a page you want to modify and hit the “Edit on GitHub” button in the
upper-right. You can automatically send us a pull request for your changes.
- GUI design. For the time being, beets is a command-line-only affair. But
that’s mostly because we don’t have any great ideas for what a good GUI should
look like. If you have those great ideas, please get in touch.
- Benchmarks. We’d like to have a consistent way of measuring speed improvements
in beets’ tagger and other functionality as well as a way of comparing beets’
performance to other tools. You can help by compiling a library of
freely-licensed music files (preferably with incorrect metadata) for testing
and measurement.
- Think you have a nice config or cool use-case for beets? We’d love to hear
  about it! Submit a post to our `discussion board
  <https://github.com/beetbox/beets/discussions>`__ under the “Show and Tell”
  category for a chance to get featured in the docs.
- Consider helping out fellow users by `responding to support requests
  <https://github.com/beetbox/beets/discussions>`__.
Programming
~~~~~~~~~~~
- As a programmer (even if you’re just a beginner!), you have a ton of
opportunities to get your feet wet with beets.
- For developing plugins, or hacking away at beets, there’s some good
  information in the `“For Developers” section of the docs
  <https://beets.readthedocs.io/en/stable/dev/>`__.
.. _development-tools:
Development Tools
+++++++++++++++++
In order to develop beets, you will need a few tools installed:
- poetry_ for packaging, virtual environment and dependency management
- poethepoet_ to run tasks, such as linting, formatting, testing
The Python community recommends using pipx_ to install stand-alone command-line
applications such as the ones above. pipx_ installs each application in an
isolated virtual environment, where its dependencies will not interfere with
your system and other CLI tools.
If you do not have pipx_ installed on your system, follow the `pipx
installation instructions <https://pipx.pypa.io/stable/installation/>`__ or
.. code-block:: sh
$ python3 -m pip install --user pipx
Install poetry_ and poethepoet_ using pipx_:
::
$ pipx install poetry poethepoet
.. admonition:: Check the ``tool.pipx-install`` section in ``pyproject.toml`` for the supported versions
.. code-block:: toml
[tool.pipx-install]
poethepoet = ">=0.26"
poetry = "<2"
.. _getting-the-source:
Getting the Source
++++++++++++++++++
The easiest way to get started with the latest beets source is to clone the
repository and install ``beets`` in a local virtual environment using poetry_.
This can be done with:
.. code-block:: bash
$ git clone https://github.com/beetbox/beets.git
$ cd beets
$ poetry install
This will install ``beets`` and all development dependencies into its own
virtual environment in your ``$POETRY_CACHE_DIR``. See ``poetry install --help``
for installation options, including installing ``extra`` dependencies for
plugins.
In order to run something within this virtual environment, prefix the command
with ``poetry run``, for example ``poetry run pytest``.
On the other hand, it may get tedious to type ``poetry run`` before every
command. Instead, you can activate the virtual environment in your shell with:
::
$ poetry shell
You should see a ``(beets-py3.9)`` prefix in your shell prompt. Now you can run
commands directly, for example:
::
(beets-py3.9) $ pytest
Additionally, the poethepoet_ task runner assists us with the most common
operations. Formatting, linting, and testing are defined as ``poe`` tasks in
pyproject.toml_. Run:
::
$ poe
to see all available tasks. They can be used like this, for example:
.. code-block:: sh
$ poe lint # check code style
$ poe format # fix formatting issues
$ poe test # run tests
# ... fix failing tests
$ poe test --lf # re-run failing tests (note the additional pytest option)
$ poe check-types --pretty # check types with an extra option for mypy
Code Contribution Ideas
+++++++++++++++++++++++
- We maintain a set of `issues marked as “good first issue”
  <https://github.com/beetbox/beets/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22>`__. These are
issues that would serve as a good introduction to the codebase. Claim one and
start exploring!
- Like testing? Our `test coverage <https://codecov.io/github/beetbox/beets>`__
  is somewhat low. You can help out by finding low-coverage modules or checking
  out other testing-related issues.
- There are several ways to improve the tests in general (see :ref:`testing`)
  and some places to think about performance optimization (see the Optimization
  page on the project wiki).
- Not all of our code is up to our coding conventions. In particular, the
  `library API documentation
  <https://beets.readthedocs.org/page/dev/library.html>`__ is currently quite
  sparse. You can help by adding to the docstrings in the code and to the
  documentation pages themselves. beets follows `PEP-257
  <https://peps.python.org/pep-0257/>`__ for docstrings, and in some places we
  also use `ReST autodoc syntax for Sphinx
  <https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html>`__ to,
  for example, refer to a class name (see the docstring sketch after this
  list).
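For instance, a PEP-257-style docstring with a Sphinx cross-reference might
look like this (the function itself is hypothetical):

.. code-block:: python

    def find_duplicates(lib):
        """Return items in the :class:`~beets.library.Library` ``lib`` that
        share an identical artist/title pair.
        """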
Your First Contribution
-----------------------
If this is your first time contributing to an open source project, welcome! If
you are confused at all about how to contribute or what to contribute, take a
look at `this great tutorial <https://makeapullrequest.com>`__, or stop by our
`discussion board`_ if you have any questions.
We maintain a list of issues we reserved for those new to open source labeled
`first timers only`_. Since the goal of these issues is to get users comfortable
with contributing to an open source project, please do not hesitate to ask any
questions.
.. _first timers only: https://github.com/beetbox/beets/issues?q=is%3Aopen+is%3Aissue+label%3A%22first+timers+only%22
How to Submit Your Work
-----------------------
Do you have a great bug fix, new feature, or documentation expansion you’d like
to contribute? Follow these steps to create a GitHub pull request and your code
will ship in no time.
1. Fork the beets repository and clone it (see above) to create a workspace.
2. Install pre-commit, following the instructions `here
   <https://pre-commit.com>`_.
3. Make your changes.
4. Add tests. If you’ve fixed a bug, write a test to ensure that you’ve actually
fixed it. If there’s a new feature or plugin, please contribute tests that
show that your code does what it says.
5. Add documentation. If you’ve added a new command flag, for example, find the
appropriate page under ``docs/`` where it needs to be listed.
6. Add a changelog entry to ``docs/changelog.rst`` near the top of the document.
7. Run the tests and style checker, see :ref:`testing`.
8. Push to your fork and open a pull request (see the sketch after this list)!
   We’ll be in touch shortly.
9. If you add commits to a pull request, please add a comment or re-request a
review after you push them since GitHub doesn’t automatically notify us when
commits are added.
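As a sketch of the fork-and-push workflow above (the branch name is just an
illustration):

.. code-block:: sh

    $ git checkout -b fix-my-bug          # work on a feature branch
    $ git add -A
    $ git commit -m "Fix my bug"          # commit code, tests, docs, changelog
    $ git push origin fix-my-bug          # then open a pull request on GitHub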
Remember, code contributions have four parts: the code, the tests, the
documentation, and the changelog entry. Thank you for contributing!
.. admonition:: Ownership
If you are the owner of a plugin, please consider reviewing pull requests
that affect your plugin. If you are not the owner of a plugin, please
consider becoming one! You can do so by adding an entry to
``.github/CODEOWNERS``. This way, you will automatically receive a review
request for pull requests that adjust the code that you own. If you have any
questions, please ask on our `discussion board`_.
The Code
--------
The documentation has a section on the `library API
<https://beets.readthedocs.org/page/dev/library.html>`__ that serves as an
introduction to beets’ design.
Coding Conventions
------------------
General
~~~~~~~
There are a few coding conventions we use in beets:
- Whenever you access the library database, do so through the provided Library
methods or via a Transaction object. Never call ``lib.conn.*`` directly. For
example, do this:
.. code-block:: python
with g.lib.transaction() as tx:
    rows = tx.query(
        f"SELECT DISTINCT {field} FROM {model._table} ORDER BY {sort_field}"
    )
To fetch Item objects from the database, use lib.items(…) and supply a query
as an argument. Resist the urge to write raw SQL for your query. If you must
use lower-level queries into the database, do this, for example:
.. code-block:: python
with lib.transaction() as tx:
    rows = tx.query("SELECT path FROM items WHERE album_id = ?", (album_id,))
Transaction objects help control concurrent access to the database and assist
in debugging conflicting accesses.
- f-strings should be used instead of the ``%`` operator and ``str.format()``
calls.
- Never ``print`` informational messages; use the `logging
  <https://docs.python.org/3/library/logging.html>`__ module instead. In
particular, we have our own logging shim, so you’ll see ``from beets import
logging`` in most files.
- The loggers use `str.format
  <https://docs.python.org/3/library/stdtypes.html#str.format>`__-style logging
  instead of ``%``-style, so you can type ``log.debug("{}", obj)`` to do your
  formatting (see the sketch after this list).
- Exception handlers must use ``except A as B:`` instead of ``except A, B:``.
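As a minimal sketch of the logging conventions above (the ``beets.myplugin``
logger name is just an illustration):

.. code-block:: python

    from beets import logging

    # beets' logging shim accepts str.format-style placeholders and
    # interpolates them lazily, when the record is actually emitted.
    log = logging.getLogger("beets.myplugin")

    def greet(name):
        log.debug("saying hello to {}", name)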
Style
~~~~~
We use `ruff <https://docs.astral.sh/ruff/>`__ to format and lint the codebase.
Run ``poe check-format`` and ``poe lint`` to check your code for style and
linting errors. Running ``poe format`` will automatically format your code
according to the specifications required by the project.
Similarly, run ``poe format-docs`` and ``poe lint-docs`` to ensure consistent
documentation formatting and check for any issues.
Handling Paths
~~~~~~~~~~~~~~
A great deal of convention deals with the handling of **paths**. Paths are
stored internally—in the database, for instance—as byte strings (i.e., ``bytes``
instead of ``str`` in Python 3). This is because POSIX operating systems’ path
names are only reliably usable as byte strings—operating systems typically
recommend but do not require that filenames use a given encoding, so violations
of any reported encoding are inevitable. On Windows, the strings are always
encoded with UTF-8; on Unix, the encoding is controlled by the filesystem. Here
are some guidelines to follow:
- If you have a Unicode path or you’re not sure whether something is Unicode or
not, pass it through ``bytestring_path`` function in the ``beets.util`` module
to convert it to bytes.
- Pass every path name through the ``syspath`` function (also in ``beets.util``)
before sending it to any *operating system* file operation (``open``, for
example). This is necessary to use long filenames (which, maddeningly, must be
Unicode) on Windows. This allows us to consistently store bytes in the
database but use the native encoding rule on both POSIX and Windows.
- Similarly, the ``displayable_path`` utility function converts bytestring paths
to a Unicode string for displaying to the user. Every time you want to print
out a string to the terminal or log it with the ``logging`` module, feed it
through this function.
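As a minimal sketch of these guidelines (the function is hypothetical, but
``bytestring_path``, ``syspath`` and ``displayable_path`` are the real
``beets.util`` helpers):

.. code-block:: python

    from beets import logging
    from beets.util import bytestring_path, displayable_path, syspath

    log = logging.getLogger("beets")

    def read_first_line(path):
        # Normalize whatever we were given to the internal bytestring form.
        path = bytestring_path(path)
        # Convert to the OS-friendly form only at the filesystem boundary.
        with open(syspath(path)) as f:
            line = f.readline()
        # Convert to Unicode only when showing the path to a human.
        log.debug("read first line of {}", displayable_path(path))
        return line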
Editor Settings
~~~~~~~~~~~~~~~
Personally, I work on beets with vim_. Here are some ``.vimrc`` lines that might
help with PEP 8-compliant Python coding:
::
filetype indent on
autocmd FileType python setlocal shiftwidth=4 tabstop=4 softtabstop=4 expandtab shiftround autoindent
Consider installing `this alternative Python indentation plugin
<https://github.com/hynek/vim-python-pep8-indent>`__. I also like `neomake
<https://github.com/neomake/neomake>`__ with its flake8 checker.
.. _testing:
Testing
-------
Running the Tests
~~~~~~~~~~~~~~~~~
Use ``poe`` to run tests:
::
$ poe test [pytest options]
You can disable a hand-selected set of "slow" tests by setting the environment
variable ``SKIP_SLOW_TESTS``, for example:
::
$ SKIP_SLOW_TESTS=1 poe test
Coverage
++++++++
The ``test`` command does not include coverage as it slows down testing. In
order to measure it, use the ``test-with-coverage`` task:
::
$ poe test-with-coverage [pytest options]
You are welcome to explore coverage by opening the HTML report in
``.reports/html/index.html``.
Note that for each covered line the report shows **which tests cover it**
(expand the list on the right-hand side of the affected line).
You can find project coverage status on Codecov_.
Red Flags
+++++++++
The pytest-random_ plugin makes it easy to randomize the order of tests. ``poe
test --random`` will occasionally turn up failing tests that reveal ordering
dependencies—which are bad news!
Test Dependencies
+++++++++++++++++
The tests have a few more dependencies than beets itself. (The additional
dependencies consist of testing utilities and dependencies of non-default
plugins exercised by the test suite.) The dependencies are listed under the
``tool.poetry.group.test.dependencies`` section in pyproject.toml_.
Writing Tests
~~~~~~~~~~~~~
Tests are written by adding or modifying files in the test_ folder. Take a look
at `https://github.com/beetbox/beets/blob/master/test/test_template.py#L224`_ to
get a basic idea of how tests are written. Since we are currently migrating the
tests from unittest_ to pytest_, new tests should be written using pytest_.
Contributions migrating existing tests are welcome!
External API requests under test should be mocked with requests-mock_. However,
we still want to know whether external APIs are up and that they return expected
responses; therefore, we test them weekly with our `integration test`_ suite.
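As a hedged sketch of such mocking, using the pytest fixture that the
requests-mock_ plugin provides (the URL and function are hypothetical):

.. code-block:: python

    import requests

    def fetch_lyrics(url):
        # Stand-in for plugin code that calls an external API.
        return requests.get(url).text

    def test_fetch_lyrics(requests_mock):
        # The fixture intercepts HTTP calls made via ``requests``.
        requests_mock.get("https://lyrics.example.com/song", text="la la la")
        assert fetch_lyrics("https://lyrics.example.com/song") == "la la la"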
To add such an integration test, mark your test with the ``integration_test``
marker:
.. code-block:: python
@pytest.mark.integration_test
def test_external_api_call(): ...
This way, the test will be run only in the integration test suite.
.. _codecov: https://codecov.io/github/beetbox/beets
.. _discussion board: https://github.com/beetbox/beets/discussions
.. _documentation: https://beets.readthedocs.io/en/stable/
.. _https://github.com/beetbox/beets/blob/master/test/test_template.py#l224: https://github.com/beetbox/beets/blob/master/test/test_template.py#L224
.. _integration test: https://github.com/beetbox/beets/actions?query=workflow%3A%22integration+tests%22
.. _pipx: https://pipx.pypa.io/stable
.. _poethepoet: https://poethepoet.natn.io/index.html
.. _poetry: https://python-poetry.org/docs/
.. _pyproject.toml: https://github.com/beetbox/beets/tree/master/pyproject.toml
.. _pytest: https://docs.pytest.org/en/stable/
.. _pytest-random: https://github.com/klrmn/pytest-random
.. _requests-mock: https://requests-mock.readthedocs.io/en/latest/response.html
.. _test: https://github.com/beetbox/beets/tree/master/test
.. _unittest: https://docs.python.org/3/library/unittest.html
.. _vim: https://www.vim.org/
beetbox-beets-c1877b7/LICENSE
The MIT License
Copyright (c) 2010-2016 Adrian Sampson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
beetbox-beets-c1877b7/README.rst
.. image:: https://img.shields.io/pypi/v/beets.svg
:target: https://pypi.python.org/pypi/beets
.. image:: https://img.shields.io/codecov/c/github/beetbox/beets.svg
:target: https://codecov.io/github/beetbox/beets
.. image:: https://img.shields.io/github/actions/workflow/status/beetbox/beets/ci.yaml
:target: https://github.com/beetbox/beets/actions
.. image:: https://repology.org/badge/tiny-repos/beets.svg
:target: https://repology.org/project/beets/versions
beets
=====
Beets is the media library management system for obsessive music geeks.
The purpose of beets is to get your music collection right once and for all. It
catalogs your collection, automatically improving its metadata as it goes. It
then provides a suite of tools for manipulating and accessing your music.
Here's an example of beets' brainy tag corrector doing its thing:
::
$ beet import ~/music/ladytron
Tagging:
Ladytron - Witching Hour
(Similarity: 98.4%)
* Last One Standing -> The Last One Standing
* Beauty -> Beauty*2
* White Light Generation -> Whitelightgenerator
* All the Way -> All the Way...
Because beets is designed as a library, it can do almost anything you can
imagine for your music collection. Via plugins_, beets becomes a panacea:
- Fetch or calculate all the metadata you could possibly need: `album art`_,
lyrics_, genres_, tempos_, ReplayGain_ levels, or `acoustic fingerprints`_.
- Get metadata from MusicBrainz_, Discogs_, and Beatport_. Or guess metadata
using songs' filenames or their acoustic fingerprints.
- `Transcode audio`_ to any format you like.
- Check your library for `duplicate tracks and albums`_ or for `albums that are
missing tracks`_.
- Clean up crufty tags left behind by other, less-awesome tools.
- Embed and extract album art from files' metadata.
- Browse your music library graphically through a Web browser and play it in any
browser that supports `HTML5 Audio`_.
- Analyze music files' metadata from the command line.
- Listen to your library with a music player that speaks the MPD_ protocol and
works with a staggering variety of interfaces.
If beets doesn't do what you want yet, `writing your own plugin`_ is shockingly
simple if you know a little Python.
.. _acoustic fingerprints: https://beets.readthedocs.org/page/plugins/chroma.html
.. _album art: https://beets.readthedocs.org/page/plugins/fetchart.html
.. _albums that are missing tracks: https://beets.readthedocs.org/page/plugins/missing.html
.. _beatport: https://www.beatport.com
.. _discogs: https://www.discogs.com/
.. _duplicate tracks and albums: https://beets.readthedocs.org/page/plugins/duplicates.html
.. _genres: https://beets.readthedocs.org/page/plugins/lastgenre.html
.. _html5 audio: https://html.spec.whatwg.org/multipage/media.html#the-audio-element
.. _lyrics: https://beets.readthedocs.org/page/plugins/lyrics.html
.. _mpd: https://www.musicpd.org/
.. _musicbrainz: https://musicbrainz.org/
.. _musicbrainz music collection: https://musicbrainz.org/doc/Collections/
.. _plugins: https://beets.readthedocs.org/page/plugins/
.. _replaygain: https://beets.readthedocs.org/page/plugins/replaygain.html
.. _tempos: https://beets.readthedocs.org/page/plugins/acousticbrainz.html
.. _transcode audio: https://beets.readthedocs.org/page/plugins/convert.html
.. _writing your own plugin: https://beets.readthedocs.org/page/dev/plugins.html
Install
-------
You can install beets by typing ``pip install beets`` or install it directly from GitHub
(see details here_). Beets has also been packaged in the `software
repositories`_ of several distributions. Check out the `Getting Started`_ guide
for more information.
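For example, a minimal first run might look like this (the music path is just
an illustration):

::

    $ pip install beets
    $ beet import ~/music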
.. _getting started: https://beets.readthedocs.org/page/guides/main.html
.. _here: https://beets.readthedocs.io/en/latest/faq.html#run-the-latest-source-version-of-beets
.. _software repositories: https://repology.org/project/beets/versions
Contribute
----------
Thank you for considering contributing to ``beets``! Whether you're a programmer
or not, you should be able to find all the info you need at CONTRIBUTING.rst_.
.. _contributing.rst: https://github.com/beetbox/beets/blob/master/CONTRIBUTING.rst
Read More
---------
Learn more about beets at `its Web site`_. Follow `@b33ts`_ on Mastodon for news
and updates.
.. _@b33ts: https://fosstodon.org/@beets
.. _its web site: https://beets.io/
Contact
-------
- Encountered a bug you'd like to report? Check out our `issue tracker`_!
- If your issue hasn't already been reported, please `open a new ticket`_ and
we'll be in touch with you shortly.
- If you'd like to vote on a feature/bug, simply give a :+1: on issues you'd
like to see prioritized over others.
- Need help/support, would like to start a discussion, have an idea for a new
feature, or would just like to introduce yourself to the team? Check out
`GitHub Discussions`_!
.. _github discussions: https://github.com/beetbox/beets/discussions
.. _issue tracker: https://github.com/beetbox/beets/issues
.. _open a new ticket: https://github.com/beetbox/beets/issues/new/choose
Authors
-------
Beets is by `Adrian Sampson`_ with a supporting cast of thousands.
.. _adrian sampson: https://www.cs.cornell.edu/~asampson/
beetbox-beets-c1877b7/README_kr.rst
.. image:: https://img.shields.io/pypi/v/beets.svg
:target: https://pypi.python.org/pypi/beets
.. image:: https://img.shields.io/codecov/c/github/beetbox/beets.svg
:target: https://codecov.io/github/beetbox/beets
.. image:: https://travis-ci.org/beetbox/beets.svg?branch=master
:target: https://travis-ci.org/beetbox/beets
beets
=====
Beets is the media library management system for obsessive music listeners.
The purpose of beets is to get your music collection right once and for all. It
catalogs your music and automatically improves its metadata as it goes, and it
provides tools for accessing and manipulating your music.
Here is an example of what beets' brainy tag corrector does:
::
$ beet import ~/music/ladytron
Tagging:
Ladytron - Witching Hour
(Similarity: 98.4%)
* Last One Standing -> The Last One Standing
* Beauty -> Beauty*2
* White Light Generation -> Whitelightgenerator
* All the Way -> All the Way...
Because beets is designed as a library, it can do almost anything you can
imagine for your music. Via plugins_, it can do everything!
- Fetch or calculate the metadata you need: `album art`_, lyrics_, genres_,
  tempos_, ReplayGain_ levels, or `acoustic fingerprints`_.
- Get metadata from MusicBrainz_, Discogs_, and Beatport_, or guess metadata
  from song titles or acoustic fingerprints.
- `Transcode audio`_ to any format you like.
- Check your library for `duplicate tracks and albums`_ or `albums that are
  missing tracks`_.
- Clean up messy tags left behind by other, less-capable tools.
- Embed and extract album art from files' metadata.
- Play your music in any browser that supports `HTML5 Audio`_ and browse your
  library through a web browser.
- Analyze music files' metadata from the command line.
- Listen to your library with a music player that speaks the MPD_ protocol,
  which works with a staggering variety of interfaces.
If beets doesn't do what you want yet, `writing your own plugin`_ is
surprisingly simple if you know a little Python.
.. _acoustic fingerprints: https://beets.readthedocs.org/page/plugins/chroma.html
.. _album art: https://beets.readthedocs.org/page/plugins/fetchart.html
.. _albums that are missing tracks: https://beets.readthedocs.org/page/plugins/missing.html
.. _beatport: https://www.beatport.com
.. _discogs: https://www.discogs.com/
.. _duplicate tracks and albums: https://beets.readthedocs.org/page/plugins/duplicates.html
.. _genres: https://beets.readthedocs.org/page/plugins/lastgenre.html
.. _html5 audio: https://html.spec.whatwg.org/multipage/media.html#the-audio-element
.. _lyrics: https://beets.readthedocs.org/page/plugins/lyrics.html
.. _mpd: https://www.musicpd.org/
.. _musicbrainz: https://musicbrainz.org/
.. _musicbrainz music collection: https://musicbrainz.org/doc/Collections/
.. _plugins: https://beets.readthedocs.org/page/plugins/
.. _replaygain: https://beets.readthedocs.org/page/plugins/replaygain.html
.. _tempos: https://beets.readthedocs.org/page/plugins/acousticbrainz.html
.. _transcode audio: https://beets.readthedocs.org/page/plugins/convert.html
.. _writing your own plugin: https://beets.readthedocs.org/page/dev/plugins.html
Install
-------
You can install beets with ``pip install beets``. You can also check out the
`Getting Started`_ guide.
.. _getting started: https://beets.readthedocs.org/page/guides/main.html
Contribute
----------
Check out the Hacking_ wiki page to find out how to help. You may also be
interested in the `For Developers`_ section of the docs.
.. _for developers: https://beets.readthedocs.io/en/stable/dev/
.. _hacking: https://github.com/beetbox/beets/wiki/Hacking
Read More
---------
Learn more about beets at `its Web site`_. Follow `@b33ts`_ on Twitter for
news and updates.
.. _@b33ts: https://twitter.com/b33ts/
.. _its web site: https://beets.io/
Authors
-------
Beets was made by `Adrian Sampson`_ with the support of many people. To say
thanks, visit the forum_.
.. _adrian sampson: https://www.cs.cornell.edu/~asampson/
.. _forum: https://github.com/beetbox/beets/discussions/
beetbox-beets-c1877b7/SECURITY.md
# Security Policy
## Supported Versions
We currently support only the latest release of beets.
## Reporting a Vulnerability
To report a security vulnerability, please send email to [our Zulip team][z].
[z]: mailto:email.218c36e48d78cf125c0a6219a6c2a417.show-sender@streams.zulipchat.com
beetbox-beets-c1877b7/beets/__init__.py
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from sys import stderr
import confuse
from .util import deprecate_imports
__version__ = "2.5.1"
__author__ = "Adrian Sampson "
def __getattr__(name: str):
"""Handle deprecated imports."""
return deprecate_imports(
old_module=__name__,
new_module_by_name={
"art": "beetsplug._utils",
"vfs": "beetsplug._utils",
},
name=name,
version="3.0.0",
)
class IncludeLazyConfig(confuse.LazyConfig):
"""A version of Confuse's LazyConfig that also merges in data from
YAML files specified in an `include` setting.
"""
def read(self, user=True, defaults=True):
super().read(user, defaults)
try:
for view in self["include"]:
self.set_file(view.as_filename())
except confuse.NotFoundError:
pass
except confuse.ConfigReadError as err:
stderr.write(f"configuration `import` failed: {err.reason}")
config = IncludeLazyConfig("beets", __name__)
beetbox-beets-c1877b7/beets/__main__.py
# This file is part of beets.
# Copyright 2017, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The __main__ module lets you run the beets CLI interface by typing
`python -m beets`.
"""
import sys
from .ui import main
if __name__ == "__main__":
main(sys.argv[1:])
beetbox-beets-c1877b7/beets/autotag/__init__.py
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Facilities for automatically determining files' correct metadata."""
from __future__ import annotations
import warnings
from importlib import import_module
from typing import TYPE_CHECKING, Union
from beets import config, logging
# Parts of external interface.
from beets.util import unique_list
from ..util import deprecate_imports
from .hooks import AlbumInfo, AlbumMatch, TrackInfo, TrackMatch
from .match import Proposal, Recommendation, tag_album, tag_item
if TYPE_CHECKING:
from collections.abc import Mapping, Sequence
from beets.library import Album, Item, LibModel
def __getattr__(name: str):
if name == "current_metadata":
warnings.warn(
(
f"'beets.autotag.{name}' is deprecated and will be removed in"
" 3.0.0. Use 'beets.util.get_most_common_tags' instead."
),
DeprecationWarning,
stacklevel=2,
)
return import_module("beets.util").get_most_common_tags
return deprecate_imports(
__name__, {"Distance": "beets.autotag.distance"}, name, "3.0.0"
)
__all__ = [
"AlbumInfo",
"AlbumMatch",
"Proposal",
"Recommendation",
"TrackInfo",
"TrackMatch",
"apply_album_metadata",
"apply_item_metadata",
"apply_metadata",
"tag_album",
"tag_item",
]
# Global logger.
log = logging.getLogger("beets")
# Metadata fields that are already hardcoded, or where the tag name changes.
SPECIAL_FIELDS = {
"album": (
"va",
"releasegroup_id",
"artist_id",
"artists_ids",
"album_id",
"mediums",
"tracks",
"year",
"month",
"day",
"artist",
"artists",
"artist_credit",
"artists_credit",
"artist_sort",
"artists_sort",
"data_url",
),
"track": (
"track_alt",
"artist_id",
"artists_ids",
"release_track_id",
"medium",
"index",
"medium_index",
"title",
"artist_credit",
"artists_credit",
"artist_sort",
"artists_sort",
"artist",
"artists",
"track_id",
"medium_total",
"data_url",
"length",
),
}
# Additional utilities for the main interface.
def _apply_metadata(
info: Union[AlbumInfo, TrackInfo],
db_obj: Union[Album, Item],
nullable_fields: Sequence[str] = [],
):
"""Set the db_obj's metadata to match the info."""
special_fields = SPECIAL_FIELDS[
"album" if isinstance(info, AlbumInfo) else "track"
]
for field, value in info.items():
# We only overwrite fields that are not already hardcoded.
if field in special_fields:
continue
# Don't overwrite fields with empty values unless the
# field is explicitly allowed to be overwritten.
if value is None and field not in nullable_fields:
continue
db_obj[field] = value
def correct_list_fields(m: LibModel) -> None:
"""Synchronise single and list values for the list fields that we use.
That is, ensure that the single-valued field holds the same value as the
first element of the list field.
For context, the value we set as, say, ``mb_artistid`` is simply ignored:
Under the current :class:`MediaFile` implementation, fields ``albumtype``,
``mb_artistid`` and ``mb_albumartistid`` are mapped to the first element of
``albumtypes``, ``mb_artistids`` and ``mb_albumartistids`` respectively.
This means setting ``mb_artistid`` has no effect. However, beets
functionality still assumes that ``mb_artistid`` is independent and stores
its value in the database. If ``mb_artistid`` != ``mb_artistids[0]``,
the ``beet write`` command thinks that ``mb_artistid`` is modified and tries to
update the field in the file. Of course nothing happens, so the same diff
is shown every time the command is run.
We can avoid this issue by ensuring that ``mb_artistid`` has the same value
as ``mb_artistids[0]``, and that's what this function does.
Note: :class:`Album` model does not have ``mb_artistids`` and
``mb_albumartistids`` fields therefore we need to check for their presence.
"""
def ensure_first_value(single_field: str, list_field: str) -> None:
"""Ensure the first ``list_field`` item is equal to ``single_field``."""
single_val, list_val = getattr(m, single_field), getattr(m, list_field)
if single_val:
setattr(m, list_field, unique_list([single_val, *list_val]))
elif list_val:
setattr(m, single_field, list_val[0])
ensure_first_value("albumtype", "albumtypes")
if hasattr(m, "mb_artistids"):
ensure_first_value("mb_artistid", "mb_artistids")
if hasattr(m, "mb_albumartistids"):
ensure_first_value("mb_albumartistid", "mb_albumartistids")
def apply_item_metadata(item: Item, track_info: TrackInfo):
"""Set an item's metadata from its matched TrackInfo object."""
item.artist = track_info.artist
item.artists = track_info.artists
item.artist_sort = track_info.artist_sort
item.artists_sort = track_info.artists_sort
item.artist_credit = track_info.artist_credit
item.artists_credit = track_info.artists_credit
item.title = track_info.title
item.mb_trackid = track_info.track_id
item.mb_releasetrackid = track_info.release_track_id
if track_info.artist_id:
item.mb_artistid = track_info.artist_id
if track_info.artists_ids:
item.mb_artistids = track_info.artists_ids
_apply_metadata(track_info, item)
correct_list_fields(item)
# At the moment, the other metadata is left intact (including album
# and track number). Perhaps these should be emptied?
def apply_album_metadata(album_info: AlbumInfo, album: Album):
"""Set the album's metadata to match the AlbumInfo object."""
_apply_metadata(album_info, album)
correct_list_fields(album)
def apply_metadata(album_info: AlbumInfo, mapping: Mapping[Item, TrackInfo]):
"""Set the items' metadata to match an AlbumInfo object using a
mapping from Items to TrackInfo objects.
"""
for item, track_info in mapping.items():
# Artist or artist credit.
if config["artist_credit"]:
item.artist = (
track_info.artist_credit
or track_info.artist
or album_info.artist_credit
or album_info.artist
)
item.artists = (
track_info.artists_credit
or track_info.artists
or album_info.artists_credit
or album_info.artists
)
item.albumartist = album_info.artist_credit or album_info.artist
item.albumartists = album_info.artists_credit or album_info.artists
else:
item.artist = track_info.artist or album_info.artist
item.artists = track_info.artists or album_info.artists
item.albumartist = album_info.artist
item.albumartists = album_info.artists
# Album.
item.album = album_info.album
# Artist sort and credit names.
item.artist_sort = track_info.artist_sort or album_info.artist_sort
item.artists_sort = track_info.artists_sort or album_info.artists_sort
item.artist_credit = (
track_info.artist_credit or album_info.artist_credit
)
item.artists_credit = (
track_info.artists_credit or album_info.artists_credit
)
item.albumartist_sort = album_info.artist_sort
item.albumartists_sort = album_info.artists_sort
item.albumartist_credit = album_info.artist_credit
item.albumartists_credit = album_info.artists_credit
# Release date.
for prefix in "", "original_":
if config["original_date"] and not prefix:
# Ignore specific release date.
continue
for suffix in "year", "month", "day":
key = f"{prefix}{suffix}"
value = getattr(album_info, key) or 0
# If we don't even have a year, apply nothing.
if suffix == "year" and not value:
break
# Otherwise, set the fetched value (or 0 for the month
# and day if not available).
item[key] = value
# If we're using original release date for both fields,
# also set item.year = info.original_year, etc.
if config["original_date"]:
item[suffix] = value
# Title.
item.title = track_info.title
if config["per_disc_numbering"]:
# We want to let the track number be zero, but if the medium index
# is not provided we need to fall back to the overall index.
if track_info.medium_index is not None:
item.track = track_info.medium_index
else:
item.track = track_info.index
item.tracktotal = track_info.medium_total or len(album_info.tracks)
else:
item.track = track_info.index
item.tracktotal = len(album_info.tracks)
# Disc and disc count.
item.disc = track_info.medium
item.disctotal = album_info.mediums
# MusicBrainz IDs.
item.mb_trackid = track_info.track_id
item.mb_releasetrackid = track_info.release_track_id
item.mb_albumid = album_info.album_id
if track_info.artist_id:
item.mb_artistid = track_info.artist_id
else:
item.mb_artistid = album_info.artist_id
if track_info.artists_ids:
item.mb_artistids = track_info.artists_ids
else:
item.mb_artistids = album_info.artists_ids
item.mb_albumartistid = album_info.artist_id
item.mb_albumartistids = album_info.artists_ids
item.mb_releasegroupid = album_info.releasegroup_id
# Compilation flag.
item.comp = album_info.va
# Track alt.
item.track_alt = track_info.track_alt
_apply_metadata(
album_info,
item,
nullable_fields=config["overwrite_null"]["album"].as_str_seq(),
)
_apply_metadata(
track_info,
item,
nullable_fields=config["overwrite_null"]["track"].as_str_seq(),
)
correct_list_fields(item)
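# Illustrative sketch (not part of the module): after the user accepts an
# AlbumMatch, the importer applies it roughly like this:
#
#     apply_metadata(match.info, match.mapping)
#     for item in match.mapping:
#         item.store()
#
# (`match` is an AlbumMatch; `store()` persists the changes to the library.)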
beetbox-beets-c1877b7/beets/autotag/distance.py 0000664 0000000 0000000 00000043515 15073551743 0021542 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import datetime
import re
from functools import cache, total_ordering
from typing import TYPE_CHECKING, Any
from jellyfish import levenshtein_distance
from unidecode import unidecode
from beets import config, metadata_plugins
from beets.util import as_string, cached_classproperty, get_most_common_tags
if TYPE_CHECKING:
from collections.abc import Iterator, Sequence
from beets.library import Item
from .hooks import AlbumInfo, TrackInfo
# Candidate distance scoring.
# Artist signals that indicate "various artists". These are used at the
# album level to determine whether a given release is likely a VA
# release and also on the track level to remove the penalty for
# differing artists.
VA_ARTISTS = ("", "various artists", "various", "va", "unknown")
# Parameters for string distance function.
# Words that can be moved to the end of a string using a comma.
SD_END_WORDS = ["the", "a", "an"]
# Reduced weights for certain portions of the string.
SD_PATTERNS = [
(r"^the ", 0.1),
(r"[\[\(]?(ep|single)[\]\)]?", 0.0),
(r"[\[\(]?(featuring|feat|ft)[\. :].+", 0.1),
(r"\(.*?\)", 0.3),
(r"\[.*?\]", 0.3),
(r"(, )?(pt\.|part) .+", 0.2),
]
# Replacements to use before testing distance.
SD_REPLACE = [
(r"&", "and"),
]
def _string_dist_basic(str1: str, str2: str) -> float:
"""Basic edit distance between two strings, ignoring
non-alphanumeric characters and case. Comparisons are based on a
transliteration/lowering to ASCII characters. Normalized by string
length.
"""
assert isinstance(str1, str)
assert isinstance(str2, str)
str1 = as_string(unidecode(str1))
str2 = as_string(unidecode(str2))
str1 = re.sub(r"[^a-z0-9]", "", str1.lower())
str2 = re.sub(r"[^a-z0-9]", "", str2.lower())
if not str1 and not str2:
return 0.0
return levenshtein_distance(str1, str2) / float(max(len(str1), len(str2)))
def string_dist(str1: str | None, str2: str | None) -> float:
"""Gives an "intuitive" edit distance between two strings. This is
an edit distance, normalized by the string length, with a number of
tweaks that reflect intuition about text.
"""
if str1 is None and str2 is None:
return 0.0
if str1 is None or str2 is None:
return 1.0
str1 = str1.lower()
str2 = str2.lower()
# Don't penalize strings that move certain words to the end. For
# example, "the something" should be considered equal to
# "something, the".
for word in SD_END_WORDS:
if str1.endswith(f", {word}"):
str1 = f"{word} {str1[: -len(word) - 2]}"
if str2.endswith(f", {word}"):
str2 = f"{word} {str2[: -len(word) - 2]}"
# Perform a couple of basic normalizing substitutions.
for pat, repl in SD_REPLACE:
str1 = re.sub(pat, repl, str1)
str2 = re.sub(pat, repl, str2)
# Change the weight for certain string portions matched by a set
# of regular expressions. We gradually change the strings and build
# up penalties associated with parts of the string that were
# deleted.
base_dist = _string_dist_basic(str1, str2)
penalty = 0.0
for pat, weight in SD_PATTERNS:
# Get strings that drop the pattern.
case_str1 = re.sub(pat, "", str1)
case_str2 = re.sub(pat, "", str2)
if case_str1 != str1 or case_str2 != str2:
# If the pattern was present (i.e., it is deleted in the
# current case), recalculate the distances for the
# modified strings.
case_dist = _string_dist_basic(case_str1, case_str2)
case_delta = max(0.0, base_dist - case_dist)
if case_delta == 0.0:
continue
# Shift our baseline strings down (to avoid rematching the
# same part of the string) and add a scaled distance
# amount to the penalties.
str1 = case_str1
str2 = case_str2
base_dist = case_dist
penalty += weight * case_delta
return base_dist + penalty
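# Illustrative examples (not part of the module): the SD_END_WORDS
# handling above makes article moves free, while genuinely different
# titles still incur a distance:
#
#     string_dist("The Wall", "Wall, The")  # -> 0.0
#     string_dist("The Wall", "A Wall")     # -> small but nonzero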
@total_ordering
class Distance:
"""Keeps track of multiple distance penalties. Provides a single
weighted distance for all penalties as well as a weighted distance
for each individual penalty.
"""
def __init__(self) -> None:
self._penalties: dict[str, list[float]] = {}
self.tracks: dict[TrackInfo, Distance] = {}
@cached_classproperty
def _weights(cls) -> dict[str, float]:
"""A dictionary from keys to floating-point weights."""
weights_view = config["match"]["distance_weights"]
weights = {}
for key in weights_view.keys():
weights[key] = weights_view[key].as_number()
return weights
# Access the components and their aggregates.
@property
def distance(self) -> float:
"""Return a weighted and normalized distance across all
penalties.
"""
dist_max = self.max_distance
if dist_max:
return self.raw_distance / dist_max
return 0.0
@property
def max_distance(self) -> float:
"""Return the maximum distance penalty (normalization factor)."""
dist_max = 0.0
for key, penalty in self._penalties.items():
dist_max += len(penalty) * self._weights[key]
return dist_max
@property
def raw_distance(self) -> float:
"""Return the raw (denormalized) distance."""
dist_raw = 0.0
for key, penalty in self._penalties.items():
dist_raw += sum(penalty) * self._weights[key]
return dist_raw
def items(self) -> list[tuple[str, float]]:
"""Return a list of (key, dist) pairs, with `dist` being the
weighted distance, sorted from highest to lowest. Does not
include penalties with a zero value.
"""
list_ = []
for key in self._penalties:
dist = self[key]
if dist:
list_.append((key, dist))
# Convert distance into a negative float so we can sort items in
# ascending order (for keys, when the penalty is equal) and
# still get the items with the biggest distance first.
return sorted(
list_, key=lambda key_and_dist: (-key_and_dist[1], key_and_dist[0])
)
def __hash__(self) -> int:
return id(self)
def __eq__(self, other) -> bool:
return self.distance == other
# Behave like a float.
def __lt__(self, other) -> bool:
return self.distance < other
def __float__(self) -> float:
return self.distance
def __sub__(self, other) -> float:
return self.distance - other
def __rsub__(self, other) -> float:
return other - self.distance
def __str__(self) -> str:
return f"{self.distance:.2f}"
# Behave like a dict.
def __getitem__(self, key) -> float:
"""Returns the weighted distance for a named penalty."""
dist = sum(self._penalties[key]) * self._weights[key]
dist_max = self.max_distance
if dist_max:
return dist / dist_max
return 0.0
def __iter__(self) -> Iterator[tuple[str, float]]:
return iter(self.items())
def __len__(self) -> int:
return len(self.items())
def keys(self) -> list[str]:
return [key for key, _ in self.items()]
def update(self, dist: Distance):
"""Adds all the distance penalties from `dist`."""
if not isinstance(dist, Distance):
raise ValueError(
f"`dist` must be a Distance object, not {type(dist)}"
)
for key, penalties in dist._penalties.items():
self._penalties.setdefault(key, []).extend(penalties)
# Adding components.
def _eq(self, value1: re.Pattern[str] | Any, value2: Any) -> bool:
"""Returns True if `value1` is equal to `value2`. `value1` may
be a compiled regular expression, in which case it will be
matched against `value2`.
"""
if isinstance(value1, re.Pattern):
return bool(value1.match(value2))
return value1 == value2
def add(self, key: str, dist: float):
"""Adds a distance penalty. `key` must correspond with a
configured weight setting. `dist` must be a float between 0.0
and 1.0, and will be added to any existing distance penalties
for the same key.
"""
if not 0.0 <= dist <= 1.0:
raise ValueError(f"`dist` must be between 0.0 and 1.0, not {dist}")
self._penalties.setdefault(key, []).append(dist)
def add_equality(
self,
key: str,
value: Any,
options: list[Any] | tuple[Any, ...] | Any,
):
"""Adds a distance penalty of 1.0 if `value` doesn't match any
of the values in `options`. If an option is a compiled regular
expression, it will be considered equal if it matches against
`value`.
"""
if not isinstance(options, (list, tuple)):
options = [options]
for opt in options:
if self._eq(opt, value):
dist = 0.0
break
else:
dist = 1.0
self.add(key, dist)
def add_expr(self, key: str, expr: bool):
"""Adds a distance penalty of 1.0 if `expr` evaluates to True,
or 0.0.
"""
if expr:
self.add(key, 1.0)
else:
self.add(key, 0.0)
def add_number(self, key: str, number1: int, number2: int):
"""Adds a distance penalty of 1.0 for each number of difference
between `number1` and `number2`, or 0.0 when there is no
difference. Use this when there is no upper limit on the
difference between the two numbers.
"""
diff = abs(number1 - number2)
if diff:
for i in range(diff):
self.add(key, 1.0)
else:
self.add(key, 0.0)
def add_priority(
self,
key: str,
value: Any,
options: list[Any] | tuple[Any, ...] | Any,
):
"""Adds a distance penalty that corresponds to the position at
which `value` appears in `options`. A distance penalty of 0.0
for the first option, or 1.0 if there is no matching option. If
an option is a compiled regular expression, it will be
considered equal if it matches against `value`.
"""
if not isinstance(options, (list, tuple)):
options = [options]
unit = 1.0 / (len(options) or 1)
for i, opt in enumerate(options):
if self._eq(opt, value):
dist = i * unit
break
else:
dist = 1.0
self.add(key, dist)
def add_ratio(
self,
key: str,
number1: int | float,
number2: int | float,
):
"""Adds a distance penalty for `number1` as a ratio of `number2`.
`number1` is clamped between 0 and `number2`.
"""
number = float(max(min(number1, number2), 0))
if number2:
dist = number / number2
else:
dist = 0.0
self.add(key, dist)
def add_string(self, key: str, str1: str | None, str2: str | None):
"""Adds a distance penalty based on the edit distance between
`str1` and `str2`.
"""
dist = string_dist(str1, str2)
self.add(key, dist)
def add_data_source(self, before: str | None, after: str | None) -> None:
if before != after and (
before or len(metadata_plugins.find_metadata_source_plugins()) > 1
):
self.add("data_source", metadata_plugins.get_penalty(after))
@cache
def get_track_length_grace() -> float:
"""Get cached grace period for track length matching."""
return config["match"]["track_length_grace"].as_number()
@cache
def get_track_length_max() -> float:
"""Get cached maximum track length for track length matching."""
return config["match"]["track_length_max"].as_number()
def track_index_changed(item: Item, track_info: TrackInfo) -> bool:
"""Returns True if the item and track info index is different. Tolerates
per disc and per release numbering.
"""
return item.track not in (track_info.medium_index, track_info.index)
def track_distance(
item: Item,
track_info: TrackInfo,
incl_artist: bool = False,
) -> Distance:
"""Determines the significance of a track metadata change. Returns a
Distance object. `incl_artist` indicates that a distance component should
be included for the track artist (i.e., for various-artist releases).
``track_length_grace`` and ``track_length_max`` configuration options are
cached because this function is called many times during the matching
process and their access comes with a performance overhead.
"""
dist = Distance()
# Length.
if info_length := track_info.length:
diff = abs(item.length - info_length) - get_track_length_grace()
dist.add_ratio("track_length", diff, get_track_length_max())
# Title.
dist.add_string("track_title", item.title, track_info.title)
# Artist. Only check if there is actually an artist in the track data.
if (
incl_artist
and track_info.artist
and item.artist.lower() not in VA_ARTISTS
):
dist.add_string("track_artist", item.artist, track_info.artist)
# Track index.
if track_info.index and item.track:
dist.add_expr("track_index", track_index_changed(item, track_info))
# Track ID.
if item.mb_trackid:
dist.add_expr("track_id", item.mb_trackid != track_info.track_id)
# Penalize mismatching disc numbers.
if track_info.medium and item.disc:
dist.add_expr("medium", item.disc != track_info.medium)
dist.add_data_source(item.get("data_source"), track_info.data_source)
return dist
def distance(
items: Sequence[Item],
album_info: AlbumInfo,
mapping: dict[Item, TrackInfo],
) -> Distance:
"""Determines how "significant" an album metadata change would be.
Returns a Distance object. `album_info` is an AlbumInfo object
reflecting the album to be compared. `items` is a sequence of all
Item objects that will be matched (order is not important).
`mapping` is a dictionary mapping Items to TrackInfo objects; the
keys are a subset of `items` and the values are a subset of
`album_info.tracks`.
"""
likelies, _ = get_most_common_tags(items)
dist = Distance()
# Artist, if not various.
if not album_info.va:
dist.add_string("artist", likelies["artist"], album_info.artist)
# Album.
dist.add_string("album", likelies["album"], album_info.album)
preferred_config = config["match"]["preferred"]
# Current or preferred media.
if album_info.media:
# Preferred media options.
media_patterns: Sequence[str] = preferred_config["media"].as_str_seq()
options = [
re.compile(rf"(\d+x)?({pat})", re.I) for pat in media_patterns
]
if options:
dist.add_priority("media", album_info.media, options)
# Current media.
elif likelies["media"]:
dist.add_equality("media", album_info.media, likelies["media"])
# Mediums.
if likelies["disctotal"] and album_info.mediums:
dist.add_number("mediums", likelies["disctotal"], album_info.mediums)
# Prefer earliest release.
if album_info.year and preferred_config["original_year"]:
# Assume 1889 (the year of the first gramophone discs) if we don't know the
# original year.
original = album_info.original_year or 1889
diff = abs(album_info.year - original)
diff_max = abs(datetime.date.today().year - original)
dist.add_ratio("year", diff, diff_max)
# Year.
elif likelies["year"] and album_info.year:
if likelies["year"] in (album_info.year, album_info.original_year):
# No penalty for matching release or original year.
dist.add("year", 0.0)
elif album_info.original_year:
# Prefer matches closest to the release year.
diff = abs(likelies["year"] - album_info.year)
diff_max = abs(
datetime.date.today().year - album_info.original_year
)
dist.add_ratio("year", diff, diff_max)
else:
# Full penalty when there is no original year.
dist.add("year", 1.0)
# Preferred countries.
country_patterns: Sequence[str] = preferred_config["countries"].as_str_seq()
options = [re.compile(pat, re.I) for pat in country_patterns]
if album_info.country and options:
dist.add_priority("country", album_info.country, options)
# Country.
elif likelies["country"] and album_info.country:
dist.add_string("country", likelies["country"], album_info.country)
# Label.
if likelies["label"] and album_info.label:
dist.add_string("label", likelies["label"], album_info.label)
# Catalog number.
if likelies["catalognum"] and album_info.catalognum:
dist.add_string(
"catalognum", likelies["catalognum"], album_info.catalognum
)
# Disambiguation.
if likelies["albumdisambig"] and album_info.albumdisambig:
dist.add_string(
"albumdisambig", likelies["albumdisambig"], album_info.albumdisambig
)
# Album ID.
if likelies["mb_albumid"]:
dist.add_equality(
"album_id", likelies["mb_albumid"], album_info.album_id
)
# Tracks.
dist.tracks = {}
for item, track in mapping.items():
dist.tracks[track] = track_distance(item, track, album_info.va)
dist.add("tracks", dist.tracks[track].distance)
# Missing tracks.
for _ in range(len(album_info.tracks) - len(mapping)):
dist.add("missing_tracks", 1.0)
# Unmatched tracks.
for _ in range(len(items) - len(mapping)):
dist.add("unmatched_tracks", 1.0)
dist.add_data_source(likelies["data_source"], album_info.data_source)
return dist
beetbox-beets-c1877b7/beets/autotag/hooks.py 0000664 0000000 0000000 00000016317 15073551743 0021073 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Glue between metadata sources and the matching logic."""
from __future__ import annotations
from copy import deepcopy
from typing import TYPE_CHECKING, Any, NamedTuple, TypeVar
from typing_extensions import Self
if TYPE_CHECKING:
from beets.library import Item
from .distance import Distance
V = TypeVar("V")
# Classes used to represent candidate options.
class AttrDict(dict[str, V]):
"""Mapping enabling attribute-style access to stored metadata values."""
def copy(self) -> Self:
return deepcopy(self)
def __getattr__(self, attr: str) -> V:
if attr in self:
return self[attr]
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{attr}'"
)
def __setattr__(self, key: str, value: V) -> None:
self.__setitem__(key, value)
def __hash__(self) -> int: # type: ignore[override]
return id(self)
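# Illustrative sketch (not part of the module): AttrDict makes the two
# access styles interchangeable:
#
#     d = AttrDict(title="Foo")
#     d.title == d["title"]  # True
#     d.album = "Bar"        # same as d["album"] = "Bar"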
class Info(AttrDict[Any]):
"""Container for metadata about a musical entity."""
def __init__(
self,
album: str | None = None,
artist_credit: str | None = None,
artist_id: str | None = None,
artist: str | None = None,
artists_credit: list[str] | None = None,
artists_ids: list[str] | None = None,
artists: list[str] | None = None,
artist_sort: str | None = None,
artists_sort: list[str] | None = None,
data_source: str | None = None,
data_url: str | None = None,
genre: str | None = None,
media: str | None = None,
**kwargs,
) -> None:
self.album = album
self.artist = artist
self.artist_credit = artist_credit
self.artist_id = artist_id
self.artists = artists or []
self.artists_credit = artists_credit or []
self.artists_ids = artists_ids or []
self.artist_sort = artist_sort
self.artists_sort = artists_sort or []
self.data_source = data_source
self.data_url = data_url
self.genre = genre
self.media = media
self.update(kwargs)
class AlbumInfo(Info):
"""Metadata snapshot representing a single album candidate.
Aggregates track entries and album-wide context gathered from an external
provider. Used during matching to evaluate similarity against a group of
user items, and later to drive tagging decisions once selected.
"""
def __init__(
self,
tracks: list[TrackInfo],
*,
album_id: str | None = None,
albumdisambig: str | None = None,
albumstatus: str | None = None,
albumtype: str | None = None,
albumtypes: list[str] | None = None,
asin: str | None = None,
barcode: str | None = None,
catalognum: str | None = None,
country: str | None = None,
day: int | None = None,
discogs_albumid: str | None = None,
discogs_artistid: str | None = None,
discogs_labelid: str | None = None,
label: str | None = None,
language: str | None = None,
mediums: int | None = None,
month: int | None = None,
original_day: int | None = None,
original_month: int | None = None,
original_year: int | None = None,
release_group_title: str | None = None,
releasegroup_id: str | None = None,
releasegroupdisambig: str | None = None,
script: str | None = None,
style: str | None = None,
va: bool = False,
year: int | None = None,
**kwargs,
) -> None:
self.tracks = tracks
self.album_id = album_id
self.albumdisambig = albumdisambig
self.albumstatus = albumstatus
self.albumtype = albumtype
self.albumtypes = albumtypes or []
self.asin = asin
self.barcode = barcode
self.catalognum = catalognum
self.country = country
self.day = day
self.discogs_albumid = discogs_albumid
self.discogs_artistid = discogs_artistid
self.discogs_labelid = discogs_labelid
self.label = label
self.language = language
self.mediums = mediums
self.month = month
self.original_day = original_day
self.original_month = original_month
self.original_year = original_year
self.release_group_title = release_group_title
self.releasegroup_id = releasegroup_id
self.releasegroupdisambig = releasegroupdisambig
self.script = script
self.style = style
self.va = va
self.year = year
super().__init__(**kwargs)
class TrackInfo(Info):
"""Metadata snapshot for a single track candidate.
Captures identifying details and creative credits used to compare against
a user's item. Instances often originate within an AlbumInfo but may also
stand alone for singleton matching.
"""
def __init__(
self,
*,
arranger: str | None = None,
bpm: str | None = None,
composer: str | None = None,
composer_sort: str | None = None,
disctitle: str | None = None,
index: int | None = None,
initial_key: str | None = None,
length: float | None = None,
lyricist: str | None = None,
mb_workid: str | None = None,
medium: int | None = None,
medium_index: int | None = None,
medium_total: int | None = None,
release_track_id: str | None = None,
title: str | None = None,
track_alt: str | None = None,
track_id: str | None = None,
work: str | None = None,
work_disambig: str | None = None,
**kwargs,
) -> None:
self.arranger = arranger
self.bpm = bpm
self.composer = composer
self.composer_sort = composer_sort
self.disctitle = disctitle
self.index = index
self.initial_key = initial_key
self.length = length
self.lyricist = lyricist
self.mb_workid = mb_workid
self.medium = medium
self.medium_index = medium_index
self.medium_total = medium_total
self.release_track_id = release_track_id
self.title = title
self.track_alt = track_alt
self.track_id = track_id
self.work = work
self.work_disambig = work_disambig
super().__init__(**kwargs)
# Structures that compose all the information for a candidate match.
class AlbumMatch(NamedTuple):
distance: Distance
info: AlbumInfo
mapping: dict[Item, TrackInfo]
extra_items: list[Item]
extra_tracks: list[TrackInfo]
class TrackMatch(NamedTuple):
distance: Distance
info: TrackInfo
beetbox-beets-c1877b7/beets/autotag/match.py 0000664 0000000 0000000 00000032744 15073551743 0021046 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Matches existing metadata with canonical information to identify
releases and tracks.
"""
from __future__ import annotations
from enum import IntEnum
from typing import TYPE_CHECKING, Any, NamedTuple, TypeVar
import lap
import numpy as np
from beets import config, logging, metadata_plugins
from beets.autotag import AlbumInfo, AlbumMatch, TrackInfo, TrackMatch, hooks
from beets.util import get_most_common_tags
from .distance import VA_ARTISTS, distance, track_distance
if TYPE_CHECKING:
from collections.abc import Iterable, Sequence
from beets.library import Item
# Global logger.
log = logging.getLogger("beets")
# Recommendation enumeration.
class Recommendation(IntEnum):
"""Indicates a qualitative suggestion to the user about what should
be done with a given match.
"""
none = 0
low = 1
medium = 2
strong = 3
# A structure for holding a set of possible matches to choose between. This
# consists of a list of possible candidates (i.e., AlbumInfo or TrackInfo
# objects) and a recommendation value.
class Proposal(NamedTuple):
candidates: Sequence[AlbumMatch | TrackMatch]
recommendation: Recommendation
# Primary matching functionality.
def assign_items(
items: Sequence[Item],
tracks: Sequence[TrackInfo],
) -> tuple[dict[Item, TrackInfo], list[Item], list[TrackInfo]]:
"""Given a list of Items and a list of TrackInfo objects, find the
best mapping between them. Returns a mapping from Items to TrackInfo
objects, a list of extra Items, and a list of extra TrackInfo
objects. These "extra" objects occur when there is an unequal number
of objects of the two types.
"""
log.debug("Computing track assignment...")
# Construct the cost matrix.
costs = [[float(track_distance(i, t)) for t in tracks] for i in items]
# Assign items to tracks
_, _, assigned_item_idxs = lap.lapjv(np.array(costs), extend_cost=True)
log.debug("...done.")
# Each entry in the `assigned_item_idxs` list corresponds to a track in
# the `tracks` list. Each value is either the index of the assigned item
# in the `items` list, or -1 if that track has no match.
mapping = {
items[iidx]: t
for iidx, t in zip(assigned_item_idxs, tracks)
if iidx != -1
}
extra_items = list(set(items) - mapping.keys())
extra_items.sort(key=lambda i: (i.disc, i.track, i.title))
extra_tracks = list(set(tracks) - set(mapping.values()))
extra_tracks.sort(key=lambda t: (t.index, t.title))
return mapping, extra_items, extra_tracks
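# Illustrative sketch (not part of the module): with two items and three
# candidate tracks, the solver returns the cheapest one-to-one mapping
# plus the leftovers:
#
#     mapping, extra_items, extra_tracks = assign_items(items, tracks)
#     # mapping: {item: its best-matching TrackInfo}
#     # extra_items: []            (every item was assigned)
#     # extra_tracks: [TrackInfo]  (the one unassigned track)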
def match_by_id(items: Iterable[Item]) -> AlbumInfo | None:
"""If the items are tagged with an external source ID, return an
AlbumInfo object for the corresponding album. Otherwise, returns
None.
"""
albumids = (item.mb_albumid for item in items if item.mb_albumid)
# Did any of the items have an MB album ID?
try:
first = next(albumids)
except StopIteration:
log.debug("No album ID found.")
return None
# Is there a consensus on the MB album ID?
for other in albumids:
if other != first:
log.debug("No album ID consensus.")
return None
# If all album IDs are equal, look up the album.
log.debug("Searching for discovered album ID: {}", first)
return metadata_plugins.album_for_id(first)
def _recommendation(
results: Sequence[AlbumMatch | TrackMatch],
) -> Recommendation:
"""Given a sorted list of AlbumMatch or TrackMatch objects, return a
recommendation based on the results' distances.
If the recommendation is higher than the configured maximum for
an applied penalty, the recommendation will be downgraded to the
configured maximum for that penalty.
"""
if not results:
# No candidates: no recommendation.
return Recommendation.none
# Basic distance thresholding.
min_dist = results[0].distance
if min_dist < config["match"]["strong_rec_thresh"].as_number():
# Strong recommendation level.
rec = Recommendation.strong
elif min_dist <= config["match"]["medium_rec_thresh"].as_number():
# Medium recommendation level.
rec = Recommendation.medium
elif len(results) == 1:
# Only a single candidate.
rec = Recommendation.low
elif (
results[1].distance - min_dist
>= config["match"]["rec_gap_thresh"].as_number()
):
# Gap between first two candidates is large.
rec = Recommendation.low
else:
# No conclusion. Return immediately. Can't be downgraded any further.
return Recommendation.none
# Downgrade to the max rec if it is lower than the current rec for an
# applied penalty.
keys = set(min_dist.keys())
if isinstance(results[0], hooks.AlbumMatch):
for track_dist in min_dist.tracks.values():
keys.update(list(track_dist.keys()))
max_rec_view = config["match"]["max_rec"]
for key in keys:
if key in list(max_rec_view.keys()):
max_rec = max_rec_view[key].as_choice(
{
"strong": Recommendation.strong,
"medium": Recommendation.medium,
"low": Recommendation.low,
"none": Recommendation.none,
}
)
rec = min(rec, max_rec)
return rec
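# Illustrative sketch (not part of the module): with the default
# thresholds (`strong_rec_thresh: 0.04`, `medium_rec_thresh: 0.25`), a
# best candidate at distance 0.02 comes back `strong` and one at 0.10
# comes back `medium`; weaker results depend on the gap to the
# runner-up (`rec_gap_thresh`) and on any per-penalty `max_rec` caps.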
AnyMatch = TypeVar("AnyMatch", TrackMatch, AlbumMatch)
def _sort_candidates(candidates: Iterable[AnyMatch]) -> Sequence[AnyMatch]:
"""Sort candidates by distance."""
return sorted(candidates, key=lambda match: match.distance)
def _add_candidate(
items: Sequence[Item],
results: dict[Any, AlbumMatch],
info: AlbumInfo,
):
"""Given a candidate AlbumInfo object, attempt to add the candidate
to the output dictionary of AlbumMatch objects. This involves
checking the track count, ordering the items, checking for
duplicates, and calculating the distance.
"""
log.debug("Candidate: {0.artist} - {0.album} ({0.album_id})", info)
# Discard albums with zero tracks.
if not info.tracks:
log.debug("No tracks.")
return
# Prevent duplicates.
if info.album_id and info.album_id in results:
log.debug("Duplicate.")
return
# Discard matches without required tags.
required_tags: Sequence[str] = config["match"]["required"].as_str_seq()
for req_tag in required_tags:
if getattr(info, req_tag) is None:
log.debug("Ignored. Missing required tag: {}", req_tag)
return
# Find mapping between the items and the track info.
mapping, extra_items, extra_tracks = assign_items(items, info.tracks)
# Get the change distance.
dist = distance(items, info, mapping)
# Skip matches with ignored penalties.
penalties = [key for key, _ in dist]
ignored_tags: Sequence[str] = config["match"]["ignored"].as_str_seq()
for penalty in ignored_tags:
if penalty in penalties:
log.debug("Ignored. Penalty: {}", penalty)
return
log.debug("Success. Distance: {}", dist)
results[info.album_id] = hooks.AlbumMatch(
dist, info, mapping, extra_items, extra_tracks
)
def tag_album(
items,
search_artist: str | None = None,
search_album: str | None = None,
search_ids: list[str] = [],
) -> tuple[str, str, Proposal]:
"""Return a tuple of the current artist name, the current album
name, and a `Proposal` containing `AlbumMatch` candidates.
The artist and album are the most common values of these fields
among `items`.
The `AlbumMatch` objects are generated by searching the metadata
backends. By default, the metadata of the items is used for the
search. This can be customized by setting the parameters.
`search_ids` is a list of metadata backend IDs: if specified,
it will restrict the candidates to those IDs, ignoring
`search_artist` and `search_album`. The `mapping` field of each
`AlbumMatch` has the matched `items` as keys.
The recommendation is calculated from the match quality of the
candidates.
"""
# Get current metadata.
likelies, consensus = get_most_common_tags(items)
cur_artist: str = likelies["artist"]
cur_album: str = likelies["album"]
log.debug("Tagging {} - {}", cur_artist, cur_album)
# The output result, keys are the MB album ID.
candidates: dict[Any, AlbumMatch] = {}
# Search by explicit ID.
if search_ids:
for search_id in search_ids:
log.debug("Searching for album ID: {}", search_id)
if info := metadata_plugins.album_for_id(search_id):
_add_candidate(items, candidates, info)
# Use existing metadata or text search.
else:
# Try search based on current ID.
if info := match_by_id(items):
_add_candidate(items, candidates, info)
rec = _recommendation(list(candidates.values()))
log.debug("Album ID match recommendation is {}", rec)
if candidates and not config["import"]["timid"]:
# If we have a very good MBID match, return immediately.
# Otherwise, this match will compete against metadata-based
# matches.
if rec == Recommendation.strong:
log.debug("ID match.")
return (
cur_artist,
cur_album,
Proposal(list(candidates.values()), rec),
)
# Search terms.
if not (search_artist and search_album):
# No explicit search terms -- use current metadata.
search_artist, search_album = cur_artist, cur_album
log.debug("Search terms: {} - {}", search_artist, search_album)
# Is this album likely to be a "various artist" release?
va_likely = (
(not consensus["artist"])
or (search_artist.lower() in VA_ARTISTS)
or any(item.comp for item in items)
)
log.debug("Album might be VA: {}", va_likely)
# Get the results from the data sources.
for matched_candidate in metadata_plugins.candidates(
items, search_artist, search_album, va_likely
):
_add_candidate(items, candidates, matched_candidate)
log.debug("Evaluating {} candidates.", len(candidates))
# Sort and get the recommendation.
candidates_sorted = _sort_candidates(candidates.values())
rec = _recommendation(candidates_sorted)
return cur_artist, cur_album, Proposal(candidates_sorted, rec)
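# Illustrative sketch (not part of the module): passing `search_ids`
# bypasses the artist/album text search and looks up the release
# directly:
#
#     artist, album, proposal = tag_album(
#         items, search_ids=["<release id from a metadata backend>"]
#     )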
def tag_item(
item,
search_artist: str | None = None,
search_title: str | None = None,
search_ids: list[str] | None = None,
) -> Proposal:
"""Find metadata for a single track. Return a `Proposal` consisting
of `TrackMatch` objects.
`search_artist` and `search_title` may be used to override the item
metadata in the search query. `search_ids` may be used for restricting the
search to a list of metadata backend IDs.
"""
# Holds candidates found so far: keys are MBIDs; values are
# (distance, TrackInfo) pairs.
candidates = {}
rec: Recommendation | None = None
# First, try matching by the external source ID.
trackids = search_ids or [t for t in [item.mb_trackid] if t]
if trackids:
for trackid in trackids:
log.debug("Searching for track ID: {}", trackid)
if info := metadata_plugins.track_for_id(trackid):
dist = track_distance(item, info, incl_artist=True)
candidates[info.track_id] = hooks.TrackMatch(dist, info)
# If this is a good match, then don't keep searching.
rec = _recommendation(_sort_candidates(candidates.values()))
if (
rec == Recommendation.strong
and not config["import"]["timid"]
):
log.debug("Track ID match.")
return Proposal(_sort_candidates(candidates.values()), rec)
# If we're searching by ID, don't proceed.
if search_ids:
if candidates:
assert rec is not None
return Proposal(_sort_candidates(candidates.values()), rec)
else:
return Proposal([], Recommendation.none)
# Search terms.
search_artist = search_artist or item.artist
search_title = search_title or item.title
log.debug("Item search terms: {} - {}", search_artist, search_title)
# Get and evaluate candidate metadata.
for track_info in metadata_plugins.item_candidates(
item, search_artist, search_title
):
dist = track_distance(item, track_info, incl_artist=True)
candidates[track_info.track_id] = hooks.TrackMatch(dist, track_info)
# Sort by distance and return with recommendation.
log.debug("Found {} candidates.", len(candidates))
candidates_sorted = _sort_candidates(candidates.values())
rec = _recommendation(candidates_sorted)
return Proposal(candidates_sorted, rec)
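# Illustrative sketch (not part of the module): a plugin or UI layer
# typically drives singleton matching like this:
#
#     proposal = tag_item(item)
#     if proposal.recommendation >= Recommendation.medium:
#         best = proposal.candidates[0]
#         apply_item_metadata(item, best.info)
#
# (`apply_item_metadata` lives in `beets.autotag`; the threshold check
# here is only an example policy.)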
beetbox-beets-c1877b7/beets/config_default.yaml 0000664 0000000 0000000 00000010767 15073551743 0021572 0 ustar 00root root 0000000 0000000 # --------------- Main ---------------
library: library.db
directory: ~/Music
statefile: state.pickle
# --------------- Plugins ---------------
plugins: [musicbrainz]
pluginpath: []
# --------------- Import ---------------
clutter: ["Thumbs.DB", ".DS_Store"]
ignore: [".*", "*~", "System Volume Information", "lost+found"]
ignore_hidden: yes
import:
# common options
write: yes
copy: yes
move: no
timid: no
quiet: no
log:
# other options
default_action: apply
languages: []
quiet_fallback: skip
none_rec_action: ask
# rare options
link: no
hardlink: no
reflink: no
delete: no
resume: ask
incremental: no
incremental_skip_later: no
from_scratch: no
autotag: yes
singletons: no
detail: no
flat: no
group_albums: no
pretend: false
search_ids: []
duplicate_keys:
album: albumartist album
item: artist title
duplicate_action: ask
duplicate_verbose_prompt: no
bell: no
set_fields: {}
ignored_alias_types: []
singleton_album_disambig: yes
# --------------- Paths ---------------
path_sep_replace: _
drive_sep_replace: _
asciify_paths: false
art_filename: cover
max_filename_length: 0
replace:
# Replace bad characters with _
# prohibited in many filesystem paths
'[<>:\?\*\|]': _
# double quotation mark "
'\"': _
# path separators: \ or /
'[\\/]': _
# starting and closing periods
'^\.': _
'\.$': _
# control characters
'[\x00-\x1f]': _
# dash at the start of a filename (causes command line ambiguity)
'^-': _
# Replace bad characters with nothing
# starting and closing whitespace
'\s+$': ''
'^\s+': ''
aunique:
keys: albumartist album
disambiguators: albumtype year label catalognum albumdisambig releasegroupdisambig
bracket: '[]'
sunique:
keys: artist title
disambiguators: year trackdisambig
bracket: '[]'
# --------------- Tagging ---------------
per_disc_numbering: no
original_date: no
artist_credit: no
id3v23: no
va_name: "Various Artists"
paths:
default: $albumartist/$album%aunique{}/$track $title
singleton: Non-Album/$artist/$title
comp: Compilations/$album%aunique{}/$track $title
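# For example, the default template above would store a track at a path
# like (illustrative only):
#   ~/Music/Artist Name/Album Name/03 Track Title.mp3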
# --------------- Performance ---------------
threaded: yes
timeout: 5.0
# --------------- UI ---------------
verbose: 0
terminal_encoding:
ui:
terminal_width: 80
length_diff_thresh: 10.0
color: yes
colors:
text_success: ['bold', 'green']
text_warning: ['bold', 'yellow']
text_error: ['bold', 'red']
text_highlight: ['bold', 'red']
text_highlight_minor: ['white']
action_default: ['bold', 'cyan']
action: ['bold', 'cyan']
# New Colors
text_faint: ['faint']
import_path: ['bold', 'blue']
import_path_items: ['bold', 'blue']
changed: ['yellow']
text_diff_added: ['bold', 'green']
text_diff_removed: ['bold', 'red']
action_description: ['white']
import:
indentation:
match_header: 2
match_details: 2
match_tracklist: 5
layout: column
# --------------- Search ---------------
format_item: $artist - $album - $title
format_album: $albumartist - $album
time_format: '%Y-%m-%d %H:%M:%S'
format_raw_length: no
sort_album: albumartist+ album+
sort_item: artist+ album+ disc+ track+
sort_case_insensitive: yes
# --------------- Autotagger ---------------
overwrite_null:
album: []
track: []
match:
strong_rec_thresh: 0.04
medium_rec_thresh: 0.25
rec_gap_thresh: 0.25
max_rec:
missing_tracks: medium
unmatched_tracks: medium
distance_weights:
data_source: 2.0
artist: 3.0
album: 3.0
media: 1.0
mediums: 1.0
year: 1.0
country: 0.5
label: 0.5
catalognum: 0.5
albumdisambig: 0.5
album_id: 5.0
tracks: 2.0
missing_tracks: 0.9
unmatched_tracks: 0.6
track_title: 3.0
track_artist: 2.0
track_index: 1.0
track_length: 2.0
track_id: 5.0
medium: 1.0
preferred:
countries: []
media: []
original_year: no
ignored: []
required: []
ignored_media: []
ignore_data_tracks: yes
ignore_video_tracks: yes
track_length_grace: 10
track_length_max: 30
album_disambig_fields: data_source media year country label catalognum albumdisambig
singleton_disambig_fields: data_source index track_alt album
beetbox-beets-c1877b7/beets/dbcore/ 0000775 0000000 0000000 00000000000 15073551743 0017160 5 ustar 00root root 0000000 0000000 beetbox-beets-c1877b7/beets/dbcore/__init__.py 0000664 0000000 0000000 00000002364 15073551743 0021276 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""DBCore is an abstract database package that forms the basis for beets'
Library.
"""
from .db import Database, Model, Results
from .query import (
AndQuery,
FieldQuery,
InvalidQueryError,
MatchQuery,
OrQuery,
Query,
)
from .queryparse import (
parse_sorted_query,
query_from_strings,
sort_from_strings,
)
from .types import Type
__all__ = [
"AndQuery",
"Database",
"FieldQuery",
"InvalidQueryError",
"MatchQuery",
"Model",
"OrQuery",
"Query",
"Results",
"Type",
"parse_sorted_query",
"query_from_strings",
"sort_from_strings",
]
beetbox-beets-c1877b7/beets/dbcore/db.py 0000775 0000000 0000000 00000127652 15073551743 0020137 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The central Model and Database constructs for DBCore."""
from __future__ import annotations
import contextlib
import functools
import os
import re
import sqlite3
import sys
import threading
import time
from abc import ABC
from collections import defaultdict
from collections.abc import Generator, Iterable, Iterator, Mapping, Sequence
from sqlite3 import Connection, sqlite_version_info
from typing import TYPE_CHECKING, Any, AnyStr, Callable, Generic
from typing_extensions import TypeVar # default value support
from unidecode import unidecode
import beets
from ..util import cached_classproperty, functemplate
from . import types
from .query import (
FieldQueryType,
FieldSort,
MatchQuery,
NullSort,
Query,
Sort,
TrueQuery,
)
if TYPE_CHECKING:
from types import TracebackType
from .query import SQLiteType
D = TypeVar("D", bound="Database", default=Any)
FlexAttrs = dict[str, str]
class DBAccessError(Exception):
"""The SQLite database became inaccessible.
This can happen when trying to read or write the database when, for
example, the database file is deleted or otherwise disappears. There
is probably no way to recover from this error.
"""
class DBCustomFunctionError(Exception):
"""A sqlite function registered by beets failed."""
def __init__(self):
super().__init__(
"beets defined SQLite function failed; "
"see the other errors above for details"
)
class FormattedMapping(Mapping[str, str]):
"""A `dict`-like formatted view of a model.
The accessor `mapping[key]` returns the formatted version of
`model[key]` as a unicode string.
The `included_keys` parameter allows filtering the fields that are
returned. By default all fields are returned. Limiting to specific keys can
avoid expensive per-item database queries.
If `for_path` is true, all path separators in the formatted values
are replaced.
"""
ALL_KEYS = "*"
def __init__(
self,
model: Model,
included_keys: str = ALL_KEYS,
for_path: bool = False,
):
self.for_path = for_path
self.model = model
if included_keys == self.ALL_KEYS:
# Performance note: this triggers a database query.
self.model_keys = self.model.keys(True)
else:
self.model_keys = included_keys
def __getitem__(self, key: str) -> str:
if key in self.model_keys:
return self._get_formatted(self.model, key)
else:
raise KeyError(key)
def __iter__(self) -> Iterator[str]:
return iter(self.model_keys)
def __len__(self) -> int:
return len(self.model_keys)
# The following signature is incompatible with `Mapping[str, str]`, since
# the return type doesn't include `None` (but `default` can be `None`).
def get( # type: ignore
self,
key: str,
default: str | None = None,
) -> str:
"""Similar to Mapping.get(key, default), but always formats to str."""
if default is None:
default = self.model._type(key).format(None)
return super().get(key, default)
def _get_formatted(self, model: Model, key: str) -> str:
value = model._type(key).format(model.get(key))
if isinstance(value, bytes):
value = value.decode("utf-8", "ignore")
if self.for_path:
sep_repl: str = beets.config["path_sep_replace"].as_str()
sep_drive: str = beets.config["drive_sep_replace"].as_str()
if re.match(r"^\w:", value):
value = re.sub(r"(?<=^\w):", sep_drive, value)
for sep in (os.path.sep, os.path.altsep):
if sep:
value = value.replace(sep, sep_repl)
return value
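# Illustrative sketch (not part of the module): a FormattedMapping
# renders every field as a unicode string; with `for_path=True`, path
# separators inside values are replaced (by `path_sep_replace`, "_" by
# default):
#
#     fm = FormattedMapping(item, for_path=True)
#     fm.get("title")   # always a str, even for missing fields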
# NOTE: This seems like it should be a `Mapping`, i.e.
# ```
# class LazyConvertDict(Mapping[str, Any])
# ```
# but there are some conflicts with the `Mapping` protocol such that we
# can't do this without changing behaviour: In particular, iterators returned
# by some methods build intermediate lists, such that modification of the
# `LazyConvertDict` becomes safe during iteration. Some code does in fact rely
# on this.
class LazyConvertDict:
"""Lazily convert types for attributes fetched from the database"""
def __init__(self, model_cls: Model):
"""Initialize the object empty"""
# FIXME: Dict[str, SQLiteType]
self._data: dict[str, Any] = {}
self.model_cls = model_cls
self._converted: dict[str, Any] = {}
def init(self, data: dict[str, Any]):
"""Set the base data that should be lazily converted"""
self._data = data
def _convert(self, key: str, value: Any):
"""Convert the attribute type according to the SQL type"""
return self.model_cls._type(key).from_sql(value)
def __setitem__(self, key: str, value: Any):
"""Set an attribute value, assume it's already converted"""
self._converted[key] = value
def __getitem__(self, key: str) -> Any:
"""Get an attribute value, converting the type on demand
if needed
"""
if key in self._converted:
return self._converted[key]
elif key in self._data:
value = self._convert(key, self._data[key])
self._converted[key] = value
return value
def __delitem__(self, key: str):
"""Delete both converted and base data"""
if key in self._converted:
del self._converted[key]
if key in self._data:
del self._data[key]
def keys(self) -> list[str]:
"""Get a list of available field names for this object."""
return list(self._converted.keys()) + list(self._data.keys())
def copy(self) -> LazyConvertDict:
"""Create a copy of the object."""
new = self.__class__(self.model_cls)
new._data = self._data.copy()
new._converted = self._converted.copy()
return new
# Act like a dictionary.
def update(self, values: Mapping[str, Any]):
"""Assign all values in the given dict."""
for key, value in values.items():
self[key] = value
def items(self) -> Iterable[tuple[str, Any]]:
"""Iterate over (key, value) pairs that this object contains.
Computed fields are not included.
"""
for key in self:
yield key, self[key]
def get(self, key: str, default: Any | None = None):
"""Get the value for a given key or `default` if it does not
exist.
"""
if key in self:
return self[key]
else:
return default
def __contains__(self, key: Any) -> bool:
"""Determine whether `key` is an attribute on this object."""
return key in self._converted or key in self._data
def __iter__(self) -> Iterator[str]:
"""Iterate over the available field names (excluding computed
fields).
"""
# NOTE: It would be nice to use the following:
# yield from self._converted
# yield from self._data
# but that won't work since some code relies on modifying `self`
# during iteration.
return iter(self.keys())
def __len__(self) -> int:
# FIXME: This is incorrect due to duplication of keys
return len(self._converted) + len(self._data)
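# Illustrative sketch (not part of the module): values stay in their raw
# SQLite form until first access, after which the converted result is
# cached:
#
#     d = LazyConvertDict(item)   # `item` is any Model instance
#     d.init({"bpm": "120"})      # raw value straight from the database
#     d["bpm"]                    # converted via the model's type, cached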
# Abstract base for model classes.
class Model(ABC, Generic[D]):
"""An abstract object representing an object in the database. Model
objects act like dictionaries (i.e., they allow subscript access like
``obj['field']``). The same field set is available via attribute
access as a shortcut (i.e., ``obj.field``). Three kinds of attributes are
available:
* **Fixed attributes** come from a predetermined list of field
names. These fields correspond to SQLite table columns and are
thus fast to read, write, and query.
* **Flexible attributes** are free-form and do not need to be listed
ahead of time.
* **Computed attributes** are read-only fields computed by a getter
function provided by a plugin.
Access to all three field types is uniform: ``obj.field`` works the
same regardless of whether ``field`` is fixed, flexible, or
computed.
Model objects can optionally be associated with a `Library` object,
in which case they can be loaded and stored from the database. Dirty
flags are used to track which fields need to be stored.
"""
# Abstract components (to be provided by subclasses).
_table: str
"""The main SQLite table name.
"""
_flex_table: str
"""The flex field SQLite table name.
"""
_fields: dict[str, types.Type] = {}
"""A mapping indicating available "fixed" fields on this type. The
keys are field names and the values are `Type` objects.
"""
_search_fields: Sequence[str] = ()
"""The fields that should be queried by default by unqualified query
terms.
"""
@cached_classproperty
def _types(cls) -> dict[str, types.Type]:
"""Optional types for non-fixed (flexible and computed) fields."""
return {}
_sorts: dict[str, type[FieldSort]] = {}
"""Optional named sort criteria. The keys are strings and the values
are subclasses of `Sort`.
"""
@cached_classproperty
def _queries(cls) -> dict[str, FieldQueryType]:
"""Named queries that use a field-like `name:value` syntax but which
do not relate to any specific field.
"""
return {}
_always_dirty = False
"""By default, fields only become "dirty" when their value actually
changes. Enabling this flag marks fields as dirty even when the new
value is the same as the old value (e.g., `o.f = o.f`).
"""
_revision = -1
"""A revision number from when the model was loaded from or written
to the database.
"""
@cached_classproperty
def _relation(cls):
"""The model that this model is closely related to."""
return cls
@cached_classproperty
def relation_join(cls) -> str:
"""Return the join required to include the related table in the query.
This is intended to be used as a FROM clause in the SQL query.
"""
return ""
@cached_classproperty
def all_db_fields(cls) -> set[str]:
return cls._fields.keys() | cls._relation._fields.keys()
@cached_classproperty
def shared_db_fields(cls) -> set[str]:
return cls._fields.keys() & cls._relation._fields.keys()
@cached_classproperty
def other_db_fields(cls) -> set[str]:
"""Fields in the related table."""
return cls._relation._fields.keys() - cls.shared_db_fields
@classmethod
def _getters(cls: type[Model]):
"""Return a mapping from field names to getter functions."""
# We could cache this if it becomes a performance problem to
# gather the getter mapping every time.
raise NotImplementedError()
def _template_funcs(self) -> Mapping[str, Callable[[str], str]]:
"""Return a mapping from function names to text-transformer
functions.
"""
# As above: we could consider caching this result.
raise NotImplementedError()
# Basic operation.
def __init__(self, db: D | None = None, **values):
"""Create a new object with an optional Database association and
initial field values.
"""
self._db = db
self._dirty: set[str] = set()
self._values_fixed = LazyConvertDict(self)
self._values_flex = LazyConvertDict(self)
# Initial contents.
self.update(values)
self.clear_dirty()
@classmethod
def _awaken(
cls: type[AnyModel],
db: D | None = None,
fixed_values: dict[str, Any] = {},
flex_values: dict[str, Any] = {},
) -> AnyModel:
"""Create an object with values drawn from the database.
This is a performance optimization: the checks involved with
ordinary construction are bypassed.
"""
obj = cls(db)
obj._values_fixed.init(fixed_values)
obj._values_flex.init(flex_values)
return obj
def __repr__(self) -> str:
return (
f"{type(self).__name__}"
f"({', '.join(f'{k}={v!r}' for k, v in dict(self).items())})"
)
def clear_dirty(self):
"""Mark all fields as *clean* (i.e., not needing to be stored to
the database). Also update the revision.
"""
self._dirty = set()
if self._db:
self._revision = self._db.revision
def _check_db(self, need_id: bool = True) -> D:
"""Ensure that this object is associated with a database row: it
has a reference to a database (`_db`) and an id. A ValueError
exception is raised otherwise.
"""
if not self._db:
raise ValueError(f"{type(self).__name__} has no database")
if need_id and not self.id:
raise ValueError(f"{type(self).__name__} has no id")
return self._db
def copy(self) -> Model:
"""Create a copy of the model object.
The field values and other state are duplicated, but the new copy
remains associated with the same database as the old object.
(A simple `copy.deepcopy` will not work because it would try to
duplicate the SQLite connection.)
"""
new = self.__class__()
new._db = self._db
new._values_fixed = self._values_fixed.copy()
new._values_flex = self._values_flex.copy()
new._dirty = self._dirty.copy()
return new
# Essential field accessors.
@classmethod
def _type(cls, key) -> types.Type:
"""Get the type of a field, a `Type` instance.
If the field has no explicit type, it is given the base `Type`,
which does no conversion.
"""
return cls._fields.get(key) or cls._types.get(key) or types.DEFAULT
def _get(self, key, default: Any = None, raise_: bool = False):
"""Get the value for a field, or `default`. Alternatively,
raise a KeyError if the field is not available.
"""
getters = self._getters()
if key in getters: # Computed.
return getters[key](self)
elif key in self._fields: # Fixed.
if key in self._values_fixed:
return self._values_fixed[key]
else:
return self._type(key).null
elif key in self._values_flex: # Flexible.
return self._values_flex[key]
elif raise_:
raise KeyError(key)
else:
return default
get = _get
def __getitem__(self, key):
"""Get the value for a field. Raise a KeyError if the field is
not available.
"""
return self._get(key, raise_=True)
def _setitem(self, key, value):
"""Assign the value for a field, return whether new and old value
differ.
"""
# Choose where to place the value.
if key in self._fields:
source = self._values_fixed
else:
source = self._values_flex
# If the field has a type, filter the value.
value = self._type(key).normalize(value)
# Assign value and possibly mark as dirty.
old_value = source.get(key)
source[key] = value
changed = old_value != value
if self._always_dirty or changed:
self._dirty.add(key)
return changed
def __setitem__(self, key, value):
"""Assign the value for a field."""
self._setitem(key, value)
def __delitem__(self, key):
"""Remove a flexible attribute from the model."""
if key in self._values_flex: # Flexible.
del self._values_flex[key]
self._dirty.add(key) # Mark for dropping on store.
elif key in self._fields: # Fixed
setattr(self, key, self._type(key).null)
elif key in self._getters(): # Computed.
raise KeyError(f"computed field {key} cannot be deleted")
else:
raise KeyError(f"no such field {key}")
def keys(self, computed: bool = False):
"""Get a list of available field names for this object. The
`computed` parameter controls whether computed (plugin-provided)
fields are included in the key list.
"""
base_keys = list(self._fields) + list(self._values_flex.keys())
if computed:
return base_keys + list(self._getters().keys())
else:
return base_keys
@classmethod
def all_keys(cls):
"""Get a list of available keys for objects of this type.
Includes fixed and computed fields.
"""
return list(cls._fields) + list(cls._getters().keys())
# Act like a dictionary.
def update(self, values):
"""Assign all values in the given dict."""
for key, value in values.items():
self[key] = value
def items(self) -> Iterator[tuple[str, Any]]:
"""Iterate over (key, value) pairs that this object contains.
Computed fields are not included.
"""
for key in self:
yield key, self[key]
def __contains__(self, key) -> bool:
"""Determine whether `key` is an attribute on this object."""
return key in self.keys(computed=True)
def __iter__(self) -> Iterator[str]:
"""Iterate over the available field names (excluding computed
fields).
"""
return iter(self.keys())
# Convenient attribute access.
def __getattr__(self, key):
if key.startswith("_"):
raise AttributeError(f"model has no attribute {key!r}")
else:
try:
return self[key]
except KeyError:
raise AttributeError(f"no such field {key!r}")
def __setattr__(self, key, value):
if key.startswith("_"):
super().__setattr__(key, value)
else:
self[key] = value
def __delattr__(self, key):
if key.startswith("_"):
super().__delattr__(key)
else:
del self[key]
# Database interaction (CRUD methods).
def store(self, fields: Iterable[str] | None = None):
"""Save the object's metadata into the library database.
:param fields: the fields to be stored. If not specified, all fields
will be.
"""
if fields is None:
fields = self._fields
db = self._check_db()
# Build assignments for query.
assignments = []
subvars: list[SQLiteType] = []
for key in fields:
if key != "id" and key in self._dirty:
self._dirty.remove(key)
assignments.append(f"{key}=?")
value = self._type(key).to_sql(self[key])
subvars.append(value)
with db.transaction() as tx:
# Main table update.
if assignments:
query = f"UPDATE {self._table} SET {','.join(assignments)} WHERE id=?"
subvars.append(self.id)
tx.mutate(query, subvars)
# Modified/added flexible attributes.
for key, value in self._values_flex.items():
if key in self._dirty:
self._dirty.remove(key)
value = self._type(key).to_sql(value)
tx.mutate(
f"INSERT INTO {self._flex_table} "
"(entity_id, key, value) "
"VALUES (?, ?, ?);",
(self.id, key, value),
)
# Deleted flexible attributes.
for key in self._dirty:
tx.mutate(
f"DELETE FROM {self._flex_table} WHERE entity_id=? AND key=?",
(self.id, key),
)
self.clear_dirty()
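# Hedged usage sketch: only dirty fields are written, and flexible
# attributes deleted since the last store are removed from the flex table.
#
#     item.year = 2001   # marks "year" dirty
#     item.store()       # UPDATEs the main row, then clears the dirty set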
def load(self):
"""Refresh the object's metadata from the library database.
The database query is skipped when the object has no dirty fields
and no transaction has been committed since it was last loaded or
stored.
"""
db = self._check_db()
if not self._dirty and db.revision == self._revision:
# Exit early
return
stored_obj = db._get(type(self), self.id)
assert stored_obj is not None, f"object {self.id} not in DB"
self._values_fixed = LazyConvertDict(self)
self._values_flex = LazyConvertDict(self)
self.update(dict(stored_obj))
self.clear_dirty()
def remove(self):
"""Remove the object's associated rows from the database."""
db = self._check_db()
with db.transaction() as tx:
tx.mutate(f"DELETE FROM {self._table} WHERE id=?", (self.id,))
tx.mutate(
f"DELETE FROM {self._flex_table} WHERE entity_id=?", (self.id,)
)
def add(self, db: D | None = None):
"""Add the object to the library database. This object must be
associated with a database; you can provide one via the `db`
parameter or use the currently associated database.
The object's `id` and `added` fields are set along with any
current field values.
"""
if db:
self._db = db
db = self._check_db(False)
with db.transaction() as tx:
new_id = tx.mutate(f"INSERT INTO {self._table} DEFAULT VALUES")
self.id = new_id
self.added = time.time()
# Mark every non-null field as dirty and store.
for key in self:
if self[key] is not None:
self._dirty.add(key)
self.store()
# Formatting and templating.
_formatter = FormattedMapping
def formatted(
self,
included_keys: str = _formatter.ALL_KEYS,
for_path: bool = False,
):
"""Get a mapping containing all values on this object formatted
as human-readable unicode strings.
"""
return self._formatter(self, included_keys, for_path)
def evaluate_template(
self,
template: str | functemplate.Template,
for_path: bool = False,
) -> str:
"""Evaluate a template (a string or a `Template` object) using
the object's fields. If `for_path` is true, then no new path
separators will be added to the template.
"""
# Perform substitution.
if isinstance(template, str):
t = functemplate.template(template)
else:
# Help out mypy
t = template
return t.substitute(
self.formatted(for_path=for_path), self._template_funcs()
)
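# A minimal sketch of template evaluation (field values hypothetical):
#
#     item.evaluate_template("$albumartist - $title")
#     # -> "Miles Davis - So What"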
# Parsing.
@classmethod
def _parse(cls, key, string: str) -> Any:
"""Parse a string as a value for the given key."""
if not isinstance(string, str):
raise TypeError("_parse() argument must be a string")
return cls._type(key).parse(string)
def set_parse(self, key, string: str):
"""Set the object's key to a value represented by a string."""
self[key] = self._parse(key, string)
def __getstate__(self):
"""Return the state of the object for pickling.
Remove the database connection as sqlite connections are not
picklable.
"""
state = self.__dict__.copy()
state["_db"] = None
return state
# Database controller and supporting interfaces.
AnyModel = TypeVar("AnyModel", bound=Model)
class Results(Generic[AnyModel]):
"""An item query result set. Iterating over the collection lazily
constructs Model objects that reflect database rows.
"""
def __init__(
self,
model_class: type[AnyModel],
rows: list[sqlite3.Row],
db: D,
flex_rows,
query: Query | None = None,
sort=None,
):
"""Create a result set that will construct objects of type
`model_class`.
`model_class` is a subclass of `Model` that will be
constructed. `rows` is a query result: a list of mappings. The
new objects will be associated with the database `db`.
If `query` is provided, it is used as a predicate to filter the
results for a "slow query" that cannot be evaluated by the
database directly. If `sort` is provided, it is used to sort the
full list of results before returning. This means it is a "slow
sort" and all objects must be built before returning the first
one.
"""
self.model_class = model_class
self.rows = rows
self.db = db
self.query = query
self.sort = sort
self.flex_rows = flex_rows
# We keep a queue of rows we haven't yet consumed for
# materialization. We preserve the original total number of
# rows.
self._rows = rows
self._row_count = len(rows)
# The materialized objects corresponding to rows that have been
# consumed.
self._objects: list[AnyModel] = []
def _get_objects(self) -> Iterator[AnyModel]:
"""Construct and generate Model objects for they query. The
objects are returned in the order emitted from the database; no
slow sort is applied.
For performance, this generator caches materialized objects to
avoid constructing them more than once. This way, iterating over
a `Results` object a second time should be much faster than the
first.
"""
# Index flexible attributes by the item ID, so we have easier access
flex_attrs = self._get_indexed_flex_attrs()
index = 0 # Position in the materialized objects.
while index < len(self._objects) or self._rows:
# Are there previously-materialized objects to produce?
if index < len(self._objects):
yield self._objects[index]
index += 1
# Otherwise, we consume another row, materialize its object
# and produce it.
else:
while self._rows:
row = self._rows.pop(0)
obj = self._make_model(row, flex_attrs.get(row["id"], {}))
# If there is a slow-query predicate, ensure that the
# object passes it.
if not self.query or self.query.match(obj):
self._objects.append(obj)
index += 1
yield obj
break
def __iter__(self) -> Iterator[AnyModel]:
"""Construct and generate Model objects for all matching
objects, in sorted order.
"""
if self.sort:
# Slow sort. Must build the full list first.
objects = self.sort.sort(list(self._get_objects()))
return iter(objects)
else:
# Objects are pre-sorted (i.e., by the database).
return self._get_objects()
def _get_indexed_flex_attrs(self) -> dict[int, FlexAttrs]:
"""Index flexible attributes by the entity id they belong to"""
flex_values: dict[int, FlexAttrs] = {}
for row in self.flex_rows:
if row["entity_id"] not in flex_values:
flex_values[row["entity_id"]] = {}
flex_values[row["entity_id"]][row["key"]] = row["value"]
return flex_values
def _make_model(
self, row: sqlite3.Row, flex_values: FlexAttrs = {}
) -> AnyModel:
"""Create a Model object for the given row"""
cols = dict(row)
values = {k: v for (k, v) in cols.items() if not k[:4] == "flex"}
# Construct the Python object
obj = self.model_class._awaken(self.db, values, flex_values)
return obj
def __len__(self) -> int:
"""Get the number of matching objects."""
if not self._rows:
# Fully materialized. Just count the objects.
return len(self._objects)
elif self.query:
# A slow query. Fall back to testing every object.
count = 0
for obj in self:
count += 1
return count
else:
# A fast query. Just count the rows.
return self._row_count
def __nonzero__(self) -> bool:
"""Does this result contain any objects?"""
return self.__bool__()
def __bool__(self) -> bool:
"""Does this result contain any objects?"""
return bool(len(self))
def __getitem__(self, n):
"""Get the nth item in this result set. This is inefficient: all
items up to n are materialized and thrown away.
"""
if not self._rows and not self.sort:
# Fully materialized and already in order. Just look up the
# object.
return self._objects[n]
it = iter(self)
try:
for i in range(n):
next(it)
return next(it)
except StopIteration:
raise IndexError(f"result index {n} out of range")
def get(self) -> AnyModel | None:
"""Return the first matching object, or None if no objects
match.
"""
it = iter(self)
try:
return next(it)
except StopIteration:
return None
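# Hedged sketch of typical Results usage (model class and query are
# hypothetical): iteration materializes objects lazily and caches them, so a
# second pass is much cheaper, while len() on a slow query must test every
# object.
#
#     results = db._fetch(Item, SubstringQuery("title", "love"))
#     first = results.get()                   # None if nothing matched
#     titles = [obj.title for obj in results]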
class Transaction:
"""A context manager for safe, concurrent access to the database.
All SQL commands should be executed through a transaction.
"""
_mutated = False
"""A flag storing whether a mutation has been executed in the
current transaction.
"""
def __init__(self, db: Database):
self.db = db
def __enter__(self) -> Transaction:
"""Begin a transaction. This transaction may be created while
another is active in a different thread.
"""
with self.db._tx_stack() as stack:
first = not stack
stack.append(self)
if first:
# Beginning a "root" transaction, which corresponds to an
# SQLite transaction.
self.db._db_lock.acquire()
return self
def __exit__(
self,
exc_type: type[Exception],
exc_value: Exception,
traceback: TracebackType,
):
"""Complete a transaction. This must be the most recently
entered but not yet exited transaction. If it is the last active
transaction, the database updates are committed.
"""
# Beware of races; currently secured by db._db_lock
self.db.revision += self._mutated
with self.db._tx_stack() as stack:
assert stack.pop() is self
empty = not stack
if empty:
# Ending a "root" transaction. End the SQLite transaction.
self.db._connection().commit()
self._mutated = False
self.db._db_lock.release()
if (
isinstance(exc_value, sqlite3.OperationalError)
and exc_value.args[0] == "user-defined function raised exception"
):
raise DBCustomFunctionError()
def query(
self, statement: str, subvals: Sequence[SQLiteType] = ()
) -> list[sqlite3.Row]:
"""Execute an SQL statement with substitution values and return
a list of rows from the database.
"""
cursor = self.db._connection().execute(statement, subvals)
return cursor.fetchall()
def mutate(self, statement: str, subvals: Sequence[SQLiteType] = ()) -> Any:
"""Execute an SQL statement with substitution values and return
the row ID of the last affected row.
"""
try:
cursor = self.db._connection().execute(statement, subvals)
except sqlite3.OperationalError as e:
# In two specific cases, SQLite reports an error while accessing
# the underlying database file. We surface these exceptions as
# DBAccessError so the application can abort.
if e.args[0] in (
"attempt to write a readonly database",
"unable to open database file",
):
raise DBAccessError(e.args[0])
else:
raise
else:
self._mutated = True
return cursor.lastrowid
def script(self, statements: str):
"""Execute a string containing multiple SQL statements."""
# We don't know whether this mutates, but quite likely it does.
self._mutated = True
self.db._connection().executescript(statements)
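# Usage sketch (table name hypothetical): transactions nest per thread, and
# only the outermost "root" transaction acquires the database lock and
# commits on exit.
#
#     with db.transaction() as tx:
#         rows = tx.query("SELECT * FROM items WHERE year=?", (2001,))
#         tx.mutate("UPDATE items SET year=? WHERE id=?", (2002, 42))
#     # the root transaction commits here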
class Database:
"""A container for Model objects that wraps an SQLite database as
the backend.
"""
_models: Sequence[type[Model]] = ()
"""The Model subclasses representing tables in this database.
"""
supports_extensions = hasattr(sqlite3.Connection, "enable_load_extension")
"""Whether or not the current version of SQLite supports extensions"""
revision = 0
"""The current revision of the database. To be increased whenever
data is written in a transaction.
"""
def __init__(self, path, timeout: float = 5.0):
if sqlite3.threadsafety == 0:
raise RuntimeError(
"sqlite3 must be compiled with multi-threading support"
)
# Print tracebacks for exceptions in user defined functions
# See also `self.add_functions` and `DBCustomFunctionError`.
#
# `if`: use feature detection because PyPy doesn't support this.
if hasattr(sqlite3, "enable_callback_tracebacks"):
sqlite3.enable_callback_tracebacks(True)
self.path = path
self.timeout = timeout
self._connections: dict[int, sqlite3.Connection] = {}
self._tx_stacks: defaultdict[int, list[Transaction]] = defaultdict(list)
self._extensions: list[str] = []
# A lock to protect the _connections and _tx_stacks maps, which
# both map thread IDs to private resources.
self._shared_map_lock = threading.Lock()
# A lock to protect access to the database itself. SQLite does
# allow multiple threads to access the database at the same
# time, but many users were experiencing crashes related to this
# capability: where SQLite was compiled without HAVE_USLEEP, its
# backoff algorithm in the case of contention was causing
# whole-second sleeps (!) that would trigger its internal
# timeout. Using this lock ensures only one SQLite transaction
# is active at a time.
self._db_lock = threading.Lock()
# Set up database schema.
for model_cls in self._models:
self._make_table(model_cls._table, model_cls._fields)
self._make_attribute_table(model_cls._flex_table)
# Primitive access control: connections and transactions.
def _connection(self) -> Connection:
"""Get a SQLite connection object to the underlying database.
One connection object is created per thread.
"""
thread_id = threading.current_thread().ident
# Help the type checker: ident can only be None if the thread has not
# been started yet; but since this results from current_thread(), that
# can't happen
assert thread_id is not None
with self._shared_map_lock:
if thread_id in self._connections:
return self._connections[thread_id]
else:
conn = self._create_connection()
self._connections[thread_id] = conn
return conn
def _create_connection(self) -> Connection:
"""Create a SQLite connection to the underlying database.
Makes a new connection every time. If you need to configure the
connection settings (e.g., add custom functions), override this
method.
"""
# Make a new connection. The `sqlite3` module can't use
# bytestring paths here on Python 3, so we need to
# provide a `str` using `os.fsdecode`.
conn = sqlite3.connect(
os.fsdecode(self.path),
timeout=self.timeout,
# We have our own same-thread checks in _connection(), but need to
# call conn.close() in _close()
check_same_thread=False,
)
self.add_functions(conn)
if self.supports_extensions:
conn.enable_load_extension(True)
# Load any extensions that are already loaded for other connections.
for path in self._extensions:
conn.load_extension(path)
# Access SELECT results like dictionaries.
conn.row_factory = sqlite3.Row
return conn
def add_functions(self, conn):
def regexp(value, pattern):
if isinstance(value, bytes):
value = value.decode()
return re.search(pattern, str(value)) is not None
def bytelower(bytestring: AnyStr | None) -> AnyStr | None:
"""A custom ``bytelower`` sqlite function so we can compare
bytestrings in a semi case insensitive fashion.
This is to work around sqlite builds are that compiled with
``-DSQLITE_LIKE_DOESNT_MATCH_BLOBS``. See
``https://github.com/beetbox/beets/issues/2172`` for details.
"""
if bytestring is not None:
return bytestring.lower()
return bytestring
create_function = conn.create_function
if sys.version_info >= (3, 8) and sqlite_version_info >= (3, 8, 3):
# Let sqlite make extra optimizations
create_function = functools.partial(
conn.create_function, deterministic=True
)
create_function("regexp", 2, regexp)
create_function("unidecode", 1, unidecode)
create_function("bytelower", 1, bytelower)
def _close(self):
"""Close the all connections to the underlying SQLite database
from all threads. This does not render the database object
unusable; new connections can still be opened on demand.
"""
with self._shared_map_lock:
while self._connections:
_thread_id, conn = self._connections.popitem()
conn.close()
@contextlib.contextmanager
def _tx_stack(self) -> Generator[list[Transaction]]:
"""A context manager providing access to the current thread's
transaction stack. The context manager synchronizes access to
the stack map. Transactions should never migrate across threads.
"""
thread_id = threading.current_thread().ident
# Help the type checker: ident can only be None if the thread has not
# been started yet; but since this results from current_thread(), that
# can't happen
assert thread_id is not None
with self._shared_map_lock:
yield self._tx_stacks[thread_id]
def transaction(self) -> Transaction:
"""Get a :class:`Transaction` object for interacting directly
with the underlying SQLite database.
"""
return Transaction(self)
def load_extension(self, path: str):
"""Load an SQLite extension into all open connections."""
if not self.supports_extensions:
raise ValueError(
"this sqlite3 installation does not support extensions"
)
self._extensions.append(path)
# Load the extension into every open connection.
for conn in self._connections.values():
conn.load_extension(path)
# Schema setup and migration.
def _make_table(self, table: str, fields: Mapping[str, types.Type]):
"""Set up the schema of the database. `fields` is a mapping
from field names to `Type`s. Columns are added if necessary.
"""
# Get current schema.
with self.transaction() as tx:
rows = tx.query(f"PRAGMA table_info({table})")
current_fields = {row[1] for row in rows}
field_names = set(fields.keys())
if current_fields.issuperset(field_names):
# Table exists and has all the required columns.
return
if not current_fields:
# No table exists.
columns = []
for name, typ in fields.items():
columns.append(f"{name} {typ.sql}")
setup_sql = f"CREATE TABLE {table} ({', '.join(columns)});\n"
else:
# Table exists but does not match the field set.
setup_sql = ""
for name, typ in fields.items():
if name in current_fields:
continue
setup_sql += (
f"ALTER TABLE {table} ADD COLUMN {name} {typ.sql};\n"
)
with self.transaction() as tx:
tx.script(setup_sql)
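# Migration sketch: adding a new fixed field `bpm` with SQL type INTEGER to
# an existing `items` table (names hypothetical) would generate:
#
#     ALTER TABLE items ADD COLUMN bpm INTEGER;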
def _make_attribute_table(self, flex_table: str):
"""Create a table and associated index for flexible attributes
for the given entity (if they don't exist).
"""
with self.transaction() as tx:
tx.script(f"""
CREATE TABLE IF NOT EXISTS {flex_table} (
id INTEGER PRIMARY KEY,
entity_id INTEGER,
key TEXT,
value TEXT,
UNIQUE(entity_id, key) ON CONFLICT REPLACE);
CREATE INDEX IF NOT EXISTS {flex_table}_by_entity
ON {flex_table} (entity_id);
""")
# Querying.
def _fetch(
self,
model_cls: type[AnyModel],
query: Query | None = None,
sort: Sort | None = None,
) -> Results[AnyModel]:
"""Fetch the objects of type `model_cls` matching the given
query. The query may be given as a string, string sequence, a
Query object, or None (to fetch everything). `sort` is an
`Sort` object.
"""
query = query or TrueQuery() # A null query.
sort = sort or NullSort() # Unsorted.
where, subvals = query.clause()
order_by = sort.order_clause()
table = model_cls._table
_from = table
if query.field_names & model_cls.other_db_fields:
_from += f" {model_cls.relation_join}"
# group by id to avoid duplicates when joining with the relation
sql = (
f"SELECT {table}.* "
f"FROM ({_from}) "
f"WHERE {where or 1} "
f"GROUP BY {table}.id"
)
# Fetch flexible attributes for items matching the main query.
# Doing the per-item filtering in python is faster than issuing
# one query per item to sqlite.
flex_sql = (
"SELECT * "
f"FROM {model_cls._flex_table} "
f"WHERE entity_id IN (SELECT id FROM ({sql}))"
)
if order_by:
# the sort field may exist in both 'items' and 'albums' tables
# (when they are joined), causing ambiguous column OperationalError
# if we try to order directly.
# Since the join is required only for filtering, we can filter in
# a subquery and order the result, which returns unique fields.
sql = f"SELECT * FROM ({sql}) ORDER BY {order_by}"
with self.transaction() as tx:
rows = tx.query(sql, subvals)
flex_rows = tx.query(flex_sql, subvals)
return Results(
model_cls,
rows,
self,
flex_rows,
None if where else query, # Slow query component.
sort if sort.is_slow() else None, # Slow sort component.
)
def _get(
self,
model_cls: type[AnyModel],
id,
) -> AnyModel | None:
"""Get a Model object by its id or None if the id does not
exist.
"""
return self._fetch(model_cls, MatchQuery("id", id)).get()
beetbox-beets-c1877b7/beets/dbcore/query.py 0000664 0000000 0000000 00000107211 15073551743 0020701 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The Query type hierarchy for DBCore."""
from __future__ import annotations
import os
import re
import unicodedata
from abc import ABC, abstractmethod
from collections.abc import Iterator, MutableSequence, Sequence
from datetime import datetime, timedelta
from functools import cached_property, reduce
from operator import mul, or_
from re import Pattern
from typing import TYPE_CHECKING, Any, Generic, TypeVar, Union
from beets import util
from beets.util.units import raw_seconds_short
if TYPE_CHECKING:
from beets.dbcore.db import AnyModel, Model
P = TypeVar("P", default=Any)
else:
P = TypeVar("P")
# To use the SQLite "blob" type, it doesn't suffice to provide a byte
# string; SQLite treats that as encoded text. Wrapping it in a
# `memoryview` tells it that we actually mean non-text data.
# needs to be defined in here due to circular import.
# TODO: remove it from this module and define it in dbcore/types.py instead
BLOB_TYPE = memoryview
class ParsingError(ValueError):
"""Abstract class for any unparsable user-requested album/query
specification.
"""
class InvalidQueryError(ParsingError):
"""Represent any kind of invalid query.
The query should be a unicode string or a list, which will be space-joined.
"""
def __init__(self, query, explanation):
if isinstance(query, list):
query = " ".join(query)
message = f"'{query}': {explanation}"
super().__init__(message)
class InvalidQueryArgumentValueError(ParsingError):
"""Represent a query argument that could not be converted as expected.
It exists to be caught in upper stack levels so a meaningful (i.e. with the
query) InvalidQueryError can be raised.
"""
def __init__(self, what, expected, detail=None):
message = f"'{what}' is not {expected}"
if detail:
message = f"{message}: {detail}"
super().__init__(message)
class Query(ABC):
"""An abstract class representing a query into the database."""
@property
def field_names(self) -> set[str]:
"""Return a set with field names that this query operates on."""
return set()
@abstractmethod
def clause(self) -> tuple[str | None, Sequence[Any]]:
"""Generate an SQLite expression implementing the query.
Return (clause, subvals) where clause is a valid sqlite
WHERE clause implementing the query and subvals is a list of
items to be substituted for ?s in the clause.
The default implementation returns None, falling back to a slow query
using `match()`.
"""
@abstractmethod
def match(self, obj: Model):
"""Check whether this query matches a given Model. Can be used to
perform queries on arbitrary sets of Model.
"""
def __and__(self, other: Query) -> AndQuery:
return AndQuery([self, other])
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
def __eq__(self, other) -> bool:
return type(self) is type(other)
def __hash__(self) -> int:
"""Minimalistic default implementation of a hash.
Given the implementation of __eq__ above, this is
certainly correct.
"""
return hash(type(self))
SQLiteType = Union[str, bytes, float, int, memoryview, None]
AnySQLiteType = TypeVar("AnySQLiteType", bound=SQLiteType)
FieldQueryType = type["FieldQuery"]
class FieldQuery(Query, Generic[P]):
"""An abstract query that searches in a specific field for a
pattern. Subclasses must provide a `value_match` class method, which
determines whether a certain pattern string matches a certain value
string. Subclasses may also provide `col_clause` to implement the
same matching functionality in SQLite.
"""
@property
def field(self) -> str:
return (
f"{self.table}.{self.field_name}" if self.table else self.field_name
)
@property
def field_names(self) -> set[str]:
"""Return a set with field names that this query operates on."""
return {self.field_name}
def __init__(self, field_name: str, pattern: P, fast: bool = True):
self.table, _, self.field_name = field_name.rpartition(".")
self.pattern = pattern
self.fast = fast
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
raise NotImplementedError
def clause(self) -> tuple[str | None, Sequence[SQLiteType]]:
if self.fast:
return self.col_clause()
else:
# Matching a flexattr. This is a slow query.
return None, ()
@classmethod
def value_match(cls, pattern: P, value: Any):
"""Determine whether the value matches the pattern."""
raise NotImplementedError
def match(self, obj: Model) -> bool:
return self.value_match(self.pattern, obj.get(self.field_name))
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}({self.field_name!r}, {self.pattern!r}, "
f"fast={self.fast})"
)
def __eq__(self, other) -> bool:
return (
super().__eq__(other)
and self.field_name == other.field_name
and self.pattern == other.pattern
)
def __hash__(self) -> int:
return hash((self.field_name, hash(self.pattern)))
class MatchQuery(FieldQuery[AnySQLiteType]):
"""A query that looks for exact matches in an Model field."""
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
return f"{self.field} = ?", [self.pattern]
@classmethod
def value_match(cls, pattern: AnySQLiteType, value: Any) -> bool:
return pattern == value
class NoneQuery(FieldQuery[None]):
"""A query that checks whether a field is null."""
def __init__(self, field, fast: bool = True):
super().__init__(field, None, fast)
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
return f"{self.field} IS NULL", ()
def match(self, obj: Model) -> bool:
return obj.get(self.field_name) is None
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.field_name!r}, {self.fast})"
class StringFieldQuery(FieldQuery[P]):
"""A FieldQuery that converts values to strings before matching
them.
"""
@classmethod
def value_match(cls, pattern: P, value: Any):
"""Determine whether the value matches the pattern. The value
may have any type.
"""
return cls.string_match(pattern, util.as_string(value))
@classmethod
def string_match(
cls,
pattern: P,
value: str,
) -> bool:
"""Determine whether the value matches the pattern. Both
arguments are strings. Subclasses implement this method.
"""
raise NotImplementedError
class StringQuery(StringFieldQuery[str]):
"""A query that matches a whole string in a specific Model field."""
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
search = (
self.pattern.replace("\\", "\\\\")
.replace("%", "\\%")
.replace("_", "\\_")
)
clause = f"{self.field} like ? escape '\\'"
subvals = [search]
return clause, subvals
@classmethod
def string_match(cls, pattern: str, value: str) -> bool:
return pattern.lower() == value.lower()
class SubstringQuery(StringFieldQuery[str]):
"""A query that matches a substring in a specific Model field."""
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
pattern = (
self.pattern.replace("\\", "\\\\")
.replace("%", "\\%")
.replace("_", "\\_")
)
search = f"%{pattern}%"
clause = f"{self.field} like ? escape '\\'"
subvals = [search]
return clause, subvals
@classmethod
def string_match(cls, pattern: str, value: str) -> bool:
return pattern.lower() in value.lower()
class PathQuery(FieldQuery[bytes]):
"""A query that matches all items under a given path.
Matching can either be case-insensitive or case-sensitive. By
default, the behavior depends on the OS: case-insensitive on Windows
and case-sensitive otherwise.
"""
def __init__(self, field: str, pattern: bytes, fast: bool = True) -> None:
"""Create a path query.
`pattern` must be a path, either to a file or a directory.
"""
path = util.normpath(pattern)
# Case sensitivity depends on the filesystem that the query path is located on.
self.case_sensitive = util.case_sensitive(path)
# Use a normalized-case pattern for case-insensitive matches.
if not self.case_sensitive:
# We need to lowercase the entire path, not just the pattern.
# In particular, on Windows, the drive letter is otherwise not
# lowercased.
# This also ensures that the `match()` method below and the SQL
# from `col_clause()` do the same thing.
path = path.lower()
super().__init__(field, path, fast)
@cached_property
def dir_path(self) -> bytes:
return os.path.join(self.pattern, b"")
@staticmethod
def is_path_query(query_part: str) -> bool:
"""Try to guess whether a unicode query part is a path query.
The path query must
1. precede the colon in the query, if a colon is present
2. contain either ``os.sep`` or ``os.altsep`` (Windows)
3. exist on the filesystem.
"""
query_part = query_part.split(":")[0]
return (
# make sure the query part contains a path separator
bool(set(query_part) & {os.sep, os.altsep})
and os.path.exists(util.normpath(query_part))
)
def match(self, obj: Model) -> bool:
"""Check whether a model object's path matches this query.
Performs either an exact match against the pattern or checks if the path
starts with the given directory path. Case sensitivity depends on the object's
filesystem as determined during initialization.
"""
path = obj.path if self.case_sensitive else obj.path.lower()
return (path == self.pattern) or path.startswith(self.dir_path)
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
"""Generate an SQL clause that implements path matching in the database.
Returns a tuple of SQL clause string and parameter values list that matches
paths either exactly or by directory prefix. Handles case sensitivity
appropriately using BYTELOWER for case-insensitive matches.
"""
if self.case_sensitive:
left, right = self.field, "?"
else:
left, right = f"BYTELOWER({self.field})", "BYTELOWER(?)"
return f"({left} = {right}) || (substr({left}, 1, ?) = {right})", [
BLOB_TYPE(self.pattern),
len(dir_blob := BLOB_TYPE(self.dir_path)),
dir_blob,
]
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}({self.field!r}, {self.pattern!r}, "
f"fast={self.fast}, case_sensitive={self.case_sensitive})"
)
class RegexpQuery(StringFieldQuery[Pattern[str]]):
"""A query that matches a regular expression in a specific Model field.
Raises InvalidQueryError when the pattern is not a valid regular
expression.
"""
def __init__(self, field_name: str, pattern: str, fast: bool = True):
pattern = self._normalize(pattern)
try:
pattern_re = re.compile(pattern)
except re.error as exc:
# Invalid regular expression.
raise InvalidQueryArgumentValueError(
pattern, "a regular expression", format(exc)
)
super().__init__(field_name, pattern_re, fast)
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
return f" regexp({self.field}, ?)", [self.pattern.pattern]
@staticmethod
def _normalize(s: str) -> str:
"""Normalize a Unicode string's representation (used on both
patterns and matched values).
"""
return unicodedata.normalize("NFC", s)
@classmethod
def string_match(cls, pattern: Pattern[str], value: str) -> bool:
return pattern.search(cls._normalize(value)) is not None
class BooleanQuery(MatchQuery[int]):
"""Matches a boolean field. Pattern should either be a boolean or a
string reflecting a boolean.
"""
def __init__(
self,
field_name: str,
pattern: bool,
fast: bool = True,
):
if isinstance(pattern, str):
pattern = util.str2bool(pattern)
pattern_int = int(pattern)
super().__init__(field_name, pattern_int, fast)
class NumericQuery(FieldQuery[str]):
"""Matches numeric fields. A syntax using Ruby-style range ellipses
(``..``) lets users specify one- or two-sided ranges. For example,
``year:2001..`` finds music released since the turn of the century.
Raises InvalidQueryError when the pattern does not represent an int or
a float.
"""
def _convert(self, s: str) -> float | int | None:
"""Convert a string to a numeric type (float or int).
Return None if `s` is empty.
Raise an InvalidQueryError if the string cannot be converted.
"""
# This is really just a bit of fun premature optimization.
if not s:
return None
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
raise InvalidQueryArgumentValueError(s, "an int or a float")
def __init__(self, field_name: str, pattern: str, fast: bool = True):
super().__init__(field_name, pattern, fast)
parts = pattern.split("..", 1)
if len(parts) == 1:
# No range.
self.point = self._convert(parts[0])
self.rangemin = None
self.rangemax = None
else:
# One- or two-sided range.
self.point = None
self.rangemin = self._convert(parts[0])
self.rangemax = self._convert(parts[1])
def match(self, obj: Model) -> bool:
if self.field_name not in obj:
return False
value = obj[self.field_name]
if isinstance(value, str):
value = self._convert(value)
if self.point is not None:
return value == self.point
else:
if self.rangemin is not None and value < self.rangemin:
return False
if self.rangemax is not None and value > self.rangemax:
return False
return True
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
if self.point is not None:
return f"{self.field}=?", (self.point,)
else:
if self.rangemin is not None and self.rangemax is not None:
return (
f"{self.field} >= ? AND {self.field} <= ?",
(self.rangemin, self.rangemax),
)
elif self.rangemin is not None:
return f"{self.field} >= ?", (self.rangemin,)
elif self.rangemax is not None:
return f"{self.field} <= ?", (self.rangemax,)
else:
return "1", ()
class InQuery(Generic[AnySQLiteType], FieldQuery[Sequence[AnySQLiteType]]):
"""Query which matches values in the given set."""
field_name: str
pattern: Sequence[AnySQLiteType]
fast: bool = True
@property
def subvals(self) -> Sequence[SQLiteType]:
return self.pattern
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
placeholders = ", ".join(["?"] * len(self.subvals))
return f"{self.field_name} IN ({placeholders})", self.subvals
@classmethod
def value_match(
cls, pattern: Sequence[AnySQLiteType], value: AnySQLiteType
) -> bool:
return value in pattern
class CollectionQuery(Query):
"""An abstract query class that aggregates other queries. Can be
indexed like a list to access the sub-queries.
"""
@property
def field_names(self) -> set[str]:
"""Return a set with field names that this query operates on."""
return reduce(or_, (sq.field_names for sq in self.subqueries))
def __init__(self, subqueries: Sequence[Query] = ()):
self.subqueries = subqueries
# Act like a sequence.
def __len__(self) -> int:
return len(self.subqueries)
def __getitem__(self, key):
return self.subqueries[key]
def __iter__(self) -> Iterator[Query]:
return iter(self.subqueries)
def __contains__(self, subq) -> bool:
return subq in self.subqueries
def clause_with_joiner(
self,
joiner: str,
) -> tuple[str | None, Sequence[SQLiteType]]:
"""Return a clause created by joining together the clauses of
all subqueries with the string joiner (padded by spaces).
"""
clause_parts = []
subvals: list[SQLiteType] = []
for subq in self.subqueries:
subq_clause, subq_subvals = subq.clause()
if not subq_clause:
# Fall back to slow query.
return None, ()
clause_parts.append(f"({subq_clause})")
subvals += subq_subvals
clause = f" {joiner} ".join(clause_parts)
return clause, subvals
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.subqueries!r})"
def __eq__(self, other) -> bool:
return super().__eq__(other) and self.subqueries == other.subqueries
def __hash__(self) -> int:
"""Since subqueries are mutable, this object should not be hashable.
However, for convenience, it is hashable anyway.
"""
return reduce(mul, map(hash, self.subqueries), 1)
class MutableCollectionQuery(CollectionQuery):
"""A collection query whose subqueries may be modified after the
query is initialized.
"""
subqueries: MutableSequence[Query]
def __setitem__(self, key, value):
self.subqueries[key] = value
def __delitem__(self, key):
del self.subqueries[key]
class AndQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self) -> tuple[str | None, Sequence[SQLiteType]]:
return self.clause_with_joiner("and")
def match(self, obj: Model) -> bool:
return all(q.match(obj) for q in self.subqueries)
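# A sketch of clause generation for a conjunction (values illustrative):
#
#     AndQuery([MatchQuery("year", 2001), SubstringQuery("title", "love")]).clause()
#     # -> ("(year = ?) and (title like ? escape '\\')", [2001, "%love%"])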
class OrQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self) -> tuple[str | None, Sequence[SQLiteType]]:
return self.clause_with_joiner("or")
def match(self, obj: Model) -> bool:
return any(q.match(obj) for q in self.subqueries)
class NotQuery(Query):
"""A query that matches the negation of its `subquery`, as a shortcut for
performing `not(subquery)` without using regular expressions.
"""
@property
def field_names(self) -> set[str]:
"""Return a set with field names that this query operates on."""
return self.subquery.field_names
def __init__(self, subquery):
self.subquery = subquery
def clause(self) -> tuple[str | None, Sequence[SQLiteType]]:
clause, subvals = self.subquery.clause()
if clause:
return f"not ({clause})", subvals
else:
# If there is no clause, there is nothing to negate. All the logic
# is handled by match() for slow queries.
return clause, subvals
def match(self, obj: Model) -> bool:
return not self.subquery.match(obj)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.subquery!r})"
def __eq__(self, other) -> bool:
return super().__eq__(other) and self.subquery == other.subquery
def __hash__(self) -> int:
return hash(("not", hash(self.subquery)))
class TrueQuery(Query):
"""A query that always matches."""
def clause(self) -> tuple[str, Sequence[SQLiteType]]:
return "1", ()
def match(self, obj: Model) -> bool:
return True
class FalseQuery(Query):
"""A query that never matches."""
def clause(self) -> tuple[str, Sequence[SQLiteType]]:
return "0", ()
def match(self, obj: Model) -> bool:
return False
# Time/date queries.
def _parse_periods(pattern: str) -> tuple[Period | None, Period | None]:
"""Parse a string containing two dates separated by two dots (..).
Return a pair of `Period` objects.
"""
parts = pattern.split("..", 1)
if len(parts) == 1:
instant = Period.parse(parts[0])
return (instant, instant)
else:
start = Period.parse(parts[0])
end = Period.parse(parts[1])
return (start, end)
class Period:
"""A period of time given by a date, time and precision.
Example: 2014-01-01 10:50:30 with precision 'month' represents all
instants of time during January 2014.
"""
precisions = ("year", "month", "day", "hour", "minute", "second")
date_formats = (
("%Y",), # year
("%Y-%m",), # month
("%Y-%m-%d",), # day
("%Y-%m-%dT%H", "%Y-%m-%d %H"), # hour
("%Y-%m-%dT%H:%M", "%Y-%m-%d %H:%M"), # minute
("%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"), # second
)
relative_units = {"y": 365, "m": 30, "w": 7, "d": 1}
relative_re = "(?P[+|-]?)(?P[0-9]+)(?P[y|m|w|d])"
def __init__(self, date: datetime, precision: str):
"""Create a period with the given date (a `datetime` object) and
precision (a string, one of "year", "month", "day", "hour", "minute",
or "second").
"""
if precision not in Period.precisions:
raise ValueError(f"Invalid precision {precision}")
self.date = date
self.precision = precision
@classmethod
def parse(cls: type[Period], string: str) -> Period | None:
"""Parse a date and return a `Period` object or `None` if the
string is empty, or raise an InvalidQueryArgumentValueError if
the string cannot be parsed to a date.
The date may be absolute or relative. Absolute dates look like
`YYYY`, or `YYYY-MM-DD`, or `YYYY-MM-DD HH:MM:SS`, etc. Relative
dates have three parts:
- Optionally, a ``+`` or ``-`` sign indicating the future or the
past. The default is the future.
- A number: how much to add or subtract.
- A letter indicating the unit: days, weeks, months or years
(``d``, ``w``, ``m`` or ``y``). A "month" is exactly 30 days
and a "year" is exactly 365 days.
"""
def find_date_and_format(
string: str,
) -> tuple[None, None] | tuple[datetime, int]:
for ord, format in enumerate(cls.date_formats):
for format_option in format:
try:
date = datetime.strptime(string, format_option)
return date, ord
except ValueError:
# Parsing failed.
pass
return (None, None)
if not string:
return None
date: datetime | None
# Check for a relative date.
match_dq = re.match(cls.relative_re, string)
if match_dq:
sign = match_dq.group("sign")
quantity = match_dq.group("quantity")
timespan = match_dq.group("timespan")
# Add or subtract the given amount of time from the current
# date.
multiplier = -1 if sign == "-" else 1
days = cls.relative_units[timespan]
date = (
datetime.now()
+ timedelta(days=int(quantity) * days) * multiplier
)
return cls(date, cls.precisions[5])
# Check for an absolute date.
date, ordinal = find_date_and_format(string)
if date is None or ordinal is None:
raise InvalidQueryArgumentValueError(
string, "a valid date/time string"
)
precision = cls.precisions[ordinal]
return cls(date, precision)
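# Hedged examples of the parsing rules above:
#
#     Period.parse("2014")        # absolute, year precision
#     Period.parse("2014-01-01")  # absolute, day precision
#     Period.parse("-2w")         # relative: two weeks ago, second precision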
def open_right_endpoint(self) -> datetime:
"""Based on the precision, convert the period to a precise
`datetime` for use as a right endpoint in a right-open interval.
"""
precision = self.precision
date = self.date
if "year" == self.precision:
return date.replace(year=date.year + 1, month=1)
elif "month" == precision:
if date.month < 12:
return date.replace(month=date.month + 1)
else:
return date.replace(year=date.year + 1, month=1)
elif "day" == precision:
return date + timedelta(days=1)
elif "hour" == precision:
return date + timedelta(hours=1)
elif "minute" == precision:
return date + timedelta(minutes=1)
elif "second" == precision:
return date + timedelta(seconds=1)
else:
raise ValueError(f"unhandled precision {precision}")
class DateInterval:
"""A closed-open interval of dates.
A left endpoint of None means since the beginning of time.
A right endpoint of None means towards infinity.
"""
def __init__(self, start: datetime | None, end: datetime | None):
if start is not None and end is not None and not start < end:
raise ValueError(f"start date {start} is not before end date {end}")
self.start = start
self.end = end
@classmethod
def from_periods(
cls,
start: Period | None,
end: Period | None,
) -> DateInterval:
"""Create an interval with two Periods as the endpoints."""
end_date = end.open_right_endpoint() if end is not None else None
start_date = start.date if start is not None else None
return cls(start_date, end_date)
def contains(self, date: datetime) -> bool:
if self.start is not None and date < self.start:
return False
if self.end is not None and date >= self.end:
return False
return True
def __str__(self) -> str:
return f"[{self.start}, {self.end})"
class DateQuery(FieldQuery[str]):
"""Matches date fields stored as seconds since Unix epoch time.
Dates can be specified as ``year-month-day`` strings where only year
is mandatory.
The value of a date field can be matched against a date interval by
using an ellipsis interval syntax similar to that of NumericQuery.
"""
def __init__(self, field_name: str, pattern: str, fast: bool = True):
super().__init__(field_name, pattern, fast)
start, end = _parse_periods(pattern)
self.interval = DateInterval.from_periods(start, end)
def match(self, obj: Model) -> bool:
if self.field_name not in obj:
return False
timestamp = float(obj[self.field_name])
date = datetime.fromtimestamp(timestamp)
return self.interval.contains(date)
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
clause_parts = []
subvals = []
# Convert the `datetime` objects to an integer number of seconds since
# the (local) Unix epoch using `datetime.timestamp()`.
if self.interval.start:
clause_parts.append(f"{self.field} >= ?")
subvals.append(int(self.interval.start.timestamp()))
if self.interval.end:
clause_parts.append(f"{self.field} < ?")
subvals.append(int(self.interval.end.timestamp()))
if clause_parts:
# One- or two-sided interval.
clause = " AND ".join(clause_parts)
else:
# Match any date.
clause = "1"
return clause, subvals
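# Hedged example: DateQuery("added", "2023..2024-06") matches timestamps in
# the right-open interval [2023-01-01, 2024-07-01).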
class DurationQuery(NumericQuery):
"""NumericQuery that allow human-friendly (M:SS) time interval formats.
Converts the range(s) to a float value, and delegates on NumericQuery.
Raises InvalidQueryError when the pattern does not represent an int, float
or M:SS time interval.
"""
def _convert(self, s: str) -> float | None:
"""Convert a M:SS or numeric string to a float.
Return None if `s` is empty.
Raise an InvalidQueryError if the string cannot be converted.
"""
if not s:
return None
try:
return raw_seconds_short(s)
except ValueError:
try:
return float(s)
except ValueError:
raise InvalidQueryArgumentValueError(
s, "a M:SS string or a float"
)
class SingletonQuery(FieldQuery[str]):
"""This query is responsible for the 'singleton' lookup.
It is based on the FieldQuery and constructs a SQL clause
'album_id is NULL' which yields the same result as the previous filter
in Python but is more performant since it's done in SQL.
Using util.str2bool ensures that lookups like singleton:true, singleton:1
and singleton:false, singleton:0 are handled consistently.
"""
def __new__(cls, field: str, value: str, *args, **kwargs):
query = NoneQuery("album_id")
if util.str2bool(value):
return query
return NotQuery(query)
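# Sketch of the rewriting performed above:
#
#     SingletonQuery("singleton", "true")  # behaves as NoneQuery("album_id")
#     SingletonQuery("singleton", "0")     # behaves as NotQuery(NoneQuery("album_id"))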
# Sorting.
class Sort:
"""An abstract class representing a sort operation for a query into
the database.
"""
def order_clause(self) -> str | None:
"""Generates a SQL fragment to be used in a ORDER BY clause, or
None if no fragment is used (i.e., this is a slow sort).
"""
return None
def sort(self, items: list[AnyModel]) -> list[AnyModel]:
"""Sort the list of objects and return a list."""
return sorted(items)
def is_slow(self) -> bool:
"""Indicate whether this query is *slow*, meaning that it cannot
be executed in SQL and must be executed in Python.
"""
return False
def __hash__(self) -> int:
return 0
def __eq__(self, other) -> bool:
return type(self) is type(other)
def __repr__(self):
return f"{self.__class__.__name__}()"
class MultipleSort(Sort):
"""Sort that encapsulates multiple sub-sorts."""
def __init__(self, sorts: list[Sort] | None = None):
self.sorts = sorts or []
def add_sort(self, sort: Sort):
self.sorts.append(sort)
def order_clause(self) -> str:
"""Return the list SQL clauses for those sub-sorts for which we can be
(at least partially) fast.
A contiguous suffix of fast (SQL-capable) sub-sorts are
executable in SQL. The remaining, even if they are fast
independently, must be executed slowly.
"""
order_strings = []
for sort in reversed(self.sorts):
clause = sort.order_clause()
if clause is None:
break
order_strings.append(clause)
order_strings.reverse()
return ", ".join(order_strings)
def is_slow(self) -> bool:
for sort in self.sorts:
if sort.is_slow():
return True
return False
def sort(self, items):
slow_sorts = []
switch_slow = False
for sort in reversed(self.sorts):
if switch_slow:
slow_sorts.append(sort)
elif sort.order_clause() is None:
switch_slow = True
slow_sorts.append(sort)
else:
pass
for sort in slow_sorts:
items = sort.sort(items)
return items
def __repr__(self):
return f"{self.__class__.__name__}({self.sorts!r})"
def __hash__(self):
return hash(tuple(self.sorts))
def __eq__(self, other):
return super().__eq__(other) and self.sorts == other.sorts
class FieldSort(Sort):
"""An abstract sort criterion that orders by a specific field (of
any kind).
"""
def __init__(
self,
field: str,
ascending: bool = True,
case_insensitive: bool = True,
):
self.field = field
self.ascending = ascending
self.case_insensitive = case_insensitive
def sort(self, objs: list[AnyModel]) -> list[AnyModel]:
# TODO: Conversion and null-detection here. In Python 3,
# comparisons with None fail. We should also support flexible
# attributes with different types without falling over.
def key(obj: Model) -> Any:
field_val = obj.get(self.field, None)
if field_val is None:
if _type := obj._types.get(self.field):
# If the field is typed, use its null value.
field_val = _type.null
else:
# If not, fall back to using an empty string.
field_val = ""
if self.case_insensitive and isinstance(field_val, str):
field_val = field_val.lower()
return field_val
return sorted(objs, key=key, reverse=not self.ascending)
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}"
f"({self.field!r}, ascending={self.ascending!r})"
)
def __hash__(self) -> int:
return hash((self.field, self.ascending))
def __eq__(self, other) -> bool:
return (
super().__eq__(other)
and self.field == other.field
and self.ascending == other.ascending
)
class FixedFieldSort(FieldSort):
"""Sort object to sort on a fixed field."""
def order_clause(self) -> str:
order = "ASC" if self.ascending else "DESC"
if self.case_insensitive:
field = (
"(CASE "
f"WHEN TYPEOF({self.field})='text' THEN LOWER({self.field}) "
f"WHEN TYPEOF({self.field})='blob' THEN LOWER({self.field}) "
f"ELSE {self.field} END)"
)
else:
field = self.field
return f"{field} {order}"
class SlowFieldSort(FieldSort):
"""A sort criterion by some model field other than a fixed field:
i.e., a computed or flexible field.
"""
def is_slow(self) -> bool:
return True
class NullSort(Sort):
"""No sorting. Leave results unsorted."""
def sort(self, items: list[AnyModel]) -> list[AnyModel]:
return items
def __nonzero__(self) -> bool:
return self.__bool__()
def __bool__(self) -> bool:
return False
def __eq__(self, other) -> bool:
return type(self) is type(other) or other is None
def __hash__(self) -> int:
return 0
class SmartArtistSort(FieldSort):
"""Sort by artist (either album artist or track artist),
prioritizing the sort field over the raw field.
"""
def order_clause(self):
order = "ASC" if self.ascending else "DESC"
collate = "COLLATE NOCASE" if self.case_insensitive else ""
field = self.field
return f"COALESCE(NULLIF({field}_sort, ''), {field}) {collate} {order}"
def sort(self, objs: list[AnyModel]) -> list[AnyModel]:
def key(o):
val = o[f"{self.field}_sort"] or o[self.field]
return val.lower() if self.case_insensitive else val
return sorted(objs, key=key, reverse=not self.ascending)
beetbox-beets-c1877b7/beets/dbcore/queryparse.py 0000664 0000000 0000000 00000023304 15073551743 0021734 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Parsing of strings into DBCore queries."""
from __future__ import annotations
import itertools
import re
from typing import TYPE_CHECKING
from . import query
if TYPE_CHECKING:
from collections.abc import Collection, Sequence
from ..library import LibModel
from .query import FieldQueryType, Sort
Prefixes = dict[str, FieldQueryType]
PARSE_QUERY_PART_REGEX = re.compile(
# Non-capturing optional segment for the keyword.
r"(-|\^)?" # Negation prefixes.
r"(?:"
r"(\S+?)" # The field key.
r"(? tuple[str | None, str, FieldQueryType, bool]:
"""Parse a single *query part*, which is a chunk of a complete query
string representing a single criterion.
A query part is a string consisting of:
- A *pattern*: the value to look for.
- Optionally, a *field name* preceding the pattern, separated by a
colon. So in `foo:bar`, `foo` is the field name and `bar` is the
pattern.
- Optionally, a *query prefix* just before the pattern (and after the
optional colon) indicating the type of query that should be used. For
example, in `~foo`, `~` might be a prefix. (The set of prefixes to
look for is given in the `prefixes` parameter.)
- Optionally, a negation indicator, `-` or `^`, at the very beginning.
Both prefixes and the separating `:` character may be escaped with a
backslash to avoid their normal meaning.
The function returns a tuple consisting of:
- The field name: a string or None if it's not present.
- The pattern, a string.
- The query class to use, which inherits from the base
:class:`Query` type.
- A negation flag, a bool.
The three optional parameters determine which query class is used (i.e.,
the third return value). They are:
- `query_classes`, which maps field names to query classes. These
are used when no explicit prefix is present.
- `prefixes`, which maps prefix strings to query classes.
- `default_class`, the fallback when neither the field nor a prefix
indicates a query class.
So the precedence for determining which query class to return is:
prefix, followed by field, and finally the default.
For example, assuming the `:` prefix is used for `RegexpQuery`:
- `'stapler'` -> `(None, 'stapler', SubstringQuery, False)`
- `'color:red'` -> `('color', 'red', SubstringQuery, False)`
- `':^Quiet'` -> `(None, '^Quiet', RegexpQuery, False)`, because
the `^` follows the `:`
- `'color::b..e'` -> `('color', 'b..e', RegexpQuery, False)`
- `'-color:red'` -> `('color', 'red', SubstringQuery, True)`
"""
# Apply the regular expression and extract the components.
part = part.strip()
match = PARSE_QUERY_PART_REGEX.match(part)
assert match # Regex should always match
negate = bool(match.group(1))
key = match.group(2)
term = match.group(3).replace("\\:", ":")
# Check whether there's a prefix in the query and use the
# corresponding query type.
for pre, query_class in prefixes.items():
if term.startswith(pre):
return key, term[len(pre) :], query_class, negate
# No matching prefix, so use either the query class determined by
# the field or the default as a fallback.
query_class = query_classes.get(key, default_class)
return key, term, query_class, negate
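# A minimal usage sketch (the ":" prefix registration is hypothetical; this
# module defines no prefixes itself):
#
#     key, term, query_class, negate = parse_query_part(
#         "-color::b..e", prefixes={":": query.RegexpQuery}
#     )
#     # -> ("color", "b..e", query.RegexpQuery, True)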
def construct_query_part(
model_cls: type[LibModel],
prefixes: Prefixes,
query_part: str,
) -> query.Query:
"""Parse a *query part* string and return a :class:`Query` object.
:param model_cls: The :class:`Model` class that this is a query for.
This is used to determine the appropriate query types for the
model's fields.
:param prefixes: A map from prefix strings to :class:`Query` types.
:param query_part: The string to parse.
See the documentation for `parse_query_part` for more information on
query part syntax.
"""
# A shortcut for empty query parts.
if not query_part:
return query.TrueQuery()
out_query: query.Query
# Use `model_cls` to build up a map from field (or query) names to
# `Query` classes.
query_classes: dict[str, FieldQueryType] = {}
for k, t in itertools.chain(
model_cls._fields.items(), model_cls._types.items()
):
query_classes[k] = t.query
query_classes.update(model_cls._queries) # Non-field queries.
# Parse the string.
key, pattern, query_class, negate = parse_query_part(
query_part, query_classes, prefixes
)
if key is None:
# If there's no key (field name) specified, this is a "match anything"
# query.
out_query = model_cls.any_field_query(pattern, query_class)
else:
# Field queries get constructed according to the name of the field
# they are querying.
out_query = model_cls.field_query(key.lower(), pattern, query_class)
# Apply negation.
if negate:
return query.NotQuery(out_query)
else:
return out_query
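# For example (a sketch; `Item` stands in for any concrete LibModel):
#
#     q = construct_query_part(Item, {}, "-artist:beatles")
#
# would parse the key "artist", look up that field's query class (a
# substring query for string fields), and wrap the result in a NotQuery
# because of the leading "-".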
# TYPING ERROR
def query_from_strings(
query_cls: type[query.CollectionQuery],
model_cls: type[LibModel],
prefixes: Prefixes,
query_parts: Collection[str],
) -> query.Query:
"""Creates a collection query of type `query_cls` from a list of
strings in the format used by parse_query_part. `model_cls`
determines how queries are constructed from strings.
"""
subqueries = []
for part in query_parts:
subqueries.append(construct_query_part(model_cls, prefixes, part))
if not subqueries: # No terms in query.
subqueries = [query.TrueQuery()]
return query_cls(subqueries)
def construct_sort_part(
model_cls: type[LibModel],
part: str,
case_insensitive: bool = True,
) -> Sort:
"""Create a `Sort` from a single string criterion.
`model_cls` is the `Model` being queried. `part` is a single string
ending in ``+`` or ``-`` indicating the sort direction.
`case_insensitive` indicates whether the sort should ignore case.
"""
assert part, "part must be a field name and + or -"
field = part[:-1]
assert field, "field is missing"
direction = part[-1]
assert direction in ("+", "-"), "part must end with + or -"
is_ascending = direction == "+"
if sort_cls := model_cls._sorts.get(field):
if issubclass(sort_cls, query.SmartArtistSort):
field = "albumartist" if model_cls.__name__ == "Album" else "artist"
elif field in model_cls._fields:
sort_cls = query.FixedFieldSort
else:
# Flexible or computed.
sort_cls = query.SlowFieldSort
return sort_cls(field, is_ascending, case_insensitive)
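# Example (illustrative, assuming `Item` has a fixed "year" field but no
# fixed "rating" field): construct_sort_part(Item, "year+") returns
# FixedFieldSort("year", True, True), while construct_sort_part(Item,
# "rating-") falls through to SlowFieldSort("rating", False, True).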
def sort_from_strings(
model_cls: type[LibModel],
sort_parts: Sequence[str],
case_insensitive: bool = True,
) -> Sort:
"""Create a `Sort` from a list of sort criteria (strings)."""
if not sort_parts:
return query.NullSort()
elif len(sort_parts) == 1:
return construct_sort_part(model_cls, sort_parts[0], case_insensitive)
else:
sort = query.MultipleSort()
for part in sort_parts:
sort.add_sort(
construct_sort_part(model_cls, part, case_insensitive)
)
return sort
def parse_sorted_query(
model_cls: type[LibModel],
parts: list[str],
prefixes: Prefixes = {},
case_insensitive: bool = True,
) -> tuple[query.Query, Sort]:
"""Given a list of strings, create the `Query` and `Sort` that they
represent.
"""
# Separate query token and sort token.
query_parts = []
sort_parts = []
# Split up query in to comma-separated subqueries, each representing
# an AndQuery, which need to be joined together in one OrQuery
subquery_parts = []
for part in parts + [","]:
if part.endswith(","):
# Ensure we can catch "foo, bar" as well as "foo , bar"
last_subquery_part = part[:-1]
if last_subquery_part:
subquery_parts.append(last_subquery_part)
# Parse the subquery in to a single AndQuery
# TODO: Avoid needlessly wrapping AndQueries containing 1 subquery?
query_parts.append(
query_from_strings(
query.AndQuery, model_cls, prefixes, subquery_parts
)
)
del subquery_parts[:]
else:
# Sort parts (1) end in + or -, (2) don't have a field, and
# (3) consist of more than just the + or -.
if part.endswith(("+", "-")) and ":" not in part and len(part) > 1:
sort_parts.append(part)
else:
subquery_parts.append(part)
# Avoid needlessly wrapping single statements in an OR
q = query.OrQuery(query_parts) if len(query_parts) > 1 else query_parts[0]
s = sort_from_strings(model_cls, sort_parts, case_insensitive)
return q, s
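# A worked example (field names assumed): for parts like
# ["artist:beatles", ",", "artist:stones", "year-"], the comma splits the
# terms into two AndQuery groups joined in an OrQuery, and "year-" is
# recognized as a sort token, so the call returns roughly
# (OrQuery([AndQuery([...]), AndQuery([...])]),
#  FixedFieldSort("year", False, True)).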
beetbox-beets-c1877b7/beets/dbcore/types.py 0000664 0000000 0000000 00000030753 15073551743 0020706 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Representation of type information for DBCore model fields."""
from __future__ import annotations
import re
import time
import typing
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast
import beets
from beets import util
from beets.util.units import human_seconds_short, raw_seconds_short
from . import query
SQLiteType = query.SQLiteType
BLOB_TYPE = query.BLOB_TYPE
class ModelType(typing.Protocol):
"""Protocol that specifies the required constructor for model types,
i.e. a function that takes any argument and attempts to parse it to the
given type.
"""
def __init__(self, value: Any = None): ...
# Generic type variables, used for the value type T and null type N (if
# nullable, else T and N are set to the same type for the concrete subclasses
# of Type).
if TYPE_CHECKING:
N = TypeVar("N", default=Any)
T = TypeVar("T", bound=ModelType, default=Any)
else:
N = TypeVar("N")
T = TypeVar("T", bound=ModelType)
class Type(ABC, Generic[T, N]):
"""An object encapsulating the type of a model field. Includes
information about how to store, query, format, and parse a given
field.
"""
sql: str = "TEXT"
"""The SQLite column type for the value.
"""
query: query.FieldQueryType = query.SubstringQuery
"""The `Query` subclass to be used when querying the field.
"""
model_type: type[T]
"""The Python type that is used to represent the value in the model.
The model is guaranteed to return a value of this type if the field
is accessed. To this end, the constructor is used by the `normalize`
and `from_sql` methods and the `default` property.
"""
@property
def null(self) -> N:
"""The value to be exposed when the underlying value is None."""
# Note that this default implementation only makes sense for T = N.
# It would be better to implement `null()` only in subclasses, or
# have a field null_type similar to `model_type` and use that here.
return cast(N, self.model_type())
def format(self, value: N | T) -> str:
"""Given a value of this type, produce a Unicode string
representing the value. This is used in template evaluation.
"""
if value is None:
value = self.null
# `self.null` might be `None`
if value is None:
return ""
elif isinstance(value, bytes):
return value.decode("utf-8", "ignore")
else:
return str(value)
def parse(self, string: str) -> T | N:
"""Parse a (possibly human-written) string and return the
indicated value of this type.
"""
try:
return self.model_type(string)
except ValueError:
return self.null
def normalize(self, value: Any) -> T | N:
"""Given a value that will be assigned into a field of this
type, normalize the value to have the appropriate type. This
base implementation only reinterprets `None`.
"""
# TYPING ERROR
if value is None:
return self.null
else:
# TODO This should eventually be replaced by
# `self.model_type(value)`
return cast(T, value)
def from_sql(self, sql_value: SQLiteType) -> T | N:
"""Receives the value stored in the SQL backend and return the
value to be stored in the model.
For fixed fields the type of `value` is determined by the column
type affinity given in the `sql` property and the SQL to Python
mapping of the database adapter. For more information see:
https://www.sqlite.org/datatype3.html
https://docs.python.org/3/library/sqlite3.html#sqlite-and-python-types
Flexible fields have the type affinity `TEXT`. This means the
`sql_value` is either a `memoryview` or a `str` object and the
method must handle both cases.
"""
if isinstance(sql_value, memoryview):
sql_value = bytes(sql_value).decode("utf-8", "ignore")
if isinstance(sql_value, str):
return self.parse(sql_value)
else:
return self.normalize(sql_value)
def to_sql(self, model_value: Any) -> SQLiteType:
"""Convert a value as stored in the model object to a value used
by the database adapter.
"""
return model_value
# Reusable types.
class Default(Type[str, None]):
model_type = str
@property
def null(self):
return None
class BaseInteger(Type[int, N]):
"""A basic integer type."""
sql = "INTEGER"
query = query.NumericQuery
model_type = int
def normalize(self, value: Any) -> int | N:
try:
return self.model_type(round(float(value)))
except ValueError:
return self.null
except TypeError:
return self.null
class Integer(BaseInteger[int]):
@property
def null(self) -> int:
return 0
class NullInteger(BaseInteger[None]):
@property
def null(self) -> None:
return None
class BasePaddedInt(BaseInteger[N]):
"""An integer field that is formatted with a given number of digits,
padded with zeroes.
"""
def __init__(self, digits: int):
self.digits = digits
def format(self, value: int | N) -> str:
return f"{value or 0:0{self.digits}d}"
class PaddedInt(BasePaddedInt[int]):
pass
class NullPaddedInt(BasePaddedInt[None]):
"""Same as `PaddedInt`, but does not normalize `None` to `0`."""
@property
def null(self) -> None:
return None
class ScaledInt(Integer):
"""An integer whose formatting operation scales the number by a
constant and adds a suffix. Good for units with large magnitudes.
"""
def __init__(self, unit: int, suffix: str = ""):
self.unit = unit
self.suffix = suffix
def format(self, value: int) -> str:
return f"{(value or 0) // self.unit}{self.suffix}"
class Id(NullInteger):
"""An integer used as the row id or a foreign key in a SQLite table.
This type is nullable: None values are not translated to zero.
"""
@property
def null(self) -> None:
return None
def __init__(self, primary: bool = True):
if primary:
self.sql = "INTEGER PRIMARY KEY"
class BaseFloat(Type[float, N]):
"""A basic floating-point type. The `digits` parameter specifies how
many decimal places to use in the human-readable representation.
"""
sql = "REAL"
query: query.FieldQueryType = query.NumericQuery
model_type = float
def __init__(self, digits: int = 1):
self.digits = digits
def format(self, value: float | N) -> str:
return f"{value or 0:.{self.digits}f}"
class Float(BaseFloat[float]):
"""Floating-point type that normalizes `None` to `0.0`."""
@property
def null(self) -> float:
return 0.0
class NullFloat(BaseFloat[None]):
"""Same as `Float`, but does not normalize `None` to `0.0`."""
@property
def null(self) -> None:
return None
class BaseString(Type[T, N]):
"""A Unicode string type."""
sql = "TEXT"
query = query.SubstringQuery
def normalize(self, value: Any) -> T | N:
if value is None:
return self.null
else:
return self.model_type(value)
class String(BaseString[str, Any]):
"""A Unicode string type."""
model_type = str
class DelimitedString(BaseString[list[str], list[str]]):
"""A list of Unicode strings, represented in-database by a single string
containing delimiter-separated values.
"""
model_type = list[str]
def __init__(self, delimiter: str):
self.delimiter = delimiter
def format(self, value: list[str]):
return self.delimiter.join(value)
def parse(self, string: str):
if not string:
return []
return string.split(self.delimiter)
def to_sql(self, model_value: list[str]):
return self.delimiter.join(model_value)
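# Round-trip sketch (using the shared SEMICOLON_SPACE_DSV instance defined
# at the bottom of this module):
#
#     SEMICOLON_SPACE_DSV.parse("rock; pop")       # -> ["rock", "pop"]
#     SEMICOLON_SPACE_DSV.format(["rock", "pop"])  # -> "rock; pop"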
class Boolean(Type):
"""A boolean type."""
sql = "INTEGER"
query = query.BooleanQuery
model_type = bool
def format(self, value: bool) -> str:
return str(bool(value))
def parse(self, string: str) -> bool:
return util.str2bool(string)
class DateType(Float):
# TODO representation should be `datetime` object
# TODO distinguish between date and time types
query = query.DateQuery
def format(self, value):
return time.strftime(
beets.config["time_format"].as_str(), time.localtime(value or 0)
)
def parse(self, string):
try:
# Try a formatted date string.
return time.mktime(
time.strptime(string, beets.config["time_format"].as_str())
)
except ValueError:
# Fall back to a plain timestamp number.
try:
return float(string)
except ValueError:
return self.null
class BasePathType(Type[bytes, N]):
"""A dbcore type for filesystem paths.
These are represented as `bytes` objects, in keeping with
the Unix filesystem abstraction.
"""
sql = "BLOB"
query = query.PathQuery
model_type = bytes
def parse(self, string: str) -> bytes:
return util.normpath(string)
def normalize(self, value: Any) -> bytes | N:
if isinstance(value, str):
# Paths stored internally as encoded bytes.
return util.bytestring_path(value)
elif isinstance(value, BLOB_TYPE):
# We unwrap buffers to bytes.
return bytes(value)
else:
return value
def from_sql(self, sql_value):
return self.normalize(sql_value)
def to_sql(self, value: bytes) -> BLOB_TYPE:
if isinstance(value, bytes):
value = BLOB_TYPE(value)
return value
class NullPathType(BasePathType[None]):
@property
def null(self) -> None:
return None
def format(self, value: bytes | None) -> str:
return util.displayable_path(value or b"")
class PathType(BasePathType[bytes]):
@property
def null(self) -> bytes:
return b""
def format(self, value: bytes) -> str:
return util.displayable_path(value or b"")
class MusicalKey(String):
"""String representing the musical key of a song.
The standard format is C, Cm, C#, C#m, etc.
"""
ENHARMONIC = {
r"db": "c#",
r"eb": "d#",
r"gb": "f#",
r"ab": "g#",
r"bb": "a#",
}
null = None
def parse(self, key):
key = key.lower()
for flat, sharp in self.ENHARMONIC.items():
key = re.sub(flat, sharp, key)
key = re.sub(r"[\W\s]+minor", "m", key)
key = re.sub(r"[\W\s]+major", "", key)
return key.capitalize()
def normalize(self, key):
if key is None:
return None
else:
return self.parse(key)
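# Example (illustrative): MusicalKey().parse("Db minor") lowercases the
# input, rewrites the enharmonic flat to "c#", collapses " minor" to "m",
# and capitalizes the result, yielding "C#m".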
class DurationType(Float):
"""Human-friendly (M:SS) representation of a time interval."""
query = query.DurationQuery
def format(self, value):
if not beets.config["format_raw_length"].get(bool):
return human_seconds_short(value or 0.0)
else:
return value
def parse(self, string):
try:
# Try to parse a M:SS string into seconds.
return raw_seconds_short(string)
except ValueError:
# Fall back to a plain float.
try:
return float(string)
except ValueError:
return self.null
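# Sketch (assuming the beets.util.units helpers behave as their names
# suggest): DurationType().parse("4:23") returns 263.0 via
# raw_seconds_short, and format(263.0) renders "4:23" again unless the
# format_raw_length option is enabled.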
# Shared instances of common types.
DEFAULT = Default()
INTEGER = Integer()
PRIMARY_ID = Id(True)
FOREIGN_ID = Id(False)
FLOAT = Float()
NULL_FLOAT = NullFloat()
STRING = String()
BOOLEAN = Boolean()
DATE = DateType()
SEMICOLON_SPACE_DSV = DelimitedString(delimiter="; ")
# Will set the proper null char in mediafile
MULTI_VALUE_DSV = DelimitedString(delimiter="\\␀")
beetbox-beets-c1877b7/beets/importer/ 0000775 0000000 0000000 00000000000 15073551743 0017563 5 ustar 00root root 0000000 0000000 beetbox-beets-c1877b7/beets/importer/__init__.py 0000664 0000000 0000000 00000002166 15073551743 0021701 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides the basic, interface-agnostic workflow for importing and
autotagging music files.
"""
from .session import ImportAbortError, ImportSession
from .tasks import (
Action,
ArchiveImportTask,
ImportTask,
SentinelImportTask,
SingletonImportTask,
)
# Note: Stages are not exposed to the public API
__all__ = [
"ImportSession",
"ImportAbortError",
"Action",
"ImportTask",
"ArchiveImportTask",
"SentinelImportTask",
"SingletonImportTask",
]
beetbox-beets-c1877b7/beets/importer/session.py 0000664 0000000 0000000 00000025211 15073551743 0021621 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import annotations
import os
import time
from typing import TYPE_CHECKING, Sequence
from beets import config, dbcore, library, logging, plugins, util
from beets.importer.tasks import Action
from beets.util import displayable_path, normpath, pipeline, syspath
from . import stages as stagefuncs
from .state import ImportState
if TYPE_CHECKING:
from beets.util import PathBytes
from .tasks import ImportTask
QUEUE_SIZE = 128
# Global logger.
log = logging.getLogger("beets")
class ImportAbortError(Exception):
"""Raised when the user aborts the tagging operation."""
pass
class ImportSession:
"""Controls an import action. Subclasses should implement methods to
communicate with the user or otherwise make decisions.
"""
logger: logging.Logger
paths: list[PathBytes]
lib: library.Library
_is_resuming: dict[bytes, bool]
_merged_items: set[PathBytes]
_merged_dirs: set[PathBytes]
def __init__(
self,
lib: library.Library,
loghandler: logging.Handler | None,
paths: Sequence[PathBytes] | None,
query: dbcore.Query | None,
):
"""Create a session.
Parameters
----------
lib : library.Library
The library instance to which items will be imported.
loghandler : logging.Handler or None
A logging handler to use for the session's logger. If None, a
NullHandler will be used.
paths : Sequence[PathBytes] or None
The paths to be imported.
query : dbcore.Query or None
A query to filter items for import.
"""
self.lib = lib
self.logger = self._setup_logging(loghandler)
self.query = query
self._is_resuming = {}
self._merged_items = set()
self._merged_dirs = set()
# Normalize the paths.
self.paths = list(map(normpath, paths or []))
def _setup_logging(self, loghandler: logging.Handler | None):
logger = logging.getLogger(__name__)
logger.propagate = False
if not loghandler:
loghandler = logging.NullHandler()
logger.handlers = [loghandler]
return logger
def set_config(self, config):
"""Set `config` property from global import config and make
implied changes.
"""
# FIXME: Maybe this function should not exist and should instead
# provide "decision wrappers" like "should_resume()", etc.
iconfig = dict(config)
self.config = iconfig
# Incremental and progress are mutually exclusive.
if iconfig["incremental"]:
iconfig["resume"] = False
# When based on a query instead of directories, never
# save progress or try to resume.
if self.query is not None:
iconfig["resume"] = False
iconfig["incremental"] = False
if iconfig["reflink"]:
iconfig["reflink"] = iconfig["reflink"].as_choice(
["auto", True, False]
)
# Copy, move, reflink, link, and hardlink are mutually exclusive.
if iconfig["move"]:
iconfig["copy"] = False
iconfig["link"] = False
iconfig["hardlink"] = False
iconfig["reflink"] = False
elif iconfig["link"]:
iconfig["copy"] = False
iconfig["move"] = False
iconfig["hardlink"] = False
iconfig["reflink"] = False
elif iconfig["hardlink"]:
iconfig["copy"] = False
iconfig["move"] = False
iconfig["link"] = False
iconfig["reflink"] = False
elif iconfig["reflink"]:
iconfig["copy"] = False
iconfig["move"] = False
iconfig["link"] = False
iconfig["hardlink"] = False
# Only delete when copying.
if not iconfig["copy"]:
iconfig["delete"] = False
self.want_resume = config["resume"].as_choice([True, False, "ask"])
def tag_log(self, status, paths: Sequence[PathBytes]):
"""Log a message about a given album to the importer log. The status
should reflect the reason the album couldn't be tagged.
"""
self.logger.info("{} {}", status, displayable_path(paths))
def log_choice(self, task: ImportTask, duplicate=False):
"""Logs the task's current choice if it should be logged. If
``duplicate``, then this is a secondary choice after a duplicate was
detected and a decision was made.
"""
paths = task.paths
if duplicate:
# Duplicate: log all three choices (skip, keep both, and trump).
if task.should_remove_duplicates:
self.tag_log("duplicate-replace", paths)
elif task.choice_flag in (Action.ASIS, Action.APPLY):
self.tag_log("duplicate-keep", paths)
elif task.choice_flag is Action.SKIP:
self.tag_log("duplicate-skip", paths)
else:
# Non-duplicate: log "skip" and "asis" choices.
if task.choice_flag is Action.ASIS:
self.tag_log("asis", paths)
elif task.choice_flag is Action.SKIP:
self.tag_log("skip", paths)
def should_resume(self, path: PathBytes):
raise NotImplementedError
def choose_match(self, task: ImportTask):
raise NotImplementedError
def resolve_duplicate(self, task: ImportTask, found_duplicates):
raise NotImplementedError
def choose_item(self, task: ImportTask):
raise NotImplementedError
def run(self):
"""Run the import task."""
self.logger.info("import started {}", time.asctime())
self.set_config(config["import"])
# Set up the pipeline.
if self.query is None:
stages = [stagefuncs.read_tasks(self)]
else:
stages = [stagefuncs.query_tasks(self)]
# In pretend mode, just log what would otherwise be imported.
if self.config["pretend"]:
stages += [stagefuncs.log_files(self)]
else:
if self.config["group_albums"] and not self.config["singletons"]:
# Split directory tasks into one task for each album.
stages += [stagefuncs.group_albums(self)]
# These stages either talk to the user to get a decision or,
# in the case of a non-autotagged import, just choose to
# import everything as-is. In *both* cases, these stages
# also add the music to the library database, so later
# stages need to read and write data from there.
if self.config["autotag"]:
stages += [
stagefuncs.lookup_candidates(self),
stagefuncs.user_query(self),
]
else:
stages += [stagefuncs.import_asis(self)]
# Plugin stages.
for stage_func in plugins.early_import_stages():
stages.append(stagefuncs.plugin_stage(self, stage_func))
for stage_func in plugins.import_stages():
stages.append(stagefuncs.plugin_stage(self, stage_func))
stages += [stagefuncs.manipulate_files(self)]
pl = pipeline.Pipeline(stages)
# Run the pipeline.
plugins.send("import_begin", session=self)
try:
if config["threaded"]:
pl.run_parallel(QUEUE_SIZE)
else:
pl.run_sequential()
except ImportAbortError:
# User aborted operation. Silently stop.
pass
# Incremental and resumed imports
def already_imported(self, toppath: PathBytes, paths: Sequence[PathBytes]):
"""Returns true if the files belonging to this task have already
been imported in a previous session.
"""
if self.is_resuming(toppath) and all(
[ImportState().progress_has_element(toppath, p) for p in paths]
):
return True
if self.config["incremental"] and tuple(paths) in self.history_dirs:
return True
return False
_history_dirs = None
@property
def history_dirs(self) -> set[tuple[PathBytes, ...]]:
# FIXME: This could be simplified to a cached property
if self._history_dirs is None:
self._history_dirs = ImportState().taghistory
return self._history_dirs
def already_merged(self, paths: Sequence[PathBytes]):
"""Returns true if all the paths being imported were part of a merge
during previous tasks.
"""
for path in paths:
if path not in self._merged_items and path not in self._merged_dirs:
return False
return True
def mark_merged(self, paths: Sequence[PathBytes]):
"""Mark paths and directories as merged for future reimport tasks."""
self._merged_items.update(paths)
dirs = {
os.path.dirname(path) if os.path.isfile(syspath(path)) else path
for path in paths
}
self._merged_dirs.update(dirs)
def is_resuming(self, toppath: PathBytes):
"""Return `True` if user wants to resume import of this path.
You have to call `ask_resume` first to determine the return value.
"""
return self._is_resuming.get(toppath, False)
def ask_resume(self, toppath: PathBytes):
"""If import of `toppath` was aborted in an earlier session, ask
user if they want to resume the import.
Determines the return value of `is_resuming(toppath)`.
"""
if self.want_resume and ImportState().progress_has(toppath):
# Either accept immediately or prompt for input to decide.
if self.want_resume is True or self.should_resume(toppath):
log.warning(
"Resuming interrupted import of {}",
util.displayable_path(toppath),
)
self._is_resuming[toppath] = True
else:
# Clear progress; we're starting from the top.
ImportState().progress_reset(toppath)
beetbox-beets-c1877b7/beets/importer/stages.py 0000664 0000000 0000000 00000031502 15073551743 0021424 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import annotations
import itertools
import logging
from typing import TYPE_CHECKING, Callable
from beets import config, plugins
from beets.util import MoveOperation, displayable_path, pipeline
from .tasks import (
Action,
ImportTask,
ImportTaskFactory,
SentinelImportTask,
SingletonImportTask,
)
if TYPE_CHECKING:
from beets import library
from .session import ImportSession
# Global logger.
log = logging.getLogger("beets")
# ---------------------------- Producer functions ---------------------------- #
# Functions that are called first, i.e. those that generate import tasks
def read_tasks(session: ImportSession):
"""A generator yielding all the albums (as ImportTask objects) found
in the user-specified list of paths. In the case of a singleton
import, yields single-item tasks instead.
"""
skipped = 0
for toppath in session.paths:
# Check whether we need to resume the import.
session.ask_resume(toppath)
# Generate tasks.
task_factory = ImportTaskFactory(toppath, session)
yield from task_factory.tasks()
skipped += task_factory.skipped
if not task_factory.imported:
log.warning("No files imported from {}", displayable_path(toppath))
# Show skipped directories (due to incremental/resume).
if skipped:
log.info("Skipped {} paths.", skipped)
def query_tasks(session: ImportSession):
"""A generator that works as a drop-in-replacement for read_tasks.
Instead of finding files from the filesystem, a query is used to
match items from the library.
"""
task: ImportTask
if session.config["singletons"]:
# Search for items.
for item in session.lib.items(session.query):
task = SingletonImportTask(None, item)
for task in task.handle_created(session):
yield task
else:
# Search for albums.
for album in session.lib.albums(session.query):
log.debug(
"yielding album {0.id}: {0.albumartist} - {0.album}", album
)
items = list(album.items())
_freshen_items(items)
task = ImportTask(None, [album.item_dir()], items)
for task in task.handle_created(session):
yield task
# ---------------------------------- Stages ---------------------------------- #
# Functions that process import tasks, may transform or filter them
# They are chained together in the pipeline e.g. stage2(stage1(task)) -> task
def group_albums(session: ImportSession):
"""A pipeline stage that groups the items of each task into albums
using their metadata.
Groups are identified using their artist and album fields. The
pipeline stage emits new album tasks for each discovered group.
"""
def group(item):
return (item.albumartist or item.artist, item.album)
task = None
while True:
task = yield task
if task.skip:
continue
tasks = []
sorted_items: list[library.Item] = sorted(task.items, key=group)
for _, items in itertools.groupby(sorted_items, group):
l_items = list(items)
task = ImportTask(task.toppath, [i.path for i in l_items], l_items)
tasks += task.handle_created(session)
tasks.append(SentinelImportTask(task.toppath, task.paths))
task = pipeline.multiple(tasks)
@pipeline.mutator_stage
def lookup_candidates(session: ImportSession, task: ImportTask):
"""A coroutine for performing the initial MusicBrainz lookup for an
album. It accepts lists of Items and yields
(items, cur_artist, cur_album, candidates, rec) tuples. If no match
is found, all of the yielded parameters (except items) are None.
"""
if task.skip:
# FIXME This gets duplicated a lot. We need a better
# abstraction.
return
plugins.send("import_task_start", session=session, task=task)
log.debug("Looking up: {}", displayable_path(task.paths))
# Restrict the initial lookup to IDs specified by the user via the -m
# option. Currently all the IDs are passed onto the tasks directly.
task.lookup_candidates(session.config["search_ids"].as_str_seq())
@pipeline.stage
def user_query(session: ImportSession, task: ImportTask):
"""A coroutine for interfacing with the user about the tagging
process.
The coroutine accepts an ImportTask objects. It uses the
session's `choose_match` method to determine the `action` for
this task. Depending on the action additional stages are executed
and the processed task is yielded.
It emits the ``import_task_choice`` event for plugins. Plugins have
access to the choice via the ``task.choice_flag`` property and may
choose to change it.
"""
if task.skip:
return task
if session.already_merged(task.paths):
return pipeline.BUBBLE
# Ask the user for a choice.
task.choose_match(session)
plugins.send("import_task_choice", session=session, task=task)
# As-tracks: transition to singleton workflow.
if task.choice_flag is Action.TRACKS:
# Set up a little pipeline for dealing with the singletons.
def emitter(task):
for item in task.items:
task = SingletonImportTask(task.toppath, item)
yield from task.handle_created(session)
yield SentinelImportTask(task.toppath, task.paths)
return _extend_pipeline(
emitter(task), lookup_candidates(session), user_query(session)
)
# As albums: group items by albums and create task for each album
if task.choice_flag is Action.ALBUMS:
return _extend_pipeline(
[task],
group_albums(session),
lookup_candidates(session),
user_query(session),
)
_resolve_duplicates(session, task)
if task.should_merge_duplicates:
# Create a new task for tagging the current items
# and duplicates together
duplicate_items = task.duplicate_items(session.lib)
# Duplicates would be reimported so make them look "fresh"
_freshen_items(duplicate_items)
duplicate_paths = [item.path for item in duplicate_items]
# Record merged paths in the session so they are not reimported
session.mark_merged(duplicate_paths)
merged_task = ImportTask(
None, task.paths + duplicate_paths, task.items + duplicate_items
)
return _extend_pipeline(
[merged_task], lookup_candidates(session), user_query(session)
)
_apply_choice(session, task)
return task
@pipeline.mutator_stage
def import_asis(session: ImportSession, task: ImportTask):
"""Select the `action.ASIS` choice for all tasks.
This stage replaces the initial_lookup and user_query stages
when the importer is run without autotagging.
"""
if task.skip:
return
log.info("{}", displayable_path(task.paths))
task.set_choice(Action.ASIS)
_apply_choice(session, task)
@pipeline.mutator_stage
def plugin_stage(
session: ImportSession,
func: Callable[[ImportSession, ImportTask], None],
task: ImportTask,
):
"""A coroutine (pipeline stage) that calls the given function with
each non-skipped import task. These stages occur between applying
metadata changes and moving/copying/writing files.
"""
if task.skip:
return
func(session, task)
# Stage may modify DB, so re-load cached item data.
# FIXME Importer plugins should not modify the database but instead
# the albums and items attached to tasks.
task.reload()
@pipeline.stage
def log_files(session: ImportSession, task: ImportTask):
"""A coroutine (pipeline stage) to log each file to be imported."""
if isinstance(task, SingletonImportTask):
log.info("Singleton: {}", displayable_path(task.item["path"]))
elif task.items:
log.info("Album: {}", displayable_path(task.paths[0]))
for item in task.items:
log.info(" {}", displayable_path(item["path"]))
# --------------------------------- Consumer --------------------------------- #
# Anything that should be placed last in the pipeline
# In theory every stage could be a consumer, but in practice there are some
# functions which are typically placed last in the pipeline
@pipeline.stage
def manipulate_files(session: ImportSession, task: ImportTask):
"""A coroutine (pipeline stage) that performs necessary file
manipulations *after* items have been added to the library and
finalizes each task.
"""
if not task.skip:
if task.should_remove_duplicates:
task.remove_duplicates(session.lib)
if session.config["move"]:
operation = MoveOperation.MOVE
elif session.config["copy"]:
operation = MoveOperation.COPY
elif session.config["link"]:
operation = MoveOperation.LINK
elif session.config["hardlink"]:
operation = MoveOperation.HARDLINK
elif session.config["reflink"] == "auto":
operation = MoveOperation.REFLINK_AUTO
elif session.config["reflink"]:
operation = MoveOperation.REFLINK
else:
operation = None
task.manipulate_files(
session=session,
operation=operation,
write=session.config["write"],
)
# Progress, cleanup, and event.
task.finalize(session)
# ---------------------------- Utility functions ----------------------------- #
# Private functions only used in the stages above
def _apply_choice(session: ImportSession, task: ImportTask):
"""Apply the task's choice to the Album or Item it contains and add
it to the library.
"""
if task.skip:
return
# Change metadata.
if task.apply:
task.apply_metadata()
plugins.send("import_task_apply", session=session, task=task)
task.add(session.lib)
# If ``set_fields`` is set, set those fields to the
# configured values.
# NOTE: This cannot be done before the ``task.add()`` call above,
# because then the ``ImportTask`` won't have an `album` for which
# it can set the fields.
if config["import"]["set_fields"]:
task.set_fields(session.lib)
def _resolve_duplicates(session: ImportSession, task: ImportTask):
"""Check if a task conflicts with items or albums already imported
and ask the session to resolve this.
"""
if task.choice_flag in (Action.ASIS, Action.APPLY, Action.RETAG):
found_duplicates = task.find_duplicates(session.lib)
if found_duplicates:
log.debug("found duplicates: {}", [o.id for o in found_duplicates])
# Get the default action to follow from config.
duplicate_action = config["import"]["duplicate_action"].as_choice(
{
"skip": "s",
"keep": "k",
"remove": "r",
"merge": "m",
"ask": "a",
}
)
log.debug("default action for duplicates: {}", duplicate_action)
if duplicate_action == "s":
# Skip new.
task.set_choice(Action.SKIP)
elif duplicate_action == "k":
# Keep both. Do nothing; leave the choice intact.
pass
elif duplicate_action == "r":
# Remove old.
task.should_remove_duplicates = True
elif duplicate_action == "m":
# Merge duplicates together
task.should_merge_duplicates = True
else:
# No default action set; ask the session.
session.resolve_duplicate(task, found_duplicates)
session.log_choice(task, True)
def _freshen_items(items):
# Clear IDs from re-tagged items so they appear "fresh" when
# we add them back to the library.
for item in items:
item.id = None
item.album_id = None
def _extend_pipeline(tasks, *stages):
# Return pipeline extension for stages with list of tasks
if isinstance(tasks, list):
task_iter = iter(tasks)
else:
task_iter = tasks
ipl = pipeline.Pipeline([task_iter] + list(stages))
return pipeline.multiple(ipl.pull())
beetbox-beets-c1877b7/beets/importer/state.py 0000664 0000000 0000000 00000011164 15073551743 0021260 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import annotations
import logging
import os
import pickle
from bisect import bisect_left, insort
from dataclasses import dataclass
from typing import TYPE_CHECKING
from beets import config
if TYPE_CHECKING:
from beets.util import PathBytes
# Global logger.
log = logging.getLogger("beets")
@dataclass
class ImportState:
"""Representing the progress of an import task.
Opens the state file on creation of the class. If you want
to ensure the state is written to disk, you should use the
context manager protocol.
Tagprogress allows long tagging tasks to be resumed when they pause.
Taghistory is a utility for manipulating the "incremental" import log.
This keeps track of all directories that were ever imported, which
allows the importer to only import new stuff.
Usage
-----
```
# Readonly
progress = ImportState().tagprogress
# Read and write
with ImportState() as state:
state["key"] = "value"
```
"""
tagprogress: dict[PathBytes, list[PathBytes]]
taghistory: set[tuple[PathBytes, ...]]
path: PathBytes
def __init__(self, readonly=False, path: PathBytes | None = None):
self.path = path or os.fsencode(config["statefile"].as_filename())
self.tagprogress = {}
self.taghistory = set()
self._open()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._save()
def _open(
self,
):
try:
with open(self.path, "rb") as f:
state = pickle.load(f)
# Read the states
self.tagprogress = state.get("tagprogress", {})
self.taghistory = state.get("taghistory", set())
except Exception as exc:
# The `pickle` module can emit all sorts of exceptions during
# unpickling, including ImportError. We use a catch-all
# exception to avoid enumerating them all (the docs don't even have a
# full list!).
log.debug("state file could not be read: {}", exc)
def _save(self):
try:
with open(self.path, "wb") as f:
pickle.dump(
{
"tagprogress": self.tagprogress,
"taghistory": self.taghistory,
},
f,
)
except OSError as exc:
log.error("state file could not be written: {}", exc)
# -------------------------------- Tagprogress ------------------------------- #
def progress_add(self, toppath: PathBytes, *paths: PathBytes):
"""Record that the files under all of the `paths` have been imported
under `toppath`.
"""
with self as state:
imported = state.tagprogress.setdefault(toppath, [])
for path in paths:
if imported and imported[-1] <= path:
imported.append(path)
else:
insort(imported, path)
def progress_has_element(self, toppath: PathBytes, path: PathBytes) -> bool:
"""Return whether `path` has been imported in `toppath`."""
imported = self.tagprogress.get(toppath, [])
i = bisect_left(imported, path)
return i != len(imported) and imported[i] == path
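# A minimal usage sketch (hypothetical paths): because progress_add keeps
# each per-toppath list sorted, the bisect lookup above stays O(log n):
#
#     ImportState().progress_add(b"/music", b"/music/a.mp3")
#     ImportState().progress_has_element(b"/music", b"/music/a.mp3")  # True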
def progress_has(self, toppath: PathBytes) -> bool:
"""Return `True` if there exist paths that have already been
imported under `toppath`.
"""
return toppath in self.tagprogress
def progress_reset(self, toppath: PathBytes | None):
"""Reset the progress for `toppath`."""
with self as state:
if toppath in state.tagprogress:
del state.tagprogress[toppath]
# -------------------------------- Taghistory -------------------------------- #
def history_add(self, paths: list[PathBytes]):
"""Add the paths to the history."""
with self as state:
state.taghistory.add(tuple(paths))
beetbox-beets-c1877b7/beets/importer/tasks.py 0000664 0000000 0000000 00000127253 15073551743 0021274 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import annotations
import logging
import os
import re
import shutil
import time
from collections import defaultdict
from enum import Enum
from tempfile import mkdtemp
from typing import TYPE_CHECKING, Any, Callable, Iterable, Sequence
import mediafile
from beets import autotag, config, library, plugins, util
from beets.dbcore.query import PathQuery
from .state import ImportState
if TYPE_CHECKING:
from beets.autotag.match import Recommendation
from .session import ImportSession
# Global logger.
log = logging.getLogger("beets")
SINGLE_ARTIST_THRESH = 0.25
# Usually flexible attributes are preserved (i.e., not updated) during
# reimports. The following two lists (globally) change this behaviour for
# certain fields. To alter these lists only when a specific plugin is in use,
# something like this can be used within that plugin's code:
#
# from beets import importer
# def extend_reimport_fresh_fields_item():
# importer.REIMPORT_FRESH_FIELDS_ITEM.extend(['tidal_track_popularity']
# )
REIMPORT_FRESH_FIELDS_ITEM = [
"data_source",
"bandcamp_album_id",
"spotify_album_id",
"deezer_album_id",
"beatport_album_id",
"tidal_album_id",
"data_url",
]
REIMPORT_FRESH_FIELDS_ALBUM = [*REIMPORT_FRESH_FIELDS_ITEM, "media"]
class ImportAbortError(Exception):
"""Raised when the user aborts the tagging operation."""
pass
class Action(Enum):
"""Enumeration of possible actions for an import task."""
SKIP = "SKIP"
ASIS = "ASIS"
TRACKS = "TRACKS"
APPLY = "APPLY"
ALBUMS = "ALBUMS"
RETAG = "RETAG"
# The RETAG action represents "don't apply any match, but do record
# new metadata". It's not reachable via the standard command prompt but
# can be used by plugins.
class BaseImportTask:
"""An abstract base class for importer tasks.
Tasks flow through the importer pipeline. Each stage can update
them."""
toppath: util.PathBytes | None
paths: list[util.PathBytes]
items: list[library.Item]
def __init__(
self,
toppath: util.PathBytes | None,
paths: Iterable[util.PathBytes] | None,
items: Iterable[library.Item] | None,
):
"""Create a task. The primary fields that define a task are:
* `toppath`: The user-specified base directory that contains the
music for this task. If the task has *no* user-specified base
(for example, when importing based on an -L query), this can
be None. This is used for tracking progress and history.
* `paths`: A list of *specific* paths where the music for this task
came from. These paths can be directories, when their entire
contents are being imported, or files, when the task comprises
individual tracks. This is used for progress/history tracking and
for displaying the task to the user.
* `items`: A list of `Item` objects representing the music being
imported.
These fields should not change after initialization.
"""
self.toppath = toppath
self.paths = list(paths) if paths is not None else []
self.items = list(items) if items is not None else []
class ImportTask(BaseImportTask):
"""Represents a single set of items to be imported along with its
intermediate state. May represent an album or a single item.
The import session and stages call the following methods in the
given order.
* `lookup_candidates()` Sets the `common_artist`, `common_album`,
`candidates`, and `rec` attributes. `candidates` is a list of
`AlbumMatch` objects.
* `choose_match()` Uses the session to set the `match` attribute
from the `candidates` list.
* `find_duplicates()` Returns a list of albums from `lib` with the
same artist and album name as the task.
* `apply_metadata()` Sets the attributes of the items from the
task's `match` attribute.
* `add()` Add the imported items and album to the database.
* `manipulate_files()` Copy, move, and write files depending on the
session configuration.
* `set_fields()` Sets the fields given at CLI or configuration to
the specified values.
* `finalize()` Update the import progress and cleanup the file
system.
"""
choice_flag: Action | None = None
match: autotag.AlbumMatch | autotag.TrackMatch | None = None
# Keep track of the current task item
cur_album: str | None = None
cur_artist: str | None = None
candidates: Sequence[autotag.AlbumMatch | autotag.TrackMatch] = []
rec: Recommendation | None = None
def __init__(
self,
toppath: util.PathBytes | None,
paths: Iterable[util.PathBytes] | None,
items: Iterable[library.Item] | None,
):
super().__init__(toppath, paths, items)
self.should_remove_duplicates = False
self.should_merge_duplicates = False
self.is_album = True
def set_choice(
self, choice: Action | autotag.AlbumMatch | autotag.TrackMatch
):
"""Given an AlbumMatch or TrackMatch object or an action constant,
indicates that an action has been selected for this task.
AlbumMatch and TrackMatch are implemented as named tuples, so we
can't use isinstance to distinguish them from the Action constants.
"""
# Not part of the task structure:
assert choice != Action.APPLY # Only used internally.
if choice in (
Action.SKIP,
Action.ASIS,
Action.TRACKS,
Action.ALBUMS,
Action.RETAG,
):
# TODO: redesign to stricten the type
self.choice_flag = choice # type: ignore[assignment]
self.match = None
else:
self.choice_flag = Action.APPLY # Implicit choice.
self.match = choice # type: ignore[assignment]
def save_progress(self):
"""Updates the progress state to indicate that this album has
finished.
"""
if self.toppath:
ImportState().progress_add(self.toppath, *self.paths)
def save_history(self):
"""Save the directory in the history for incremental imports."""
ImportState().history_add(self.paths)
# Logical decisions.
@property
def apply(self):
return self.choice_flag == Action.APPLY
@property
def skip(self):
return self.choice_flag == Action.SKIP
# Convenient data.
def chosen_info(self):
"""Return a dictionary of metadata about the current choice.
May only be called when the choice flag is ASIS or RETAG
(in which case the data comes from the files' current metadata)
or APPLY (in which case the data comes from the choice).
"""
if self.choice_flag in (Action.ASIS, Action.RETAG):
likelies, consensus = util.get_most_common_tags(self.items)
return likelies
elif self.choice_flag is Action.APPLY and self.match:
return self.match.info.copy()
assert False
def imported_items(self):
"""Return a list of Items that should be added to the library.
If the task applies an album match, the method only returns the
matched items.
"""
if self.choice_flag in (Action.ASIS, Action.RETAG):
return list(self.items)
elif self.choice_flag == Action.APPLY and isinstance(
self.match, autotag.AlbumMatch
):
return list(self.match.mapping.keys())
else:
assert False
def apply_metadata(self):
"""Copy metadata from match info to the items."""
if config["import"]["from_scratch"]:
for item in self.match.mapping:
item.clear()
autotag.apply_metadata(self.match.info, self.match.mapping)
def duplicate_items(self, lib: library.Library):
duplicate_items = []
for album in self.find_duplicates(lib):
duplicate_items += album.items()
return duplicate_items
def remove_duplicates(self, lib: library.Library):
duplicate_items = self.duplicate_items(lib)
log.debug("removing {} old duplicated items", len(duplicate_items))
for item in duplicate_items:
item.remove()
if lib.directory in util.ancestry(item.path):
log.debug("deleting duplicate {.filepath}", item)
util.remove(item.path)
util.prune_dirs(os.path.dirname(item.path), lib.directory)
def set_fields(self, lib: library.Library):
"""Sets the fields given at CLI or configuration to the specified
values, for both the album and all its items.
"""
items = self.imported_items()
for field, view in config["import"]["set_fields"].items():
value = str(view.get())
log.debug(
"Set field {}={} for {}",
field,
value,
util.displayable_path(self.paths),
)
self.album.set_parse(field, format(self.album, value))
for item in items:
item.set_parse(field, format(item, value))
with lib.transaction():
for item in items:
item.store()
self.album.store()
def finalize(self, session: ImportSession):
"""Save progress, clean up files, and emit plugin event."""
# Update progress.
if session.want_resume:
self.save_progress()
if session.config["incremental"] and not (
# Should we skip recording to incremental list?
self.skip and session.config["incremental_skip_later"]
):
self.save_history()
self.cleanup(
copy=session.config["copy"],
delete=session.config["delete"],
move=session.config["move"],
)
if not self.skip:
self._emit_imported(session.lib)
def cleanup(self, copy=False, delete=False, move=False):
"""Remove and prune imported paths."""
# Do not delete any files or prune directories when skipping.
if self.skip:
return
items = self.imported_items()
# When copying and deleting originals, delete old files.
if copy and delete:
new_paths = [os.path.realpath(item.path) for item in items]
for old_path in self.old_paths:
# Only delete files that were actually copied.
if old_path not in new_paths:
util.remove(old_path, False)
self.prune(old_path)
# When moving, prune empty directories containing the original files.
elif move:
for old_path in self.old_paths:
self.prune(old_path)
def _emit_imported(self, lib: library.Library):
plugins.send("album_imported", lib=lib, album=self.album)
def handle_created(self, session: ImportSession):
"""Send the `import_task_created` event for this task. Return a list of
tasks that should continue through the pipeline. By default, this is a
list containing only the task itself, but plugins can replace the task
with new ones.
"""
tasks = plugins.send("import_task_created", session=session, task=self)
if not tasks:
tasks = [self]
else:
# The plugins gave us a list of lists of tasks. Flatten it.
tasks = [t for inner in tasks for t in inner]
return tasks
def lookup_candidates(self, search_ids: list[str]) -> None:
"""Retrieve and store candidates for this album.
If User-specified ``search_ids`` list is not empty, the lookup is
restricted to only those IDs.
"""
self.cur_artist, self.cur_album, (self.candidates, self.rec) = (
autotag.tag_album(self.items, search_ids=search_ids)
)
def find_duplicates(self, lib: library.Library) -> list[library.Album]:
"""Return a list of albums from `lib` with the same artist and
album name as the task.
"""
info = self.chosen_info()
info["albumartist"] = info["artist"]
if info["artist"] is None:
# As-is import with no artist. Skip check.
return []
# Construct a query to find duplicates with this metadata. We
# use a temporary Album object to generate any computed fields.
tmp_album = library.Album(lib, **info)
keys: list[str] = config["import"]["duplicate_keys"][
"album"
].as_str_seq()
dup_query = tmp_album.duplicates_query(keys)
# Don't count albums with the same files as duplicates.
task_paths = {i.path for i in self.items if i}
duplicates = []
for album in lib.albums(dup_query):
# Check whether the album paths are all present in the task
# i.e. album is being completely re-imported by the task,
# in which case it is not a duplicate (will be replaced).
album_paths = {i.path for i in album.items()}
if not (album_paths <= task_paths):
duplicates.append(album)
return duplicates
def align_album_level_fields(self):
"""Make some album fields equal across `self.items`. For the
RETAG action, we assume that the responsible for returning it
(ie. a plugin) always ensures that the first item contains
valid data on the relevant fields.
"""
changes = {}
if self.choice_flag == Action.ASIS:
# Taking metadata "as-is". Guess whether this album is VA.
plur_albumartist, freq = util.plurality(
[i.albumartist or i.artist for i in self.items]
)
if freq == len(self.items) or (
freq > 1
and float(freq) / len(self.items) >= SINGLE_ARTIST_THRESH
):
# Single-artist album.
changes["albumartist"] = plur_albumartist
changes["comp"] = False
else:
# VA.
changes["albumartist"] = config["va_name"].as_str()
changes["comp"] = True
elif self.choice_flag in (Action.APPLY, Action.RETAG):
# Applying autotagged metadata. Just get AA from the first
# item.
if not self.items[0].albumartist:
changes["albumartist"] = self.items[0].artist
if not self.items[0].albumartists:
changes["albumartists"] = self.items[0].artists
if not self.items[0].mb_albumartistid:
changes["mb_albumartistid"] = self.items[0].mb_artistid
if not self.items[0].mb_albumartistids:
changes["mb_albumartistids"] = self.items[0].mb_artistids
# Apply new metadata.
for item in self.items:
item.update(changes)
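    # A worked example of the ASIS heuristic above: for album artists
    # ["A", "A", "A", "B"], util.plurality() returns ("A", 3), and 3/4
    # clears SINGLE_ARTIST_THRESH, so the album is treated as a
    # single-artist album with albumartist "A". For ["A", "B", "C",
    # "D"], no artist repeats, so the album is marked as a compilation
    # with the configured `va_name`.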
def manipulate_files(
self,
session: ImportSession,
operation: util.MoveOperation | None = None,
write=False,
):
"""Copy, move, link, hardlink or reflink (depending on `operation`)
the files as well as write metadata.
`operation` should be an instance of `util.MoveOperation`.
If `write` is `True` metadata is written to the files.
# TODO: Introduce a MoveOperation.NONE or SKIP
"""
items = self.imported_items()
# Save the original paths of all items for deletion and pruning
# in the next step (finalization).
self.old_paths: list[util.PathBytes] = [item.path for item in items]
for item in items:
if operation is not None:
# In copy and link modes, treat re-imports specially:
# move in-library files. (Out-of-library files are
# copied/moved as usual).
old_path = item.path
if (
operation != util.MoveOperation.MOVE
and self.replaced_items[item]
and session.lib.directory in util.ancestry(old_path)
):
item.move()
# We moved the item, so remove the
# now-nonexistent file from old_paths.
self.old_paths.remove(old_path)
else:
# A normal import. Just copy files and keep track of
# old paths.
item.move(operation)
if write and (self.apply or self.choice_flag == Action.RETAG):
item.try_write()
with session.lib.transaction():
for item in self.imported_items():
item.store()
plugins.send("import_task_files", session=session, task=self)
def add(self, lib: library.Library):
"""Add the items as an album to the library and remove replaced items."""
self.align_album_level_fields()
with lib.transaction():
self.record_replaced(lib)
self.remove_replaced(lib)
self.album = lib.add_album(self.imported_items())
if self.choice_flag == Action.APPLY and isinstance(
self.match, autotag.AlbumMatch
):
# Copy album flexible fields to the DB
# TODO: change the flow so we create the `Album` object earlier,
# and we can move this into `self.apply_metadata`, just like
# is done for tracks.
autotag.apply_album_metadata(self.match.info, self.album)
self.album.store()
self.reimport_metadata(lib)
def record_replaced(self, lib: library.Library):
"""Records the replaced items and albums in the `replaced_items`
and `replaced_albums` dictionaries.
"""
self.replaced_items = defaultdict(list)
        self.replaced_albums: dict[util.PathBytes, library.Album] = {}
replaced_album_ids = set()
for item in self.imported_items():
dup_items = list(lib.items(query=PathQuery("path", item.path)))
self.replaced_items[item] = dup_items
for dup_item in dup_items:
if (
not dup_item.album_id
or dup_item.album_id in replaced_album_ids
):
continue
replaced_album = dup_item._cached_album
if replaced_album:
replaced_album_ids.add(dup_item.album_id)
self.replaced_albums[replaced_album.path] = replaced_album
def reimport_metadata(self, lib: library.Library):
"""For reimports, preserves metadata for reimported items and
albums.
"""
        def _reduce_and_log(new_obj, existing_fields, overwrite_keys):
            """Some flexible attributes should be overwritten (rather
            than preserved) on reimport. Copy ``existing_fields``, log
            and remove the entries that should not be preserved, and
            return a dict containing the fields that remain to be
            preserved.
            """
noun = "album" if isinstance(new_obj, library.Album) else "item"
existing_fields = dict(existing_fields)
overwritten_fields = [
k
for k in existing_fields
if k in overwrite_keys
and new_obj.get(k)
and existing_fields.get(k) != new_obj.get(k)
]
if overwritten_fields:
log.debug(
"Reimported {0} {1.id}. Not preserving flexible attributes {2}. "
"Path: {1.filepath}",
noun,
new_obj,
overwritten_fields,
)
for key in overwritten_fields:
del existing_fields[key]
return existing_fields
if self.is_album:
replaced_album = self.replaced_albums.get(self.album.path)
if replaced_album:
album_fields = _reduce_and_log(
self.album,
replaced_album._values_flex,
REIMPORT_FRESH_FIELDS_ALBUM,
)
self.album.added = replaced_album.added
self.album.update(album_fields)
self.album.artpath = replaced_album.artpath
self.album.store()
log.debug(
"Reimported album {0.album.id}. Preserving attribute ['added']. "
"Path: {0.album.filepath}",
self,
)
log.debug(
"Reimported album {0.album.id}. Preserving flexible"
" attributes {1}. Path: {0.album.filepath}",
self,
list(album_fields.keys()),
)
for item in self.imported_items():
dup_items = self.replaced_items[item]
for dup_item in dup_items:
if dup_item.added and dup_item.added != item.added:
item.added = dup_item.added
log.debug(
"Reimported item {0.id}. Preserving attribute ['added']. "
"Path: {0.filepath}",
item,
)
item_fields = _reduce_and_log(
item, dup_item._values_flex, REIMPORT_FRESH_FIELDS_ITEM
)
item.update(item_fields)
log.debug(
"Reimported item {0.id}. Preserving flexible attributes {1}. "
"Path: {0.filepath}",
item,
list(item_fields.keys()),
)
item.store()
def remove_replaced(self, lib):
"""Removes all the items from the library that have the same
path as an item from this task.
"""
for item in self.imported_items():
for dup_item in self.replaced_items[item]:
log.debug("Replacing item {.id}: {.filepath}", dup_item, item)
dup_item.remove()
log.debug(
"{} of {} items replaced",
sum(bool(v) for v in self.replaced_items.values()),
len(self.imported_items()),
)
def choose_match(self, session):
"""Ask the session which match should apply and apply it."""
choice = session.choose_match(self)
self.set_choice(choice)
session.log_choice(self)
def reload(self):
"""Reload albums and items from the database."""
for item in self.imported_items():
item.load()
self.album.load()
# Utilities.
def prune(self, filename):
"""Prune any empty directories above the given file. If this
task has no `toppath` or the file path provided is not within
the `toppath`, then this function has no effect. Similarly, if
the file still exists, no pruning is performed, so it's safe to
call when the file in question may not have been removed.
"""
if self.toppath and not os.path.exists(util.syspath(filename)):
util.prune_dirs(
os.path.dirname(filename),
self.toppath,
clutter=config["clutter"].as_str_seq(),
)
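    # A sketch of what `prune` delegates to: if `toppath` is b"/inbox"
    # and b"/inbox/Artist/Album/01.mp3" was removed, util.prune_dirs()
    # deletes "Album" and then "Artist" if they end up empty (files
    # matching the configured `clutter` patterns, such as ".DS_Store",
    # do not count as contents), stopping at the `toppath` boundary.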
class SingletonImportTask(ImportTask):
    """ImportTask for a single track that is not associated with an album."""
def __init__(self, toppath: util.PathBytes | None, item: library.Item):
super().__init__(toppath, [item.path], [item])
self.item = item
self.is_album = False
self.paths = [item.path]
def chosen_info(self):
"""Return a dictionary of metadata about the current choice.
May only be called when the choice flag is ASIS or RETAG
(in which case the data comes from the files' current metadata)
or APPLY (in which case the data comes from the choice).
"""
assert self.choice_flag in (Action.ASIS, Action.RETAG, Action.APPLY)
if self.choice_flag in (Action.ASIS, Action.RETAG):
return dict(self.item)
elif self.choice_flag is Action.APPLY:
return self.match.info.copy()
def imported_items(self):
return [self.item]
def apply_metadata(self):
autotag.apply_item_metadata(self.item, self.match.info)
def _emit_imported(self, lib):
for item in self.imported_items():
plugins.send("item_imported", lib=lib, item=item)
def lookup_candidates(self, search_ids: list[str]) -> None:
self.candidates, self.rec = autotag.tag_item(
self.item, search_ids=search_ids
)
    def find_duplicates(self, lib: library.Library) -> list[library.Item]:  # type: ignore[override] # Fixing this needs splitting singleton and album tasks into separate classes
"""Return a list of items from `lib` that have the same artist
and title as the task.
"""
info = self.chosen_info()
# Query for existing items using the same metadata. We use a
# temporary `Item` object to generate any computed fields.
tmp_item = library.Item(lib, **info)
keys: list[str] = config["import"]["duplicate_keys"][
"item"
].as_str_seq()
dup_query = tmp_item.duplicates_query(keys)
found_items = []
for other_item in lib.items(dup_query):
            # The file being imported is not its own duplicate.
if other_item.path != self.item.path:
found_items.append(other_item)
return found_items
duplicate_items = find_duplicates
def add(self, lib):
with lib.transaction():
self.record_replaced(lib)
self.remove_replaced(lib)
lib.add(self.item)
self.reimport_metadata(lib)
def infer_album_fields(self):
raise NotImplementedError
def choose_match(self, session: ImportSession):
"""Ask the session which match should apply and apply it."""
choice = session.choose_item(self)
self.set_choice(choice)
session.log_choice(self)
def reload(self):
self.item.load()
def set_fields(self, lib):
"""Sets the fields given at CLI or configuration to the specified
values, for the singleton item.
"""
for field, view in config["import"]["set_fields"].items():
value = str(view.get())
log.debug(
"Set field {}={} for {}",
field,
value,
util.displayable_path(self.paths),
)
self.item.set_parse(field, format(self.item, value))
self.item.store()
# FIXME The inheritance relationships are inverted. This is why there
# are so many methods which pass. More responsibility should be delegated to
# the BaseImportTask class.
class SentinelImportTask(ImportTask):
"""A sentinel task marks the progress of an import and does not
import any items itself.
If only `toppath` is set the task indicates the end of a top-level
directory import. If the `paths` argument is also given, the task
indicates the progress in the `toppath` import.
"""
def __init__(self, toppath, paths):
super().__init__(toppath, paths, ())
# TODO Remove the remaining attributes eventually
self.should_remove_duplicates = False
self.is_album = True
self.choice_flag = None
def save_history(self):
pass
def save_progress(self):
if not self.paths:
# "Done" sentinel.
ImportState().progress_reset(self.toppath)
elif self.toppath:
# "Directory progress" sentinel for singletons
super().save_progress()
@property
def skip(self) -> bool:
return True
def set_choice(self, choice):
raise NotImplementedError
def cleanup(self, copy=False, delete=False, move=False):
pass
def _emit_imported(self, lib):
pass
ArchiveHandler = tuple[
Callable[[util.StrPath], bool], Callable[[util.StrPath], Any]
]
class ArchiveImportTask(SentinelImportTask):
"""An import task that represents the processing of an archive.
    `toppath` must be an archive in a supported format (e.g. `zip`,
    `tar`, `rar`, or `7z`). Archive tasks serve two purposes:
    - First, the task unarchives the files to a temporary directory
      and returns it. The client should read tasks from the resulting
      directory and send them through the pipeline.
    - Second, the task cleans up the temporary directory when it
      proceeds through the pipeline. The client should send the archive
      task after sending the rest of the music tasks to make this work.
"""
def __init__(self, toppath):
super().__init__(toppath, ())
self.extracted = False
@classmethod
def is_archive(cls, path):
"""Returns true if the given path points to an archive that can
be handled.
"""
if not os.path.isfile(path):
return False
for path_test, _ in cls.handlers:
if path_test(os.fsdecode(path)):
return True
return False
@util.cached_classproperty
def handlers(cls) -> list[ArchiveHandler]:
"""Returns a list of archive handlers.
Each handler is a `(path_test, ArchiveClass)` tuple. `path_test`
is a function that returns `True` if the given path can be
handled by `ArchiveClass`. `ArchiveClass` is a class that
implements the same interface as `tarfile.TarFile`.
"""
_handlers: list[ArchiveHandler] = []
from zipfile import ZipFile, is_zipfile
_handlers.append((is_zipfile, ZipFile))
import tarfile
_handlers.append((tarfile.is_tarfile, tarfile.open))
try:
from rarfile import RarFile, is_rarfile
except ImportError:
pass
else:
_handlers.append((is_rarfile, RarFile))
try:
from py7zr import SevenZipFile, is_7zfile
except ImportError:
pass
else:
_handlers.append((is_7zfile, SevenZipFile))
return _handlers
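    # Since a handler is just a `(path_test, opener)` pair, support for
    # another format could be sketched as follows (`FooFile` is a
    # hypothetical class exposing the `tarfile.TarFile`-style
    # interface; it is not part of beets):
    #
    #     ArchiveImportTask.handlers.append(
    #         (lambda p: p.endswith(".foo"), FooFile)
    #     )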
def cleanup(self, copy=False, delete=False, move=False):
"""Removes the temporary directory the archive was extracted to."""
if self.extracted and self.toppath:
log.debug(
"Removing extracted directory: {}",
util.displayable_path(self.toppath),
)
shutil.rmtree(util.syspath(self.toppath))
def extract(self):
"""Extracts the archive to a temporary directory and sets
`toppath` to that directory.
"""
assert self.toppath is not None, "toppath must be set"
for path_test, handler_class in self.handlers:
if path_test(os.fsdecode(self.toppath)):
break
else:
raise ValueError(f"No handler found for archive: {self.toppath}")
extract_to = mkdtemp()
archive = handler_class(os.fsdecode(self.toppath), mode="r")
try:
archive.extractall(extract_to)
# Adjust the files' mtimes to match the information from the
# archive. Inspired by: https://stackoverflow.com/q/9813243
for f in archive.infolist():
                # The date_time needs to be adjusted; otherwise the
                # extracted file would carry the time of extraction.
                # The (0, 0, -1) is appended to date_time because
                # time.mktime expects a 9-element tuple; the -1
                # indicates that the DST flag is unknown.
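                # For example, a ZipInfo.date_time of
                # (2024, 5, 1, 12, 30, 0) becomes
                # time.mktime((2024, 5, 1, 12, 30, 0, 0, 0, -1)).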
date_time = time.mktime(f.date_time + (0, 0, -1))
fullpath = os.path.join(extract_to, f.filename)
os.utime(fullpath, (date_time, date_time))
finally:
archive.close()
self.extracted = True
self.toppath = extract_to
class ImportTaskFactory:
"""Generate album and singleton import tasks for all media files
indicated by a path.
"""
def __init__(self, toppath: util.PathBytes, session: ImportSession):
"""Create a new task factory.
`toppath` is the user-specified path to search for music to
import. `session` is the `ImportSession`, which controls how
tasks are read from the directory.
"""
self.toppath = toppath
self.session = session
self.skipped = 0 # Skipped due to incremental/resume.
self.imported = 0 # "Real" tasks created.
self.is_archive = ArchiveImportTask.is_archive(util.syspath(toppath))
def tasks(self) -> Iterable[ImportTask]:
"""Yield all import tasks for music found in the user-specified
path `self.toppath`. Any necessary sentinel tasks are also
produced.
During generation, update `self.skipped` and `self.imported`
with the number of tasks that were not produced (due to
incremental mode or resumed imports) and the number of concrete
tasks actually produced, respectively.
If `self.toppath` is an archive, it is adjusted to point to the
extracted data.
"""
# Check whether this is an archive.
archive_task: ArchiveImportTask | None = None
if self.is_archive:
archive_task = self.unarchive()
if not archive_task:
return
# Search for music in the directory.
for dirs, paths in self.paths():
if self.session.config["singletons"]:
for path in paths:
tasks = self._create(self.singleton(path))
yield from tasks
yield self.sentinel(dirs)
else:
tasks = self._create(self.album(paths, dirs))
yield from tasks
# Produce the final sentinel for this toppath to indicate that
# it is finished. This is usually just a SentinelImportTask, but
# for archive imports, send the archive task instead (to remove
# the extracted directory).
yield archive_task or self.sentinel()
def _create(self, task: ImportTask | None):
"""Handle a new task to be emitted by the factory.
        Emit the `import_task_created` event and increment the
        `imported` count if the task is not skipped. Return the list of
        tasks produced by handling the event. If `task` is None, do
        nothing and return an empty list.
"""
if task:
tasks = task.handle_created(self.session)
self.imported += len(tasks)
return tasks
return []
def paths(self):
"""Walk `self.toppath` and yield `(dirs, files)` pairs where
`files` are individual music files and `dirs` the set of
containing directories where the music was found.
        This can be a recursive search in the ordinary case, a single
        track when `toppath` is a file, or a single directory in `flat`
        mode.
"""
if not os.path.isdir(util.syspath(self.toppath)):
yield [self.toppath], [self.toppath]
elif self.session.config["flat"]:
paths = []
for dirs, paths_in_dir in albums_in_dir(self.toppath):
paths += paths_in_dir
yield [self.toppath], paths
else:
for dirs, paths in albums_in_dir(self.toppath):
yield dirs, paths
def singleton(self, path: util.PathBytes):
"""Return a `SingletonImportTask` for the music file."""
if self.session.already_imported(self.toppath, [path]):
log.debug(
"Skipping previously-imported path: {}",
util.displayable_path(path),
)
self.skipped += 1
return None
item = self.read_item(path)
if item:
return SingletonImportTask(self.toppath, item)
else:
return None
    def album(self, paths: Iterable[util.PathBytes], dirs=None):
        """Return an `ImportTask` with all media files from `paths`.
`dirs` is a list of parent directories used to record already
imported albums.
"""
if dirs is None:
dirs = list({os.path.dirname(p) for p in paths})
if self.session.already_imported(self.toppath, dirs):
log.debug(
"Skipping previously-imported path: {}",
util.displayable_path(dirs),
)
self.skipped += 1
return None
items: list[library.Item] = [
item for item in map(self.read_item, paths) if item
]
if len(items) > 0:
return ImportTask(self.toppath, dirs, items)
else:
return None
def sentinel(self, paths: Iterable[util.PathBytes] | None = None):
"""Return a `SentinelImportTask` indicating the end of a
top-level directory import.
"""
return SentinelImportTask(self.toppath, paths)
def unarchive(self):
"""Extract the archive for this `toppath`.
Extract the archive to a new directory, adjust `toppath` to
point to the extracted directory, and return an
`ArchiveImportTask`. If extraction fails, return None.
"""
assert self.is_archive
if not (self.session.config["move"] or self.session.config["copy"]):
log.warning(
"Archive importing requires either "
"'copy' or 'move' to be enabled."
)
return
log.debug("Extracting archive: {}", util.displayable_path(self.toppath))
archive_task = ArchiveImportTask(self.toppath)
try:
archive_task.extract()
except Exception as exc:
log.error("extraction failed: {}", exc)
return
# Now read albums from the extracted directory.
self.toppath = archive_task.toppath
log.debug("Archive extracted to: {.toppath}", self)
return archive_task
def read_item(self, path: util.PathBytes):
"""Return an `Item` read from the path.
If an item cannot be read, return `None` instead and log an
error.
"""
try:
return library.Item.from_path(path)
except library.ReadError as exc:
if isinstance(exc.reason, mediafile.FileTypeError):
# Silently ignore non-music files.
pass
elif isinstance(exc.reason, mediafile.UnreadableFileError):
log.warning("unreadable file: {}", util.displayable_path(path))
else:
log.error(
"error reading {}: {}", util.displayable_path(path), exc
)
MULTIDISC_MARKERS = (rb"dis[ck]", rb"cd")
MULTIDISC_PAT_FMT = rb"^(.*%s[\W_]*)\d"
def is_subdir_of_any_in_list(path, dirs):
    """Return True if `path` is a subdirectory of any directory in
    `dirs` (a list); otherwise, return False.
"""
ancestors = util.ancestry(path)
return any(d in ancestors for d in dirs)
def albums_in_dir(path: util.PathBytes):
    """Recursively search the given directory and return an iterable
    of (paths, items) pairs, where paths is a list of directories and
    items is a list of paths to media files that together probably
    constitute an album. Specifically, any folder containing any media
    files is an album.
"""
collapse_paths: list[util.PathBytes] = []
collapse_items: list[util.PathBytes] = []
collapse_pat = None
ignore: list[str] = config["ignore"].as_str_seq()
ignore_hidden: bool = config["ignore_hidden"].get(bool)
for root, dirs, files in util.sorted_walk(
path, ignore=ignore, ignore_hidden=ignore_hidden, logger=log
):
items = [os.path.join(root, f) for f in files]
# If we're currently collapsing the constituent directories in a
# multi-disc album, check whether we should continue collapsing
# and add the current directory. If so, just add the directory
# and move on to the next directory. If not, stop collapsing.
if collapse_paths:
if (is_subdir_of_any_in_list(root, collapse_paths)) or (
collapse_pat and collapse_pat.match(os.path.basename(root))
):
# Still collapsing.
collapse_paths.append(root)
collapse_items += items
continue
else:
# Collapse finished. Yield the collapsed directory and
# proceed to process the current one.
if collapse_items:
yield collapse_paths, collapse_items
collapse_pat, collapse_paths, collapse_items = None, [], []
# Check whether this directory looks like the *first* directory
        # in a multi-disc sequence. There are two indicators: the
        # directory is named like part of a multi-disc sequence (e.g.,
        # "Title Disc 1"), or it contains no items but only
        # subdirectories that are named in this way.
start_collapsing = False
for marker in MULTIDISC_MARKERS:
# We're using replace on %s due to lack of .format() on bytestrings
p = MULTIDISC_PAT_FMT.replace(b"%s", marker)
marker_pat = re.compile(p, re.I)
match = marker_pat.match(os.path.basename(root))
# Is this directory the root of a nested multi-disc album?
if dirs and not items:
# Check whether all subdirectories have the same prefix.
start_collapsing = True
subdir_pat = None
for subdir in dirs:
subdir = util.bytestring_path(subdir)
# The first directory dictates the pattern for
# the remaining directories.
if not subdir_pat:
match = marker_pat.match(subdir)
if match:
match_group = re.escape(match.group(1))
subdir_pat = re.compile(
b"".join([b"^", match_group, rb"\d"]), re.I
)
else:
start_collapsing = False
break
# Subsequent directories must match the pattern.
elif not subdir_pat.match(subdir):
start_collapsing = False
break
# If all subdirectories match, don't check other
# markers.
if start_collapsing:
break
# Is this directory the first in a flattened multi-disc album?
elif match:
start_collapsing = True
# Set the current pattern to match directories with the same
# prefix as this one, followed by a digit.
collapse_pat = re.compile(
b"".join([b"^", re.escape(match.group(1)), rb"\d"]), re.I
)
break
# If either of the above heuristics indicated that this is the
# beginning of a multi-disc album, initialize the collapsed
# directory and item lists and check the next directory.
if start_collapsing:
# Start collapsing; continue to the next iteration.
collapse_paths = [root]
collapse_items = items
continue
# If it's nonempty, yield it.
if items:
yield [root], items
# Clear out any unfinished collapse.
if collapse_paths and collapse_items:
yield collapse_paths, collapse_items
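# A worked example of the multi-disc collapsing above: given a layout
# such as
#
#     Album/
#         Album Disc 1/  (01.mp3, ...)
#         Album Disc 2/  (01.mp3, ...)
#
# the "Album" directory contains no items, only subdirectories sharing
# a "disc"-style prefix, so the walk yields a single (paths, items)
# pair covering both subdirectories instead of two separate albums.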
beetbox-beets-c1877b7/beets/library/ 0000775 0000000 0000000 00000000000 15073551743 0017366 5 ustar 00root root 0000000 0000000 beetbox-beets-c1877b7/beets/library/__init__.py 0000664 0000000 0000000 00000001340 15073551743 0021475 0 ustar 00root root 0000000 0000000 from beets.util import deprecate_imports
from .exceptions import FileOperationError, ReadError, WriteError
from .library import Library
from .models import Album, Item, LibModel
from .queries import parse_query_parts, parse_query_string
NEW_MODULE_BY_NAME = dict.fromkeys(
("DateType", "DurationType", "MusicalKey", "PathType"), "beets.dbcore.types"
) | dict.fromkeys(
("BLOB_TYPE", "SingletonQuery", "PathQuery"), "beets.dbcore.query"
)
def __getattr__(name: str):
return deprecate_imports(__name__, NEW_MODULE_BY_NAME, name, "3.0.0")
__all__ = [
"Library",
"LibModel",
"Album",
"Item",
"parse_query_parts",
"parse_query_string",
"FileOperationError",
"ReadError",
"WriteError",
]
beetbox-beets-c1877b7/beets/library/exceptions.py 0000664 0000000 0000000 00000002105 15073551743 0022117 0 ustar 00root root 0000000 0000000 from beets import util
class FileOperationError(Exception):
"""Indicate an error when interacting with a file on disk.
Possibilities include an unsupported media type, a permissions
error, and an unhandled Mutagen exception.
"""
def __init__(self, path, reason):
"""Create an exception describing an operation on the file at
`path` with the underlying (chained) exception `reason`.
"""
super().__init__(path, reason)
self.path = path
self.reason = reason
def __str__(self):
"""Get a string representing the error.
Describe both the underlying reason and the file path in question.
"""
return f"{util.displayable_path(self.path)}: {self.reason}"
class ReadError(FileOperationError):
"""An error while reading a file (i.e. in `Item.read`)."""
def __str__(self):
        return f"error reading {super().__str__()}"
class WriteError(FileOperationError):
"""An error while writing a file (i.e. in `Item.write`)."""
def __str__(self):
        return f"error writing {super().__str__()}"
beetbox-beets-c1877b7/beets/library/library.py 0000664 0000000 0000000 00000011070 15073551743 0021403 0 ustar 00root root 0000000 0000000 from __future__ import annotations
from typing import TYPE_CHECKING
import platformdirs
import beets
from beets import dbcore
from beets.util import normpath
from .models import Album, Item
from .queries import PF_KEY_DEFAULT, parse_query_parts, parse_query_string
if TYPE_CHECKING:
from beets.dbcore import Results
class Library(dbcore.Database):
"""A database of music containing songs and albums."""
_models = (Item, Album)
def __init__(
self,
path="library.blb",
directory: str | None = None,
path_formats=((PF_KEY_DEFAULT, "$artist/$album/$track $title"),),
replacements=None,
):
timeout = beets.config["timeout"].as_number()
super().__init__(path, timeout=timeout)
self.directory = normpath(directory or platformdirs.user_music_path())
self.path_formats = path_formats
self.replacements = replacements
# Used for template substitution performance.
self._memotable: dict[tuple[str, ...], str] = {}
# Adding objects to the database.
def add(self, obj):
"""Add the :class:`Item` or :class:`Album` object to the library
database.
Return the object's new id.
"""
obj.add(self)
self._memotable = {}
return obj.id
def add_album(self, items):
"""Create a new album consisting of a list of items.
The items are added to the database if they don't yet have an
        ID. Return a new :class:`Album` object. The list of items must
        not be empty.
"""
if not items:
raise ValueError("need at least one item")
# Create the album structure using metadata from the first item.
values = {key: items[0][key] for key in Album.item_keys}
album = Album(self, **values)
# Add the album structure and set the items' album_id fields.
# Store or add the items.
with self.transaction():
album.add(self)
for item in items:
item.album_id = album.id
if item.id is None:
item.add(self)
else:
item.store()
return album
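    # A minimal usage sketch (the paths are hypothetical):
    #
    #     lib = Library("library.db", directory="/music")
    #     items = [Item.from_path(b"/incoming/01.mp3"),
    #              Item.from_path(b"/incoming/02.mp3")]
    #     album = lib.add_album(items)  # items are stored and linked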
# Querying.
def _fetch(self, model_cls, query, sort=None):
"""Parse a query and fetch.
If an order specification is present in the query string
the `sort` argument is ignored.
"""
# Parse the query, if necessary.
try:
parsed_sort = None
if isinstance(query, str):
query, parsed_sort = parse_query_string(query, model_cls)
elif isinstance(query, (list, tuple)):
query, parsed_sort = parse_query_parts(query, model_cls)
except dbcore.query.InvalidQueryArgumentValueError as exc:
raise dbcore.InvalidQueryError(query, exc)
# Any non-null sort specified by the parsed query overrides the
# provided sort.
if parsed_sort and not isinstance(parsed_sort, dbcore.query.NullSort):
sort = parsed_sort
return super()._fetch(model_cls, query, sort)
@staticmethod
def get_default_album_sort():
"""Get a :class:`Sort` object for albums from the config option."""
return dbcore.sort_from_strings(
Album, beets.config["sort_album"].as_str_seq()
)
@staticmethod
def get_default_item_sort():
"""Get a :class:`Sort` object for items from the config option."""
return dbcore.sort_from_strings(
Item, beets.config["sort_item"].as_str_seq()
)
def albums(self, query=None, sort=None) -> Results[Album]:
"""Get :class:`Album` objects matching the query."""
return self._fetch(Album, query, sort or self.get_default_album_sort())
def items(self, query=None, sort=None) -> Results[Item]:
"""Get :class:`Item` objects matching the query."""
return self._fetch(Item, query, sort or self.get_default_item_sort())
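    # Both accessors accept beets query strings, for example:
    #
    #     lib.items("artist:Beatles year:1969")
    #     lib.albums("albumartist:Beatles")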
# Convenience accessors.
    def get_item(self, id):
        """Fetch an :class:`Item` by its ID.
Return `None` if no match is found.
"""
return self._get(Item, id)
def get_album(self, item_or_id):
"""Given an album ID or an item associated with an album, return
        an :class:`Album` object for the album.
If no such album exists, return `None`.
"""
if isinstance(item_or_id, int):
album_id = item_or_id
else:
album_id = item_or_id.album_id
if album_id is None:
return None
return self._get(Album, album_id)
beetbox-beets-c1877b7/beets/library/models.py 0000664 0000000 0000000 00000145621 15073551743 0021234 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import os
import string
import sys
import time
import unicodedata
from functools import cached_property
from pathlib import Path
from typing import TYPE_CHECKING
from mediafile import MediaFile, UnreadableFileError
import beets
from beets import dbcore, logging, plugins, util
from beets.dbcore import types
from beets.util import (
MoveOperation,
bytestring_path,
cached_classproperty,
normpath,
samefile,
syspath,
)
from beets.util.functemplate import Template, template
from .exceptions import FileOperationError, ReadError, WriteError
from .queries import PF_KEY_DEFAULT, parse_query_string
if TYPE_CHECKING:
from ..dbcore.query import FieldQuery, FieldQueryType
from .library import Library # noqa: F401
log = logging.getLogger("beets")
class LibModel(dbcore.Model["Library"]):
"""Shared concrete functionality for Items and Albums."""
# Config key that specifies how an instance should be formatted.
_format_config_key: str
path: bytes
@cached_classproperty
def _types(cls) -> dict[str, types.Type]:
"""Return the types of the fields in this model."""
return {
**plugins.types(cls), # type: ignore[arg-type]
"data_source": types.STRING,
}
@cached_classproperty
def _queries(cls) -> dict[str, FieldQueryType]:
return plugins.named_queries(cls) # type: ignore[arg-type]
@cached_classproperty
def writable_media_fields(cls) -> set[str]:
return set(MediaFile.fields()) & cls._fields.keys()
@property
def filepath(self) -> Path:
"""The path to the entity as pathlib.Path."""
return Path(os.fsdecode(self.path))
def _template_funcs(self):
funcs = DefaultTemplateFunctions(self, self._db).functions()
funcs.update(plugins.template_funcs())
return funcs
def store(self, fields=None):
super().store(fields)
plugins.send("database_change", lib=self._db, model=self)
def remove(self):
super().remove()
plugins.send("database_change", lib=self._db, model=self)
def add(self, lib=None):
# super().add() calls self.store(), which sends `database_change`,
# so don't do it here
super().add(lib)
def __format__(self, spec):
if not spec:
spec = beets.config[self._format_config_key].as_str()
assert isinstance(spec, str)
return self.evaluate_template(spec)
def __str__(self):
return format(self)
def __bytes__(self):
return self.__str__().encode("utf-8")
# Convenient queries.
@classmethod
def field_query(
cls, field: str, pattern: str, query_cls: FieldQueryType
) -> FieldQuery:
"""Get a `FieldQuery` for the given field on this model."""
fast = field in cls.all_db_fields
if field in cls.shared_db_fields:
# This field exists in both tables, so SQLite will encounter
# an OperationalError if we try to use it in a query.
# Using an explicit table name resolves this.
field = f"{cls._table}.{field}"
return query_cls(field, pattern, fast)
@classmethod
def any_field_query(cls, *args, **kwargs) -> dbcore.OrQuery:
return dbcore.OrQuery(
[cls.field_query(f, *args, **kwargs) for f in cls._search_fields]
)
@classmethod
def any_writable_media_field_query(cls, *args, **kwargs) -> dbcore.OrQuery:
fields = cls.writable_media_fields
return dbcore.OrQuery(
[cls.field_query(f, *args, **kwargs) for f in fields]
)
def duplicates_query(self, fields: list[str]) -> dbcore.AndQuery:
"""Return a query for entities with same values in the given fields."""
return dbcore.AndQuery(
[
self.field_query(f, self.get(f), dbcore.MatchQuery)
for f in fields
]
)
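    # For example (a sketch): album.duplicates_query(["albumartist",
    # "album"]) builds an AndQuery of MatchQuery instances, and since
    # "album" exists in both tables it is qualified as "albums.album"
    # to avoid an ambiguous-column error in the joined query.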
class FormattedItemMapping(dbcore.db.FormattedMapping):
"""Add lookup for album-level fields.
Album-level fields take precedence if `for_path` is true.
"""
ALL_KEYS = "*"
def __init__(self, item, included_keys=ALL_KEYS, for_path=False):
# We treat album and item keys specially here,
# so exclude transitive album keys from the model's keys.
super().__init__(item, included_keys=[], for_path=for_path)
self.included_keys = included_keys
if included_keys == self.ALL_KEYS:
# Performance note: this triggers a database query.
self.model_keys = item.keys(computed=True, with_album=False)
else:
self.model_keys = included_keys
self.item = item
@cached_property
def all_keys(self):
return set(self.model_keys).union(self.album_keys)
@cached_property
def album_keys(self):
album_keys = []
if self.album:
if self.included_keys == self.ALL_KEYS:
# Performance note: this triggers a database query.
for key in self.album.keys(computed=True):
if (
key in Album.item_keys
or key not in self.item._fields.keys()
):
album_keys.append(key)
else:
album_keys = self.included_keys
return album_keys
@property
def album(self):
return self.item._cached_album
def _get(self, key):
"""Get the value for a key, either from the album or the item.
Raise a KeyError for invalid keys.
"""
if self.for_path and key in self.album_keys:
return self._get_formatted(self.album, key)
elif key in self.model_keys:
return self._get_formatted(self.model, key)
elif key in self.album_keys:
return self._get_formatted(self.album, key)
else:
raise KeyError(key)
def __getitem__(self, key):
"""Get the value for a key.
`artist` and `albumartist` are fallback values for each other
when not set.
"""
value = self._get(key)
# `artist` and `albumartist` fields fall back to one another.
# This is helpful in path formats when the album artist is unset
# on as-is imports.
try:
if key == "artist" and not value:
return self._get("albumartist")
elif key == "albumartist" and not value:
return self._get("artist")
except KeyError:
pass
return value
def __iter__(self):
return iter(self.all_keys)
def __len__(self):
return len(self.all_keys)
class Album(LibModel):
"""Provide access to information about albums stored in a
library.
Reflects the library's "albums" table, including album art.
"""
artpath: bytes
_table = "albums"
_flex_table = "album_attributes"
_always_dirty = True
_fields = {
"id": types.PRIMARY_ID,
"artpath": types.NullPathType(),
"added": types.DATE,
"albumartist": types.STRING,
"albumartist_sort": types.STRING,
"albumartist_credit": types.STRING,
"albumartists": types.MULTI_VALUE_DSV,
"albumartists_sort": types.MULTI_VALUE_DSV,
"albumartists_credit": types.MULTI_VALUE_DSV,
"album": types.STRING,
"genre": types.STRING,
"style": types.STRING,
"discogs_albumid": types.INTEGER,
"discogs_artistid": types.INTEGER,
"discogs_labelid": types.INTEGER,
"year": types.PaddedInt(4),
"month": types.PaddedInt(2),
"day": types.PaddedInt(2),
"disctotal": types.PaddedInt(2),
"comp": types.BOOLEAN,
"mb_albumid": types.STRING,
"mb_albumartistid": types.STRING,
"mb_albumartistids": types.MULTI_VALUE_DSV,
"albumtype": types.STRING,
"albumtypes": types.SEMICOLON_SPACE_DSV,
"label": types.STRING,
"barcode": types.STRING,
"mb_releasegroupid": types.STRING,
"release_group_title": types.STRING,
"asin": types.STRING,
"catalognum": types.STRING,
"script": types.STRING,
"language": types.STRING,
"country": types.STRING,
"albumstatus": types.STRING,
"albumdisambig": types.STRING,
"releasegroupdisambig": types.STRING,
"rg_album_gain": types.NULL_FLOAT,
"rg_album_peak": types.NULL_FLOAT,
"r128_album_gain": types.NULL_FLOAT,
"original_year": types.PaddedInt(4),
"original_month": types.PaddedInt(2),
"original_day": types.PaddedInt(2),
}
_search_fields = ("album", "albumartist", "genre")
@cached_classproperty
def _types(cls) -> dict[str, types.Type]:
return {**super()._types, "path": types.PathType()}
_sorts = {
"albumartist": dbcore.query.SmartArtistSort,
"artist": dbcore.query.SmartArtistSort,
}
# List of keys that are set on an album's items.
item_keys = [
"added",
"albumartist",
"albumartists",
"albumartist_sort",
"albumartists_sort",
"albumartist_credit",
"albumartists_credit",
"album",
"genre",
"style",
"discogs_albumid",
"discogs_artistid",
"discogs_labelid",
"year",
"month",
"day",
"disctotal",
"comp",
"mb_albumid",
"mb_albumartistid",
"mb_albumartistids",
"albumtype",
"albumtypes",
"label",
"barcode",
"mb_releasegroupid",
"asin",
"catalognum",
"script",
"language",
"country",
"albumstatus",
"albumdisambig",
"releasegroupdisambig",
"release_group_title",
"rg_album_gain",
"rg_album_peak",
"r128_album_gain",
"original_year",
"original_month",
"original_day",
]
_format_config_key = "format_album"
@cached_classproperty
def _relation(cls) -> type[Item]:
return Item
@cached_classproperty
def relation_join(cls) -> str:
"""Return FROM clause which joins on related album items.
Use LEFT join to select all albums, including those that do not have
any items.
"""
return (
f"LEFT JOIN {cls._relation._table} "
f"ON {cls._table}.id = {cls._relation._table}.album_id"
)
@property
def art_filepath(self) -> Path | None:
"""The path to album's cover picture as pathlib.Path."""
return Path(os.fsdecode(self.artpath)) if self.artpath else None
@classmethod
def _getters(cls):
# In addition to plugin-provided computed fields, also expose
# the album's directory as `path`.
getters = plugins.album_field_getters()
getters["path"] = Album.item_dir
getters["albumtotal"] = Album._albumtotal
return getters
def items(self):
"""Return an iterable over the items associated with this
album.
This method conflicts with :meth:`LibModel.items`, which is
inherited from :meth:`beets.dbcore.Model.items`.
Since :meth:`Album.items` predates these methods, and is
likely to be used by plugins, we keep this interface as-is.
"""
return self._db.items(dbcore.MatchQuery("album_id", self.id))
def remove(self, delete=False, with_items=True):
"""Remove this album and all its associated items from the
library.
If delete, then the items' files are also deleted from disk,
along with any album art. The directories containing the album are
also removed (recursively) if empty.
Set with_items to False to avoid removing the album's items.
"""
super().remove()
# Send a 'album_removed' signal to plugins
plugins.send("album_removed", album=self)
# Delete art file.
if delete:
artpath = self.artpath
if artpath:
util.remove(artpath)
# Remove (and possibly delete) the constituent items.
if with_items:
for item in self.items():
item.remove(delete, False)
def move_art(self, operation=MoveOperation.MOVE):
"""Move, copy, link or hardlink (depending on `operation`) any
existing album art so that it remains in the same directory as
the items.
`operation` should be an instance of `util.MoveOperation`.
"""
old_art = self.artpath
if not old_art:
return
if not os.path.exists(syspath(old_art)):
log.error(
"removing reference to missing album art file {}",
util.displayable_path(old_art),
)
self.artpath = None
return
new_art = self.art_destination(old_art)
if new_art == old_art:
return
new_art = util.unique_path(new_art)
log.debug(
"moving album art {} to {}",
util.displayable_path(old_art),
util.displayable_path(new_art),
)
if operation == MoveOperation.MOVE:
util.move(old_art, new_art)
util.prune_dirs(os.path.dirname(old_art), self._db.directory)
elif operation == MoveOperation.COPY:
util.copy(old_art, new_art)
elif operation == MoveOperation.LINK:
util.link(old_art, new_art)
elif operation == MoveOperation.HARDLINK:
util.hardlink(old_art, new_art)
elif operation == MoveOperation.REFLINK:
util.reflink(old_art, new_art, fallback=False)
elif operation == MoveOperation.REFLINK_AUTO:
util.reflink(old_art, new_art, fallback=True)
else:
assert False, "unknown MoveOperation"
self.artpath = new_art
def move(self, operation=MoveOperation.MOVE, basedir=None, store=True):
"""Move, copy, link or hardlink (depending on `operation`)
all items to their destination. Any album art moves along with them.
`basedir` overrides the library base directory for the destination.
`operation` should be an instance of `util.MoveOperation`.
By default, the album is stored to the database, persisting any
modifications to its metadata. If `store` is `False` however,
the album is not stored automatically, and it will have to be manually
stored after invoking this method.
"""
basedir = basedir or self._db.directory
# Ensure new metadata is available to items for destination
# computation.
if store:
self.store()
# Move items.
items = list(self.items())
for item in items:
item.move(operation, basedir=basedir, with_album=False, store=store)
# Move art.
self.move_art(operation)
if store:
self.store()
def item_dir(self):
"""Return the directory containing the album's first item,
provided that such an item exists.
"""
item = self.items().get()
if not item:
raise ValueError(f"empty album for album id {self.id}")
return os.path.dirname(item.path)
def _albumtotal(self):
"""Return the total number of tracks on all discs on the album."""
if self.disctotal == 1 or not beets.config["per_disc_numbering"]:
return self.items()[0].tracktotal
counted = []
total = 0
for item in self.items():
if item.disc in counted:
continue
total += item.tracktotal
counted.append(item.disc)
if len(counted) == self.disctotal:
break
return total
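    # Worked example: with per_disc_numbering enabled, disctotal == 2,
    # and tracktotal values of 10 (disc 1) and 12 (disc 2), the total
    # computed above is 22.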
def art_destination(self, image, item_dir=None):
"""Return a path to the destination for the album art image
for the album.
`image` is the path of the image that will be
moved there (used for its extension).
The path construction uses the existing path of the album's
items, so the album must contain at least one item or
item_dir must be provided.
"""
image = bytestring_path(image)
item_dir = item_dir or self.item_dir()
filename_tmpl = template(beets.config["art_filename"].as_str())
subpath = self.evaluate_template(filename_tmpl, True)
if beets.config["asciify_paths"]:
subpath = util.asciify_path(
subpath, beets.config["path_sep_replace"].as_str()
)
subpath = util.sanitize_path(
subpath, replacements=self._db.replacements
)
subpath = bytestring_path(subpath)
_, ext = os.path.splitext(image)
dest = os.path.join(item_dir, subpath + ext)
return bytestring_path(dest)
def set_art(self, path, copy=True):
"""Set the album's cover art to the image at the given path.
The image is copied (or moved) into place, replacing any
existing art.
Send an 'art_set' event with `self` as the sole argument.
"""
path = bytestring_path(path)
oldart = self.artpath
artdest = self.art_destination(path)
if oldart and samefile(path, oldart):
# Art already set.
return
elif samefile(path, artdest):
# Art already in place.
self.artpath = path
return
# Normal operation.
if oldart == artdest:
util.remove(oldart)
artdest = util.unique_path(artdest)
if copy:
util.copy(path, artdest)
else:
util.move(path, artdest)
self.artpath = artdest
plugins.send("art_set", album=self)
def store(self, fields=None, inherit=True):
"""Update the database with the album information.
`fields` represents the fields to be stored. If not specified,
all fields will be.
The album's tracks are also updated when the `inherit` flag is enabled.
This applies to fixed attributes as well as flexible ones. The `id`
attribute of the album will never be inherited.
"""
# Get modified track fields.
track_updates = {}
track_deletes = set()
for key in self._dirty:
if inherit:
if key in self.item_keys: # is a fixed attribute
track_updates[key] = self[key]
elif key not in self: # is a fixed or a flexible attribute
track_deletes.add(key)
elif key != "id": # is a flexible attribute
track_updates[key] = self[key]
with self._db.transaction():
super().store(fields)
if track_updates:
for item in self.items():
for key, value in track_updates.items():
item[key] = value
item.store()
if track_deletes:
for item in self.items():
for key in track_deletes:
if key in item:
del item[key]
item.store()
def try_sync(self, write, move, inherit=True):
"""Synchronize the album and its items with the database.
Optionally, also write any new tags into the files and update
their paths.
`write` indicates whether to write tags to the item files, and
`move` controls whether files (both audio and album art) are
moved.
"""
self.store(inherit=inherit)
for item in self.items():
item.try_sync(write, move)
class Item(LibModel):
"""Represent a song or track."""
_table = "items"
_flex_table = "item_attributes"
_fields = {
"id": types.PRIMARY_ID,
"path": types.PathType(),
"album_id": types.FOREIGN_ID,
"title": types.STRING,
"artist": types.STRING,
"artists": types.MULTI_VALUE_DSV,
"artists_ids": types.MULTI_VALUE_DSV,
"artist_sort": types.STRING,
"artists_sort": types.MULTI_VALUE_DSV,
"artist_credit": types.STRING,
"artists_credit": types.MULTI_VALUE_DSV,
"remixer": types.STRING,
"album": types.STRING,
"albumartist": types.STRING,
"albumartists": types.MULTI_VALUE_DSV,
"albumartist_sort": types.STRING,
"albumartists_sort": types.MULTI_VALUE_DSV,
"albumartist_credit": types.STRING,
"albumartists_credit": types.MULTI_VALUE_DSV,
"genre": types.STRING,
"style": types.STRING,
"discogs_albumid": types.INTEGER,
"discogs_artistid": types.INTEGER,
"discogs_labelid": types.INTEGER,
"lyricist": types.STRING,
"composer": types.STRING,
"composer_sort": types.STRING,
"work": types.STRING,
"mb_workid": types.STRING,
"work_disambig": types.STRING,
"arranger": types.STRING,
"grouping": types.STRING,
"year": types.PaddedInt(4),
"month": types.PaddedInt(2),
"day": types.PaddedInt(2),
"track": types.PaddedInt(2),
"tracktotal": types.PaddedInt(2),
"disc": types.PaddedInt(2),
"disctotal": types.PaddedInt(2),
"lyrics": types.STRING,
"comments": types.STRING,
"bpm": types.INTEGER,
"comp": types.BOOLEAN,
"mb_trackid": types.STRING,
"mb_albumid": types.STRING,
"mb_artistid": types.STRING,
"mb_artistids": types.MULTI_VALUE_DSV,
"mb_albumartistid": types.STRING,
"mb_albumartistids": types.MULTI_VALUE_DSV,
"mb_releasetrackid": types.STRING,
"trackdisambig": types.STRING,
"albumtype": types.STRING,
"albumtypes": types.SEMICOLON_SPACE_DSV,
"label": types.STRING,
"barcode": types.STRING,
"acoustid_fingerprint": types.STRING,
"acoustid_id": types.STRING,
"mb_releasegroupid": types.STRING,
"release_group_title": types.STRING,
"asin": types.STRING,
"isrc": types.STRING,
"catalognum": types.STRING,
"script": types.STRING,
"language": types.STRING,
"country": types.STRING,
"albumstatus": types.STRING,
"media": types.STRING,
"albumdisambig": types.STRING,
"releasegroupdisambig": types.STRING,
"disctitle": types.STRING,
"encoder": types.STRING,
"rg_track_gain": types.NULL_FLOAT,
"rg_track_peak": types.NULL_FLOAT,
"rg_album_gain": types.NULL_FLOAT,
"rg_album_peak": types.NULL_FLOAT,
"r128_track_gain": types.NULL_FLOAT,
"r128_album_gain": types.NULL_FLOAT,
"original_year": types.PaddedInt(4),
"original_month": types.PaddedInt(2),
"original_day": types.PaddedInt(2),
"initial_key": types.MusicalKey(),
"length": types.DurationType(),
"bitrate": types.ScaledInt(1000, "kbps"),
"bitrate_mode": types.STRING,
"encoder_info": types.STRING,
"encoder_settings": types.STRING,
"format": types.STRING,
"samplerate": types.ScaledInt(1000, "kHz"),
"bitdepth": types.INTEGER,
"channels": types.INTEGER,
"mtime": types.DATE,
"added": types.DATE,
}
_search_fields = (
"artist",
"title",
"comments",
"album",
"albumartist",
"genre",
)
# Set of item fields that are backed by `MediaFile` fields.
# Any kind of field (fixed, flexible, and computed) may be a media
# field. Only these fields are read from disk in `read` and written in
# `write`.
_media_fields = set(MediaFile.readable_fields()).intersection(
_fields.keys()
)
# Set of item fields that are backed by *writable* `MediaFile` tag
# fields.
# This excludes fields that represent audio data, such as `bitrate` or
# `length`.
_media_tag_fields = set(MediaFile.fields()).intersection(_fields.keys())
_formatter = FormattedItemMapping
_sorts = {"artist": dbcore.query.SmartArtistSort}
@cached_classproperty
def _queries(cls) -> dict[str, FieldQueryType]:
return {**super()._queries, "singleton": dbcore.query.SingletonQuery}
_format_config_key = "format_item"
# Cached album object. Read-only.
__album: Album | None = None
@cached_classproperty
def _relation(cls) -> type[Album]:
return Album
@cached_classproperty
def relation_join(cls) -> str:
"""Return the FROM clause which includes related albums.
We need to use a LEFT JOIN here, otherwise items that are not part of
an album (e.g. singletons) would be left out.
"""
return (
f"LEFT JOIN {cls._relation._table} "
f"ON {cls._table}.album_id = {cls._relation._table}.id"
)
@property
def _cached_album(self):
"""The Album object that this item belongs to, if any, or
None if the item is a singleton or is not associated with a
library.
The instance is cached and refreshed on access.
DO NOT MODIFY!
If you want a copy to modify, use :meth:`get_album`.
"""
if not self.__album and self._db:
self.__album = self._db.get_album(self)
elif self.__album:
self.__album.load()
return self.__album
@_cached_album.setter
def _cached_album(self, album):
self.__album = album
@classmethod
def _getters(cls):
getters = plugins.item_field_getters()
getters["singleton"] = lambda i: i.album_id is None
getters["filesize"] = Item.try_filesize # In bytes.
return getters
def duplicates_query(self, fields: list[str]) -> dbcore.AndQuery:
"""Return a query for entities with same values in the given fields."""
return super().duplicates_query(fields) & dbcore.query.NoneQuery(
"album_id"
)
@classmethod
def from_path(cls, path):
"""Create a new item from the media file at the specified path."""
# Initiate with values that aren't read from files.
i = cls(album_id=None)
i.read(path)
i.mtime = i.current_mtime() # Initial mtime.
return i
def __setitem__(self, key, value):
"""Set the item's value for a standard field or a flexattr."""
# Encode unicode paths and read buffers.
if key == "path":
if isinstance(value, str):
value = bytestring_path(value)
elif isinstance(value, types.BLOB_TYPE):
value = bytes(value)
elif key == "album_id":
self._cached_album = None
changed = super()._setitem(key, value)
if changed and key in MediaFile.fields():
self.mtime = 0 # Reset mtime on dirty.
def __getitem__(self, key):
"""Get the value for a field, falling back to the album if
necessary.
Raise a KeyError if the field is not available.
"""
try:
return super().__getitem__(key)
except KeyError:
if self._cached_album:
return self._cached_album[key]
raise
def __repr__(self):
# This must not use `with_album=True`, because that might access
# the database. When debugging, that is not guaranteed to succeed, and
# can even deadlock due to the database lock.
return (
f"{type(self).__name__}"
f"({', '.join(f'{k}={self[k]!r}' for k in self.keys(with_album=False))})"
)
def keys(self, computed=False, with_album=True):
"""Get a list of available field names.
`with_album` controls whether the album's fields are included.
"""
keys = super().keys(computed=computed)
if with_album and self._cached_album:
keys = set(keys)
keys.update(self._cached_album.keys(computed=computed))
keys = list(keys)
return keys
def get(self, key, default=None, with_album=True):
"""Get the value for a given key or `default` if it does not
exist.
Set `with_album` to false to skip album fallback.
"""
try:
return self._get(key, default, raise_=with_album)
except KeyError:
if self._cached_album:
return self._cached_album.get(key, default)
return default
def update(self, values):
"""Set all key/value pairs in the mapping.
If mtime is specified, it is not reset (as it might otherwise be).
"""
super().update(values)
if self.mtime == 0 and "mtime" in values:
self.mtime = values["mtime"]
    def clear(self):
        """Set all writable media tag fields to None."""
for key in self._media_tag_fields:
setattr(self, key, None)
def get_album(self):
"""Get the Album object that this item belongs to, if any, or
None if the item is a singleton or is not associated with a
library.
"""
if not self._db:
return None
return self._db.get_album(self)
# Interaction with file metadata.
def read(self, read_path=None):
"""Read the metadata from the associated file.
If `read_path` is specified, read metadata from that file
instead. Update all the properties in `_media_fields`
from the media file.
Raise a `ReadError` if the file could not be read.
"""
if read_path is None:
read_path = self.path
else:
read_path = normpath(read_path)
try:
mediafile = MediaFile(syspath(read_path))
except UnreadableFileError as exc:
raise ReadError(read_path, exc)
for key in self._media_fields:
value = getattr(mediafile, key)
if isinstance(value, int):
if value.bit_length() > 63:
value = 0
self[key] = value
# Database's mtime should now reflect the on-disk value.
if read_path == self.path:
self.mtime = self.current_mtime()
self.path = read_path
def write(self, path=None, tags=None, id3v23=None):
"""Write the item's metadata to a media file.
All fields in `_media_fields` are written to disk according to
the values on this object.
`path` is the path of the mediafile to write the data to. It
defaults to the item's path.
        `tags` is a dictionary of additional metadata that should be
written to the file. (These tags need not be in `_media_fields`.)
`id3v23` will override the global `id3v23` config option if it is
set to something other than `None`.
Can raise either a `ReadError` or a `WriteError`.
"""
if path is None:
path = self.path
else:
path = normpath(path)
if id3v23 is None:
id3v23 = beets.config["id3v23"].get(bool)
# Get the data to write to the file.
item_tags = dict(self)
item_tags = {
k: v for k, v in item_tags.items() if k in self._media_fields
} # Only write media fields.
if tags is not None:
item_tags.update(tags)
plugins.send("write", item=self, path=path, tags=item_tags)
# Open the file.
try:
mediafile = MediaFile(syspath(path), id3v23=id3v23)
except UnreadableFileError as exc:
raise ReadError(path, exc)
# Write the tags to the file.
mediafile.update(item_tags)
try:
mediafile.save()
except UnreadableFileError as exc:
raise WriteError(self.path, exc)
# The file has a new mtime.
if path == self.path:
self.mtime = self.current_mtime()
plugins.send("after_write", item=self, path=path)
def try_write(self, *args, **kwargs):
"""Call `write()` but catch and log `FileOperationError`
exceptions.
        Return `False` if an exception was caught and `True` otherwise.
"""
try:
self.write(*args, **kwargs)
return True
except FileOperationError as exc:
log.error("{}", exc)
return False
def try_sync(self, write, move, with_album=True):
"""Synchronize the item with the database and, possibly, update its
tags on disk and its path (by moving the file).
`write` indicates whether to write new tags into the file. Similarly,
`move` controls whether the path should be updated. In the
latter case, files are *only* moved when they are inside their
library's directory (if any).
Similar to calling :meth:`write`, :meth:`move`, and :meth:`store`
(conditionally).
"""
if write:
self.try_write()
if move:
# Check whether this file is inside the library directory.
if self._db and self._db.directory in util.ancestry(self.path):
log.debug("moving {.filepath} to synchronize path", self)
self.move(with_album=with_album)
self.store()
# Files themselves.
def move_file(self, dest, operation=MoveOperation.MOVE):
"""Move, copy, link or hardlink the item depending on `operation`,
updating the path value if the move succeeds.
If a file exists at `dest`, then it is slightly modified to be unique.
`operation` should be an instance of `util.MoveOperation`.
"""
if not util.samefile(self.path, dest):
dest = util.unique_path(dest)
if operation == MoveOperation.MOVE:
plugins.send(
"before_item_moved",
item=self,
source=self.path,
destination=dest,
)
util.move(self.path, dest)
plugins.send(
"item_moved", item=self, source=self.path, destination=dest
)
elif operation == MoveOperation.COPY:
util.copy(self.path, dest)
plugins.send(
"item_copied", item=self, source=self.path, destination=dest
)
elif operation == MoveOperation.LINK:
util.link(self.path, dest)
plugins.send(
"item_linked", item=self, source=self.path, destination=dest
)
elif operation == MoveOperation.HARDLINK:
util.hardlink(self.path, dest)
plugins.send(
"item_hardlinked", item=self, source=self.path, destination=dest
)
elif operation == MoveOperation.REFLINK:
util.reflink(self.path, dest, fallback=False)
plugins.send(
"item_reflinked", item=self, source=self.path, destination=dest
)
elif operation == MoveOperation.REFLINK_AUTO:
util.reflink(self.path, dest, fallback=True)
plugins.send(
"item_reflinked", item=self, source=self.path, destination=dest
)
else:
assert False, "unknown MoveOperation"
# Either copying or moving succeeded, so update the stored path.
self.path = dest
def current_mtime(self):
"""Return the current mtime of the file, rounded to the nearest
integer.
"""
return int(os.path.getmtime(syspath(self.path)))
def try_filesize(self):
"""Get the size of the underlying file in bytes.
If the file is missing, return 0 (and log a warning).
"""
try:
return os.path.getsize(syspath(self.path))
        except Exception as exc:
log.warning("could not get filesize: {}", exc)
return 0
# Model methods.
def remove(self, delete=False, with_album=True):
"""Remove the item.
If `delete`, then the associated file is removed from disk.
If `with_album`, then the item's album (if any) is removed
if the item was the last in the album.
"""
super().remove()
# Remove the album if it is empty.
if with_album:
album = self.get_album()
if album and not album.items():
album.remove(delete, False)
# Send a 'item_removed' signal to plugins
plugins.send("item_removed", item=self)
# Delete the associated file.
if delete:
util.remove(self.path)
util.prune_dirs(os.path.dirname(self.path), self._db.directory)
self._db._memotable = {}
def move(
self,
operation=MoveOperation.MOVE,
basedir=None,
with_album=True,
store=True,
):
"""Move the item to its designated location within the library
directory (provided by destination()).
Subdirectories are created as needed. If the operation succeeds,
the item's path field is updated to reflect the new location.
Instead of moving the item it can also be copied, linked or hardlinked
depending on `operation` which should be an instance of
`util.MoveOperation`.
`basedir` overrides the library base directory for the destination.
If the item is in an album and `with_album` is `True`, the album is
given an opportunity to move its art.
By default, the item is stored to the database if it is in the
database, so any dirty fields prior to the move() call will be written
as a side effect.
If `store` is `False` however, the item won't be stored and it will
have to be manually stored after invoking this method.
"""
self._check_db()
dest = self.destination(basedir=basedir)
# Create necessary ancestry for the move.
util.mkdirall(dest)
# Perform the move and store the change.
old_path = self.path
self.move_file(dest, operation)
if store:
self.store()
# If this item is in an album, move its art.
if with_album:
album = self.get_album()
if album:
album.move_art(operation)
if store:
album.store()
# Prune vacated directory.
if operation == MoveOperation.MOVE:
util.prune_dirs(os.path.dirname(old_path), self._db.directory)
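# Sketch: relocate an item but batch the database write (illustrative):
#
#     item.move(store=False)      # file is moved, item not yet stored
#     item.some_flex_field = "x"  # hypothetical further change
#     item.store()                # one combined database update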
# Templating.
def destination(
self,
relative_to_libdir=False,
basedir=None,
path_formats=None,
) -> bytes:
"""Return the path in the library directory designated for the item
(i.e., where the file ought to be).
The path is returned as a bytestring. ``basedir`` can override the
library's base directory for the destination. If ``relative_to_libdir``
is true, returns just the fragment of the path underneath the library
base directory.
"""
db = self._check_db()
basedir = basedir or db.directory
path_formats = path_formats or db.path_formats
# Use a path format based on a query, falling back on the
# default.
for query, path_format in path_formats:
if query == PF_KEY_DEFAULT:
continue
query, _ = parse_query_string(query, type(self))
if query.match(self):
# The query matches the item! Use the corresponding path
# format.
break
else:
# No query matched; fall back to default.
for query, path_format in path_formats:
if query == PF_KEY_DEFAULT:
break
else:
assert False, "no default path format"
if isinstance(path_format, Template):
subpath_tmpl = path_format
else:
subpath_tmpl = template(path_format)
# Evaluate the selected template.
subpath = self.evaluate_template(subpath_tmpl, True)
# Prepare path for output: normalize Unicode characters.
if sys.platform == "darwin":
subpath = unicodedata.normalize("NFD", subpath)
else:
subpath = unicodedata.normalize("NFC", subpath)
if beets.config["asciify_paths"]:
subpath = util.asciify_path(
subpath, beets.config["path_sep_replace"].as_str()
)
lib_path_str, fallback = util.legalize_path(
subpath, db.replacements, self.filepath.suffix
)
if fallback:
# Print an error message if legalization fell back to
# default replacements because of the maximum length.
log.warning(
"Fell back to default replacements when naming "
"file {}. Configure replacements to avoid lengthening "
"the filename.",
subpath,
)
lib_path_bytes = util.bytestring_path(lib_path_str)
if relative_to_libdir:
return lib_path_bytes
return normpath(os.path.join(basedir, lib_path_bytes))
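# Sketch: where would this item live under the library root? With a path
# format such as `$albumartist/$album/$track $title` (illustrative), this
# yields the corresponding fragment as bytes:
#
#     fragment = item.destination(relative_to_libdir=True)
#     # e.g. b'Artist/Album/04 Title.flac'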
def _int_arg(s):
"""Convert a string argument to an integer for use in a template
function.
May raise a ValueError.
"""
return int(s.strip())
class DefaultTemplateFunctions:
"""A container class for the default functions provided to path
templates.
These functions are contained in an object to provide
additional context to the functions -- specifically, the Item being
evaluated.
"""
_prefix = "tmpl_"
@cached_classproperty
def _func_names(cls) -> list[str]:
"""Names of tmpl_* functions in this class."""
return [s for s in dir(cls) if s.startswith(cls._prefix)]
def __init__(self, item=None, lib=None):
"""Parametrize the functions.
If `item` or `lib` is None, then some functions (namely, ``aunique``)
will always evaluate to the empty string.
"""
self.item = item
self.lib = lib
def functions(self):
"""Return a dictionary containing the functions defined in this
object.
The keys are function names (as exposed in templates)
and the values are Python functions.
"""
out = {}
for key in self._func_names:
out[key[len(self._prefix) :]] = getattr(self, key)
return out
@staticmethod
def tmpl_lower(s):
"""Convert a string to lower case."""
return s.lower()
@staticmethod
def tmpl_upper(s):
"""Convert a string to upper case."""
return s.upper()
@staticmethod
def tmpl_capitalize(s):
"""Converts to a capitalized string."""
return s.capitalize()
@staticmethod
def tmpl_title(s):
"""Convert a string to title case."""
return string.capwords(s)
@staticmethod
def tmpl_left(s, chars):
"""Get the leftmost characters of a string."""
return s[0 : _int_arg(chars)]
@staticmethod
def tmpl_right(s, chars):
"""Get the rightmost characters of a string."""
return s[-_int_arg(chars) :]
@staticmethod
def tmpl_if(condition, trueval, falseval=""):
"""If ``condition`` is nonempty and nonzero, emit ``trueval``;
otherwise, emit ``falseval`` (if provided).
"""
try:
int_condition = _int_arg(condition)
except ValueError:
if condition.lower() == "false":
return falseval
else:
condition = int_condition
if condition:
return trueval
else:
return falseval
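# In path templates this is available as %if{}; e.g. (illustrative):
#
#     %if{$comp,Compilations/$album,$albumartist/$album}
#
# A nonempty, nonzero condition selects the first branch.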
@staticmethod
def tmpl_asciify(s):
"""Translate non-ASCII characters to their ASCII equivalents."""
return util.asciify_path(s, beets.config["path_sep_replace"].as_str())
@staticmethod
def tmpl_time(s, fmt):
"""Format a time value using `strftime`."""
cur_fmt = beets.config["time_format"].as_str()
return time.strftime(fmt, time.strptime(s, cur_fmt))
def tmpl_aunique(self, keys=None, disam=None, bracket=None):
"""Generate a string that is guaranteed to be unique among all
albums in the library that share the same set of keys.
A field from "disam" is used in the string if one is sufficient to
disambiguate the albums. Otherwise, a fallback opaque value is
used. Both "keys" and "disam" should be given as
whitespace-separated lists of field names, while "bracket" is a
pair of characters to be used as brackets surrounding the
disambiguator or empty to have no brackets.
"""
# Fast paths: no album, no item or library, or memoized value.
if not self.item or not self.lib:
return ""
if isinstance(self.item, Item):
album_id = self.item.album_id
elif isinstance(self.item, Album):
album_id = self.item.id
else:
return ""
if album_id is None:
return ""
memokey = self._tmpl_unique_memokey("aunique", keys, disam, album_id)
memoval = self.lib._memotable.get(memokey)
if memoval is not None:
return memoval
album = self.lib.get_album(album_id)
return self._tmpl_unique(
"aunique",
keys,
disam,
bracket,
album_id,
album,
album.item_keys,
# Do nothing for singletons.
lambda a: a is None,
)
def tmpl_sunique(self, keys=None, disam=None, bracket=None):
"""Generate a string that is guaranteed to be unique among all
singletons in the library that share the same set of keys.
A field from "disam" is used in the string if one is sufficient to
disambiguate the singletons. Otherwise, a fallback opaque value is
used. Both "keys" and "disam" should be given as
whitespace-separated lists of field names, while "bracket" is a
pair of characters to be used as brackets surrounding the
disambiguator or empty to have no brackets.
"""
# Fast paths: no item or library.
if not self.item or not self.lib:
return ""
if isinstance(self.item, Item):
item_id = self.item.id
else:
raise NotImplementedError("sunique is only implemented for items")
if item_id is None:
return ""
return self._tmpl_unique(
"sunique",
keys,
disam,
bracket,
item_id,
self.item,
Item.all_keys(),
# Do nothing for non singletons.
lambda i: i.album_id is not None,
)
def _tmpl_unique_memokey(self, name, keys, disam, item_id):
"""Get the memokey for the unique template named "name" for the
specific parameters.
"""
return (name, keys, disam, item_id)
def _tmpl_unique(
self,
name,
keys,
disam,
bracket,
item_id,
db_item,
item_keys,
skip_item,
):
"""Generate a string that is guaranteed to be unique among all items of
the same type as "db_item" that share the same set of keys.
A field from "disam" is used in the string if one is sufficient to
disambiguate the items. Otherwise, a fallback opaque value is
used. Both "keys" and "disam" should be given as
whitespace-separated lists of field names, while "bracket" is a
pair of characters to be used as brackets surrounding the
disambiguator or empty to have no brackets.
"name" is the name of the templates. It is also the name of the
configuration section where the default values of the parameters
are stored.
"skip_item" is a function that must return True when the template
should return an empty string.
"initial_subqueries" is a list of subqueries that should be included
in the query to find the ambiguous items.
"""
memokey = self._tmpl_unique_memokey(name, keys, disam, item_id)
memoval = self.lib._memotable.get(memokey)
if memoval is not None:
return memoval
if skip_item(db_item):
self.lib._memotable[memokey] = ""
return ""
keys = keys or beets.config[name]["keys"].as_str()
disam = disam or beets.config[name]["disambiguators"].as_str()
if bracket is None:
bracket = beets.config[name]["bracket"].as_str()
keys = keys.split()
disam = disam.split()
# Assign a left and right bracket or leave blank if argument is empty.
if len(bracket) == 2:
bracket_l = bracket[0]
bracket_r = bracket[1]
else:
bracket_l = ""
bracket_r = ""
# Find matching items to disambiguate with.
query = db_item.duplicates_query(keys)
ambiguous_items = (
self.lib.items(query)
if isinstance(db_item, Item)
else self.lib.albums(query)
)
# If there's only one item matching these details, then do
# nothing.
if len(ambiguous_items) == 1:
self.lib._memotable[memokey] = ""
return ""
# Find the first disambiguator that distinguishes the items.
for disambiguator in disam:
# Get the value for each item for the current field.
disam_values = {s.get(disambiguator, "") for s in ambiguous_items}
# If the set of unique values is equal to the number of
# items in the disambiguation set, we're done -- this is
# sufficient disambiguation.
if len(disam_values) == len(ambiguous_items):
break
break
else:
# No disambiguator distinguished all fields.
res = f" {bracket_l}{item_id}{bracket_r}"
self.lib._memotable[memokey] = res
return res
# Flatten disambiguation value into a string.
disam_value = db_item.formatted(for_path=True).get(disambiguator)
# Return empty string if disambiguator is empty.
if disam_value:
res = f" {bracket_l}{disam_value}{bracket_r}"
else:
res = ""
self.lib._memotable[memokey] = res
return res
@staticmethod
def tmpl_first(s, count=1, skip=0, sep="; ", join_str="; "):
Get the first `count` items, after skipping `skip` of them, from a
string of items separated by `sep`, and join them with `join_str`.
Args:
s: the string
count: The number of items included
skip: The number of items skipped
sep: the separator. Usually is '; ' (default) or '/ '
join_str: the string which will join the items, default '; '.
"""
skip = int(skip)
count = skip + int(count)
return join_str.join(s.split(sep)[skip:count])
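# Illustrative template calls: with $genres set to "Rock; Pop; Folk",
# %first{$genres} yields "Rock" and %first{$genres,2,1} yields "Pop; Folk".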
def tmpl_ifdef(self, field, trueval="", falseval=""):
If `field` exists, return `trueval` (or the field's own value by
default); otherwise, return `falseval` (if provided).
Args:
field: The name of the field
trueval: The string if the condition is true
falseval: The string if the condition is false
Returns:
The string, based on condition.
"""
if field in self.item:
return trueval if trueval else self.item.formatted().get(field)
else:
return falseval
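# Illustrative: %ifdef{barcode,has barcode} emits "has barcode" only when the
# item defines a `barcode` field; %ifdef{barcode} emits the field's value.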
beetbox-beets-c1877b7/beets/library/queries.py 0000664 0000000 0000000 00000003262 15073551743 0021420 0 ustar 00root root 0000000 0000000 from __future__ import annotations
import shlex
import beets
from beets import dbcore, logging, plugins
log = logging.getLogger("beets")
# Special path format key.
PF_KEY_DEFAULT = "default"
# Query construction helpers.
def parse_query_parts(parts, model_cls):
"""Given a beets query string as a list of components, return the
`Query` and `Sort` they represent.
Like `dbcore.parse_sorted_query`, with beets query prefixes and
ensuring that implicit path queries are made explicit with 'path::'
"""
# Get query types and their prefix characters.
prefixes = {
":": dbcore.query.RegexpQuery,
"=~": dbcore.query.StringQuery,
"=": dbcore.query.MatchQuery,
}
prefixes.update(plugins.queries())
# Special-case path-like queries, which are non-field queries
# containing path separators (/).
parts = [
f"path:{s}" if dbcore.query.PathQuery.is_path_query(s) else s
for s in parts
]
case_insensitive = beets.config["sort_case_insensitive"].get(bool)
query, sort = dbcore.parse_sorted_query(
model_cls, parts, prefixes, case_insensitive
)
log.debug("Parsed query: {!r}", query)
log.debug("Parsed sort: {!r}", sort)
return query, sort
def parse_query_string(s, model_cls):
"""Given a beets query string, return the `Query` and `Sort` they
represent.
The string is split into components using shell-like syntax.
"""
message = f"Query is not unicode: {s!r}"
assert isinstance(s, str), message
try:
parts = shlex.split(s)
except ValueError as exc:
raise dbcore.InvalidQueryError(s, exc)
return parse_query_parts(parts, model_cls)
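# A minimal sketch (assumes the Item model class from beets.library):
#
#     from beets.library import Item
#     query, sort = parse_query_string("artist:beatles year:1967", Item)
#     # `query.match(item)` can then test individual items in memory.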
beetbox-beets-c1877b7/beets/logging.py 0000664 0000000 0000000 00000012776 15073551743 0017737 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A drop-in replacement for the standard-library `logging` module.
Provides everything the "logging" module does. In addition, beets' logger
(as obtained by `getLogger(name)`) supports thread-local levels, and messages
use {}-style formatting and can interpolate keyword arguments to the logging
calls (`debug`, `info`, etc).
"""
from __future__ import annotations
import threading
from copy import copy
from logging import (
DEBUG,
INFO,
NOTSET,
WARNING,
FileHandler,
Filter,
Handler,
Logger,
NullHandler,
RootLogger,
StreamHandler,
)
from typing import TYPE_CHECKING, Any, Mapping, TypeVar, Union, overload
__all__ = [
"DEBUG",
"INFO",
"NOTSET",
"WARNING",
"FileHandler",
"Filter",
"Handler",
"Logger",
"NullHandler",
"StreamHandler",
"getLogger",
]
if TYPE_CHECKING:
T = TypeVar("T")
from types import TracebackType
# see https://github.com/python/typeshed/blob/main/stdlib/logging/__init__.pyi
_SysExcInfoType = Union[
tuple[type[BaseException], BaseException, Union[TracebackType, None]],
tuple[None, None, None],
]
_ExcInfoType = Union[None, bool, _SysExcInfoType, BaseException]
_ArgsType = Union[tuple[object, ...], Mapping[str, object]]
def _logsafe(val: T) -> str | T:
"""Coerce `bytes` to `str` to avoid crashes solely due to logging.
This is particularly relevant for bytestring paths. Much of our code
explicitly uses `displayable_path` for them, but better be safe and prevent
any crashes that are solely due to log formatting.
"""
# Bytestring: Needs decoding to be safe for substitution in format strings.
if isinstance(val, bytes):
# Blindly convert with UTF-8. Eventually, it would be nice to
# (a) only do this for paths, if they can be given a distinct
# type, and (b) warn the developer if they do this for other
# bytestrings.
return val.decode("utf-8", "replace")
# Other objects are used as-is so field access, etc., still works in
# the format string. Relies on a working __str__ implementation.
return val
class StrFormatLogger(Logger):
"""A version of `Logger` that uses `str.format`-style formatting
instead of %-style formatting and supports keyword arguments.
We cannot easily get rid of this even in the Python 3 era: This custom
formatting supports substitution from `kwargs` into the message, which the
default `logging.Logger._log()` implementation does not.
Remark by @sampsyo: https://stackoverflow.com/a/24683360 might be a way to
achieve this with less code.
"""
class _LogMessage:
def __init__(
self,
msg: str,
args: _ArgsType,
kwargs: dict[str, Any],
):
self.msg = msg
self.args = args
self.kwargs = kwargs
def __str__(self):
args = [_logsafe(a) for a in self.args]
kwargs = {k: _logsafe(v) for (k, v) in self.kwargs.items()}
return self.msg.format(*args, **kwargs)
def _log(
self,
level: int,
msg: object,
args: _ArgsType,
exc_info: _ExcInfoType = None,
extra: Mapping[str, Any] | None = None,
stack_info: bool = False,
stacklevel: int = 1,
**kwargs,
):
"""Log msg.format(*args, **kwargs)"""
if isinstance(msg, str):
msg = self._LogMessage(msg, args, kwargs)
return super()._log(
level,
msg,
(),
exc_info=exc_info,
extra=extra,
stack_info=stack_info,
stacklevel=stacklevel,
)
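# Illustrative logging calls enabled by this class (positional and keyword
# interpolation; `src` and `dst` are hypothetical variables, and values are
# made log-safe lazily, only when the record is emitted):
#
#     log = getLogger("beets")
#     log.debug("moving {source} to {dest}", source=src, dest=dst)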
class ThreadLocalLevelLogger(Logger):
"""A version of `Logger` whose level is thread-local instead of shared."""
def __init__(self, name, level=NOTSET):
self._thread_level = threading.local()
self.default_level = NOTSET
super().__init__(name, level)
@property
def level(self):
try:
return self._thread_level.level
except AttributeError:
self._thread_level.level = self.default_level
return self._thread_level.level
@level.setter
def level(self, value):
self._thread_level.level = value
def set_global_level(self, level):
"""Set the level on the current thread + the default value for all
threads.
"""
self.default_level = level
self.setLevel(level)
class BeetsLogger(ThreadLocalLevelLogger, StrFormatLogger):
pass
my_manager = copy(Logger.manager)
my_manager.loggerClass = BeetsLogger
@overload
def getLogger(name: str) -> BeetsLogger: ...
@overload
def getLogger(name: None = ...) -> RootLogger: ...
def getLogger(name=None) -> BeetsLogger | RootLogger: # noqa: N802
if name:
return my_manager.getLogger(name) # type: ignore[return-value]
else:
return Logger.root
beetbox-beets-c1877b7/beets/mediafile.py 0000664 0000000 0000000 00000002243 15073551743 0020214 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import warnings
import mediafile
warnings.warn(
"beets.mediafile is deprecated; use mediafile instead",
# Show the location of the `import beets.mediafile` statement as the
# warning's source, rather than this file, such that the offending module
# can be identified easily.
stacklevel=2,
)
# Import everything from the mediafile module into this module.
for key, value in mediafile.__dict__.items():
if key not in ["__name__"]:
globals()[key] = value
# Cleanup namespace.
del key, value, warnings, mediafile
beetbox-beets-c1877b7/beets/metadata_plugins.py 0000664 0000000 0000000 00000030277 15073551743 0021626 0 ustar 00root root 0000000 0000000 """Metadata source plugin interface.
This allows beets to lookup metadata from various sources. We define
a common interface for all metadata sources which need to be
implemented as plugins.
"""
from __future__ import annotations
import abc
import re
from functools import cache, cached_property
from typing import TYPE_CHECKING, Generic, Literal, Sequence, TypedDict, TypeVar
import unidecode
from confuse import NotFoundError
from typing_extensions import NotRequired
from beets.util import cached_classproperty
from beets.util.id_extractors import extract_release_id
from .plugins import BeetsPlugin, find_plugins, notify_info_yielded, send
if TYPE_CHECKING:
from collections.abc import Iterable
from .autotag.hooks import AlbumInfo, Item, TrackInfo
@cache
def find_metadata_source_plugins() -> list[MetadataSourcePlugin]:
"""Return a list of all loaded metadata source plugins."""
# TODO: Make this an isinstance(MetadataSourcePlugin, ...) check in v3.0.0
return [p for p in find_plugins() if hasattr(p, "data_source")] # type: ignore[misc]
@notify_info_yielded("albuminfo_received")
def candidates(*args, **kwargs) -> Iterable[AlbumInfo]:
"""Return matching album candidates from all metadata source plugins."""
for plugin in find_metadata_source_plugins():
yield from plugin.candidates(*args, **kwargs)
@notify_info_yielded("trackinfo_received")
def item_candidates(*args, **kwargs) -> Iterable[TrackInfo]:
"""Return matching track candidates fromm all metadata source plugins."""
for plugin in find_metadata_source_plugins():
yield from plugin.item_candidates(*args, **kwargs)
def album_for_id(_id: str) -> AlbumInfo | None:
"""Get AlbumInfo object for the given ID string.
A single ID can yield just a single album, so we return the first match.
"""
for plugin in find_metadata_source_plugins():
if info := plugin.album_for_id(album_id=_id):
send("albuminfo_received", info=info)
return info
return None
def track_for_id(_id: str) -> TrackInfo | None:
"""Get TrackInfo object for the given ID string.
A single ID can yield just a single track, so we return the first match.
"""
for plugin in find_metadata_source_plugins():
if info := plugin.track_for_id(_id):
send("trackinfo_received", info=info)
return info
return None
@cache
def get_penalty(data_source: str | None) -> float:
"""Get the penalty value for the given data source."""
return next(
(
p.data_source_mismatch_penalty
for p in find_metadata_source_plugins()
if p.data_source == data_source
),
MetadataSourcePlugin.DEFAULT_DATA_SOURCE_MISMATCH_PENALTY,
)
class MetadataSourcePlugin(BeetsPlugin, metaclass=abc.ABCMeta):
"""A plugin that provides metadata from a specific source.
This base class implements a contract for plugins that provide metadata
from a specific source. The plugin must implement the methods to search for albums
and tracks, and to retrieve album and track information by ID.
"""
DEFAULT_DATA_SOURCE_MISMATCH_PENALTY = 0.5
@cached_classproperty
def data_source(cls) -> str:
"""The data source name for this plugin.
This is inferred from the plugin name.
"""
return cls.__name__.replace("Plugin", "") # type: ignore[attr-defined]
@cached_property
def data_source_mismatch_penalty(self) -> float:
try:
return self.config["source_weight"].as_number()
except NotFoundError:
return self.config["data_source_mismatch_penalty"].as_number()
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.config.add(
{
"search_limit": 5,
"data_source_mismatch_penalty": self.DEFAULT_DATA_SOURCE_MISMATCH_PENALTY, # noqa: E501
}
)
@abc.abstractmethod
def album_for_id(self, album_id: str) -> AlbumInfo | None:
"""Return :py:class:`AlbumInfo` object or None if no matching release was
found."""
raise NotImplementedError
@abc.abstractmethod
def track_for_id(self, track_id: str) -> TrackInfo | None:
"""Return a :py:class:`TrackInfo` object or None if no matching release was
found.
"""
raise NotImplementedError
# ---------------------------------- search ---------------------------------- #
@abc.abstractmethod
def candidates(
self,
items: Sequence[Item],
artist: str,
album: str,
va_likely: bool,
) -> Iterable[AlbumInfo]:
"""Return :py:class:`AlbumInfo` candidates that match the given album.
Used in the autotag functionality to search for albums.
:param items: List of items in the album
:param artist: Album artist
:param album: Album name
:param va_likely: Whether the album is likely to be by various artists
"""
raise NotImplementedError
@abc.abstractmethod
def item_candidates(
self, item: Item, artist: str, title: str
) -> Iterable[TrackInfo]:
"""Return :py:class:`TrackInfo` candidates that match the given track.
Used in the autotag functionality to search for tracks.
:param item: Track item
:param artist: Track artist
:param title: Track title
"""
raise NotImplementedError
def albums_for_ids(self, ids: Sequence[str]) -> Iterable[AlbumInfo | None]:
"""Batch lookup of album metadata for a list of album IDs.
Given a list of album identifiers, yields corresponding AlbumInfo objects.
Missing albums result in None values in the output iterator.
Plugins may implement this for optimized batched lookups instead of
single calls to album_for_id.
"""
return (self.album_for_id(id) for id in ids)
def tracks_for_ids(self, ids: Sequence[str]) -> Iterable[TrackInfo | None]:
"""Batch lookup of track metadata for a list of track IDs.
Given a list of track identifiers, yields corresponding TrackInfo objects.
Missing tracks result in None values in the output iterator.
Plugins may implement this for optimized batched lookups instead of
single calls to track_for_id.
"""
return (self.track_for_id(id) for id in ids)
def _extract_id(self, url: str) -> str | None:
"""Extract an ID from a URL for this metadata source plugin.
Uses the plugin's data source name to determine the ID format and
extracts the ID from a given URL.
"""
return extract_release_id(self.data_source, url)
@staticmethod
def get_artist(
artists: Iterable[dict[str | int, str]],
id_key: str | int = "id",
name_key: str | int = "name",
join_key: str | int | None = None,
) -> tuple[str, str | None]:
"""Returns an artist string (all artists) and an artist_id (the main
artist) for a list of artist object dicts.
For each artist, this function moves articles (such as 'a', 'an', and 'the')
to the front. It returns a tuple containing the comma-separated string
of all normalized artists and the ``id`` of the main/first artist.
Alternatively a keyword can be used to combine artists together into a
single string by passing the join_key argument.
:param artists: Iterable of artist dicts or lists returned by API.
:param id_key: Key or index corresponding to the value of ``id`` for
the main/first artist. Defaults to 'id'.
:param name_key: Key or index corresponding to values of names
to concatenate for the artist string (containing all artists).
Defaults to 'name'.
:param join_key: Key or index corresponding to a field containing a
keyword to use for combining artists into a single string, for
example "Feat.", "Vs.", "And" or similar. The default is None
which keeps the default behaviour (comma-separated).
:return: Tuple of the combined artist string and the main artist's ``id``.
"""
artist_id = None
artist_string = ""
artists = list(artists) # In case a generator was passed.
total = len(artists)
for idx, artist in enumerate(artists):
if not artist_id:
artist_id = artist[id_key]
name = artist[name_key]
# Move articles to the front.
name = re.sub(r"^(.*?), (a|an|the)$", r"\2 \1", name, flags=re.I)
# Use a join keyword if requested and available.
if idx < (total - 1): # Skip joining on last.
if join_key and artist.get(join_key, None):
name += f" {artist[join_key]} "
else:
name += ", "
artist_string += name
return artist_string, artist_id
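# Sketch with illustrative API data:
#
#     artists = [{"id": "1", "name": "Beatles, The"}, {"id": "2", "name": "X"}]
#     name, main_id = MetadataSourcePlugin.get_artist(artists)
#     # name == "The Beatles, X"; main_id == "1"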
class IDResponse(TypedDict):
"""Response from the API containing an ID."""
id: str
class SearchFilter(TypedDict):
artist: NotRequired[str]
album: NotRequired[str]
R = TypeVar("R", bound=IDResponse)
class SearchApiMetadataSourcePlugin(
Generic[R], MetadataSourcePlugin, metaclass=abc.ABCMeta
):
"""Helper class to implement a metadata source plugin with an API.
Plugins using this ABC must implement an API search method to
retrieve album and track information by ID,
i.e. `album_for_id` and `track_for_id`, and a search method to
perform a search on the API. The search method should return a list
of identifiers for the requested type (album or track).
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.config.add(
{
"search_query_ascii": False,
}
)
@abc.abstractmethod
def _search_api(
self,
query_type: Literal["album", "track"],
filters: SearchFilter,
query_string: str = "",
) -> Sequence[R]:
"""Perform a search on the API.
:param query_type: The type of query to perform.
:param filters: A dictionary of filters to apply to the search.
:param query_string: Additional query to include in the search.
Should return a list of identifiers for the requested type (album or track).
"""
raise NotImplementedError
def candidates(
self,
items: Sequence[Item],
artist: str,
album: str,
va_likely: bool,
) -> Iterable[AlbumInfo]:
query_filters: SearchFilter = {}
if album:
query_filters["album"] = album
if not va_likely:
query_filters["artist"] = artist
results = self._search_api("album", query_filters)
if not results:
return []
return filter(
None, self.albums_for_ids([result["id"] for result in results])
)
def item_candidates(
self, item: Item, artist: str, title: str
) -> Iterable[TrackInfo]:
results = self._search_api(
"track", {"artist": artist}, query_string=title
)
if not results:
return []
return filter(
None,
self.tracks_for_ids([result["id"] for result in results if result]),
)
def _construct_search_query(
self, filters: SearchFilter, query_string: str
) -> str:
"""Construct a query string with the specified filters and keywords to
be provided to the spotify (or similar) search API.
The returned format was initially designed for Spotify's search API but
we found it is also useful with other APIs that support similar query
structures; see the Spotify and Deezer search API documentation.
:param filters: Field filters to apply.
:param query_string: Query keywords to use.
:return: Query string to be provided to the search API.
"""
components = [query_string, *(f"{k}:'{v}'" for k, v in filters.items())]
query = " ".join(filter(None, components))
if self.config["search_query_ascii"].get():
query = unidecode.unidecode(query)
return query
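# Illustrative result: filters {"artist": "Beyoncé"} with query string "Halo"
# produce "Halo artist:'Beyoncé'" (folded to "Halo artist:'Beyonce'" when the
# `search_query_ascii` option is enabled).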
beetbox-beets-c1877b7/beets/plugins.py 0000664 0000000 0000000 00000055567 15073551743 0017777 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Support for beets plugins."""
from __future__ import annotations
import abc
import inspect
import re
import sys
import warnings
from collections import defaultdict
from functools import cached_property, wraps
from importlib import import_module
from pathlib import Path
from types import GenericAlias
from typing import TYPE_CHECKING, Any, ClassVar, Literal, TypeVar
import mediafile
from typing_extensions import ParamSpec
import beets
from beets import logging
from beets.util import unique_list
if TYPE_CHECKING:
from collections.abc import Callable, Iterable, Sequence
from confuse import ConfigView
from beets.dbcore import Query
from beets.dbcore.db import FieldQueryType
from beets.dbcore.types import Type
from beets.importer import ImportSession, ImportTask
from beets.library import Album, Item, Library
from beets.ui import Subcommand
# TYPE_CHECKING guard is needed for any derived type
# which uses an import from `beets.library` and `beets.imported`
ImportStageFunc = Callable[[ImportSession, ImportTask], None]
T = TypeVar("T", Album, Item, str)
TFunc = Callable[[T], str]
TFuncMap = dict[str, TFunc[T]]
AnyModel = TypeVar("AnyModel", Album, Item)
P = ParamSpec("P")
Ret = TypeVar("Ret", bound=Any)
Listener = Callable[..., Any]
IterF = Callable[P, Iterable[Ret]]
PLUGIN_NAMESPACE = "beetsplug"
# Plugins using the Last.fm API can share the same API key.
LASTFM_KEY = "2dc3914abf35f0d9c92d97d8f8e42b43"
EventType = Literal[
"after_write",
"album_imported",
"album_removed",
"albuminfo_received",
"before_choose_candidate",
"before_item_moved",
"cli_exit",
"database_change",
"import",
"import_begin",
"import_task_apply",
"import_task_before_choice",
"import_task_choice",
"import_task_created",
"import_task_files",
"import_task_start",
"item_copied",
"item_hardlinked",
"item_imported",
"item_linked",
"item_moved",
"item_reflinked",
"item_removed",
"library_opened",
"mb_album_extract",
"mb_track_extract",
"pluginload",
"trackinfo_received",
"write",
]
# Global logger.
log = logging.getLogger("beets")
class PluginConflictError(Exception):
"""Indicates that the services provided by one plugin conflict with
those of another.
For example two plugins may define different types for flexible fields.
"""
class PluginImportError(ImportError):
"""Indicates that a plugin could not be imported.
This is a subclass of ImportError so that it can be caught separately
from other errors.
"""
def __init__(self, name: str):
super().__init__(f"Could not import plugin {name}")
class PluginLogFilter(logging.Filter):
"""A logging filter that identifies the plugin that emitted a log
message.
"""
def __init__(self, plugin):
self.prefix = f"{plugin.name}: "
def filter(self, record):
if hasattr(record.msg, "msg") and isinstance(record.msg.msg, str):
# A _LogMessage from our hacked-up Logging replacement.
record.msg.msg = f"{self.prefix}{record.msg.msg}"
elif isinstance(record.msg, str):
record.msg = f"{self.prefix}{record.msg}"
return True
# Managing the plugins themselves.
class BeetsPlugin(metaclass=abc.ABCMeta):
"""The base class for all beets plugins. Plugins provide
functionality by defining a subclass of BeetsPlugin and overriding
the abstract methods defined here.
"""
_raw_listeners: ClassVar[dict[EventType, list[Listener]]] = defaultdict(
list
)
listeners: ClassVar[dict[EventType, list[Listener]]] = defaultdict(list)
template_funcs: TFuncMap[str] | None = None
template_fields: TFuncMap[Item] | None = None
album_template_fields: TFuncMap[Album] | None = None
name: str
config: ConfigView
early_import_stages: list[ImportStageFunc]
import_stages: list[ImportStageFunc]
def __init_subclass__(cls) -> None:
"""Enable legacy metadataâ€source plugins to work with the new interface.
When a plugin subclass of BeetsPlugin defines a `data_source` attribute
but does not inherit from MetadataSourcePlugin, this hook:
1. Skips abstract classes.
2. Warns that the class should extend MetadataSourcePlugin (deprecation).
3. Copies any nonabstract methods from MetadataSourcePlugin onto the
subclass to provide the full plugin API.
This compatibility layer will be removed in the v3.0.0 release.
"""
# TODO: Remove in v3.0.0
if inspect.isabstract(cls):
return
from beets.metadata_plugins import MetadataSourcePlugin
if issubclass(cls, MetadataSourcePlugin) or not hasattr(
cls, "data_source"
):
return
warnings.warn(
f"{cls.__name__} is used as a legacy metadata source. "
"It should extend MetadataSourcePlugin instead of BeetsPlugin. "
"Support for this will be removed in the v3.0.0 release!",
DeprecationWarning,
stacklevel=3,
)
method: property | cached_property[Any] | Callable[..., Any]
for name, method in inspect.getmembers(
MetadataSourcePlugin,
predicate=lambda f: ( # type: ignore[arg-type]
(
isinstance(f, (property, cached_property))
and not hasattr(
BeetsPlugin,
getattr(f, "attrname", None) or f.fget.__name__, # type: ignore[union-attr]
)
)
or (
inspect.isfunction(f)
and f.__name__
and not getattr(f, "__isabstractmethod__", False)
and not hasattr(BeetsPlugin, f.__name__)
)
),
):
setattr(cls, name, method)
def __init__(self, name: str | None = None):
"""Perform one-time plugin setup."""
self.name = name or self.__module__.split(".")[-1]
self.config = beets.config[self.name]
# Set class attributes if they are not already set
# for the type of plugin.
if not self.template_funcs:
self.template_funcs = {}
if not self.template_fields:
self.template_fields = {}
if not self.album_template_fields:
self.album_template_fields = {}
self.early_import_stages = []
self.import_stages = []
self._log = log.getChild(self.name)
self._log.setLevel(logging.NOTSET) # Use `beets` logger level.
if not any(isinstance(f, PluginLogFilter) for f in self._log.filters):
self._log.addFilter(PluginLogFilter(self))
# In order to verify the config we need to make sure the plugin is fully
# configured (plugins usually add the default configuration *after*
# calling super().__init__()).
self.register_listener("pluginload", self._verify_config)
def _verify_config(self, *_, **__) -> None:
"""Verify plugin configuration.
If deprecated 'source_weight' option is explicitly set by the user, they
will see a warning in the logs. Otherwise, this must be configured by
a third party plugin, thus we raise a deprecation warning which won't be
shown to user but will be visible to plugin developers.
"""
# TODO: Remove in v3.0.0
if (
not hasattr(self, "data_source")
or "source_weight" not in self.config
):
return
message = (
"'source_weight' configuration option is deprecated and will be"
" removed in v3.0.0. Use 'data_source_mismatch_penalty' instead"
)
for source in self.config.root().sources:
if "source_weight" in (source.get(self.name) or {}):
if source.filename: # user config
self._log.warning(message)
else: # 3rd-party plugin config
warnings.warn(message, DeprecationWarning, stacklevel=0)
def commands(self) -> Sequence[Subcommand]:
"""Should return a list of beets.ui.Subcommand objects for
commands that should be added to beets' CLI.
"""
return ()
def _set_stage_log_level(
self,
stages: list[ImportStageFunc],
) -> list[ImportStageFunc]:
"""Adjust all the stages in `stages` to WARNING logging level."""
return [
self._set_log_level_and_params(logging.WARNING, stage)
for stage in stages
]
def get_early_import_stages(self) -> list[ImportStageFunc]:
"""Return a list of functions that should be called as importer
pipelines stages early in the pipeline.
The callables are wrapped versions of the functions in
`self.early_import_stages`. Wrapping provides some bookkeeping for the
plugin: specifically, the logging level is adjusted to WARNING.
"""
return self._set_stage_log_level(self.early_import_stages)
def get_import_stages(self) -> list[ImportStageFunc]:
"""Return a list of functions that should be called as importer
pipelines stages.
The callables are wrapped versions of the functions in
`self.import_stages`. Wrapping provides some bookkeeping for the
plugin: specifically, the logging level is adjusted to WARNING.
"""
return self._set_stage_log_level(self.import_stages)
def _set_log_level_and_params(
self,
base_log_level: int,
func: Callable[P, Ret],
) -> Callable[P, Ret]:
"""Wrap `func` to temporarily set this plugin's logger level to
`base_log_level` + config options (and restore it to its previous
value after the function returns). Also determines which params may not
be sent for backwards-compatibility.
"""
argspec = inspect.getfullargspec(func)
@wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> Ret:
assert self._log.level == logging.NOTSET
verbosity = beets.config["verbose"].get(int)
log_level = max(logging.DEBUG, base_log_level - 10 * verbosity)
self._log.setLevel(log_level)
if argspec.varkw is None:
kwargs = {k: v for k, v in kwargs.items() if k in argspec.args} # type: ignore[assignment]
try:
return func(*args, **kwargs)
finally:
self._log.setLevel(logging.NOTSET)
return wrapper
def queries(self) -> dict[str, type[Query]]:
"""Return a dict mapping prefixes to Query subclasses."""
return {}
def add_media_field(
self, name: str, descriptor: mediafile.MediaField
) -> None:
"""Add a field that is synchronized between media files and items.
When a media field is added ``item.write()`` will set the name
property of the item's MediaFile to ``item[name]`` and save the
changes. Similarly ``item.read()`` will set ``item[name]`` to
the value of the name property of the media file.
"""
# Defer import to prevent circular dependency
from beets import library
mediafile.MediaFile.add_field(name, descriptor)
library.Item._media_fields.add(name)
def register_listener(self, event: EventType, func: Listener) -> None:
"""Add a function as a listener for the specified event."""
if func not in self._raw_listeners[event]:
self._raw_listeners[event].append(func)
self.listeners[event].append(
self._set_log_level_and_params(logging.WARNING, func)
)
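# Sketch of a plugin registering an event listener (hypothetical plugin;
# the handler signature follows the arguments sent with `item_imported`):
#
#     class ExamplePlugin(BeetsPlugin):
#         def __init__(self):
#             super().__init__()
#             self.register_listener("item_imported", self.imported)
#
#         def imported(self, lib, item):
#             self._log.info("imported {}", item.title)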
@classmethod
def template_func(cls, name: str) -> Callable[[TFunc[str]], TFunc[str]]:
"""Decorator that registers a path template function. The
function will be invoked as ``%name{}`` from path format
strings.
"""
def helper(func: TFunc[str]) -> TFunc[str]:
if cls.template_funcs is None:
cls.template_funcs = {}
cls.template_funcs[name] = func
return func
return helper
@classmethod
def template_field(cls, name: str) -> Callable[[TFunc[Item]], TFunc[Item]]:
"""Decorator that registers a path template field computation.
The value will be referenced as ``$name`` from path format
strings. The function must accept a single parameter, the Item
being formatted.
"""
def helper(func: TFunc[Item]) -> TFunc[Item]:
if cls.template_fields is None:
cls.template_fields = {}
cls.template_fields[name] = func
return func
return helper
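# Sketch: registering a custom %initial{} template function from a plugin
# module (illustrative, reusing the hypothetical ExamplePlugin above):
#
#     @ExamplePlugin.template_func("initial")
#     def _tmpl_initial(text):
#         return text[:1].upper()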
def get_plugin_names() -> list[str]:
"""Discover and return the set of plugin names to be loaded.
Configures the plugin search paths and resolves the final set of plugins
based on configuration settings, inclusion filters, and exclusion rules.
Automatically includes the musicbrainz plugin when enabled in configuration.
"""
paths = [
str(Path(p).expanduser().absolute())
for p in beets.config["pluginpath"].as_str_seq(split=False)
]
log.debug("plugin paths: {}", paths)
# Extend the `beetsplug` package to include the plugin paths.
import beetsplug
beetsplug.__path__ = paths + list(beetsplug.__path__)
# For backwards compatibility, also support plugin paths that
# *contain* a `beetsplug` package.
sys.path += paths
plugins = unique_list(beets.config["plugins"].as_str_seq())
# TODO: Remove in v3.0.0
if (
"musicbrainz" not in plugins
and "musicbrainz" in beets.config
and beets.config["musicbrainz"].get().get("enabled")
):
plugins.append("musicbrainz")
beets.config.add({"disabled_plugins": []})
disabled_plugins = set(beets.config["disabled_plugins"].as_str_seq())
return [p for p in plugins if p not in disabled_plugins]
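# Illustrative configuration driving this resolution (YAML):
#
#     plugins: fetchart lyrics
#     pluginpath: ~/src/my_beets_plugins
#     disabled_plugins: [lyrics]
#
# would load only `fetchart` (plus `musicbrainz` when enabled in config).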
def _get_plugin(name: str) -> BeetsPlugin | None:
"""Dynamically load and instantiate a plugin class by name.
Attempts to import the plugin module, locate the appropriate plugin class
within it, and return an instance. Handles import failures gracefully and
logs warnings for missing plugins or loading errors.
Note we load the *last* plugin class found in the plugin namespace. This
allows plugins to define helper classes that inherit from BeetsPlugin
without those being loaded as the main plugin class.
Returns None if the plugin could not be loaded for any reason.
"""
try:
try:
namespace = import_module(f"{PLUGIN_NAMESPACE}.{name}")
except Exception as exc:
raise PluginImportError(name) from exc
for obj in reversed(namespace.__dict__.values()):
if (
inspect.isclass(obj)
and not isinstance(
obj, GenericAlias
) # seems to be needed for python <= 3.9 only
and issubclass(obj, BeetsPlugin)
and obj != BeetsPlugin
and not inspect.isabstract(obj)
# Only consider this plugin's module or submodules to avoid
# conflicts when plugins import other BeetsPlugin classes
and (
obj.__module__ == namespace.__name__
or obj.__module__.startswith(f"{namespace.__name__}.")
)
):
return obj()
except Exception:
log.warning("** error loading plugin {}", name, exc_info=True)
return None
_instances: list[BeetsPlugin] = []
def load_plugins() -> None:
"""Initialize the plugin system by loading all configured plugins.
Performs one-time plugin discovery and instantiation, storing loaded plugin
instances globally. Emits a pluginload event after successful initialization
to notify other components.
"""
if not _instances:
names = get_plugin_names()
log.debug("Loading plugins: {}", ", ".join(sorted(names)))
_instances.extend(filter(None, map(_get_plugin, names)))
send("pluginload")
def find_plugins() -> Iterable[BeetsPlugin]:
return _instances
# Communication with plugins.
def commands() -> list[Subcommand]:
"""Returns a list of Subcommand objects from all loaded plugins."""
out: list[Subcommand] = []
for plugin in find_plugins():
out += plugin.commands()
return out
def queries() -> dict[str, type[Query]]:
"""Returns a dict mapping prefix strings to Query subclasses all loaded
plugins.
"""
out: dict[str, type[Query]] = {}
for plugin in find_plugins():
out.update(plugin.queries())
return out
def types(model_cls: type[AnyModel]) -> dict[str, Type]:
"""Return mapping between flex field names and types for the given model."""
attr_name = f"{model_cls.__name__.lower()}_types"
types: dict[str, Type] = {}
for plugin in find_plugins():
plugin_types = getattr(plugin, attr_name, {})
for field in plugin_types:
if field in types and plugin_types[field] != types[field]:
raise PluginConflictError(
f"Plugin {plugin.name} defines flexible field {field} "
"which has already been defined with "
"another type."
)
types.update(plugin_types)
return types
def named_queries(model_cls: type[AnyModel]) -> dict[str, FieldQueryType]:
"""Return mapping between field names and queries for the given model."""
attr_name = f"{model_cls.__name__.lower()}_queries"
return {
field: query
for plugin in find_plugins()
for field, query in getattr(plugin, attr_name, {}).items()
}
def notify_info_yielded(
event: EventType,
) -> Callable[[IterF[P, Ret]], IterF[P, Ret]]:
"""Makes a generator send the event 'event' every time it yields.
This decorator is supposed to decorate a generator, but any function
returning an iterable should work.
Each yielded value is passed to plugins using the 'info' parameter of
'send'.
"""
def decorator(func: IterF[P, Ret]) -> IterF[P, Ret]:
@wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> Iterable[Ret]:
for v in func(*args, **kwargs):
send(event, info=v)
yield v
return wrapper
return decorator
def template_funcs() -> TFuncMap[str]:
"""Get all the template functions declared by plugins as a
dictionary.
"""
funcs: TFuncMap[str] = {}
for plugin in find_plugins():
if plugin.template_funcs:
funcs.update(plugin.template_funcs)
return funcs
def early_import_stages() -> list[ImportStageFunc]:
"""Get a list of early import stage functions defined by plugins."""
stages: list[ImportStageFunc] = []
for plugin in find_plugins():
stages += plugin.get_early_import_stages()
return stages
def import_stages() -> list[ImportStageFunc]:
"""Get a list of import stage functions defined by plugins."""
stages: list[ImportStageFunc] = []
for plugin in find_plugins():
stages += plugin.get_import_stages()
return stages
# New-style (lazy) plugin-provided fields.
F = TypeVar("F")
def _check_conflicts_and_merge(
plugin: BeetsPlugin, plugin_funcs: dict[str, F] | None, funcs: dict[str, F]
) -> None:
"""Check the provided template functions for conflicts and merge into funcs.
Raises a `PluginConflictError` if a plugin defines template functions
for fields that another plugin has already defined template functions for.
"""
if plugin_funcs:
if not plugin_funcs.keys().isdisjoint(funcs.keys()):
conflicted_fields = ", ".join(plugin_funcs.keys() & funcs.keys())
raise PluginConflictError(
f"Plugin {plugin.name} defines template functions for "
f"{conflicted_fields} that conflict with another plugin."
)
funcs.update(plugin_funcs)
def item_field_getters() -> TFuncMap[Item]:
"""Get a dictionary mapping field names to unary functions that
compute the field's value.
"""
funcs: TFuncMap[Item] = {}
for plugin in find_plugins():
_check_conflicts_and_merge(plugin, plugin.template_fields, funcs)
return funcs
def album_field_getters() -> TFuncMap[Album]:
"""As above, for album fields."""
funcs: TFuncMap[Album] = {}
for plugin in find_plugins():
_check_conflicts_and_merge(plugin, plugin.album_template_fields, funcs)
return funcs
# Event dispatch.
def send(event: EventType, **arguments: Any) -> list[Any]:
"""Send an event to all assigned event listeners.
`event` is the name of the event to send, all other named arguments
are passed along to the handlers.
Return a list of non-None values returned from the handlers.
"""
log.debug("Sending event: {}", event)
return [
r
for handler in BeetsPlugin.listeners[event]
if (r := handler(**arguments)) is not None
]
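# Sketch: broadcasting an event with keyword arguments (illustrative):
#
#     results = send("database_change", lib=lib, model=item)
#     # `results` collects the non-None return values of all handlers.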
def feat_tokens(for_artist: bool = True) -> str:
"""Return a regular expression that matches phrases like "featuring"
that separate a main artist or a song title from secondary artists.
The `for_artist` option determines whether the regex should be
suitable for matching artist fields (the default) or title fields.
"""
feat_words = ["ft", "featuring", "feat", "feat.", "ft."]
if for_artist:
feat_words += ["with", "vs", "and", "con", "&"]
return (
rf"(?<=[\s(\[])(?:{'|'.join(re.escape(x) for x in feat_words)})(?=\s)"
)
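# Illustrative use: splitting a feat. credit out of an artist string:
#
#     import re
#     re.split(feat_tokens(), "Main ft. Guest")  # -> ['Main ', ' Guest']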
def apply_item_changes(
lib: Library, item: Item, move: bool, pretend: bool, write: bool
) -> None:
"""Store, move, and write the item according to the arguments.
:param lib: beets library.
:param item: Item whose changes to apply.
:param move: Move the item if it's in the library.
:param pretend: Return without moving, writing, or storing the item's
metadata.
:param write: Write the item's metadata to its media file.
"""
if pretend:
return
from beets import util
# Move the item if it's in the library.
if move and lib.directory in util.ancestry(item.path):
item.move(with_album=False)
if write:
item.try_write()
item.store()
beetbox-beets-c1877b7/beets/py.typed 0000664 0000000 0000000 00000000000 15073551743 0017407 0 ustar 00root root 0000000 0000000 beetbox-beets-c1877b7/beets/random.py 0000664 0000000 0000000 00000007114 15073551743 0017557 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Philippe Mongeau.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Get a random song or album from the library."""
import random
from itertools import groupby
from operator import attrgetter
def _length(obj, album):
"""Get the duration of an item or album."""
if album:
return sum(i.length for i in obj.items())
else:
return obj.length
def _equal_chance_permutation(objs, field="albumartist", random_gen=None):
"""Generate (lazily) a permutation of the objects where every group
with equal values for `field` has an equal chance of appearing in
any given position.
"""
rand = random_gen or random
# Group the objects by artist so we can sample from them.
key = attrgetter(field)
objs.sort(key=key)
objs_by_artists = {}
for artist, v in groupby(objs, key):
objs_by_artists[artist] = list(v)
# While we still have artists with music to choose from, pick one
# randomly and pick a track from that artist.
while objs_by_artists:
# Choose an artist and an object for that artist, removing
# this choice from the pool.
artist = rand.choice(list(objs_by_artists.keys()))
objs_from_artist = objs_by_artists[artist]
i = rand.randint(0, len(objs_from_artist) - 1)
yield objs_from_artist.pop(i)
# Remove the artist if we've used up all of its objects.
if not objs_from_artist:
del objs_by_artists[artist]
def _take(iter, num):
"""Return a list containing the first `num` values in `iter` (or
fewer, if the iterable ends early).
"""
out = []
for val in iter:
out.append(val)
num -= 1
if num <= 0:
break
return out
def _take_time(iter, secs, album):
"""Return a list containing the first values in `iter`, which should
be Item or Album objects, that add up to the given amount of time in
seconds.
"""
out = []
total_time = 0.0
for obj in iter:
length = _length(obj, album)
if total_time + length <= secs:
out.append(obj)
total_time += length
return out
def random_objs(
objs, album, number=1, time=None, equal_chance=False, random_gen=None
):
"""Get a random subset of the provided `objs`.
If `number` is provided, produce that many matches. Otherwise, if
`time` is provided, instead select a list whose total time is close
to that number of minutes. If `equal_chance` is true, give each
artist an equal chance of being included so that artists with more
songs are not represented disproportionately.
"""
rand = random_gen or random
# Permute the objects either in a straightforward way or an
# artist-balanced way.
if equal_chance:
perm = _equal_chance_permutation(objs, random_gen=rand)
else:
perm = objs
rand.shuffle(perm) # N.B. This shuffles the original list.
# Select objects by time or count.
if time:
return _take_time(perm, time * 60, album)
else:
return _take(perm, number)
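# Sketch (assumes `items` is a list of Item objects):
#
#     import random
#     picks = random_objs(items, album=False, number=5,
#                         random_gen=random.Random(42))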
beetbox-beets-c1877b7/beets/test/ 0000775 0000000 0000000 00000000000 15073551743 0016701 5 ustar 00root root 0000000 0000000 beetbox-beets-c1877b7/beets/test/__init__.py 0000664 0000000 0000000 00000001615 15073551743 0021015 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2024, Lars Kruse
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module contains components of beets' test environment, which
may be of use for testing procedures of external libraries or programs.
For example the 'TestHelper' class may be useful for creating an
in-memory beets library filled with a few example items.
"""
beetbox-beets-c1877b7/beets/test/_common.py 0000664 0000000 0000000 00000013253 15073551743 0020706 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Some common functionality for beets' test cases."""
import os
import sys
import unittest
from contextlib import contextmanager
import beets
import beets.library
# Make sure the development versions of the plugins are used
import beetsplug
from beets import importer, logging, util
from beets.ui import commands
from beets.util import syspath
beetsplug.__path__ = [
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
os.path.pardir,
os.path.pardir,
"beetsplug",
)
)
]
# Test resources path.
RSRC = util.bytestring_path(
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
os.path.pardir,
os.path.pardir,
"test",
"rsrc",
)
)
)
PLUGINPATH = os.path.join(RSRC.decode(), "beetsplug")
# Propagate to root logger so the test runner can capture it
log = logging.getLogger("beets")
log.propagate = True
log.setLevel(logging.DEBUG)
# OS feature test.
HAVE_SYMLINK = sys.platform != "win32"
HAVE_HARDLINK = sys.platform != "win32"
def item(lib=None, **kwargs):
defaults = dict(
title="the title",
artist="the artist",
albumartist="the album artist",
album="the album",
genre="the genre",
lyricist="the lyricist",
composer="the composer",
arranger="the arranger",
grouping="the grouping",
work="the work title",
mb_workid="the work musicbrainz id",
work_disambig="the work disambiguation",
year=1,
month=2,
day=3,
track=4,
tracktotal=5,
disc=6,
disctotal=7,
lyrics="the lyrics",
comments="the comments",
bpm=8,
comp=True,
length=60.0,
bitrate=128000,
format="FLAC",
mb_trackid="someID-1",
mb_albumid="someID-2",
mb_artistid="someID-3",
mb_albumartistid="someID-4",
mb_releasetrackid="someID-5",
album_id=None,
mtime=12345,
)
i = beets.library.Item(**{**defaults, **kwargs})
if lib:
lib.add(i)
return i
# Dummy import session.
def import_session(lib=None, loghandler=None, paths=[], query=[], cli=False):
cls = commands.TerminalImportSession if cli else importer.ImportSession
return cls(lib, loghandler, paths, query)
# Mock I/O.
class InputError(Exception):
def __init__(self, output=None):
self.output = output
def __str__(self):
msg = "Attempt to read with no input provided."
if self.output is not None:
msg += f" Output: {self.output!r}"
return msg
class DummyOut:
encoding = "utf-8"
def __init__(self):
self.buf = []
def write(self, s):
self.buf.append(s)
def get(self):
return "".join(self.buf)
def flush(self):
self.clear()
def clear(self):
self.buf = []
class DummyIn:
encoding = "utf-8"
def __init__(self, out=None):
self.buf = []
self.reads = 0
self.out = out
def add(self, s):
self.buf.append(f"{s}\n")
def close(self):
pass
def readline(self):
if not self.buf:
if self.out:
raise InputError(self.out.get())
else:
raise InputError()
self.reads += 1
return self.buf.pop(0)
class DummyIO:
"""Mocks input and output streams for testing UI code."""
def __init__(self):
self.stdout = DummyOut()
self.stdin = DummyIn(self.stdout)
def addinput(self, s):
self.stdin.add(s)
def getoutput(self):
res = self.stdout.get()
self.stdout.clear()
return res
def readcount(self):
return self.stdin.reads
def install(self):
sys.stdin = self.stdin
sys.stdout = self.stdout
def restore(self):
sys.stdin = sys.__stdin__
sys.stdout = sys.__stdout__
# Utility.
def touch(path):
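    """Create the file at `path` if it does not yet exist (like `touch`)."""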
open(syspath(path), "a").close()
class Bag:
"""An object that exposes a set of fields given as keyword
arguments. Any field not found in the dictionary appears to be None.
Used for mocking Album objects and the like.
"""
def __init__(self, **fields):
self.fields = fields
def __getattr__(self, key):
return self.fields.get(key)
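# A minimal sketch of `Bag` in use (field values are illustrative):
#
#     album = Bag(album="the album")
#     album.album   # -> "the album"
#     album.artist  # -> None (unset fields read as None)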
# Platform mocking.
@contextmanager
def platform_windows():
import ntpath
old_path = os.path
try:
os.path = ntpath
yield
finally:
os.path = old_path
@contextmanager
def platform_posix():
import posixpath
old_path = os.path
try:
os.path = posixpath
yield
finally:
os.path = old_path
@contextmanager
def system_mock(name):
import platform
old_system = platform.system
platform.system = lambda: name
try:
yield
finally:
platform.system = old_system
def slow_test(unused=None):
def _id(obj):
return obj
if "SKIP_SLOW_TESTS" in os.environ:
return unittest.skip("test is slow")
return _id
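# Illustrative usage (hypothetical test method):
#
#     @slow_test()
#     def test_full_rescan(self):
#         ...
#
# Such tests are skipped when the SKIP_SLOW_TESTS environment variable
# is set.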
beetbox-beets-c1877b7/beets/test/helper.py 0000664 0000000 0000000 00000066530 15073551743 0020544 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module includes various helpers that provide fixtures, capture
information or mock the environment.
- The `control_stdin` and `capture_stdout` context managers allow one to
interact with the user interface.
- `has_program` checks the presence of a command on the system.
- The `ImportSessionFixture` allows one to run importer code while
controlling the interactions through code.
- The `TestHelper` class encapsulates various fixtures that can be set up.
"""
from __future__ import annotations
import os
import os.path
import shutil
import subprocess
import sys
import unittest
from contextlib import contextmanager
from dataclasses import dataclass
from enum import Enum
from functools import cached_property
from io import StringIO
from pathlib import Path
from tempfile import gettempdir, mkdtemp, mkstemp
from typing import Any, ClassVar
from unittest.mock import patch
import responses
from mediafile import Image, MediaFile
import beets
import beets.plugins
from beets import importer, logging, util
from beets.autotag.hooks import AlbumInfo, TrackInfo
from beets.importer import ImportSession
from beets.library import Item, Library
from beets.test import _common
from beets.ui.commands import TerminalImportSession
from beets.util import (
MoveOperation,
bytestring_path,
clean_module_tempdir,
syspath,
)
class LogCapture(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.messages = []
def emit(self, record):
self.messages.append(str(record.msg))
@contextmanager
def capture_log(logger="beets"):
capture = LogCapture()
log = logging.getLogger(logger)
log.addHandler(capture)
try:
yield capture.messages
finally:
log.removeHandler(capture)
@contextmanager
def control_stdin(input=None):
"""Sends ``input`` to stdin.
>>> with control_stdin('yes'):
... input()
'yes'
"""
org = sys.stdin
sys.stdin = StringIO(input)
try:
yield sys.stdin
finally:
sys.stdin = org
@contextmanager
def capture_stdout():
"""Save stdout in a StringIO.
>>> with capture_stdout() as output:
... print('spam')
...
>>> output.getvalue()
'spam'
"""
org = sys.stdout
sys.stdout = capture = StringIO()
try:
yield sys.stdout
finally:
sys.stdout = org
print(capture.getvalue())
def has_program(cmd, args=["--version"]):
"""Returns `True` if `cmd` can be executed."""
full_cmd = [cmd] + args
try:
with open(os.devnull, "wb") as devnull:
subprocess.check_call(
full_cmd, stderr=devnull, stdout=devnull, stdin=devnull
)
except OSError:
return False
except subprocess.CalledProcessError:
return False
else:
return True
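# Illustrative usage (the command name is an example, not a beets
# dependency):
#
#     if not has_program("ffmpeg"):
#         raise unittest.SkipTest("ffmpeg not installed")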
def check_reflink_support(path: str) -> bool:
try:
import reflink
except ImportError:
return False
return reflink.supported_at(path)
class ConfigMixin:
@cached_property
def config(self) -> beets.IncludeLazyConfig:
"""Base beets configuration for tests."""
config = beets.config
config.sources = []
config.read(user=False, defaults=True)
config["plugins"] = []
config["verbose"] = 1
config["ui"]["color"] = False
config["threaded"] = False
return config
NEEDS_REFLINK = unittest.skipUnless(
check_reflink_support(gettempdir()), "no reflink support for libdir"
)
class IOMixin:
@cached_property
def io(self) -> _common.DummyIO:
return _common.DummyIO()
def setUp(self):
super().setUp()
self.io.install()
def tearDown(self):
super().tearDown()
self.io.restore()
class TestHelper(ConfigMixin):
"""Helper mixin for high-level cli and plugin tests.
This mixin provides methods to isolate beets' global state provide
fixtures.
"""
resource_path = Path(os.fsdecode(_common.RSRC)) / "full.mp3"
db_on_disk: ClassVar[bool] = False
@cached_property
def temp_dir_path(self) -> Path:
return Path(self.create_temp_dir())
@cached_property
def temp_dir(self) -> bytes:
return util.bytestring_path(self.temp_dir_path)
@cached_property
def lib_path(self) -> Path:
lib_path = self.temp_dir_path / "libdir"
lib_path.mkdir(exist_ok=True)
return lib_path
@cached_property
def libdir(self) -> bytes:
return bytestring_path(self.lib_path)
# TODO automate teardown through hook registration
def setup_beets(self):
"""Setup pristine global configuration and library for testing.
Sets ``beets.config`` so we can safely use any functionality
that uses the global configuration. All paths used are
contained in a temporary directory
Sets the following properties on itself.
- ``temp_dir`` Path to a temporary directory containing all
files specific to beets
- ``libdir`` Path to a subfolder of ``temp_dir``, containing the
library's media files. Same as ``config['directory']``.
- ``lib`` Library instance created with the settings from
``config``.
Make sure you call ``teardown_beets()`` afterwards.
"""
temp_dir_str = str(self.temp_dir_path)
self.env_patcher = patch.dict(
"os.environ",
{
"BEETSDIR": temp_dir_str,
"HOME": temp_dir_str, # used by Confuse to create directories.
},
)
self.env_patcher.start()
self.config["directory"] = str(self.lib_path)
if self.db_on_disk:
dbpath = util.bytestring_path(self.config["library"].as_filename())
else:
dbpath = ":memory:"
self.lib = Library(dbpath, self.libdir)
def teardown_beets(self):
self.env_patcher.stop()
self.lib._close()
self.remove_temp_dir()
# Library fixtures methods
def create_item(self, **values):
"""Return an `Item` instance with sensible default values.
        The item receives its attributes from the `**values` parameter. The
`title`, `artist`, `album`, `track`, `format` and `path`
attributes have defaults if they are not given as parameters.
The `title` attribute is formatted with a running item count to
prevent duplicates. The default for the `path` attribute
respects the `format` value.
The item is attached to the database from `self.lib`.
"""
values_ = {
"title": "t\u00eftle {}",
"artist": "the \u00e4rtist",
"album": "the \u00e4lbum",
"track": 1,
"format": "MP3",
}
values_.update(values)
values_["title"] = values_["title"].format(1)
values_["db"] = self.lib
item = Item(**values_)
if "path" not in values:
item["path"] = f"audio.{item['format'].lower()}"
# mtime needs to be set last since other assignments reset it.
item.mtime = 12345
return item
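    # Illustrative call (field values are examples only): the `{}`
    # placeholder in the title is filled in, so
    #
    #     item = self.create_item(title="Song {}", artist="Someone")
    #
    # yields an item titled "Song 1" with the default path "audio.mp3".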
def add_item(self, **values):
"""Add an item to the library and return it.
Creates the item by passing the parameters to `create_item()`.
If `path` is not set in `values` it is set to `item.destination()`.
"""
# When specifying a path, store it normalized (as beets does
# ordinarily).
if "path" in values:
values["path"] = util.normpath(values["path"])
item = self.create_item(**values)
item.add(self.lib)
# Ensure every item has a path.
if "path" not in values:
item["path"] = item.destination()
item.store()
return item
def add_item_fixture(self, **values):
"""Add an item with an actual audio file to the library."""
item = self.create_item(**values)
extension = item["format"].lower()
item["path"] = os.path.join(
_common.RSRC, util.bytestring_path(f"min.{extension}")
)
item.add(self.lib)
item.move(operation=MoveOperation.COPY)
item.store()
return item
def add_album(self, **values):
item = self.add_item(**values)
return self.lib.add_album([item])
def add_item_fixtures(self, ext="mp3", count=1):
"""Add a number of items with files to the database."""
# TODO base this on `add_item()`
items = []
path = os.path.join(_common.RSRC, util.bytestring_path(f"full.{ext}"))
for i in range(count):
item = Item.from_path(path)
item.album = f"\u00e4lbum {i}" # Check unicode paths
item.title = f"t\u00eftle {i}"
# mtime needs to be set last since other assignments reset it.
item.mtime = 12345
item.add(self.lib)
item.move(operation=MoveOperation.COPY)
item.store()
items.append(item)
return items
def add_album_fixture(
self,
track_count=1,
fname="full",
ext="mp3",
disc_count=1,
):
"""Add an album with files to the database."""
items = []
path = os.path.join(
_common.RSRC,
util.bytestring_path(f"{fname}.{ext}"),
)
for discnumber in range(1, disc_count + 1):
for i in range(track_count):
item = Item.from_path(path)
item.album = "\u00e4lbum" # Check unicode paths
item.title = f"t\u00eftle {i}"
item.disc = discnumber
# mtime needs to be set last since other assignments reset it.
item.mtime = 12345
item.add(self.lib)
item.move(operation=MoveOperation.COPY)
item.store()
items.append(item)
return self.lib.add_album(items)
def create_mediafile_fixture(self, ext="mp3", images=[]):
"""Copy a fixture mediafile with the extension to `temp_dir`.
`images` is a subset of 'png', 'jpg', and 'tiff'. For each
specified extension a cover art image is added to the media
file.
"""
src = os.path.join(_common.RSRC, util.bytestring_path(f"full.{ext}"))
handle, path = mkstemp(dir=self.temp_dir)
path = bytestring_path(path)
os.close(handle)
shutil.copyfile(syspath(src), syspath(path))
if images:
mediafile = MediaFile(path)
imgs = []
for img_ext in images:
file = util.bytestring_path(f"image-2x3.{img_ext}")
img_path = os.path.join(_common.RSRC, file)
with open(img_path, "rb") as f:
imgs.append(Image(f.read()))
mediafile.images = imgs
mediafile.save()
return path
# Running beets commands
def run_command(self, *args, **kwargs):
"""Run a beets command with an arbitrary amount of arguments. The
Library` defaults to `self.lib`, but can be overridden with
the keyword argument `lib`.
"""
sys.argv = ["beet"] # avoid leakage from test suite args
lib = None
if hasattr(self, "lib"):
lib = self.lib
lib = kwargs.get("lib", lib)
beets.ui._raw_main(list(args), lib)
def run_with_output(self, *args):
with capture_stdout() as out:
self.run_command(*args)
return out.getvalue()
# Safe file operations
def create_temp_dir(self, **kwargs) -> str:
return mkdtemp(**kwargs)
def remove_temp_dir(self):
"""Delete the temporary directory created by `create_temp_dir`."""
shutil.rmtree(self.temp_dir_path)
def touch(self, path, dir=None, content=""):
"""Create a file at `path` with given content.
If `dir` is given, it is prepended to `path`. After that, if the
path is relative, it is resolved with respect to
`self.temp_dir`.
"""
if dir:
path = os.path.join(dir, path)
if not os.path.isabs(path):
path = os.path.join(self.temp_dir, path)
parent = os.path.dirname(path)
if not os.path.isdir(syspath(parent)):
os.makedirs(syspath(parent))
with open(syspath(path), "a+") as f:
f.write(content)
return path
# A test harness for all beets tests.
# Provides temporary, isolated configuration.
class BeetsTestCase(unittest.TestCase, TestHelper):
"""A unittest.TestCase subclass that saves and restores beets'
global configuration. This allows tests to make temporary
modifications that will then be automatically removed when the test
completes. Also provides some additional assertion methods, a
temporary directory, and a DummyIO.
"""
def setUp(self):
self.setup_beets()
def tearDown(self):
self.teardown_beets()
class ItemInDBTestCase(BeetsTestCase):
"""A test case that includes an in-memory library object (`lib`) and
an item added to the library (`i`).
"""
def setUp(self):
super().setUp()
self.i = _common.item(self.lib)
class PluginMixin(ConfigMixin):
plugin: ClassVar[str]
preload_plugin: ClassVar[bool] = True
def setup_beets(self):
super().setup_beets()
if self.preload_plugin:
self.load_plugins()
def teardown_beets(self):
super().teardown_beets()
self.unload_plugins()
def register_plugin(
self, plugin_class: type[beets.plugins.BeetsPlugin]
) -> None:
beets.plugins._instances.append(plugin_class())
def load_plugins(self, *plugins: str) -> None:
"""Load and initialize plugins by names.
        Similar to setting a list of plugins in the configuration. Make
sure you call ``unload_plugins()`` afterwards.
"""
# FIXME this should eventually be handled by a plugin manager
plugins = (self.plugin,) if hasattr(self, "plugin") else plugins
self.config["plugins"] = plugins
beets.plugins.load_plugins()
def unload_plugins(self) -> None:
"""Unload all plugins and remove them from the configuration."""
# FIXME this should eventually be handled by a plugin manager
beets.plugins.BeetsPlugin.listeners.clear()
beets.plugins.BeetsPlugin._raw_listeners.clear()
self.config["plugins"] = []
beets.plugins._instances.clear()
@contextmanager
def configure_plugin(self, config: Any):
self.config[self.plugin].set(config)
self.load_plugins(self.plugin)
yield
self.unload_plugins()
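    # Sketch of typical use in a subclass that sets ``plugin`` (the
    # settings shown are hypothetical):
    #
    #     with self.configure_plugin({"auto": True}):
    #         ...  # run commands with the plugin loaded and configured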
class PluginTestCase(PluginMixin, BeetsTestCase):
pass
class ImportHelper(TestHelper):
"""Provides tools to setup a library, a directory containing files that are
to be imported and an import session. The class also provides stubs for the
autotagging library and several assertions for the library.
"""
default_import_config = {
"autotag": True,
"copy": True,
"hardlink": False,
"link": False,
"move": False,
"resume": False,
"singletons": False,
"timid": True,
}
lib: Library
importer: ImportSession
@cached_property
def import_path(self) -> Path:
import_path = self.temp_dir_path / "import"
import_path.mkdir(exist_ok=True)
return import_path
@cached_property
def import_dir(self) -> bytes:
return bytestring_path(self.import_path)
def setUp(self):
super().setUp()
self.import_media = []
self.lib.path_formats = [
("default", os.path.join("$artist", "$album", "$title")),
("singleton:true", os.path.join("singletons", "$title")),
("comp:true", os.path.join("compilations", "$album", "$title")),
]
def prepare_track_for_import(
self,
track_id: int,
album_path: Path,
album_id: int | None = None,
) -> Path:
track_path = album_path / f"track_{track_id}.mp3"
shutil.copy(self.resource_path, track_path)
medium = MediaFile(track_path)
medium.update(
{
"album": f"Tag Album{f' {album_id}' if album_id else ''}",
"albumartist": None,
"mb_albumid": None,
"comp": None,
"artist": "Tag Artist",
"title": f"Tag Track {track_id}",
"track": track_id,
"mb_trackid": None,
}
)
medium.save()
self.import_media.append(medium)
return track_path
def prepare_album_for_import(
self,
item_count: int,
album_id: int | None = None,
album_path: Path | None = None,
) -> list[Path]:
"""Create an album directory with media files to import.
        The directory has the following layout:
album/
track_1.mp3
track_2.mp3
track_3.mp3
"""
if not album_path:
album_dir = f"album_{album_id}" if album_id else "album"
album_path = self.import_path / album_dir
album_path.mkdir(exist_ok=True)
return [
self.prepare_track_for_import(tid, album_path, album_id=album_id)
for tid in range(1, item_count + 1)
]
def prepare_albums_for_import(self, count: int = 1) -> None:
album_dirs = self.import_path.glob("album_*")
base_idx = int(str(max(album_dirs, default="0")).split("_")[-1]) + 1
for album_id in range(base_idx, count + base_idx):
self.prepare_album_for_import(1, album_id=album_id)
def _get_import_session(self, import_dir: bytes) -> ImportSession:
return ImportSessionFixture(
self.lib,
loghandler=None,
query=None,
paths=[import_dir],
)
def setup_importer(
self, import_dir: bytes | None = None, **kwargs
) -> ImportSession:
self.config["import"].set_args({**self.default_import_config, **kwargs})
self.importer = self._get_import_session(import_dir or self.import_dir)
return self.importer
def setup_singleton_importer(self, **kwargs) -> ImportSession:
return self.setup_importer(singletons=True, **kwargs)
class AsIsImporterMixin:
def setUp(self):
super().setUp()
self.prepare_album_for_import(1)
def run_asis_importer(self, **kwargs):
importer = self.setup_importer(autotag=False, **kwargs)
importer.run()
return importer
class ImportTestCase(ImportHelper, BeetsTestCase):
pass
class ImportSessionFixture(ImportSession):
"""ImportSession that can be controlled programaticaly.
>>> lib = Library(':memory:')
>>> importer = ImportSessionFixture(lib, paths=['/path/to/import'])
>>> importer.add_choice(importer.Action.SKIP)
>>> importer.add_choice(importer.Action.ASIS)
>>> importer.default_choice = importer.Action.APPLY
>>> importer.run()
This imports ``/path/to/import`` into `lib`. It skips the first
album and imports the second one with metadata from the tags. For the
remaining albums, the metadata from the autotagger will be applied.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._choices = []
self._resolutions = []
default_choice = importer.Action.APPLY
def add_choice(self, choice):
self._choices.append(choice)
def clear_choices(self):
self._choices = []
def choose_match(self, task):
try:
choice = self._choices.pop(0)
except IndexError:
choice = self.default_choice
if choice == importer.Action.APPLY:
return task.candidates[0]
elif isinstance(choice, int):
return task.candidates[choice - 1]
else:
return choice
choose_item = choose_match
Resolution = Enum("Resolution", "REMOVE SKIP KEEPBOTH MERGE")
default_resolution = "REMOVE"
def resolve_duplicate(self, task, found_duplicates):
try:
res = self._resolutions.pop(0)
except IndexError:
res = self.default_resolution
if res == self.Resolution.SKIP:
task.set_choice(importer.Action.SKIP)
elif res == self.Resolution.REMOVE:
task.should_remove_duplicates = True
elif res == self.Resolution.MERGE:
task.should_merge_duplicates = True
class TerminalImportSessionFixture(TerminalImportSession):
def __init__(self, *args, **kwargs):
self.io = kwargs.pop("io")
super().__init__(*args, **kwargs)
self._choices = []
default_choice = importer.Action.APPLY
def add_choice(self, choice):
self._choices.append(choice)
def clear_choices(self):
self._choices = []
def choose_match(self, task):
self._add_choice_input()
return super().choose_match(task)
def choose_item(self, task):
self._add_choice_input()
return super().choose_item(task)
def _add_choice_input(self):
try:
choice = self._choices.pop(0)
except IndexError:
choice = self.default_choice
if choice == importer.Action.APPLY:
self.io.addinput("A")
elif choice == importer.Action.ASIS:
self.io.addinput("U")
elif choice == importer.Action.ALBUMS:
self.io.addinput("G")
elif choice == importer.Action.TRACKS:
self.io.addinput("T")
elif choice == importer.Action.SKIP:
self.io.addinput("S")
else:
self.io.addinput("M")
self.io.addinput(str(choice))
self._add_choice_input()
class TerminalImportMixin(IOMixin, ImportHelper):
"""Provides_a terminal importer for the import session."""
io: _common.DummyIO
def _get_import_session(self, import_dir: bytes) -> importer.ImportSession:
self.io.install()
return TerminalImportSessionFixture(
self.lib,
loghandler=None,
query=None,
io=self.io,
paths=[import_dir],
)
@dataclass
class AutotagStub:
"""Stub out MusicBrainz album and track matcher and control what the
autotagger returns.
"""
NONE = "NONE"
IDENT = "IDENT"
GOOD = "GOOD"
BAD = "BAD"
MISSING = "MISSING"
matching: str
length = 2
def install(self):
self.patchers = [
patch("beets.metadata_plugins.album_for_id", lambda *_: None),
patch("beets.metadata_plugins.track_for_id", lambda *_: None),
patch("beets.metadata_plugins.candidates", self.candidates),
patch(
"beets.metadata_plugins.item_candidates", self.item_candidates
),
]
for p in self.patchers:
p.start()
return self
def restore(self):
for p in self.patchers:
p.stop()
def candidates(self, items, artist, album, va_likely):
if self.matching == self.IDENT:
yield self._make_album_match(artist, album, len(items))
elif self.matching == self.GOOD:
for i in range(self.length):
yield self._make_album_match(artist, album, len(items), i)
elif self.matching == self.BAD:
for i in range(self.length):
yield self._make_album_match(artist, album, len(items), i + 1)
elif self.matching == self.MISSING:
yield self._make_album_match(artist, album, len(items), missing=1)
def item_candidates(self, item, artist, title):
yield TrackInfo(
title=title.replace("Tag", "Applied"),
track_id="trackid",
artist=artist.replace("Tag", "Applied"),
artist_id="artistid",
length=1,
index=0,
)
def _make_track_match(self, artist, album, number):
return TrackInfo(
title=f"Applied Track {number}",
track_id=f"match {number}",
artist=artist,
length=1,
index=0,
)
def _make_album_match(self, artist, album, tracks, distance=0, missing=0):
id = f" {'M' * distance}" if distance else ""
if artist is None:
artist = "Various Artists"
else:
artist = f"{artist.replace('Tag', 'Applied')}{id}"
album = f"{album.replace('Tag', 'Applied')}{id}"
track_infos = []
for i in range(tracks - missing):
track_infos.append(self._make_track_match(artist, album, i + 1))
return AlbumInfo(
artist=artist,
album=album,
tracks=track_infos,
va=False,
album_id=f"albumid{id}",
artist_id=f"artistid{id}",
albumtype="soundtrack",
data_source="match_source",
bandcamp_album_id="bc_url",
)
class AutotagImportTestCase(ImportTestCase):
matching = AutotagStub.IDENT
def setUp(self):
super().setUp()
self.matcher = AutotagStub(self.matching).install()
self.addCleanup(self.matcher.restore)
class FetchImageHelper:
"""Helper mixin for mocking requests when fetching images
with remote art sources.
"""
@responses.activate
def run(self, *args, **kwargs):
super().run(*args, **kwargs)
IMAGEHEADER: dict[str, bytes] = {
"image/jpeg": b"\xff\xd8\xff\x00\x00\x00JFIF",
"image/png": b"\211PNG\r\n\032\n",
"image/gif": b"GIF89a",
# dummy type that is definitely not a valid image content type
"image/watercolour": b"watercolour",
"text/html": (
b"\n\n\n\n"
b"\n\n"
),
}
def mock_response(
self,
url: str,
content_type: str = "image/jpeg",
file_type: None | str = None,
) -> None:
# Potentially return a file of a type that differs from the
# server-advertised content type to mimic misbehaving servers.
if file_type is None:
file_type = content_type
try:
# imghdr reads 32 bytes
header = self.IMAGEHEADER[file_type].ljust(32, b"\x00")
except KeyError:
            # If we can't return a file that looks like a real file of the
            # requested type, it's better to fail the test than to return
            # something else, which might violate assumptions made when
            # writing the test.
raise AssertionError(f"Mocking {file_type} responses not supported")
responses.add(
responses.GET,
url,
content_type=content_type,
body=header,
)
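    # Illustrative use inside a test method (the URL is hypothetical):
    #
    #     self.mock_response("http://example.com/cover.jpg", "image/jpeg")
    #
    # Requests made by the code under test to that URL then receive a
    # minimal body starting with a JPEG header.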
class CleanupModulesMixin:
modules: ClassVar[tuple[str, ...]]
@classmethod
def tearDownClass(cls) -> None:
"""Remove files created by the plugin."""
for module in cls.modules:
clean_module_tempdir(module)
beetbox-beets-c1877b7/beets/ui/ 0000775 0000000 0000000 00000000000 15073551743 0016337 5 ustar 00root root 0000000 0000000 beetbox-beets-c1877b7/beets/ui/__init__.py 0000664 0000000 0000000 00000163071 15073551743 0020460 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module contains all of the core logic for beets' command-line
interface. To invoke the CLI, just call beets.ui.main(). The actual
CLI commands are implemented in the ui.commands module.
"""
from __future__ import annotations
import errno
import optparse
import os.path
import re
import sqlite3
import struct
import sys
import textwrap
import traceback
import warnings
from difflib import SequenceMatcher
from functools import cache
from itertools import chain
from typing import Any, Callable, Literal
import confuse
from beets import config, library, logging, plugins, util
from beets.dbcore import db
from beets.dbcore import query as db_query
from beets.util import as_string
from beets.util.functemplate import template
# On Windows platforms, use colorama to support "ANSI" terminal colors.
if sys.platform == "win32":
try:
import colorama
except ImportError:
pass
else:
colorama.init()
log = logging.getLogger("beets")
if not log.handlers:
log.addHandler(logging.StreamHandler())
log.propagate = False # Don't propagate to root handler.
PF_KEY_QUERIES = {
"comp": "comp:true",
"singleton": "singleton:true",
}
class UserError(Exception):
"""UI exception. Commands should throw this in order to display
nonrecoverable errors to the user.
"""
# Encoding utilities.
def _in_encoding():
"""Get the encoding to use for *inputting* strings from the console."""
return _stream_encoding(sys.stdin)
def _out_encoding():
"""Get the encoding to use for *outputting* strings to the console."""
return _stream_encoding(sys.stdout)
def _stream_encoding(stream, default="utf-8"):
"""A helper for `_in_encoding` and `_out_encoding`: get the stream's
preferred encoding, using a configured override or a default
    fallback if neither is specified.
"""
# Configured override?
encoding = config["terminal_encoding"].get()
if encoding:
return encoding
# For testing: When sys.stdout or sys.stdin is a StringIO under the
# test harness, it doesn't have an `encoding` attribute. Just use
# UTF-8.
if not hasattr(stream, "encoding"):
return default
# Python's guessed output stream encoding, or UTF-8 as a fallback
# (e.g., when piped to a file).
return stream.encoding or default
def decargs(arglist):
"""Given a list of command-line argument bytestrings, attempts to
decode them to Unicode strings when running under Python 2.
.. deprecated:: 2.4.0
This function will be removed in 3.0.0.
"""
warnings.warn(
"decargs() is deprecated and will be removed in version 3.0.0.",
DeprecationWarning,
stacklevel=2,
)
return arglist
def print_(*strings: str, end: str = "\n") -> None:
"""Like print, but rather than raising an error when a character
is not in the terminal's encoding's character set, just silently
replaces it.
The `end` keyword argument behaves similarly to the built-in `print`
(it defaults to a newline).
"""
txt = f"{' '.join(strings or ('',))}{end}"
# Encode the string and write it to stdout.
# On Python 3, sys.stdout expects text strings and uses the
# exception-throwing encoding error policy. To avoid throwing
# errors and use our configurable encoding override, we use the
# underlying bytes buffer instead.
if hasattr(sys.stdout, "buffer"):
out = txt.encode(_out_encoding(), "replace")
sys.stdout.buffer.write(out)
sys.stdout.buffer.flush()
else:
# In our test harnesses (e.g., DummyOut), sys.stdout.buffer
# does not exist. We instead just record the text string.
sys.stdout.write(txt)
# Configuration wrappers.
def _bool_fallback(a, b):
"""Given a boolean or None, return the original value or a fallback."""
if a is None:
assert isinstance(b, bool)
return b
else:
assert isinstance(a, bool)
return a
def should_write(write_opt=None):
"""Decide whether a command that updates metadata should also write
tags, using the importer configuration as the default.
"""
return _bool_fallback(write_opt, config["import"]["write"].get(bool))
def should_move(move_opt=None):
"""Decide whether a command that updates metadata should also move
files when they're inside the library, using the importer
configuration as the default.
Specifically, commands should move files after metadata updates only
when the importer is configured *either* to move *or* to copy files.
They should avoid moving files when the importer is configured not
to touch any filenames.
"""
return _bool_fallback(
move_opt,
config["import"]["move"].get(bool)
or config["import"]["copy"].get(bool),
)
# Input prompts.
def indent(count):
"""Returns a string with `count` many spaces."""
return " " * count
def input_(prompt=None):
"""Like `input`, but decodes the result to a Unicode string.
Raises a UserError if stdin is not available. The prompt is sent to
    stdout rather than stderr. A space is printed between the prompt and
    the input cursor.
"""
# raw_input incorrectly sends prompts to stderr, not stdout, so we
# use print_() explicitly to display prompts.
# https://bugs.python.org/issue1927
if prompt:
print_(prompt, end=" ")
try:
resp = input()
except EOFError:
raise UserError("stdin stream ended while input required")
return resp
def input_options(
options,
require=False,
prompt=None,
fallback_prompt=None,
numrange=None,
default=None,
max_width=72,
):
"""Prompts a user for input. The sequence of `options` defines the
choices the user has. A single-letter shortcut is inferred for each
option; the user's choice is returned as that single, lower-case
letter. The options should be provided as lower-case strings unless
a particular shortcut is desired; in that case, only that letter
should be capitalized.
By default, the first option is the default. `default` can be provided to
override this. If `require` is provided, then there is no default. The
prompt and fallback prompt are also inferred but can be overridden.
    If numrange is provided, it is a pair of `(low, high)` (both ints)
indicating that, in addition to `options`, the user may enter an
integer in that inclusive range.
`max_width` specifies the maximum number of columns in the
automatically generated prompt string.
"""
# Assign single letters to each option. Also capitalize the options
# to indicate the letter.
letters = {}
display_letters = []
capitalized = []
first = True
for option in options:
# Is a letter already capitalized?
for letter in option:
if letter.isalpha() and letter.upper() == letter:
found_letter = letter
break
else:
# Infer a letter.
for letter in option:
if not letter.isalpha():
continue # Don't use punctuation.
if letter not in letters:
found_letter = letter
break
else:
raise ValueError("no unambiguous lettering found")
letters[found_letter.lower()] = option
index = option.index(found_letter)
# Mark the option's shortcut letter for display.
if not require and (
(default is None and not numrange and first)
or (
isinstance(default, str)
and found_letter.lower() == default.lower()
)
):
# The first option is the default; mark it.
show_letter = f"[{found_letter.upper()}]"
is_default = True
else:
show_letter = found_letter.upper()
is_default = False
# Colorize the letter shortcut.
show_letter = colorize(
"action_default" if is_default else "action", show_letter
)
# Insert the highlighted letter back into the word.
descr_color = "action_default" if is_default else "action_description"
capitalized.append(
colorize(descr_color, option[:index])
+ show_letter
+ colorize(descr_color, option[index + 1 :])
)
display_letters.append(found_letter.upper())
first = False
# The default is just the first option if unspecified.
if require:
default = None
elif default is None:
if numrange:
default = numrange[0]
else:
default = display_letters[0].lower()
# Make a prompt if one is not provided.
if not prompt:
prompt_parts = []
prompt_part_lengths = []
if numrange:
if isinstance(default, int):
default_name = str(default)
default_name = colorize("action_default", default_name)
tmpl = "# selection (default {})"
prompt_parts.append(tmpl.format(default_name))
prompt_part_lengths.append(len(tmpl) - 2 + len(str(default)))
else:
prompt_parts.append("# selection")
prompt_part_lengths.append(len(prompt_parts[-1]))
prompt_parts += capitalized
prompt_part_lengths += [len(s) for s in options]
# Wrap the query text.
# Start prompt with U+279C: Heavy Round-Tipped Rightwards Arrow
prompt = colorize("action", "\u279c ")
line_length = 0
for i, (part, length) in enumerate(
zip(prompt_parts, prompt_part_lengths)
):
# Add punctuation.
if i == len(prompt_parts) - 1:
part += colorize("action_description", "?")
else:
part += colorize("action_description", ",")
length += 1
# Choose either the current line or the beginning of the next.
if line_length + length + 1 > max_width:
prompt += "\n"
line_length = 0
if line_length != 0:
# Not the beginning of the line; need a space.
part = f" {part}"
length += 1
prompt += part
line_length += length
# Make a fallback prompt too. This is displayed if the user enters
# something that is not recognized.
if not fallback_prompt:
fallback_prompt = "Enter one of "
if numrange:
fallback_prompt += "{}-{}, ".format(*numrange)
fallback_prompt += f"{', '.join(display_letters)}:"
resp = input_(prompt)
while True:
resp = resp.strip().lower()
# Try default option.
if default is not None and not resp:
resp = default
# Try an integer input if available.
if numrange:
try:
resp = int(resp)
except ValueError:
pass
else:
low, high = numrange
if low <= resp <= high:
return resp
else:
resp = None
# Try a normal letter input.
if resp:
resp = resp[0]
if resp in letters:
return resp
# Prompt for new input.
resp = input_(fallback_prompt)
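# A minimal sketch of calling `input_options` (choices shown are
# illustrative):
#
#     sel = input_options(("aBort", "skip"), numrange=(1, 5))
#
# The capitalized B makes "b" the shortcut for the first option; "s" is
# inferred for the second. `sel` is the chosen lower-case letter or an
# integer in 1-5 (the default here is 1, the low end of `numrange`).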
def input_yn(prompt, require=False):
"""Prompts the user for a "yes" or "no" response. The default is
"yes" unless `require` is `True`, in which case there is no default.
"""
# Start prompt with U+279C: Heavy Round-Tipped Rightwards Arrow
yesno = colorize("action", "\u279c ") + colorize(
"action_description", "Enter Y or N:"
)
sel = input_options(("y", "n"), require, prompt, yesno)
return sel == "y"
def input_select_objects(prompt, objs, rep, prompt_all=None):
"""Prompt to user to choose all, none, or some of the given objects.
Return the list of selected objects.
`prompt` is the prompt string to use for each question (it should be
phrased as an imperative verb). If `prompt_all` is given, it is used
    instead of `prompt` for the first (yes/no/select) question.
`rep` is a function to call on each object to print it out when confirming
objects individually.
"""
choice = input_options(
("y", "n", "s"), False, f"{prompt_all or prompt}? (Yes/no/select)"
)
print() # Blank line.
if choice == "y": # Yes.
return objs
elif choice == "s": # Select.
out = []
for obj in objs:
rep(obj)
answer = input_options(
("y", "n", "q"),
True,
f"{prompt}? (yes/no/quit)",
"Enter Y or N:",
)
if answer == "y":
out.append(obj)
elif answer == "q":
return out
return out
else: # No.
return []
# Colorization.
# ANSI terminal colorization code heavily inspired by pygments:
# https://bitbucket.org/birkenfeld/pygments-main/src/default/pygments/console.py
# (pygments is by Tim Hatch, Armin Ronacher, et al.)
COLOR_ESCAPE = "\x1b"
LEGACY_COLORS = {
"black": ["black"],
"darkred": ["red"],
"darkgreen": ["green"],
"brown": ["yellow"],
"darkyellow": ["yellow"],
"darkblue": ["blue"],
"purple": ["magenta"],
"darkmagenta": ["magenta"],
"teal": ["cyan"],
"darkcyan": ["cyan"],
"lightgray": ["white"],
"darkgray": ["bold", "black"],
"red": ["bold", "red"],
"green": ["bold", "green"],
"yellow": ["bold", "yellow"],
"blue": ["bold", "blue"],
"fuchsia": ["bold", "magenta"],
"magenta": ["bold", "magenta"],
"turquoise": ["bold", "cyan"],
"cyan": ["bold", "cyan"],
"white": ["bold", "white"],
}
# All ANSI Colors.
CODE_BY_COLOR = {
# Styles.
"normal": 0,
"bold": 1,
"faint": 2,
# "italic": 3,
"underline": 4,
# "blink_slow": 5,
# "blink_rapid": 6,
"inverse": 7,
# "conceal": 8,
# "crossed_out": 9
# Text colors.
"black": 30,
"red": 31,
"green": 32,
"yellow": 33,
"blue": 34,
"magenta": 35,
"cyan": 36,
"white": 37,
# Background colors.
"bg_black": 40,
"bg_red": 41,
"bg_green": 42,
"bg_yellow": 43,
"bg_blue": 44,
"bg_magenta": 45,
"bg_cyan": 46,
"bg_white": 47,
}
RESET_COLOR = f"{COLOR_ESCAPE}[39;49;00m"
# Precompile common ANSI-escape regex patterns
ANSI_CODE_REGEX = re.compile(rf"({COLOR_ESCAPE}\[[;0-9]*m)")
ESC_TEXT_REGEX = re.compile(
rf"""(?P[^{COLOR_ESCAPE}]*)
(?P(?:{ANSI_CODE_REGEX.pattern})+)
(?P[^{COLOR_ESCAPE}]+)(?P{re.escape(RESET_COLOR)})
(?P[^{COLOR_ESCAPE}]*)""",
re.VERBOSE,
)
ColorName = Literal[
"text_success",
"text_warning",
"text_error",
"text_highlight",
"text_highlight_minor",
"action_default",
"action",
# New Colors
"text_faint",
"import_path",
"import_path_items",
"action_description",
"changed",
"text_diff_added",
"text_diff_removed",
]
@cache
def get_color_config() -> dict[ColorName, str]:
"""Parse and validate color configuration, converting names to ANSI codes.
Processes the UI color configuration, handling both new list format and
legacy single-color format. Validates all color names against known codes
and raises an error for any invalid entries.
"""
colors_by_color_name: dict[ColorName, list[str]] = {
k: (v if isinstance(v, list) else LEGACY_COLORS.get(v, [v]))
for k, v in config["ui"]["colors"].flatten().items()
}
if invalid_colors := (
set(chain.from_iterable(colors_by_color_name.values()))
- CODE_BY_COLOR.keys()
):
raise UserError(
f"Invalid color(s) in configuration: {', '.join(invalid_colors)}"
)
return {
n: ";".join(str(CODE_BY_COLOR[c]) for c in colors)
for n, colors in colors_by_color_name.items()
}
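# Illustrative mapping: a configuration entry such as
#
#     text_error: ["bold", "red"]
#
# is rendered by this function as the ANSI code string "1;31" (per
# CODE_BY_COLOR above).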
def colorize(color_name: ColorName, text: str) -> str:
"""Apply ANSI color formatting to text based on configuration settings.
Returns colored text when color output is enabled and NO_COLOR environment
variable is not set, otherwise returns plain text unchanged.
"""
if config["ui"]["color"] and "NO_COLOR" not in os.environ:
color_code = get_color_config()[color_name]
return f"{COLOR_ESCAPE}[{color_code}m{text}{RESET_COLOR}"
return text
def uncolorize(colored_text):
"""Remove colors from a string."""
# Define a regular expression to match ANSI codes.
# See: http://stackoverflow.com/a/2187024/1382707
# Explanation of regular expression:
# \x1b - matches ESC character
# \[ - matches opening square bracket
    # [;\d]* - matches a sequence consisting of zero or more digits or
    # semicolons
# [A-Za-z] - matches a letter
return ANSI_CODE_REGEX.sub("", colored_text)
def color_split(colored_text, index):
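    """Split `colored_text` at printable-character position `index`,
    returning a `(before, after)` pair. Any color active at the split is
    closed at the end of `before` and re-opened at the start of `after`.
    """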
length = 0
pre_split = ""
post_split = ""
found_color_code = None
found_split = False
for part in ANSI_CODE_REGEX.split(colored_text):
# Count how many real letters we have passed
length += color_len(part)
if found_split:
post_split += part
else:
if ANSI_CODE_REGEX.match(part):
# This is a color code
if part == RESET_COLOR:
found_color_code = None
else:
found_color_code = part
pre_split += part
else:
if index < length:
# Found part with our split in.
split_index = index - (length - color_len(part))
found_split = True
if found_color_code:
pre_split += f"{part[:split_index]}{RESET_COLOR}"
post_split += f"{found_color_code}{part[split_index:]}"
else:
pre_split += part[:split_index]
post_split += part[split_index:]
else:
# Not found, add this part to the pre split
pre_split += part
return pre_split, post_split
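# Sketch (assuming color output is enabled): splitting a colored string
# closes and re-opens the active color so each half renders on its own:
#
#     pre, post = color_split(colorize("text_error", "abcd"), 2)
#     # uncolorize(pre) == "ab", uncolorize(post) == "cd"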
def color_len(colored_text):
"""Measure the length of a string while excluding ANSI codes from the
    measurement. The standard `len(my_string)` method also counts ANSI codes
    toward the string length, which is counterproductive when laying out a
    terminal interface.
"""
# Return the length of the uncolored string.
return len(uncolorize(colored_text))
def _colordiff(a: Any, b: Any) -> tuple[str, str]:
"""Given two values, return the same pair of strings except with
their differences highlighted in the specified color. Strings are
highlighted intelligently to show differences; other values are
stringified and highlighted in their entirety.
"""
# First, convert paths to readable format
if isinstance(a, bytes) or isinstance(b, bytes):
# A path field.
a = util.displayable_path(a)
b = util.displayable_path(b)
if not isinstance(a, str) or not isinstance(b, str):
# Non-strings: use ordinary equality.
if a == b:
return str(a), str(b)
else:
return (
colorize("text_diff_removed", str(a)),
colorize("text_diff_added", str(b)),
)
before = ""
after = ""
matcher = SequenceMatcher(lambda x: False, a, b)
for op, a_start, a_end, b_start, b_end in matcher.get_opcodes():
before_part, after_part = a[a_start:a_end], b[b_start:b_end]
if op in {"delete", "replace"}:
before_part = colorize("text_diff_removed", before_part)
if op in {"insert", "replace"}:
after_part = colorize("text_diff_added", after_part)
before += before_part
after += after_part
return before, after
def colordiff(a, b):
"""Colorize differences between two values if color is enabled.
(Like _colordiff but conditional.)
"""
if config["ui"]["color"]:
return _colordiff(a, b)
else:
return str(a), str(b)
def get_path_formats(subview=None):
"""Get the configuration's path formats as a list of query/template
pairs.
"""
path_formats = []
subview = subview or config["paths"]
for query, view in subview.items():
query = PF_KEY_QUERIES.get(query, query) # Expand common queries.
path_formats.append((query, template(view.as_str())))
return path_formats
def get_replacements():
"""Confuse validation function that reads regex/string pairs."""
replacements = []
for pattern, repl in config["replace"].get(dict).items():
repl = repl or ""
try:
replacements.append((re.compile(pattern), repl))
except re.error:
raise UserError(
f"malformed regular expression in replace: {pattern}"
)
return replacements
def term_width():
"""Get the width (columns) of the terminal."""
fallback = config["ui"]["terminal_width"].get(int)
# The fcntl and termios modules are not available on non-Unix
# platforms, so we fall back to a constant.
try:
import fcntl
import termios
except ImportError:
return fallback
try:
buf = fcntl.ioctl(0, termios.TIOCGWINSZ, " " * 4)
except OSError:
return fallback
try:
height, width = struct.unpack("hh", buf)
except struct.error:
return fallback
return width
def split_into_lines(string, width_tuple):
"""Splits string into a list of substrings at whitespace.
    `width_tuple` is a 3-tuple of `(first_width, middle_width, last_width)`.
The first substring has a length not longer than `first_width`, the last
substring has a length not longer than `last_width`, and all other
substrings have a length not longer than `middle_width`.
`string` may contain ANSI codes at word borders.
"""
first_width, middle_width, last_width = width_tuple
words = []
if uncolorize(string) == string:
# No colors in string
words = string.split()
else:
# Use a regex to find escapes and the text within them.
for m in ESC_TEXT_REGEX.finditer(string):
            # m contains five groups:
            # pretext - any text before escape sequence
            # esc - initial escape sequence
            # text - text, no escape sequence, may contain spaces
            # reset - ANSI colour reset
            # posttext - any text after the reset
space_before_text = False
if m.group("pretext") != "":
# Some pretext found, let's handle it
# Add any words in the pretext
words += m.group("pretext").split()
if m.group("pretext")[-1] == " ":
# Pretext ended on a space
space_before_text = True
else:
                    # Pretext ended mid-word; the next colored word is
                    # appended to it below.
pass
else:
# pretext empty, treat as if there is a space before
space_before_text = True
if m.group("text")[0] == " ":
# First character of the text is a space
space_before_text = True
# Now, handle the words in the main text:
raw_words = m.group("text").split()
if space_before_text:
# Colorize each word with pre/post escapes
# Reconstruct colored words
words += [
f"{m['esc']}{raw_word}{RESET_COLOR}"
for raw_word in raw_words
]
elif raw_words:
# Pretext stops mid-word
if m.group("esc") != RESET_COLOR:
# Add the rest of the current word, with a reset after it
words[-1] += f"{m['esc']}{raw_words[0]}{RESET_COLOR}"
# Add the subsequent colored words:
words += [
f"{m['esc']}{raw_word}{RESET_COLOR}"
for raw_word in raw_words[1:]
]
else:
# Caught a mid-word escape sequence
words[-1] += raw_words[0]
words += raw_words[1:]
if (
m.group("text")[-1] != " "
and m.group("posttext") != ""
and m.group("posttext")[0] != " "
):
# reset falls mid-word
post_text = m.group("posttext").split()
words[-1] += post_text[0]
words += post_text[1:]
else:
# Add any words after escape sequence
words += m.group("posttext").split()
result = []
next_substr = ""
# Iterate over all words.
previous_fit = False
for i in range(len(words)):
if i == 0:
pot_substr = words[i]
else:
# (optimistically) add the next word to check the fit
pot_substr = " ".join([next_substr, words[i]])
# Find out if the pot(ential)_substr fits into the next substring.
fits_first = len(result) == 0 and color_len(pot_substr) <= first_width
fits_middle = len(result) != 0 and color_len(pot_substr) <= middle_width
if fits_first or fits_middle:
# Fitted(!) let's try and add another word before appending
next_substr = pot_substr
previous_fit = True
elif not fits_first and not fits_middle and previous_fit:
# Extra word didn't fit, append what we have
result.append(next_substr)
next_substr = words[i]
previous_fit = color_len(next_substr) <= middle_width
else:
# Didn't fit anywhere
if uncolorize(pot_substr) == pot_substr:
# Simple uncolored string, append a cropped word
if len(result) == 0:
# Crop word by the first_width for the first line
result.append(pot_substr[:first_width])
# add rest of word to next line
next_substr = pot_substr[first_width:]
else:
result.append(pot_substr[:middle_width])
next_substr = pot_substr[middle_width:]
else:
# Colored strings
if len(result) == 0:
this_line, next_line = color_split(pot_substr, first_width)
result.append(this_line)
next_substr = next_line
else:
this_line, next_line = color_split(pot_substr, middle_width)
result.append(this_line)
next_substr = next_line
previous_fit = color_len(next_substr) <= middle_width
# We finished constructing the substrings, but the last substring
# has not yet been added to the result.
result.append(next_substr)
# Also, the length of the last substring was only checked against
# `middle_width`. Append an empty substring as the new last substring if
# the last substring is too long.
if not color_len(next_substr) <= last_width:
result.append("")
return result
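# Illustrative call: with a plain (uncolored) string and width_tuple
# (10, 10, 10), words are packed greedily into chunks of at most ten
# printable characters:
#
#     split_into_lines("one two three four", (10, 10, 10))
#     # -> ["one two", "three four"]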
def print_column_layout(
indent_str, left, right, separator=" -> ", max_width=term_width()
):
"""Print left & right data, with separator inbetween
'left' and 'right' have a structure of:
{'prefix':u'','contents':u'','suffix':u'','width':0}
In a column layout the printing will be:
{indent_str}{lhs0}{separator}{rhs0}
{lhs1 / padding }{rhs1}
...
The first line of each column (i.e. {lhs0} or {rhs0}) is:
{prefix}{part of contents}{suffix}
With subsequent lines (i.e. {lhs1}, {rhs1} onwards) being the
rest of contents, wrapped if the width would be otherwise exceeded.
"""
if f"{right['prefix']}{right['contents']}{right['suffix']}" == "":
# No right hand information, so we don't need a separator.
separator = ""
first_line_no_wrap = (
f"{indent_str}{left['prefix']}{left['contents']}{left['suffix']}"
f"{separator}{right['prefix']}{right['contents']}{right['suffix']}"
)
if color_len(first_line_no_wrap) < max_width:
# Everything fits, print out line.
print_(first_line_no_wrap)
else:
# Wrap into columns
if "width" not in left or "width" not in right:
# If widths have not been defined, set to share space.
left["width"] = (
max_width - len(indent_str) - color_len(separator)
) // 2
right["width"] = (
max_width - len(indent_str) - color_len(separator)
) // 2
# On the first line, account for suffix as well as prefix
left_width_tuple = (
left["width"]
- color_len(left["prefix"])
- color_len(left["suffix"]),
left["width"] - color_len(left["prefix"]),
left["width"] - color_len(left["prefix"]),
)
left_split = split_into_lines(left["contents"], left_width_tuple)
right_width_tuple = (
right["width"]
- color_len(right["prefix"])
- color_len(right["suffix"]),
right["width"] - color_len(right["prefix"]),
right["width"] - color_len(right["prefix"]),
)
right_split = split_into_lines(right["contents"], right_width_tuple)
max_line_count = max(len(left_split), len(right_split))
out = ""
for i in range(max_line_count):
# indentation
out += indent_str
# Prefix or indent_str for line
if i == 0:
out += left["prefix"]
else:
out += indent(color_len(left["prefix"]))
# Line i of left hand side contents.
if i < len(left_split):
out += left_split[i]
left_part_len = color_len(left_split[i])
else:
left_part_len = 0
# Padding until end of column.
            # Note: differs from the original column calculations in not
            # subtracting 1 for the space before the track number, since
            # that space is included in 'prefix'.
padding = left["width"] - color_len(left["prefix"]) - left_part_len
# Remove some padding on the first line to display
# length
if i == 0:
padding -= color_len(left["suffix"])
out += indent(padding)
if i == 0:
out += left["suffix"]
# Separator between columns.
if i == 0:
out += separator
else:
out += indent(color_len(separator))
# Right prefix, contents, padding, suffix
if i == 0:
out += right["prefix"]
else:
out += indent(color_len(right["prefix"]))
# Line i of right hand side.
if i < len(right_split):
out += right_split[i]
right_part_len = color_len(right_split[i])
else:
right_part_len = 0
# Padding until end of column
padding = (
right["width"] - color_len(right["prefix"]) - right_part_len
)
# Remove some padding on the first line to display
# length
if i == 0:
padding -= color_len(right["suffix"])
out += indent(padding)
# Length in first line
if i == 0:
out += right["suffix"]
# Linebreak, except in the last line.
if i < max_line_count - 1:
out += "\n"
# Constructed all of the columns, now print
print_(out)
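# A minimal sketch of the column data structure (values illustrative):
#
#     left = {"prefix": "", "contents": "Old Title", "suffix": "", "width": 30}
#     right = {"prefix": "", "contents": "New Title", "suffix": "", "width": 30}
#     print_column_layout("  ", left, right)
#     # prints "  Old Title -> New Title"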
def print_newline_layout(
indent_str, left, right, separator=" -> ", max_width=term_width()
):
"""Prints using a newline separator between left & right if
they go over their allocated widths. The datastructures are
shared with the column layout. In contrast to the column layout,
the prefix and suffix are printed at the beginning and end of
the contents. If no wrapping is required (i.e. everything fits) the
first line will look exactly the same as the column layout:
{indent}{lhs0}{separator}{rhs0}
However if this would go over the width given, the layout now becomes:
{indent}{lhs0}
{indent}{separator}{rhs0}
If {lhs0} would go over the maximum width, the subsequent lines are
indented a second time for ease of reading.
"""
if f"{right['prefix']}{right['contents']}{right['suffix']}" == "":
# No right hand information, so we don't need a separator.
separator = ""
first_line_no_wrap = (
f"{indent_str}{left['prefix']}{left['contents']}{left['suffix']}"
f"{separator}{right['prefix']}{right['contents']}{right['suffix']}"
)
if color_len(first_line_no_wrap) < max_width:
# Everything fits, print out line.
print_(first_line_no_wrap)
else:
# Newline separation, with wrapping
empty_space = max_width - len(indent_str)
# On lower lines we will double the indent for clarity
left_width_tuple = (
empty_space,
empty_space - len(indent_str),
empty_space - len(indent_str),
)
left_str = f"{left['prefix']}{left['contents']}{left['suffix']}"
left_split = split_into_lines(left_str, left_width_tuple)
# Repeat calculations for rhs, including separator on first line
right_width_tuple = (
empty_space - color_len(separator),
empty_space - len(indent_str),
empty_space - len(indent_str),
)
right_str = f"{right['prefix']}{right['contents']}{right['suffix']}"
right_split = split_into_lines(right_str, right_width_tuple)
for i, line in enumerate(left_split):
if i == 0:
print_(f"{indent_str}{line}")
elif line != "":
# Ignore empty lines
print_(f"{indent_str * 2}{line}")
for i, line in enumerate(right_split):
if i == 0:
print_(f"{indent_str}{separator}{line}")
elif line != "":
print_(f"{indent_str * 2}{line}")
FLOAT_EPSILON = 0.01
def _field_diff(field, old, old_fmt, new, new_fmt):
"""Given two Model objects and their formatted views, format their values
for `field` and highlight changes among them. Return a human-readable
string. If the value has not changed, return None instead.
"""
oldval = old.get(field)
newval = new.get(field)
# If no change, abort.
if (
isinstance(oldval, float)
and isinstance(newval, float)
and abs(oldval - newval) < FLOAT_EPSILON
):
return None
elif oldval == newval:
return None
# Get formatted values for output.
oldstr = old_fmt.get(field, "")
newstr = new_fmt.get(field, "")
# For strings, highlight changes. For others, colorize the whole
# thing.
if isinstance(oldval, str):
oldstr, newstr = colordiff(oldval, newstr)
else:
oldstr = colorize("text_diff_removed", oldstr)
newstr = colorize("text_diff_added", newstr)
return f"{oldstr} -> {newstr}"
def show_model_changes(new, old=None, fields=None, always=False):
"""Given a Model object, print a list of changes from its pristine
version stored in the database. Return a boolean indicating whether
any changes were found.
`old` may be the "original" object to avoid using the pristine
version from the database. `fields` may be a list of fields to
restrict the detection to. `always` indicates whether the object is
always identified, regardless of whether any changes are present.
"""
old = old or new._db._get(type(new), new.id)
# Keep the formatted views around instead of re-creating them in each
# iteration step
old_fmt = old.formatted()
new_fmt = new.formatted()
# Build up lines showing changed fields.
changes = []
for field in old:
# Subset of the fields. Never show mtime.
if field == "mtime" or (fields and field not in fields):
continue
# Detect and show difference for this field.
line = _field_diff(field, old, old_fmt, new, new_fmt)
if line:
changes.append(f" {field}: {line}")
# New fields.
for field in set(new) - set(old):
if fields and field not in fields:
continue
changes.append(
f" {field}: {colorize('text_highlight', new_fmt[field])}"
)
# Print changes.
if changes or always:
print_(format(old))
if changes:
print_("\n".join(changes))
return bool(changes)
def show_path_changes(path_changes):
"""Given a list of tuples (source, destination) that indicate the
path changes, log the changes as INFO-level output to the beets log.
The output is guaranteed to be unicode.
Every pair is shown on a single line if the terminal width permits it,
else it is split over two lines. E.g.,
Source -> Destination
vs.
Source
-> Destination
"""
sources, destinations = zip(*path_changes)
# Ensure unicode output
sources = list(map(util.displayable_path, sources))
destinations = list(map(util.displayable_path, destinations))
# Calculate widths for terminal split
col_width = (term_width() - len(" -> ")) // 2
max_width = len(max(sources + destinations, key=len))
if max_width > col_width:
# Print every change over two lines
for source, dest in zip(sources, destinations):
color_source, color_dest = colordiff(source, dest)
print_(f"{color_source} \n -> {color_dest}")
else:
# Print every change on a single line, and add a header
title_pad = max_width - len("Source ") + len(" -> ")
print_(f"Source {' ' * title_pad} Destination")
for source, dest in zip(sources, destinations):
pad = max_width - len(source)
color_source, color_dest = colordiff(source, dest)
print_(f"{color_source} {' ' * pad} -> {color_dest}")
# Helper functions for option parsing.
def _store_dict(option, opt_str, value, parser):
"""Custom action callback to parse options which have ``key=value``
pairs as values. All such pairs passed for this option are
aggregated into a dictionary.
"""
dest = option.dest
option_values = getattr(parser.values, dest, None)
if option_values is None:
# This is the first supplied ``key=value`` pair of option.
# Initialize empty dictionary and get a reference to it.
setattr(parser.values, dest, {})
option_values = getattr(parser.values, dest)
try:
key, value = value.split("=", 1)
if not (key and value):
raise ValueError
except ValueError:
raise UserError(
f"supplied argument `{value}' is not of the form `key=value'"
)
option_values[key] = value
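# Illustrative sketch mirroring the `--set` option defined in
# beets/ui/commands.py:
#   parser.add_option("--set", dest="set_fields", action="callback",
#                     callback=_store_dict)
# Invoking `--set genre=jazz --set year=1959` then leaves
# opts.set_fields == {"genre": "jazz", "year": "1959"}.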
class CommonOptionsParser(optparse.OptionParser):
"""Offers a simple way to add common formatting options.
Options available include:
- matching albums instead of tracks: add_album_option()
- showing paths instead of items/albums: add_path_option()
- changing the format of displayed items/albums: add_format_option()
The last one can have several behaviors:
- against a special target
- with a certain format
- autodetected target with the album option
Each method is fully documented in the related method.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._album_flags = False
# This serves both as an indicator that we offer the feature and lets
# us check whether it has been specified on the CLI, regardless of the
# order in which arguments appear.
def add_album_option(self, flags=("-a", "--album")):
"""Add a -a/--album option to match albums instead of tracks.
If used then the format option can auto-detect whether we're setting
the format for items or albums.
Sets the album property on the options extracted from the CLI.
"""
album = optparse.Option(
*flags, action="store_true", help="match albums instead of tracks"
)
self.add_option(album)
self._album_flags = set(flags)
def _set_format(
self,
option,
opt_str,
value,
parser,
target=None,
fmt=None,
store_true=False,
):
"""Internal callback that sets the correct format while parsing CLI
arguments.
"""
if store_true:
setattr(parser.values, option.dest, True)
# Use the explicitly specified format, or the string from the option.
value = fmt or value or ""
parser.values.format = value
if target:
config[target._format_config_key].set(value)
else:
if self._album_flags:
if parser.values.album:
target = library.Album
else:
# the option is either missing or not parsed yet
if self._album_flags & set(parser.rargs):
target = library.Album
else:
target = library.Item
config[target._format_config_key].set(value)
else:
config[library.Item._format_config_key].set(value)
config[library.Album._format_config_key].set(value)
def add_path_option(self, flags=("-p", "--path")):
"""Add a -p/--path option to display the path instead of the default
format.
By default this affects both items and albums. If add_album_option()
is used then the target will be autodetected.
Sets the format property to '$path' on the options extracted from the
CLI.
"""
path = optparse.Option(
*flags,
nargs=0,
action="callback",
callback=self._set_format,
callback_kwargs={"fmt": "$path", "store_true": True},
help="print paths for matched items or albums",
)
self.add_option(path)
def add_format_option(self, flags=("-f", "--format"), target=None):
"""Add -f/--format option to print some LibModel instances with a
custom format.
`target` is optional and can be one of ``library.Item``, 'item',
``library.Album`` and 'album'.
Several behaviors are available:
- if `target` is given then the format is only applied to that
LibModel
- if the album option is used then the target will be autodetected
- otherwise the format is applied to both items and albums.
Sets the format property on the options extracted from the CLI.
"""
kwargs = {}
if target:
if isinstance(target, str):
target = {"item": library.Item, "album": library.Album}[target]
kwargs["target"] = target
opt = optparse.Option(
*flags,
action="callback",
callback=self._set_format,
callback_kwargs=kwargs,
help="print with custom format",
)
self.add_option(opt)
def add_all_common_options(self):
"""Add album, path and format options."""
self.add_album_option()
self.add_path_option()
self.add_format_option()
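# Illustrative sketch: with all three options registered,
#   parser = CommonOptionsParser()
#   parser.add_all_common_options()
#   opts, args = parser.parse_args(["-a", "-f", "$album"])
# opts.album is True and, because -a is present, the format callback
# auto-targets Album and stores "$album" as its display format.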
# Subcommand parsing infrastructure.
#
# This is a fairly generic subcommand parser for optparse. It is
# maintained externally here:
# https://gist.github.com/462717
# There you will also find a better description of the code and a more
# succinct example program.
class Subcommand:
"""A subcommand of a root command-line application that may be
invoked by a SubcommandOptionParser.
"""
func: Callable[[library.Library, optparse.Values, list[str]], Any]
def __init__(self, name, parser=None, help="", aliases=(), hide=False):
"""Creates a new subcommand. name is the primary way to invoke
the subcommand; aliases are alternate names. parser is an
OptionParser responsible for parsing the subcommand's options.
help is a short description of the command. If no parser is
given, it defaults to a new, empty CommonOptionsParser.
"""
self.name = name
self.parser = parser or CommonOptionsParser()
self.aliases = aliases
self.help = help
self.hide = hide
self._root_parser = None
def print_help(self):
self.parser.print_help()
def parse_args(self, args):
return self.parser.parse_args(args)
@property
def root_parser(self):
return self._root_parser
@root_parser.setter
def root_parser(self, root_parser):
self._root_parser = root_parser
self.parser.prog = (
f"{as_string(root_parser.get_prog_name())} {self.name}"
)
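# Illustrative sketch of the pattern used for beets' own commands:
#   stats_cmd = Subcommand("stats", help="show statistics")
#   def stats_func(lib, opts, args): ...
#   stats_cmd.func = stats_func
# The subcommand is then registered on a SubcommandsOptionParser.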
class SubcommandsOptionParser(CommonOptionsParser):
"""A variant of OptionParser that parses subcommands and their
arguments.
"""
def __init__(self, *args, **kwargs):
"""Create a new subcommand-aware option parser. All of the
options to OptionParser.__init__ are supported in addition
to subcommands, a sequence of Subcommand objects.
"""
# A more helpful default usage.
if "usage" not in kwargs:
kwargs["usage"] = """
%prog COMMAND [ARGS...]
%prog help COMMAND"""
kwargs["add_help_option"] = False
# Super constructor.
super().__init__(*args, **kwargs)
# Our root parser needs to stop on the first unrecognized argument.
self.disable_interspersed_args()
self.subcommands = []
def add_subcommand(self, *cmds):
"""Adds a Subcommand object to the parser's list of commands."""
for cmd in cmds:
cmd.root_parser = self
self.subcommands.append(cmd)
# Add the list of subcommands to the help message.
def format_help(self, formatter=None):
# Get the original help message, to which we will append.
out = super().format_help(formatter)
if formatter is None:
formatter = self.formatter
# Subcommands header.
result = ["\n"]
result.append(formatter.format_heading("Commands"))
formatter.indent()
# Generate the display names (including aliases).
# Also determine the help position.
disp_names = []
help_position = 0
subcommands = [c for c in self.subcommands if not c.hide]
subcommands.sort(key=lambda c: c.name)
for subcommand in subcommands:
name = subcommand.name
if subcommand.aliases:
name += f" ({', '.join(subcommand.aliases)})"
disp_names.append(name)
# Set the help position based on the max width.
proposed_help_position = len(name) + formatter.current_indent + 2
if proposed_help_position <= formatter.max_help_position:
help_position = max(help_position, proposed_help_position)
# Add each subcommand to the output.
for subcommand, name in zip(subcommands, disp_names):
# Lifted directly from optparse.py.
name_width = help_position - formatter.current_indent - 2
if len(name) > name_width:
name = f"{' ' * formatter.current_indent}{name}\n"
indent_first = help_position
else:
name = f"{' ' * formatter.current_indent}{name:<{name_width}}\n"
indent_first = 0
result.append(name)
help_width = formatter.width - help_position
help_lines = textwrap.wrap(subcommand.help, help_width)
help_line = help_lines[0] if help_lines else ""
result.append(f"{' ' * indent_first}{help_line}\n")
result.extend(
[f"{' ' * help_position}{line}\n" for line in help_lines[1:]]
)
formatter.dedent()
# Concatenate the original help message with the subcommand
# list.
return f"{out}{''.join(result)}"
def _subcommand_for_name(self, name):
"""Return the subcommand in self.subcommands matching the
given name. The name may either be the name of a subcommand or
an alias. If no subcommand matches, returns None.
"""
for subcommand in self.subcommands:
if name == subcommand.name or name in subcommand.aliases:
return subcommand
return None
def parse_global_options(self, args):
"""Parse options up to the subcommand argument. Returns a tuple
of the options object and the remaining arguments.
"""
options, subargs = self.parse_args(args)
# Force the help command
if options.help:
subargs = ["help"]
elif options.version:
subargs = ["version"]
return options, subargs
def parse_subcommand(self, args):
"""Given the `args` left unused by a `parse_global_options`,
return the invoked subcommand, the subcommand options, and the
subcommand arguments.
"""
# Help is default command
if not args:
args = ["help"]
cmdname = args.pop(0)
subcommand = self._subcommand_for_name(cmdname)
if not subcommand:
raise UserError(f"unknown command '{cmdname}'")
suboptions, subargs = subcommand.parse_args(args)
return subcommand, suboptions, subargs
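# Illustrative sketch of the two-stage parse performed by _raw_main
# below (which also registers the global -h/--version options this
# relies on):
#   options, subargs = parser.parse_global_options(["-v", "ls", "-a"])
#   # Global parsing stops at "ls" since interspersed args are disabled.
#   subcommand, suboptions, subargs = parser.parse_subcommand(subargs)
#   subcommand.func(lib, suboptions, subargs)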
optparse.Option.ALWAYS_TYPED_ACTIONS += ("callback",)
# The main entry point and bootstrapping.
def _setup(
options: optparse.Values, lib: library.Library | None
) -> tuple[list[Subcommand], library.Library]:
"""Prepare and global state and updates it with command line options.
Returns a list of subcommands, a list of plugins, and a library instance.
"""
config = _configure(options)
plugins.load_plugins()
# Get the default subcommands.
from beets.ui.commands import default_commands
subcommands = list(default_commands)
subcommands.extend(plugins.commands())
if lib is None:
lib = _open_library(config)
plugins.send("library_opened", lib=lib)
return subcommands, lib
def _configure(options):
"""Amend the global configuration object with command line options."""
# Add any additional config files specified with --config. This
# special handling lets specified plugins get loaded before we
# finish parsing the command line.
if getattr(options, "config", None) is not None:
overlay_path = options.config
del options.config
config.set_file(overlay_path)
else:
overlay_path = None
config.set_args(options)
# Configure the logger.
if config["verbose"].get(int):
log.set_global_level(logging.DEBUG)
else:
log.set_global_level(logging.INFO)
if overlay_path:
log.debug(
"overlaying configuration: {}", util.displayable_path(overlay_path)
)
config_path = config.user_config_path()
if os.path.isfile(config_path):
log.debug("user configuration: {}", util.displayable_path(config_path))
else:
log.debug(
"no user configuration found at {}",
util.displayable_path(config_path),
)
log.debug("data directory: {}", util.displayable_path(config.config_dir()))
return config
def _ensure_db_directory_exists(path):
if path == b":memory:": # in memory db
return
newpath = os.path.dirname(path)
if not os.path.isdir(newpath):
if input_yn(
f"The database directory {util.displayable_path(newpath)} does not"
" exist. Create it (Y/n)?"
):
os.makedirs(newpath)
def _open_library(config: confuse.LazyConfig) -> library.Library:
"""Create a new library instance from the configuration."""
dbpath = util.bytestring_path(config["library"].as_filename())
_ensure_db_directory_exists(dbpath)
try:
lib = library.Library(
dbpath,
config["directory"].as_filename(),
get_path_formats(),
get_replacements(),
)
lib.get_item(0) # Test database connection.
except (sqlite3.OperationalError, sqlite3.DatabaseError) as db_error:
log.debug("{}", traceback.format_exc())
raise UserError(
f"database file {util.displayable_path(dbpath)} cannot not be"
f" opened: {db_error}"
)
log.debug(
"library database: {}\nlibrary directory: {}",
util.displayable_path(lib.path),
util.displayable_path(lib.directory),
)
return lib
def _raw_main(args: list[str], lib=None) -> None:
"""A helper function for `main` without top-level exception
handling.
"""
parser = SubcommandsOptionParser()
parser.add_format_option(flags=("--format-item",), target=library.Item)
parser.add_format_option(flags=("--format-album",), target=library.Album)
parser.add_option(
"-l", "--library", dest="library", help="library database file to use"
)
parser.add_option(
"-d",
"--directory",
dest="directory",
help="destination music directory",
)
parser.add_option(
"-v",
"--verbose",
dest="verbose",
action="count",
help="log more details (use twice for even more)",
)
parser.add_option(
"-c", "--config", dest="config", help="path to configuration file"
)
def parse_csl_callback(
option: optparse.Option, _, value: str, parser: SubcommandsOptionParser
):
"""Parse a comma-separated list of values."""
setattr(
parser.values,
option.dest, # type: ignore[arg-type]
list(filter(None, value.split(","))),
)
parser.add_option(
"-p",
"--plugins",
dest="plugins",
action="callback",
callback=parse_csl_callback,
help="a comma-separated list of plugins to load",
)
parser.add_option(
"-P",
"--disable-plugins",
dest="disabled_plugins",
action="callback",
callback=parse_csl_callback,
help="a comma-separated list of plugins to disable",
)
parser.add_option(
"-h",
"--help",
dest="help",
action="store_true",
help="show this help message and exit",
)
parser.add_option(
"--version",
dest="version",
action="store_true",
help=optparse.SUPPRESS_HELP,
)
options, subargs = parser.parse_global_options(args)
# Special case for the `config --edit` command: bypass _setup so
# that an invalid configuration does not prevent the editor from
# starting.
if (
subargs
and subargs[0] == "config"
and ("-e" in subargs or "--edit" in subargs)
):
from beets.ui.commands import config_edit
return config_edit()
test_lib = bool(lib)
subcommands, lib = _setup(options, lib)
parser.add_subcommand(*subcommands)
subcommand, suboptions, subargs = parser.parse_subcommand(subargs)
subcommand.func(lib, suboptions, subargs)
plugins.send("cli_exit", lib=lib)
if not test_lib:
# Clean up the library unless it came from the test harness.
lib._close()
def main(args=None):
"""Run the main command-line interface for beets. Includes top-level
exception handlers that print friendly error messages.
"""
if "AppData\\Local\\Microsoft\\WindowsApps" in sys.exec_prefix:
log.error(
"error: beets is unable to use the Microsoft Store version of "
"Python. Please install Python from https://python.org.\n"
"error: More details can be found here "
"https://beets.readthedocs.io/en/stable/guides/main.html"
)
sys.exit(1)
try:
_raw_main(args)
except UserError as exc:
message = exc.args[0] if exc.args else None
log.error("error: {}", message)
sys.exit(1)
except util.HumanReadableError as exc:
exc.log(log)
sys.exit(1)
except library.FileOperationError as exc:
# These errors have reasonable human-readable descriptions, but
# we still want to log their tracebacks for debugging.
log.debug("{}", traceback.format_exc())
log.error("{}", exc)
sys.exit(1)
except confuse.ConfigError as exc:
log.error("configuration error: {}", exc)
sys.exit(1)
except db_query.InvalidQueryError as exc:
log.error("invalid query: {}", exc)
sys.exit(1)
except OSError as exc:
if exc.errno == errno.EPIPE:
# "Broken pipe". End silently.
sys.stderr.close()
else:
raise
except KeyboardInterrupt:
# Silently ignore ^C except in verbose mode.
log.debug("{}", traceback.format_exc())
except db.DBAccessError as exc:
log.error(
"database access error: {}\n"
"the library file might have a permissions problem",
exc,
)
sys.exit(1)
beetbox-beets-c1877b7/beets/ui/commands.py 0000775 0000000 0000000 00000233105 15073551743 0020521 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module provides the default commands for beets' command-line
interface.
"""
import os
import re
import textwrap
from collections import Counter
from collections.abc import Sequence
from functools import cached_property
from itertools import chain
from platform import python_version
from typing import Any, NamedTuple
import beets
from beets import autotag, config, importer, library, logging, plugins, ui, util
from beets.autotag import Recommendation, hooks
from beets.ui import (
input_,
print_,
print_column_layout,
print_newline_layout,
show_path_changes,
)
from beets.util import (
MoveOperation,
ancestry,
displayable_path,
functemplate,
normpath,
syspath,
)
from beets.util.units import human_bytes, human_seconds, human_seconds_short
from . import _store_dict
VARIOUS_ARTISTS = "Various Artists"
# Global logger.
log = logging.getLogger("beets")
# The list of default subcommands. This is populated with Subcommand
# objects that can be fed to a SubcommandsOptionParser.
default_commands = []
# Utilities.
def _do_query(lib, query, album, also_items=True):
"""For commands that operate on matched items, performs a query
and returns a list of matching items and a list of matching
albums. (The latter is only nonempty when album is True.) Raises
a UserError if no items match. also_items controls whether, when
fetching albums, the associated items should be fetched also.
"""
if album:
albums = list(lib.albums(query))
items = []
if also_items:
for al in albums:
items += al.items()
else:
albums = []
items = list(lib.items(query))
if album and not albums:
raise ui.UserError("No matching albums found.")
elif not album and not items:
raise ui.UserError("No matching items found.")
return items, albums
def _paths_from_logfile(path):
"""Parse the logfile and yield skipped paths to pass to the `import`
command.
"""
with open(path, encoding="utf-8") as fp:
for i, line in enumerate(fp, start=1):
verb, sep, paths = line.rstrip("\n").partition(" ")
if not sep:
raise ValueError(f"line {i} is invalid")
# Ignore informational lines that don't need to be re-imported.
if verb in {"import", "duplicate-keep", "duplicate-replace"}:
continue
if verb not in {"asis", "skip", "duplicate-skip"}:
raise ValueError(f"line {i} contains unknown verb {verb}")
yield os.path.commonpath(paths.split("; "))
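# Illustrative sketch of an import logfile consumed above:
#   import /music/in/Album A
#   skip /music/in/Album B/CD1; /music/in/Album B/CD2
#   duplicate-skip /music/in/Album C
# Only the "asis"/"skip"/"duplicate-skip" lines are yielded, and several
# paths on one line collapse to their common parent directory.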
def _parse_logfiles(logfiles):
"""Parse all `logfiles` and yield paths from it."""
for logfile in logfiles:
try:
yield from _paths_from_logfile(syspath(normpath(logfile)))
except ValueError as err:
raise ui.UserError(
f"malformed logfile {util.displayable_path(logfile)}: {err}"
) from err
except OSError as err:
raise ui.UserError(
f"unreadable logfile {util.displayable_path(logfile)}: {err}"
) from err
# fields: Shows a list of available fields for queries and format strings.
def _print_keys(query):
"""Given a SQLite query result, print the `key` field of each
returned row, with indentation of 2 spaces.
"""
for row in query:
print_(f" {row['key']}")
def fields_func(lib, opts, args):
def _print_rows(names):
names.sort()
print_(textwrap.indent("\n".join(names), " "))
print_("Item fields:")
_print_rows(library.Item.all_keys())
print_("Album fields:")
_print_rows(library.Album.all_keys())
with lib.transaction() as tx:
# The SQL uses DISTINCT to get unique keys from the query
unique_fields = "SELECT DISTINCT key FROM ({})"
print_("Item flexible attributes:")
_print_keys(tx.query(unique_fields.format(library.Item._flex_table)))
print_("Album flexible attributes:")
_print_keys(tx.query(unique_fields.format(library.Album._flex_table)))
fields_cmd = ui.Subcommand(
"fields", help="show fields available for queries and format strings"
)
fields_cmd.func = fields_func
default_commands.append(fields_cmd)
# help: Print help text for commands
class HelpCommand(ui.Subcommand):
def __init__(self):
super().__init__(
"help",
aliases=("?",),
help="give detailed help on a specific sub-command",
)
def func(self, lib, opts, args):
if args:
cmdname = args[0]
helpcommand = self.root_parser._subcommand_for_name(cmdname)
if not helpcommand:
raise ui.UserError(f"unknown command '{cmdname}'")
helpcommand.print_help()
else:
self.root_parser.print_help()
default_commands.append(HelpCommand())
# import: Autotagger and importer.
# Importer utilities and support.
def disambig_string(info):
"""Generate a string for an AlbumInfo or TrackInfo object that
provides context that helps disambiguate similar-looking albums and
tracks.
"""
if isinstance(info, hooks.AlbumInfo):
disambig = get_album_disambig_fields(info)
elif isinstance(info, hooks.TrackInfo):
disambig = get_singleton_disambig_fields(info)
else:
return ""
return ", ".join(disambig)
def get_singleton_disambig_fields(info: hooks.TrackInfo) -> Sequence[str]:
out = []
chosen_fields = config["match"]["singleton_disambig_fields"].as_str_seq()
calculated_values = {
"index": f"Index {info.index}",
"track_alt": f"Track {info.track_alt}",
"album": (
f"[{info.album}]"
if (
config["import"]["singleton_album_disambig"].get()
and info.get("album")
)
else ""
),
}
for field in chosen_fields:
if field in calculated_values:
out.append(str(calculated_values[field]))
else:
try:
out.append(str(info[field]))
except (AttributeError, KeyError):
print(f"Disambiguation string key {field} does not exist.")
return out
def get_album_disambig_fields(info: hooks.AlbumInfo) -> Sequence[str]:
out = []
chosen_fields = config["match"]["album_disambig_fields"].as_str_seq()
calculated_values = {
"media": (
f"{info.mediums}x{info.media}"
if (info.mediums and info.mediums > 1)
else info.media
),
}
for field in chosen_fields:
if field in calculated_values:
out.append(str(calculated_values[field]))
else:
try:
out.append(str(info[field]))
except (AttributeError, KeyError):
print(f"Disambiguation string key {field} does not exist.")
return out
def dist_colorize(string, dist):
"""Formats a string as a colorized similarity string according to
a distance.
"""
if dist <= config["match"]["strong_rec_thresh"].as_number():
string = ui.colorize("text_success", string)
elif dist <= config["match"]["medium_rec_thresh"].as_number():
string = ui.colorize("text_warning", string)
else:
string = ui.colorize("text_error", string)
return string
def dist_string(dist):
"""Formats a distance (a float) as a colorized similarity percentage
string.
"""
string = f"{(1 - dist) * 100:.1f}%"
return dist_colorize(string, dist)
def penalty_string(distance, limit=None):
"""Returns a colorized string that indicates all the penalties
applied to a distance object.
"""
penalties = []
for key in distance.keys():
key = key.replace("album_", "")
key = key.replace("track_", "")
key = key.replace("_", " ")
penalties.append(key)
if penalties:
if limit and len(penalties) > limit:
penalties = penalties[:limit] + ["..."]
# Prefix penalty string with U+2260: Not Equal To
penalty_string = f"\u2260 {', '.join(penalties)}"
return ui.colorize("changed", penalty_string)
class ChangeRepresentation:
"""Keeps track of all information needed to generate a (colored) text
representation of the changes that will be made if an album or singleton's
tags are changed according to `match`, which must be an AlbumMatch or
TrackMatch object, accordingly.
"""
@cached_property
def changed_prefix(self) -> str:
return ui.colorize("changed", "\u2260")
cur_artist = None
# cur_album set if album, cur_title set if singleton
cur_album = None
cur_title = None
match = None
indent_header = ""
indent_detail = ""
def __init__(self):
# Read match header indentation width from config.
match_header_indent_width = config["ui"]["import"]["indentation"][
"match_header"
].as_number()
self.indent_header = ui.indent(match_header_indent_width)
# Read match detail indentation width from config.
match_detail_indent_width = config["ui"]["import"]["indentation"][
"match_details"
].as_number()
self.indent_detail = ui.indent(match_detail_indent_width)
# Read match tracklist indentation width from config
match_tracklist_indent_width = config["ui"]["import"]["indentation"][
"match_tracklist"
].as_number()
self.indent_tracklist = ui.indent(match_tracklist_indent_width)
self.layout = config["ui"]["import"]["layout"].as_choice(
{
"column": 0,
"newline": 1,
}
)
def print_layout(
self, indent, left, right, separator=" -> ", max_width=None
):
if not max_width:
# If no max_width provided, use terminal width
max_width = ui.term_width()
if self.layout == 0:
print_column_layout(indent, left, right, separator, max_width)
else:
print_newline_layout(indent, left, right, separator, max_width)
def show_match_header(self):
"""Print out a 'header' identifying the suggested match (album name,
artist name,...) and summarizing the changes that would be made should
the user accept the match.
"""
# Print newline at beginning of change block.
print_("")
# 'Match' line and similarity.
print_(
f"{self.indent_header}Match ({dist_string(self.match.distance)}):"
)
if isinstance(self.match.info, autotag.hooks.AlbumInfo):
# Matching an album - print that
artist_album_str = (
f"{self.match.info.artist} - {self.match.info.album}"
)
else:
# Matching a single track
artist_album_str = (
f"{self.match.info.artist} - {self.match.info.title}"
)
print_(
self.indent_header
+ dist_colorize(artist_album_str, self.match.distance)
)
# Penalties.
penalties = penalty_string(self.match.distance)
if penalties:
print_(f"{self.indent_header}{penalties}")
# Disambiguation.
disambig = disambig_string(self.match.info)
if disambig:
print_(f"{self.indent_header}{disambig}")
# Data URL.
if self.match.info.data_url:
url = ui.colorize("text_faint", f"{self.match.info.data_url}")
print_(f"{self.indent_header}{url}")
def show_match_details(self):
"""Print out the details of the match, including changes in album name
and artist name.
"""
# Artist.
artist_l, artist_r = self.cur_artist or "", self.match.info.artist
if artist_r == VARIOUS_ARTISTS:
# Hide artists for VA releases.
artist_l, artist_r = "", ""
if artist_l != artist_r:
artist_l, artist_r = ui.colordiff(artist_l, artist_r)
left = {
"prefix": f"{self.changed_prefix} Artist: ",
"contents": artist_l,
"suffix": "",
}
right = {"prefix": "", "contents": artist_r, "suffix": ""}
self.print_layout(self.indent_detail, left, right)
else:
print_(f"{self.indent_detail}*", "Artist:", artist_r)
if self.cur_album:
# Album
album_l, album_r = self.cur_album or "", self.match.info.album
if (
self.cur_album != self.match.info.album
and self.match.info.album != VARIOUS_ARTISTS
):
album_l, album_r = ui.colordiff(album_l, album_r)
left = {
"prefix": f"{self.changed_prefix} Album: ",
"contents": album_l,
"suffix": "",
}
right = {"prefix": "", "contents": album_r, "suffix": ""}
self.print_layout(self.indent_detail, left, right)
else:
print_(f"{self.indent_detail}*", "Album:", album_r)
elif self.cur_title:
# Title - for singletons
title_l, title_r = self.cur_title or "", self.match.info.title
if self.cur_title != self.match.info.title:
title_l, title_r = ui.colordiff(title_l, title_r)
left = {
"prefix": f"{self.changed_prefix} Title: ",
"contents": title_l,
"suffix": "",
}
right = {"prefix": "", "contents": title_r, "suffix": ""}
self.print_layout(self.indent_detail, left, right)
else:
print_(f"{self.indent_detail}*", "Title:", title_r)
def make_medium_info_line(self, track_info):
"""Construct a line with the current medium's info."""
track_media = track_info.get("media", "Media")
# Build output string.
if self.match.info.mediums > 1 and track_info.disctitle:
return (
f"* {track_media} {track_info.medium}: {track_info.disctitle}"
)
elif self.match.info.mediums > 1:
return f"* {track_media} {track_info.medium}"
elif track_info.disctitle:
return f"* {track_media}: {track_info.disctitle}"
else:
return ""
def format_index(self, track_info):
"""Return a string representing the track index of the given
TrackInfo or Item object.
"""
if isinstance(track_info, hooks.TrackInfo):
index = track_info.index
medium_index = track_info.medium_index
medium = track_info.medium
mediums = self.match.info.mediums
else:
index = medium_index = track_info.track
medium = track_info.disc
mediums = track_info.disctotal
if config["per_disc_numbering"]:
if mediums and mediums > 1:
return f"{medium}-{medium_index}"
else:
return str(medium_index if medium_index is not None else index)
else:
return str(index)
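# Illustrative sketch: with per_disc_numbering enabled, track 3 on
# disc 2 of a two-disc release formats as "2-3"; with it disabled, the
# overall index (e.g. "13") is used instead.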
def make_track_numbers(self, item, track_info):
"""Format colored track indices."""
cur_track = self.format_index(item)
new_track = self.format_index(track_info)
changed = False
# Choose color based on change.
if cur_track != new_track:
changed = True
if item.track in (track_info.index, track_info.medium_index):
highlight_color = "text_highlight_minor"
else:
highlight_color = "text_highlight"
else:
highlight_color = "text_faint"
lhs_track = ui.colorize(highlight_color, f"(#{cur_track})")
rhs_track = ui.colorize(highlight_color, f"(#{new_track})")
return lhs_track, rhs_track, changed
@staticmethod
def make_track_titles(item, track_info):
"""Format colored track titles."""
new_title = track_info.title
if not item.title.strip():
# If there's no title, we use the filename. Don't colordiff.
cur_title = displayable_path(os.path.basename(item.path))
return cur_title, new_title, True
else:
# If there is a title, highlight differences.
cur_title = item.title.strip()
cur_col, new_col = ui.colordiff(cur_title, new_title)
return cur_col, new_col, cur_title != new_title
@staticmethod
def make_track_lengths(item, track_info):
"""Format colored track lengths."""
changed = False
if (
item.length
and track_info.length
and abs(item.length - track_info.length)
>= config["ui"]["length_diff_thresh"].as_number()
):
highlight_color = "text_highlight"
changed = True
else:
highlight_color = "text_highlight_minor"
# Handle None lengths by treating them as 0
cur_length0 = item.length if item.length else 0
new_length0 = track_info.length if track_info.length else 0
# format into string
cur_length = f"({human_seconds_short(cur_length0)})"
new_length = f"({human_seconds_short(new_length0)})"
# colorize
lhs_length = ui.colorize(highlight_color, cur_length)
rhs_length = ui.colorize(highlight_color, new_length)
return lhs_length, rhs_length, changed
def make_line(self, item, track_info):
"""Extract changes from item -> new TrackInfo object, and colorize
appropriately. Returns (lhs, rhs) for column printing.
"""
# Track titles.
lhs_title, rhs_title, diff_title = self.make_track_titles(
item, track_info
)
# Track number change.
lhs_track, rhs_track, diff_track = self.make_track_numbers(
item, track_info
)
# Length change.
lhs_length, rhs_length, diff_length = self.make_track_lengths(
item, track_info
)
changed = diff_title or diff_track or diff_length
# Construct lhs and rhs dicts.
# Previously we also printed the penalties; this is no longer the
# case, so the 'info' dictionary is unneeded.
# penalties = penalty_string(self.match.distance.tracks[track_info])
lhs = {
"prefix": f"{self.changed_prefix if changed else '*'} {lhs_track} ",
"contents": lhs_title,
"suffix": f" {lhs_length}",
}
rhs = {"prefix": "", "contents": "", "suffix": ""}
if not changed:
# Only return the left side, as nothing changed.
return (lhs, rhs)
else:
# Construct a dictionary for the "changed to" side
rhs = {
"prefix": f"{rhs_track} ",
"contents": rhs_title,
"suffix": f" {rhs_length}",
}
return (lhs, rhs)
def print_tracklist(self, lines):
"""Calculates column widths for tracks stored as line tuples:
(left, right). Then prints each line of tracklist.
"""
if len(lines) == 0:
# If no lines provided, e.g. details not required, do nothing.
return
def get_width(side):
"""Return the width of left or right in uncolorized characters."""
try:
return len(
ui.uncolorize(
" ".join(
[side["prefix"], side["contents"], side["suffix"]]
)
)
)
except KeyError:
# An empty dictionary -> Nothing to report
return 0
# Check how to fit content into terminal window
indent_width = len(self.indent_tracklist)
terminal_width = ui.term_width()
joiner_width = len("".join(["* ", " -> "]))
col_width = (terminal_width - indent_width - joiner_width) // 2
max_width_l = max(get_width(line_tuple[0]) for line_tuple in lines)
max_width_r = max(get_width(line_tuple[1]) for line_tuple in lines)
if (
(max_width_l <= col_width)
and (max_width_r <= col_width)
or (
((max_width_l > col_width) or (max_width_r > col_width))
and ((max_width_l + max_width_r) <= col_width * 2)
)
):
# All content fits. Either both maximum widths are below column
# widths, or one of the columns is larger than allowed but the
# other is smaller than allowed.
# In this case we can afford to shrink the columns to fit their
# largest string
col_width_l = max_width_l
col_width_r = max_width_r
else:
# Not all content fits - stick with original half/half split
col_width_l = col_width
col_width_r = col_width
# Print out each line, using the calculated width from above.
for left, right in lines:
left["width"] = col_width_l
right["width"] = col_width_r
self.print_layout(self.indent_tracklist, left, right)
class AlbumChange(ChangeRepresentation):
"""Album change representation, setting cur_album"""
def __init__(self, cur_artist, cur_album, match):
super().__init__()
self.cur_artist = cur_artist
self.cur_album = cur_album
self.match = match
def show_match_tracks(self):
"""Print out the tracks of the match, summarizing changes the match
suggests for them.
"""
# Tracks.
# match is an AlbumMatch NamedTuple, mapping is a dict
# Sort the pairs by the track_info index (at index 1 of the NamedTuple)
pairs = list(self.match.mapping.items())
pairs.sort(key=lambda item_and_track_info: item_and_track_info[1].index)
# Build up LHS and RHS for track difference display. The `lines` list
# contains `(left, right)` tuples.
lines = []
medium = disctitle = None
for item, track_info in pairs:
# If the track is the first on a new medium, show medium
# number and title.
if medium != track_info.medium or disctitle != track_info.disctitle:
# Create header for new medium
header = self.make_medium_info_line(track_info)
if header != "":
# Print tracks from previous medium
self.print_tracklist(lines)
lines = []
print_(f"{self.indent_detail}{header}")
# Save new medium details for future comparison.
medium, disctitle = track_info.medium, track_info.disctitle
# Construct the line tuple for the track.
left, right = self.make_line(item, track_info)
if right["contents"] != "":
lines.append((left, right))
else:
if config["import"]["detail"]:
lines.append((left, right))
self.print_tracklist(lines)
# Missing and unmatched tracks.
if self.match.extra_tracks:
print_(
"Missing tracks"
f" ({len(self.match.extra_tracks)}/{len(self.match.info.tracks)} -"
f" {len(self.match.extra_tracks) / len(self.match.info.tracks):.1%}):"
)
for track_info in self.match.extra_tracks:
line = f" ! {track_info.title} (#{self.format_index(track_info)})"
if track_info.length:
line += f" ({human_seconds_short(track_info.length)})"
print_(ui.colorize("text_warning", line))
if self.match.extra_items:
print_(f"Unmatched tracks ({len(self.match.extra_items)}):")
for item in self.match.extra_items:
line = f" ! {item.title} (#{self.format_index(item)})"
if item.length:
line += f" ({human_seconds_short(item.length)})"
print_(ui.colorize("text_warning", line))
class TrackChange(ChangeRepresentation):
"""Track change representation, comparing item with match."""
def __init__(self, cur_artist, cur_title, match):
super().__init__()
self.cur_artist = cur_artist
self.cur_title = cur_title
self.match = match
def show_change(cur_artist, cur_album, match):
"""Print out a representation of the changes that will be made if an
album's tags are changed according to `match`, which must be an AlbumMatch
object.
"""
change = AlbumChange(
cur_artist=cur_artist, cur_album=cur_album, match=match
)
# Print the match header.
change.show_match_header()
# Print the match details.
change.show_match_details()
# Print the match tracks.
change.show_match_tracks()
def show_item_change(item, match):
"""Print out the change that would occur by tagging `item` with the
metadata from `match`, a TrackMatch object.
"""
change = TrackChange(
cur_artist=item.artist, cur_title=item.title, match=match
)
# Print the match header.
change.show_match_header()
# Print the match details.
change.show_match_details()
def summarize_items(items, singleton):
"""Produces a brief summary line describing a set of items. Used for
manually resolving duplicates during import.
`items` is a list of `Item` objects. `singleton` indicates whether
this is an album or single-item import (if the latter, then `items`
should only have one element).
"""
summary_parts = []
if not singleton:
summary_parts.append(f"{len(items)} items")
format_counts = {}
for item in items:
format_counts[item.format] = format_counts.get(item.format, 0) + 1
if len(format_counts) == 1:
# A single format.
summary_parts.append(items[0].format)
else:
# Enumerate all the formats by decreasing frequencies:
for fmt, count in sorted(
format_counts.items(),
key=lambda fmt_and_count: (-fmt_and_count[1], fmt_and_count[0]),
):
summary_parts.append(f"{fmt} {count}")
if items:
average_bitrate = sum([item.bitrate for item in items]) / len(items)
total_duration = sum([item.length for item in items])
total_filesize = sum([item.filesize for item in items])
summary_parts.append(f"{int(average_bitrate / 1000)}kbps")
if items[0].format == "FLAC":
sample_bits = (
f"{round(int(items[0].samplerate) / 1000, 1)}kHz"
f"/{items[0].bitdepth} bit"
)
summary_parts.append(sample_bits)
summary_parts.append(human_seconds_short(total_duration))
summary_parts.append(human_bytes(total_filesize))
return ", ".join(summary_parts)
def _summary_judgment(rec):
"""Determines whether a decision should be made without even asking
the user. This occurs in quiet mode and when an action is chosen for
NONE recommendations. Return None if the user should be queried.
Otherwise, returns an action. May also print to the console if a
summary judgment is made.
"""
if config["import"]["quiet"]:
if rec == Recommendation.strong:
return importer.Action.APPLY
else:
action = config["import"]["quiet_fallback"].as_choice(
{
"skip": importer.Action.SKIP,
"asis": importer.Action.ASIS,
}
)
elif config["import"]["timid"]:
return None
elif rec == Recommendation.none:
action = config["import"]["none_rec_action"].as_choice(
{
"skip": importer.Action.SKIP,
"asis": importer.Action.ASIS,
"ask": None,
}
)
else:
return None
if action == importer.Action.SKIP:
print_("Skipping.")
elif action == importer.Action.ASIS:
print_("Importing as-is.")
return action
class PromptChoice(NamedTuple):
short: str
long: str
callback: Any
def choose_candidate(
candidates,
singleton,
rec,
cur_artist=None,
cur_album=None,
item=None,
itemcount=None,
choices=[],
):
"""Given a sorted list of candidates, ask the user for a selection
of which candidate to use. Applies to both full albums and
singletons (tracks). Candidates are either AlbumMatch or TrackMatch
objects depending on `singleton`. For albums, `cur_artist`,
`cur_album`, and `itemcount` must be provided. For singletons,
`item` must be provided.
`choices` is a list of `PromptChoice`s to be used in each prompt.
Returns one of the following:
* the result of the choice, which may be SKIP or ASIS
* a candidate (an AlbumMatch/TrackMatch object)
* a chosen `PromptChoice` from `choices`
"""
# Sanity check.
if singleton:
assert item is not None
else:
assert cur_artist is not None
assert cur_album is not None
# Build helper variables for the prompt choices.
choice_opts = tuple(c.long for c in choices)
choice_actions = {c.short: c for c in choices}
# Zero candidates.
if not candidates:
if singleton:
print_("No matching recordings found.")
else:
print_(f"No matching release found for {itemcount} tracks.")
print_(
"For help, see: "
"https://beets.readthedocs.org/en/latest/faq.html#nomatch"
)
sel = ui.input_options(choice_opts)
if sel in choice_actions:
return choice_actions[sel]
else:
assert False
# Is the change good enough?
bypass_candidates = False
if rec != Recommendation.none:
match = candidates[0]
bypass_candidates = True
while True:
# Display and choose from candidates.
require = rec <= Recommendation.low
if not bypass_candidates:
# Display list of candidates.
print_("")
print_(
f"Finding tags for {'track' if singleton else 'album'} "
f'"{item.artist if singleton else cur_artist} -'
f' {item.title if singleton else cur_album}".'
)
print_(" Candidates:")
for i, match in enumerate(candidates):
# Index, metadata, and distance.
index0 = f"{i + 1}."
index = dist_colorize(index0, match.distance)
dist = f"({(1 - match.distance) * 100:.1f}%)"
distance = dist_colorize(dist, match.distance)
metadata = (
f"{match.info.artist} -"
f" {match.info.title if singleton else match.info.album}"
)
if i == 0:
metadata = dist_colorize(metadata, match.distance)
else:
metadata = ui.colorize("text_highlight_minor", metadata)
line1 = [index, distance, metadata]
print_(f" {' '.join(line1)}")
# Penalties.
penalties = penalty_string(match.distance, 3)
if penalties:
print_(f"{' ' * 13}{penalties}")
# Disambiguation
disambig = disambig_string(match.info)
if disambig:
print_(f"{' ' * 13}{disambig}")
# Ask the user for a choice.
sel = ui.input_options(choice_opts, numrange=(1, len(candidates)))
if sel == "m":
pass
elif sel in choice_actions:
return choice_actions[sel]
else: # Numerical selection.
match = candidates[sel - 1]
if sel != 1:
# When choosing anything but the first match,
# disable the default action.
require = True
bypass_candidates = False
# Show what we're about to do.
if singleton:
show_item_change(item, match)
else:
show_change(cur_artist, cur_album, match)
# Exact match => tag automatically if we're not in timid mode.
if rec == Recommendation.strong and not config["import"]["timid"]:
return match
# Ask for confirmation.
default = config["import"]["default_action"].as_choice(
{
"apply": "a",
"skip": "s",
"asis": "u",
"none": None,
}
)
if default is None:
require = True
# Bell ring when user interaction is needed.
if config["import"]["bell"]:
ui.print_("\a", end="")
sel = ui.input_options(
("Apply", "More candidates") + choice_opts,
require=require,
default=default,
)
if sel == "a":
return match
elif sel in choice_actions:
return choice_actions[sel]
def manual_search(session, task):
"""Get a new `Proposal` using manual search criteria.
Input either an artist and album (for full albums) or artist and
track name (for singletons) for manual search.
"""
artist = input_("Artist:").strip()
name = input_("Album:" if task.is_album else "Track:").strip()
if task.is_album:
_, _, prop = autotag.tag_album(task.items, artist, name)
return prop
else:
return autotag.tag_item(task.item, artist, name)
def manual_id(session, task):
"""Get a new `Proposal` using a manually-entered ID.
Input an ID, either for an album ("release") or a track ("recording").
"""
prompt = f"Enter {'release' if task.is_album else 'recording'} ID:"
search_id = input_(prompt).strip()
if task.is_album:
_, _, prop = autotag.tag_album(task.items, search_ids=search_id.split())
return prop
else:
return autotag.tag_item(task.item, search_ids=search_id.split())
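# Illustrative sketch: at the manual_id prompt, entering several IDs
# separated by whitespace (e.g. "id1 id2") searches all of them, since
# the input is split and passed along as search_ids.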
def abort_action(session, task):
"""A prompt choice callback that aborts the importer."""
raise importer.ImportAbortError()
class TerminalImportSession(importer.ImportSession):
"""An import session that runs in a terminal."""
def choose_match(self, task):
"""Given an initial autotagging of items, go through an interactive
dance with the user to ask for a choice of metadata. Returns an
AlbumMatch object, ASIS, or SKIP.
"""
# Show what we're tagging.
print_()
path_str0 = displayable_path(task.paths, "\n")
path_str = ui.colorize("import_path", path_str0)
items_str0 = f"({len(task.items)} items)"
items_str = ui.colorize("import_path_items", items_str0)
print_(" ".join([path_str, items_str]))
# Let plugins display info or prompt the user before we go through the
# process of selecting candidate.
results = plugins.send(
"import_task_before_choice", session=self, task=task
)
actions = [action for action in results if action]
if len(actions) == 1:
return actions[0]
elif len(actions) > 1:
raise plugins.PluginConflictError(
"Only one handler for `import_task_before_choice` may return "
"an action."
)
# Take immediate action if appropriate.
action = _summary_judgment(task.rec)
if action == importer.Action.APPLY:
match = task.candidates[0]
show_change(task.cur_artist, task.cur_album, match)
return match
elif action is not None:
return action
# Loop until we have a choice.
while True:
# Ask for a choice from the user. The result of
# `choose_candidate` may be an `importer.Action`, an
# `AlbumMatch` object for a specific selection, or a
# `PromptChoice`.
choices = self._get_choices(task)
choice = choose_candidate(
task.candidates,
False,
task.rec,
task.cur_artist,
task.cur_album,
itemcount=len(task.items),
choices=choices,
)
# Basic choices that require no more action here.
if choice in (importer.Action.SKIP, importer.Action.ASIS):
# Pass selection to main control flow.
return choice
# Plugin-provided choices. We invoke the associated callback
# function.
elif choice in choices:
post_choice = choice.callback(self, task)
if isinstance(post_choice, importer.Action):
return post_choice
elif isinstance(post_choice, autotag.Proposal):
# Use the new candidates and continue around the loop.
task.candidates = post_choice.candidates
task.rec = post_choice.recommendation
# Otherwise, we have a specific match selection.
else:
# We have a candidate! Finish tagging. Here, choice is an
# AlbumMatch object.
assert isinstance(choice, autotag.AlbumMatch)
return choice
def choose_item(self, task):
"""Ask the user for a choice about tagging a single item. Returns
either an action constant or a TrackMatch object.
"""
print_()
print_(displayable_path(task.item.path))
candidates, rec = task.candidates, task.rec
# Take immediate action if appropriate.
action = _summary_judgment(task.rec)
if action == importer.Action.APPLY:
match = candidates[0]
show_item_change(task.item, match)
return match
elif action is not None:
return action
while True:
# Ask for a choice.
choices = self._get_choices(task)
choice = choose_candidate(
candidates, True, rec, item=task.item, choices=choices
)
if choice in (importer.Action.SKIP, importer.Action.ASIS):
return choice
elif choice in choices:
post_choice = choice.callback(self, task)
if isinstance(post_choice, importer.Action):
return post_choice
elif isinstance(post_choice, autotag.Proposal):
candidates = post_choice.candidates
rec = post_choice.recommendation
else:
# Chose a candidate.
assert isinstance(choice, autotag.TrackMatch)
return choice
def resolve_duplicate(self, task, found_duplicates):
"""Decide what to do when a new album or item seems similar to one
that's already in the library.
"""
log.warning(
"This {} is already in the library!",
("album" if task.is_album else "item"),
)
if config["import"]["quiet"]:
# In quiet mode, don't prompt -- just skip.
log.info("Skipping.")
sel = "s"
else:
# Print some detail about the existing and new items so the
# user can make an informed decision.
for duplicate in found_duplicates:
print_(
"Old: "
+ summarize_items(
(
list(duplicate.items())
if task.is_album
else [duplicate]
),
not task.is_album,
)
)
if config["import"]["duplicate_verbose_prompt"]:
if task.is_album:
for dup in duplicate.items():
print(f" {dup}")
else:
print(f" {duplicate}")
print_(
"New: "
+ summarize_items(
task.imported_items(),
not task.is_album,
)
)
if config["import"]["duplicate_verbose_prompt"]:
for item in task.imported_items():
print(f" {item}")
sel = ui.input_options(
("Skip new", "Keep all", "Remove old", "Merge all")
)
if sel == "s":
# Skip new.
task.set_choice(importer.Action.SKIP)
elif sel == "k":
# Keep both. Do nothing; leave the choice intact.
pass
elif sel == "r":
# Remove old.
task.should_remove_duplicates = True
elif sel == "m":
task.should_merge_duplicates = True
else:
assert False
def should_resume(self, path):
return ui.input_yn(
f"Import of the directory:\n{displayable_path(path)}\n"
"was interrupted. Resume (Y/n)?"
)
def _get_choices(self, task):
"""Get the list of prompt choices that should be presented to the
user. This consists of both built-in choices and ones provided by
plugins.
The `before_choose_candidate` event is sent to the plugins, with
session and task as its parameters. Plugins are responsible for
checking the right conditions and returning a list of `PromptChoice`s,
which is flattened and checked for conflicts.
If two or more choices have the same short letter, a warning is
emitted and all but one choices are discarded, giving preference
to the default importer choices.
Returns a list of `PromptChoice`s.
"""
# Standard, built-in choices.
choices = [
PromptChoice("s", "Skip", lambda s, t: importer.Action.SKIP),
PromptChoice("u", "Use as-is", lambda s, t: importer.Action.ASIS),
]
if task.is_album:
choices += [
PromptChoice(
"t", "as Tracks", lambda s, t: importer.Action.TRACKS
),
PromptChoice(
"g", "Group albums", lambda s, t: importer.Action.ALBUMS
),
]
choices += [
PromptChoice("e", "Enter search", manual_search),
PromptChoice("i", "enter Id", manual_id),
PromptChoice("b", "aBort", abort_action),
]
# Send the before_choose_candidate event and flatten list.
extra_choices = list(
chain(
*plugins.send(
"before_choose_candidate", session=self, task=task
)
)
)
# Add a "dummy" choice for the other baked-in option, for
# duplicate checking.
all_choices = (
[
PromptChoice("a", "Apply", None),
]
+ choices
+ extra_choices
)
# Check for conflicts.
short_letters = [c.short for c in all_choices]
if len(short_letters) != len(set(short_letters)):
# Duplicate short letter has been found.
duplicates = [
i for i, count in Counter(short_letters).items() if count > 1
]
for short in duplicates:
# Keep the first of the choices, removing the rest.
dup_choices = [c for c in all_choices if c.short == short]
for c in dup_choices[1:]:
log.warning(
"Prompt choice '{0.long}' removed due to conflict "
"with '{1[0].long}' (short letter: '{0.short}')",
c,
dup_choices,
)
extra_choices.remove(c)
return choices + extra_choices
# The import command.
def import_files(lib, paths: list[bytes], query):
"""Import the files in the given list of paths or matching the
query.
"""
# Check parameter consistency.
if config["import"]["quiet"] and config["import"]["timid"]:
raise ui.UserError("can't be both quiet and timid")
# Open the log.
if config["import"]["log"].get() is not None:
logpath = syspath(config["import"]["log"].as_filename())
try:
loghandler = logging.FileHandler(logpath, encoding="utf-8")
except OSError:
raise ui.UserError(
"Could not open log file for writing:"
f" {displayable_path(logpath)}"
)
else:
loghandler = None
# Never ask for input in quiet mode.
if config["import"]["resume"].get() == "ask" and config["import"]["quiet"]:
config["import"]["resume"] = False
session = TerminalImportSession(lib, loghandler, paths, query)
session.run()
# Emit event.
plugins.send("import", lib=lib, paths=paths)
def import_func(lib, opts, args: list[str]):
config["import"].set_args(opts)
# Special case: --copy flag suppresses import_move (which would
# otherwise take precedence).
if opts.copy:
config["import"]["move"] = False
if opts.library:
query = args
byte_paths = []
else:
query = None
paths = args
# The paths from the logfiles go into a separate list to allow handling
# errors differently from user-specified paths.
paths_from_logfiles = list(_parse_logfiles(opts.from_logfiles or []))
if not paths and not paths_from_logfiles:
raise ui.UserError("no path specified")
byte_paths = [os.fsencode(p) for p in paths]
paths_from_logfiles = [os.fsencode(p) for p in paths_from_logfiles]
# Check the user-specified directories.
for path in byte_paths:
if not os.path.exists(syspath(normpath(path))):
raise ui.UserError(
f"no such file or directory: {displayable_path(path)}"
)
# Check the directories from the logfiles, but don't throw an error in
# case those paths don't exist. Maybe some of those paths have already
# been imported and moved separately, so logging a warning should
# suffice.
for path in paths_from_logfiles:
if not os.path.exists(syspath(normpath(path))):
log.warning(
"No such file or directory: {}", displayable_path(path)
)
continue
byte_paths.append(path)
# If all paths were read from a logfile, and none of them exist, throw
# an error
if not byte_paths:
raise ui.UserError("none of the paths are importable")
import_files(lib, byte_paths, query)
import_cmd = ui.Subcommand(
"import", help="import new music", aliases=("imp", "im")
)
import_cmd.parser.add_option(
"-c",
"--copy",
action="store_true",
default=None,
help="copy tracks into library directory (default)",
)
import_cmd.parser.add_option(
"-C",
"--nocopy",
action="store_false",
dest="copy",
help="don't copy tracks (opposite of -c)",
)
import_cmd.parser.add_option(
"-m",
"--move",
action="store_true",
dest="move",
help="move tracks into the library (overrides -c)",
)
import_cmd.parser.add_option(
"-w",
"--write",
action="store_true",
default=None,
help="write new metadata to files' tags (default)",
)
import_cmd.parser.add_option(
"-W",
"--nowrite",
action="store_false",
dest="write",
help="don't write metadata (opposite of -w)",
)
import_cmd.parser.add_option(
"-a",
"--autotag",
action="store_true",
dest="autotag",
help="infer tags for imported files (default)",
)
import_cmd.parser.add_option(
"-A",
"--noautotag",
action="store_false",
dest="autotag",
help="don't infer tags for imported files (opposite of -a)",
)
import_cmd.parser.add_option(
"-p",
"--resume",
action="store_true",
default=None,
help="resume importing if interrupted",
)
import_cmd.parser.add_option(
"-P",
"--noresume",
action="store_false",
dest="resume",
help="do not try to resume importing",
)
import_cmd.parser.add_option(
"-q",
"--quiet",
action="store_true",
dest="quiet",
help="never prompt for input: skip albums instead",
)
import_cmd.parser.add_option(
"--quiet-fallback",
type="string",
dest="quiet_fallback",
help="decision in quiet mode when no strong match: skip or asis",
)
import_cmd.parser.add_option(
"-l",
"--log",
dest="log",
help="file to log untaggable albums for later review",
)
import_cmd.parser.add_option(
"-s",
"--singletons",
action="store_true",
help="import individual tracks instead of full albums",
)
import_cmd.parser.add_option(
"-t",
"--timid",
dest="timid",
action="store_true",
help="always confirm all actions",
)
import_cmd.parser.add_option(
"-L",
"--library",
dest="library",
action="store_true",
help="retag items matching a query",
)
import_cmd.parser.add_option(
"-i",
"--incremental",
dest="incremental",
action="store_true",
help="skip already-imported directories",
)
import_cmd.parser.add_option(
"-I",
"--noincremental",
dest="incremental",
action="store_false",
help="do not skip already-imported directories",
)
import_cmd.parser.add_option(
"-R",
"--incremental-skip-later",
action="store_true",
dest="incremental_skip_later",
help="do not record skipped files during incremental import",
)
import_cmd.parser.add_option(
"-r",
"--noincremental-skip-later",
action="store_false",
dest="incremental_skip_later",
help="record skipped files during incremental import",
)
import_cmd.parser.add_option(
"--from-scratch",
dest="from_scratch",
action="store_true",
help="erase existing metadata before applying new metadata",
)
import_cmd.parser.add_option(
"--flat",
dest="flat",
action="store_true",
help="import an entire tree as a single album",
)
import_cmd.parser.add_option(
"-g",
"--group-albums",
dest="group_albums",
action="store_true",
help="group tracks in a folder into separate albums",
)
import_cmd.parser.add_option(
"--pretend",
dest="pretend",
action="store_true",
help="just print the files to import",
)
import_cmd.parser.add_option(
"-S",
"--search-id",
dest="search_ids",
action="append",
metavar="ID",
help="restrict matching to a specific metadata backend ID",
)
import_cmd.parser.add_option(
"--from-logfile",
dest="from_logfiles",
action="append",
metavar="PATH",
help="read skipped paths from an existing logfile",
)
import_cmd.parser.add_option(
"--set",
dest="set_fields",
action="callback",
callback=_store_dict,
metavar="FIELD=VALUE",
help="set the given fields to the supplied values",
)
import_cmd.func = import_func
default_commands.append(import_cmd)
# list: Query and show library contents.
def list_items(lib, query, album, fmt=""):
"""Print out items in lib matching query. If album, then search for
albums instead of single items.
"""
if album:
for album in lib.albums(query):
ui.print_(format(album, fmt))
else:
for item in lib.items(query):
ui.print_(format(item, fmt))
def list_func(lib, opts, args):
list_items(lib, args, opts.album)
list_cmd = ui.Subcommand("list", help="query the library", aliases=("ls",))
list_cmd.parser.usage += "\nExample: %prog -f '$album: $title' artist:beatles"
list_cmd.parser.add_all_common_options()
list_cmd.func = list_func
default_commands.append(list_cmd)
# update: Update library contents according to on-disk tags.
def update_items(lib, query, album, move, pretend, fields, exclude_fields=None):
"""For all the items matched by the query, update the library to
reflect the item's embedded tags.
:param fields: The fields to be stored. If not specified, all fields will
be.
:param exclude_fields: The fields not to be stored. If not specified, no
fields are excluded.
"""
with lib.transaction():
items, _ = _do_query(lib, query, album)
if move and fields is not None and "path" not in fields:
# Special case: if an item needs to be moved, the path field has to be
# updated; otherwise the new path will not be reflected in the
# database.
fields.append("path")
if fields is None:
# no fields were provided, update all media fields
item_fields = fields or library.Item._media_fields
if move and "path" not in item_fields:
# move is enabled, add 'path' to the list of fields to update
item_fields.add("path")
else:
# fields was provided, just update those
item_fields = fields
# get all the album fields to update
album_fields = fields or library.Album._fields.keys()
if exclude_fields:
# remove any excluded fields from the item and album sets
item_fields = [f for f in item_fields if f not in exclude_fields]
album_fields = [f for f in album_fields if f not in exclude_fields]
# Walk through the items and pick up their changes.
affected_albums = set()
for item in items:
# Item deleted?
if not item.path or not os.path.exists(syspath(item.path)):
ui.print_(format(item))
ui.print_(ui.colorize("text_error", " deleted"))
if not pretend:
item.remove(True)
affected_albums.add(item.album_id)
continue
# Did the item change since last checked?
if item.current_mtime() <= item.mtime:
log.debug(
"skipping {0.filepath} because mtime is up to date ({0.mtime})",
item,
)
continue
# Read new data.
try:
item.read()
except library.ReadError as exc:
log.error("error reading {.filepath}: {}", item, exc)
continue
# Special-case album artist when it matches track artist. (Hacky
# but necessary for preserving album-level metadata for non-
# autotagged imports.)
if not item.albumartist:
old_item = lib.get_item(item.id)
if old_item.albumartist == old_item.artist == item.artist:
item.albumartist = old_item.albumartist
item._dirty.discard("albumartist")
# Check for and display changes.
changed = ui.show_model_changes(item, fields=item_fields)
# Save changes.
if not pretend:
if changed:
# Move the item if it's in the library.
if move and lib.directory in ancestry(item.path):
item.move(store=False)
item.store(fields=item_fields)
affected_albums.add(item.album_id)
else:
# The file's mtime was different, but there were no
# changes to the metadata. Store the new mtime,
# which is set in the call to read(), so we don't
# check this again in the future.
item.store(fields=item_fields)
# Skip album changes while pretending.
if pretend:
return
# Modify affected albums to reflect changes in their items.
for album_id in affected_albums:
if album_id is None: # Singletons.
continue
album = lib.get_album(album_id)
if not album: # Empty albums have already been removed.
log.debug("emptied album {}", album_id)
continue
first_item = album.items().get()
# Update album structure to reflect an item in it.
for key in library.Album.item_keys:
album[key] = first_item[key]
album.store(fields=album_fields)
# Move album art (and any inconsistent items).
if move and lib.directory in ancestry(first_item.path):
log.debug("moving album {}", album_id)
# Manually moving and storing the album.
items = list(album.items())
for item in items:
item.move(store=False, with_album=False)
item.store(fields=item_fields)
album.move(store=False)
album.store(fields=album_fields)
def update_func(lib, opts, args):
# Verify that the library folder exists to prevent accidental wipes.
if not os.path.isdir(syspath(lib.directory)):
ui.print_("Library path is unavailable or does not exist.")
ui.print_(lib.directory)
if not ui.input_yn("Are you sure you want to continue (y/n)?", True):
return
update_items(
lib,
args,
opts.album,
ui.should_move(opts.move),
opts.pretend,
opts.fields,
opts.exclude_fields,
)
update_cmd = ui.Subcommand(
"update",
help="update the library",
aliases=(
"upd",
"up",
),
)
update_cmd.parser.add_album_option()
update_cmd.parser.add_format_option()
update_cmd.parser.add_option(
"-m",
"--move",
action="store_true",
dest="move",
help="move files in the library directory",
)
update_cmd.parser.add_option(
"-M",
"--nomove",
action="store_false",
dest="move",
help="don't move files in library",
)
update_cmd.parser.add_option(
"-p",
"--pretend",
action="store_true",
help="show all changes but do nothing",
)
update_cmd.parser.add_option(
"-F",
"--field",
default=None,
action="append",
dest="fields",
help="list of fields to update",
)
update_cmd.parser.add_option(
"-e",
"--exclude-field",
default=None,
action="append",
dest="exclude_fields",
help="list of fields to exclude from updates",
)
update_cmd.func = update_func
default_commands.append(update_cmd)
# remove: Remove items from library, delete files.
def remove_items(lib, query, album, delete, force):
"""Remove items matching query from lib. If album, then match and
remove whole albums. If delete, also remove files from disk.
"""
# Get the matching items.
items, albums = _do_query(lib, query, album)
objs = albums if album else items
# Confirm file removal if not forcing removal.
if not force:
# Prepare confirmation with user.
album_str = (
f" in {len(albums)} album{'s' if len(albums) > 1 else ''}"
if album
else ""
)
if delete:
fmt = "$path - $title"
prompt = "Really DELETE"
prompt_all = (
"Really DELETE"
f" {len(items)} file{'s' if len(items) > 1 else ''}{album_str}"
)
else:
fmt = ""
prompt = "Really remove from the library?"
prompt_all = (
"Really remove"
f" {len(items)} item{'s' if len(items) > 1 else ''}{album_str}"
" from the library?"
)
# Helpers for printing affected items
def fmt_track(t):
ui.print_(format(t, fmt))
def fmt_album(a):
ui.print_()
for i in a.items():
fmt_track(i)
fmt_obj = fmt_album if album else fmt_track
# Show all the items.
for o in objs:
fmt_obj(o)
# Confirm with user.
objs = ui.input_select_objects(
prompt, objs, fmt_obj, prompt_all=prompt_all
)
if not objs:
return
# Remove (and possibly delete) items.
with lib.transaction():
for obj in objs:
obj.remove(delete)
def remove_func(lib, opts, args):
remove_items(lib, args, opts.album, opts.delete, opts.force)
remove_cmd = ui.Subcommand(
"remove", help="remove matching items from the library", aliases=("rm",)
)
remove_cmd.parser.add_option(
"-d", "--delete", action="store_true", help="also remove files from disk"
)
remove_cmd.parser.add_option(
"-f", "--force", action="store_true", help="do not ask when removing items"
)
remove_cmd.parser.add_album_option()
remove_cmd.func = remove_func
default_commands.append(remove_cmd)
# stats: Show library/query statistics.
def show_stats(lib, query, exact):
"""Shows some statistics about the matched items."""
items = lib.items(query)
total_size = 0
total_time = 0.0
total_items = 0
artists = set()
albums = set()
album_artists = set()
for item in items:
if exact:
try:
total_size += os.path.getsize(syspath(item.path))
except OSError as exc:
log.info("could not get size of {.path}: {}", item, exc)
else:
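# Estimate from duration and bitrate (in bits per second): e.g., a
# 200-second track at 320 kbps is roughly 200 * 320000 / 8
# = 8,000,000 bytes.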
total_size += int(item.length * item.bitrate / 8)
total_time += item.length
total_items += 1
artists.add(item.artist)
album_artists.add(item.albumartist)
if item.album_id:
albums.add(item.album_id)
size_str = human_bytes(total_size)
if exact:
size_str += f" ({total_size} bytes)"
print_(f"""Tracks: {total_items}
Total time: {human_seconds(total_time)}
{f" ({total_time:.2f} seconds)" if exact else ""}
{"Total size" if exact else "Approximate total size"}: {size_str}
Artists: {len(artists)}
Albums: {len(albums)}
Album artists: {len(album_artists)}""")
def stats_func(lib, opts, args):
show_stats(lib, args, opts.exact)
stats_cmd = ui.Subcommand(
"stats", help="show statistics about the library or a query"
)
stats_cmd.parser.add_option(
"-e", "--exact", action="store_true", help="exact size and time"
)
stats_cmd.func = stats_func
default_commands.append(stats_cmd)
# version: Show current beets version.
def show_version(lib, opts, args):
print_(f"beets version {beets.__version__}")
print_(f"Python version {python_version()}")
# Show plugins.
names = sorted(p.name for p in plugins.find_plugins())
if names:
print_("plugins:", ", ".join(names))
else:
print_("no plugins loaded")
version_cmd = ui.Subcommand("version", help="output version information")
version_cmd.func = show_version
default_commands.append(version_cmd)
# modify: Declaratively change metadata.
def modify_items(lib, mods, dels, query, write, move, album, confirm, inherit):
"""Modifies matching items according to user-specified assignments and
deletions.
`mods` is a dictionary of field and value pairs indicating
assignments. `dels` is a list of fields to be deleted.
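For instance (illustrative values), ``mods={"genre": "Rock"}`` with
``dels=["comments"]`` sets each matched object's ``genre`` and deletes
its ``comments`` field; values may also reference templates such as
``"$albumartist"``.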
"""
# Parse key=value specifications into a dictionary.
model_cls = library.Album if album else library.Item
# Get the items to modify.
items, albums = _do_query(lib, query, album, False)
objs = albums if album else items
# Apply changes *temporarily*, preview them, and collect modified
# objects.
print_(f"Modifying {len(objs)} {'album' if album else 'item'}s.")
changed = []
templates = {
key: functemplate.template(value) for key, value in mods.items()
}
for obj in objs:
obj_mods = {
key: model_cls._parse(key, obj.evaluate_template(templates[key]))
for key in mods.keys()
}
if print_and_modify(obj, obj_mods, dels) and obj not in changed:
changed.append(obj)
# Still something to do?
if not changed:
print_("No changes to make.")
return
# Confirm action.
if confirm:
if write and move:
extra = ", move and write tags"
elif write:
extra = " and write tags"
elif move:
extra = " and move"
else:
extra = ""
changed = ui.input_select_objects(
f"Really modify{extra}",
changed,
lambda o: print_and_modify(o, mods, dels),
)
# Apply changes to database and files
with lib.transaction():
for obj in changed:
obj.try_sync(write, move, inherit)
def print_and_modify(obj, mods, dels):
"""Print the modifications to an item and return a bool indicating
whether any changes were made.
`mods` is a dictionary of fields and values to update on the object;
`dels` is a sequence of fields to delete.
"""
obj.update(mods)
for field in dels:
try:
del obj[field]
except KeyError:
pass
return ui.show_model_changes(obj)
def modify_parse_args(args):
"""Split the arguments for the modify subcommand into query parts,
assignments (field=value), and deletions (field!). Returns the result as
a three-tuple in that order.
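For instance (illustrative arguments):
>>> modify_parse_args(["artist:beatles", "genre=Rock", "comments!"])
(['artist:beatles'], {'genre': 'Rock'}, ['comments'])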
"""
mods = {}
dels = []
query = []
for arg in args:
if arg.endswith("!") and "=" not in arg and ":" not in arg:
dels.append(arg[:-1]) # Strip trailing !.
elif "=" in arg and ":" not in arg.split("=", 1)[0]:
key, val = arg.split("=", 1)
mods[key] = val
else:
query.append(arg)
return query, mods, dels
def modify_func(lib, opts, args):
query, mods, dels = modify_parse_args(args)
if not mods and not dels:
raise ui.UserError("no modifications specified")
modify_items(
lib,
mods,
dels,
query,
ui.should_write(opts.write),
ui.should_move(opts.move),
opts.album,
not opts.yes,
opts.inherit,
)
modify_cmd = ui.Subcommand(
"modify", help="change metadata fields", aliases=("mod",)
)
modify_cmd.parser.add_option(
"-m",
"--move",
action="store_true",
dest="move",
help="move files in the library directory",
)
modify_cmd.parser.add_option(
"-M",
"--nomove",
action="store_false",
dest="move",
help="don't move files in library",
)
modify_cmd.parser.add_option(
"-w",
"--write",
action="store_true",
default=None,
help="write new metadata to files' tags (default)",
)
modify_cmd.parser.add_option(
"-W",
"--nowrite",
action="store_false",
dest="write",
help="don't write metadata (opposite of -w)",
)
modify_cmd.parser.add_album_option()
modify_cmd.parser.add_format_option(target="item")
modify_cmd.parser.add_option(
"-y", "--yes", action="store_true", help="skip confirmation"
)
modify_cmd.parser.add_option(
"-I",
"--noinherit",
action="store_false",
dest="inherit",
default=True,
help="when modifying albums, don't also change item data",
)
modify_cmd.func = modify_func
default_commands.append(modify_cmd)
# move: Move/copy files to the library or a new base directory.
def move_items(
lib,
dest_path: util.PathLike,
query,
copy,
album,
pretend,
confirm=False,
export=False,
):
"""Moves or copies items to a new base directory, given by dest. If
dest is None, then the library's base directory is used, making the
command "consolidate" files.
"""
dest = os.fsencode(dest_path) if dest_path else dest_path
items, albums = _do_query(lib, query, album, False)
objs = albums if album else items
num_objs = len(objs)
# Filter out files that don't need to be moved.
def isitemmoved(item):
return item.path != item.destination(basedir=dest)
def isalbummoved(album):
return any(isitemmoved(i) for i in album.items())
objs = [o for o in objs if (isalbummoved if album else isitemmoved)(o)]
num_unmoved = num_objs - len(objs)
# Report unmoved files that match the query.
unmoved_msg = ""
if num_unmoved > 0:
unmoved_msg = f" ({num_unmoved} already in place)"
copy = copy or export # Exporting always copies.
action = "Copying" if copy else "Moving"
act = "copy" if copy else "move"
entity = "album" if album else "item"
log.info(
"{} {} {}{}{}.",
action,
len(objs),
entity,
"s" if len(objs) != 1 else "",
unmoved_msg,
)
if not objs:
return
if pretend:
if album:
show_path_changes(
[
(item.path, item.destination(basedir=dest))
for obj in objs
for item in obj.items()
]
)
else:
show_path_changes(
[(obj.path, obj.destination(basedir=dest)) for obj in objs]
)
else:
if confirm:
objs = ui.input_select_objects(
f"Really {act}",
objs,
lambda o: show_path_changes(
[(o.path, o.destination(basedir=dest))]
),
)
for obj in objs:
log.debug("moving: {.filepath}", obj)
if export:
# Copy without affecting the database.
obj.move(
operation=MoveOperation.COPY, basedir=dest, store=False
)
else:
# Ordinary move/copy: store the new path.
if copy:
obj.move(operation=MoveOperation.COPY, basedir=dest)
else:
obj.move(operation=MoveOperation.MOVE, basedir=dest)
def move_func(lib, opts, args):
dest = opts.dest
if dest is not None:
dest = normpath(dest)
if not os.path.isdir(syspath(dest)):
raise ui.UserError(f"no such directory: {displayable_path(dest)}")
move_items(
lib,
dest,
args,
opts.copy,
opts.album,
opts.pretend,
opts.timid,
opts.export,
)
move_cmd = ui.Subcommand("move", help="move or copy items", aliases=("mv",))
move_cmd.parser.add_option(
"-d", "--dest", metavar="DIR", dest="dest", help="destination directory"
)
move_cmd.parser.add_option(
"-c",
"--copy",
default=False,
action="store_true",
help="copy instead of moving",
)
move_cmd.parser.add_option(
"-p",
"--pretend",
default=False,
action="store_true",
help="show how files would be moved, but don't touch anything",
)
move_cmd.parser.add_option(
"-t",
"--timid",
dest="timid",
action="store_true",
help="always confirm all actions",
)
move_cmd.parser.add_option(
"-e",
"--export",
default=False,
action="store_true",
help="copy without changing the database path",
)
move_cmd.parser.add_album_option()
move_cmd.func = move_func
default_commands.append(move_cmd)
# write: Write tags into files.
def write_items(lib, query, pretend, force):
"""Write tag information from the database to the respective files
in the filesystem.
"""
items, albums = _do_query(lib, query, False, False)
for item in items:
# Item deleted?
if not os.path.exists(syspath(item.path)):
log.info("missing file: {.filepath}", item)
continue
# Get an Item object reflecting the "clean" (on-disk) state.
try:
clean_item = library.Item.from_path(item.path)
except library.ReadError as exc:
log.error("error reading {.filepath}: {}", item, exc)
continue
# Check for and display changes.
changed = ui.show_model_changes(
item, clean_item, library.Item._media_tag_fields, force
)
if (changed or force) and not pretend:
# We use `try_sync` here to keep the mtime up to date in the
# database.
item.try_sync(True, False)
def write_func(lib, opts, args):
write_items(lib, args, opts.pretend, opts.force)
write_cmd = ui.Subcommand("write", help="write tag information to files")
write_cmd.parser.add_option(
"-p",
"--pretend",
action="store_true",
help="show all changes but do nothing",
)
write_cmd.parser.add_option(
"-f",
"--force",
action="store_true",
help="write tags even if the existing tags match the database",
)
write_cmd.func = write_func
default_commands.append(write_cmd)
# config: Show and edit user configuration.
def config_func(lib, opts, args):
# Make sure lazy configuration is loaded
config.resolve()
# Print paths.
if opts.paths:
filenames = []
for source in config.sources:
if not opts.defaults and source.default:
continue
if source.filename:
filenames.append(source.filename)
# In case the user config file does not exist, prepend it to the
# list.
user_path = config.user_config_path()
if user_path not in filenames:
filenames.insert(0, user_path)
for filename in filenames:
print_(displayable_path(filename))
# Open in editor.
elif opts.edit:
config_edit()
# Dump configuration.
else:
config_out = config.dump(full=opts.defaults, redact=opts.redact)
if config_out.strip() != "{}":
print_(config_out)
else:
print("Empty configuration")
def config_edit():
"""Open a program to edit the user configuration.
An empty config file is created if none exists yet.
"""
path = config.user_config_path()
editor = util.editor_command()
try:
if not os.path.isfile(path):
open(path, "w+").close()
util.interactive_open([path], editor)
except OSError as exc:
message = f"Could not edit configuration: {exc}"
if not editor:
message += (
". Please set the VISUAL (or EDITOR) environment variable"
)
raise ui.UserError(message)
config_cmd = ui.Subcommand("config", help="show or edit the user configuration")
config_cmd.parser.add_option(
"-p",
"--paths",
action="store_true",
help="show files that configuration was loaded from",
)
config_cmd.parser.add_option(
"-e",
"--edit",
action="store_true",
help="edit user configuration with $VISUAL (or $EDITOR)",
)
config_cmd.parser.add_option(
"-d",
"--defaults",
action="store_true",
help="include the default configuration",
)
config_cmd.parser.add_option(
"-c",
"--clear",
action="store_false",
dest="redact",
default=True,
help="do not redact sensitive fields",
)
config_cmd.func = config_func
default_commands.append(config_cmd)
# completion: print completion script
def print_completion(*args):
for line in completion_script(default_commands + plugins.commands()):
print_(line, end="")
if not any(os.path.isfile(syspath(p)) for p in BASH_COMPLETION_PATHS):
log.warning(
"Warning: Unable to find the bash-completion package. "
"Command line completion might not work."
)
BASH_COMPLETION_PATHS = [
b"/etc/bash_completion",
b"/usr/share/bash-completion/bash_completion",
b"/usr/local/share/bash-completion/bash_completion",
# SmartOS
b"/opt/local/share/bash-completion/bash_completion",
# Homebrew (before bash-completion2)
b"/usr/local/etc/bash_completion",
]
def completion_script(commands):
"""Yield the full completion shell script as strings.
``commands`` is a list of ``ui.Subcommand`` instances to generate
completion data for.
"""
base_script = os.path.join(os.path.dirname(__file__), "completion_base.sh")
with open(base_script) as base_script:
yield base_script.read()
options = {}
aliases = {}
command_names = []
# Collect subcommands
for cmd in commands:
name = cmd.name
command_names.append(name)
for alias in cmd.aliases:
if re.match(r"^\w+$", alias):
aliases[alias] = name
options[name] = {"flags": [], "opts": []}
for opts in cmd.parser._get_all_options()[1:]:
if opts.action in ("store_true", "store_false"):
option_type = "flags"
else:
option_type = "opts"
options[name][option_type].extend(
opts._short_opts + opts._long_opts
)
# Add global options
options["_global"] = {
"flags": ["-v", "--verbose"],
"opts": "-l --library -c --config -d --directory -h --help".split(" "),
}
# Add flags common to all commands
options["_common"] = {"flags": ["-h", "--help"]}
# Start generating the script
yield "_beet() {\n"
# Command names
yield f" local commands={' '.join(command_names)!r}\n"
yield "\n"
# Command aliases
yield f" local aliases={' '.join(aliases.keys())!r}\n"
for alias, cmd in aliases.items():
yield f" local alias__{alias.replace('-', '_')}={cmd}\n"
yield "\n"
# Fields
fields = library.Item._fields.keys() | library.Album._fields.keys()
yield f" fields={' '.join(fields)!r}\n"
# Command options
for cmd, opts in options.items():
for option_type, option_list in opts.items():
if option_list:
option_list = " ".join(option_list)
yield (
" local"
f" {option_type}__{cmd.replace('-', '_')}='{option_list}'\n"
)
yield " _beet_dispatch\n"
yield "}\n"
completion_cmd = ui.Subcommand(
"completion",
help="print shell script that provides command line completion",
)
completion_cmd.func = print_completion
completion_cmd.hide = True
default_commands.append(completion_cmd)
beetbox-beets-c1877b7/beets/ui/completion_base.sh 0000664 0000000 0000000 00000012257 15073551743 0022045 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright (c) 2014, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# Completion for the `beet` command
# =================================
#
# Load this script to complete beets subcommands, options, and
# queries.
#
# If a beets command is found on the command line it completes filenames and
# the subcommand's options. Otherwise it will complete global options and
# subcommands. If the previous option on the command line expects an argument,
# it also completes filenames or directories. Options are only
# completed if '-' has already been typed on the command line.
#
# Note that completion of plugin commands only works for those plugins
# that were enabled when running `beet completion`. It does not check
# plugins dynamically.
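#
# Typical usage (illustrative): load the generated script into the
# current shell:
#
#   eval "$(beet completion)"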
#
# Currently, only Bash 3.2 and newer is supported and the
# `bash-completion` package (v2.8 or newer) is required.
#
# TODO
# ----
#
# * There are some issues with arguments that are quoted on the command line.
#
# * Complete arguments for the `--format` option by expanding field variables.
#
# beet ls -f "$tit[TAB]
# beet ls -f "$title
#
# * Support long options with `=`, e.g. `--config=file`. Debian's bash
# completion package can handle this.
#
# Note that 'bash-completion' v2.8 is a part of Debian 10, which is part of
# LTS until 2024-06-30. After this date, the minimum version requirement can
# be changed, and newer features can be used unconditionally. See PR#5301.
#
if [[ ${BASH_COMPLETION_VERSINFO[0]} -ne 2 \
|| ${BASH_COMPLETION_VERSINFO[1]} -lt 8 ]]; then
echo "Incompatible version of 'bash-completion'!"
return 1
fi
# The later code relies on 'bash-completion' version 2.12, but older versions
# are still supported. Here, we provide implementations of the newer functions
# in terms of older ones, if 'bash-completion' is too old to have them.
if [[ ${BASH_COMPLETION_VERSINFO[1]} -lt 12 ]]; then
_comp_get_words() {
_get_comp_words_by_ref "$@"
}
_comp_compgen_filedir() {
_filedir "$@"
}
fi
# Determines the beets subcommand and dispatches the completion
# accordingly.
_beet_dispatch() {
local cur prev cmd=
COMPREPLY=()
_comp_get_words -n : cur prev
# Look for the beets subcommand
local arg
for (( i=1; i < COMP_CWORD; i++ )); do
arg="${COMP_WORDS[i]}"
if _list_include_item "${opts___global}" $arg; then
((i++))
elif [[ "$arg" != -* ]]; then
cmd="$arg"
break
fi
done
# Replace command shortcuts
if [[ -n $cmd ]] && _list_include_item "$aliases" "$cmd"; then
eval "cmd=\$alias__${cmd//-/_}"
fi
case $cmd in
help)
COMPREPLY+=( $(compgen -W "$commands" -- $cur) )
;;
list|remove|move|update|write|stats)
_beet_complete_query
;;
"")
_beet_complete_global
;;
*)
_beet_complete
;;
esac
}
# Adds option and file completion to COMPREPLY for the subcommand $cmd
_beet_complete() {
if [[ $cur == -* ]]; then
local opts flags completions
eval "opts=\$opts__${cmd//-/_}"
eval "flags=\$flags__${cmd//-/_}"
completions="${flags___common} ${opts} ${flags}"
COMPREPLY+=( $(compgen -W "$completions" -- $cur) )
else
_comp_compgen_filedir
fi
}
# Add global options and subcommands to the completion
_beet_complete_global() {
case $prev in
-h|--help)
# Complete commands
COMPREPLY+=( $(compgen -W "$commands" -- $cur) )
return
;;
-l|--library|-c|--config)
# Filename completion
_comp_compgen_filedir
return
;;
-d|--directory)
# Directory completion
_comp_compgen_filedir -d
return
;;
esac
if [[ $cur == -* ]]; then
local completions="$opts___global $flags___global"
COMPREPLY+=( $(compgen -W "$completions" -- $cur) )
elif [[ -n $cur ]] && _list_include_item "$aliases" "$cur"; then
local cmd
eval "cmd=\$alias__${cur//-/_}"
COMPREPLY+=( "$cmd" )
else
COMPREPLY+=( $(compgen -W "$commands" -- $cur) )
fi
}
_beet_complete_query() {
local opts
eval "opts=\$opts__${cmd//-/_}"
if [[ $cur == -* ]] || _list_include_item "$opts" "$prev"; then
_beet_complete
elif [[ $cur != \'* && $cur != \"* &&
$cur != *:* ]]; then
# Do not complete quoted queries or those that already have a field
# set.
compopt -o nospace
COMPREPLY+=( $(compgen -S : -W "$fields" -- $cur) )
return 0
fi
}
# Returns true if the space separated list $1 includes $2
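# For instance, `_list_include_item "ls list rm" "list"` succeeds,
# while `_list_include_item "ls list rm" "lis"` does not.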
_list_include_item() {
[[ " $1 " == *[[:space:]]$2[[:space:]]* ]]
}
# This is where beets dynamically adds the _beet function. This
# function sets the variables $flags, $opts, $commands, and $aliases.
complete -o filenames -F _beet beet
beetbox-beets-c1877b7/beets/util/ 0000775 0000000 0000000 00000000000 15073551743 0016677 5 ustar 00root root 0000000 0000000 beetbox-beets-c1877b7/beets/util/__init__.py 0000664 0000000 0000000 00000114446 15073551743 0021022 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Miscellaneous utility functions."""
from __future__ import annotations
import errno
import fnmatch
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import traceback
import warnings
from collections import Counter
from collections.abc import Sequence
from contextlib import suppress
from enum import Enum
from functools import cache
from importlib import import_module
from multiprocessing.pool import ThreadPool
from pathlib import Path
from re import Pattern
from typing import (
TYPE_CHECKING,
Any,
AnyStr,
Callable,
ClassVar,
Generic,
NamedTuple,
TypeVar,
Union,
)
from unidecode import unidecode
import beets
from beets.util import hidden
if TYPE_CHECKING:
from collections.abc import Iterable, Iterator
from logging import Logger
from beets.library import Item
MAX_FILENAME_LENGTH = 200
WINDOWS_MAGIC_PREFIX = "\\\\?\\"
T = TypeVar("T")
PathLike = Union[str, bytes, Path]
StrPath = Union[str, Path]
Replacements = Sequence[tuple[Pattern[str], str]]
# Here for now to allow for an easy replacement later on
# once we can move to a PathLike (mainly used in importer)
PathBytes = bytes
class HumanReadableError(Exception):
"""An Exception that can include a human-readable error message to
be logged without a traceback. Can preserve a traceback for
debugging purposes as well.
Has at least two fields: `reason`, the underlying exception or a
string describing the problem; and `verb`, the action being
performed during the error.
If `tb` is provided, it is a string containing a traceback for the
associated exception. (Note that this is not necessary in Python 3.x
and should be removed when we make the transition.)
"""
error_kind = "Error" # Human-readable description of error type.
def __init__(self, reason, verb, tb=None):
self.reason = reason
self.verb = verb
self.tb = tb
super().__init__(self.get_message())
def _gerund(self):
"""Generate a (likely) gerund form of the English verb."""
if " " in self.verb:
return self.verb
gerund = self.verb[:-1] if self.verb.endswith("e") else self.verb
gerund += "ing"
return gerund
def _reasonstr(self):
"""Get the reason as a string."""
if isinstance(self.reason, str):
return self.reason
elif isinstance(self.reason, bytes):
return self.reason.decode("utf-8", "ignore")
elif hasattr(self.reason, "strerror"): # i.e., EnvironmentError
return self.reason.strerror
else:
return f'"{self.reason}"'
def get_message(self):
"""Create the human-readable description of the error, sans
introduction.
"""
raise NotImplementedError
def log(self, logger):
"""Log to the provided `logger` a human-readable message as an
error and a verbose traceback as a debug message.
"""
if self.tb:
logger.debug(self.tb)
logger.error("{0.error_kind}: {0.args[0]}", self)
class FilesystemError(HumanReadableError):
"""An error that occurred while performing a filesystem manipulation
via a function in this module. The `paths` field is a sequence of
pathnames involved in the operation.
"""
def __init__(self, reason, verb, paths, tb=None):
self.paths = paths
super().__init__(reason, verb, tb)
def get_message(self):
# Use a nicer English phrasing for some specific verbs.
if self.verb in ("move", "copy", "rename"):
clause = (
f"while {self._gerund()} {displayable_path(self.paths[0])} to"
f" {displayable_path(self.paths[1])}"
)
elif self.verb in ("delete", "write", "create", "read"):
clause = f"while {self._gerund()} {displayable_path(self.paths[0])}"
else:
clause = (
f"during {self.verb} of paths"
f" {', '.join(displayable_path(p) for p in self.paths)}"
)
return f"{self._reasonstr()} {clause}"
class MoveOperation(Enum):
"""The file operations that e.g. various move functions can carry out."""
MOVE = 0
COPY = 1
LINK = 2
HARDLINK = 3
REFLINK = 4
REFLINK_AUTO = 5
def normpath(path: PathLike) -> bytes:
"""Provide the canonical form of the path suitable for storing in
the database.
"""
str_path = syspath(path, prefix=False)
str_path = os.path.normpath(os.path.abspath(os.path.expanduser(str_path)))
return bytestring_path(str_path)
def ancestry(path: AnyStr) -> list[AnyStr]:
"""Return a list consisting of path's parent directory, its
grandparent, and so on. For instance:
>>> ancestry(b'/a/b/c')
['/', '/a', '/a/b']
The argument should *not* be the result of a call to `syspath`.
"""
out: list[AnyStr] = []
last_path = None
while path:
path = os.path.dirname(path)
if path == last_path:
break
last_path = path
if path:
# don't yield ''
out.insert(0, path)
return out
def sorted_walk(
path: PathLike,
ignore: Sequence[PathLike] = (),
ignore_hidden: bool = False,
logger: Logger | None = None,
) -> Iterator[tuple[bytes, Sequence[bytes], Sequence[bytes]]]:
"""Like `os.walk`, but yields things in case-insensitive sorted,
breadth-first order. Directory and file names matching any glob
pattern in `ignore` are skipped. If `logger` is provided, then
warning messages are logged there when a directory cannot be listed.
"""
# Make sure the paths aren't Unicode strings.
bytes_path = bytestring_path(path)
ignore_bytes = [ # rename prevents mypy variable shadowing issue
bytestring_path(i) for i in ignore
]
# Get all the directories and files at this level.
try:
contents = os.listdir(syspath(bytes_path))
except OSError:
if logger:
logger.warning(
"could not list directory {}",
displayable_path(bytes_path),
exc_info=True,
)
return
dirs = []
files = []
for str_base in contents:
base = bytestring_path(str_base)
# Skip ignored filenames.
skip = False
for pat in ignore_bytes:
if fnmatch.fnmatch(base, pat):
if logger:
logger.debug(
"ignoring '{}' due to ignore rule '{}'", base, pat
)
skip = True
break
if skip:
continue
# Add to output as either a file or a directory.
cur = os.path.join(bytes_path, base)
if (ignore_hidden and not hidden.is_hidden(cur)) or not ignore_hidden:
if os.path.isdir(syspath(cur)):
dirs.append(base)
else:
files.append(base)
# Sort lists (case-insensitive) and yield the current level.
dirs.sort(key=bytes.lower)
files.sort(key=bytes.lower)
yield (bytes_path, dirs, files)
# Recurse into directories.
for base in dirs:
cur = os.path.join(bytes_path, base)
yield from sorted_walk(cur, ignore_bytes, ignore_hidden, logger)
def path_as_posix(path: bytes) -> bytes:
"""Return the string representation of the path with forward (/)
slashes.
"""
return path.replace(b"\\", b"/")
def mkdirall(path: bytes):
"""Make all the enclosing directories of path (like mkdir -p on the
parent).
"""
for ancestor in ancestry(path):
if not os.path.isdir(syspath(ancestor)):
try:
os.mkdir(syspath(ancestor))
except OSError as exc:
raise FilesystemError(
exc, "create", (ancestor,), traceback.format_exc()
)
def fnmatch_all(names: Sequence[bytes], patterns: Sequence[bytes]) -> bool:
"""Determine whether all strings in `names` match at least one of
the `patterns`, which should be shell glob expressions.
"""
for name in names:
matches = False
for pattern in patterns:
matches = fnmatch.fnmatch(name, pattern)
if matches:
break
if not matches:
return False
return True
def prune_dirs(
path: PathLike,
root: PathLike | None = None,
clutter: Sequence[str] = (".DS_Store", "Thumbs.db"),
):
"""If path is an empty directory, then remove it. Recursively remove
path's ancestry up to root (which is never removed) where there are
empty directories. If path is not contained in root, then nothing is
removed. Glob patterns in clutter are ignored when determining
emptiness. If root is not provided, then only path may be removed
(i.e., no recursive removal).
"""
path = normpath(path)
root = normpath(root) if root else None
ancestors = ancestry(path)
if root is None:
# Only remove the top directory.
ancestors = []
elif root in ancestors:
# Only remove directories below the root.
ancestors = ancestors[ancestors.index(root) + 1 :]
else:
# Remove nothing.
return
bytes_clutter = [bytestring_path(c) for c in clutter]
# Traverse upward from path.
ancestors.append(path)
ancestors.reverse()
for directory in ancestors:
str_directory = syspath(directory)
if not os.path.exists(directory):
# Directory gone already.
continue
match_paths = [bytestring_path(d) for d in os.listdir(str_directory)]
try:
if fnmatch_all(match_paths, bytes_clutter):
# Directory contains only clutter (or nothing).
shutil.rmtree(str_directory)
else:
break
except OSError:
break
def components(path: AnyStr) -> list[AnyStr]:
"""Return a list of the path components in path. For instance:
>>> components(b'/a/b/c')
['a', 'b', 'c']
The argument should *not* be the result of a call to `syspath`.
"""
comps = []
ances = ancestry(path)
for anc in ances:
comp = os.path.basename(anc)
if comp:
comps.append(comp)
else: # root
comps.append(anc)
last = os.path.basename(path)
if last:
comps.append(last)
return comps
def bytestring_path(path: PathLike) -> bytes:
"""Given a path, which is either a bytes or a unicode, returns a str
path (ensuring that we never deal with Unicode pathnames). Path should be
bytes but has safeguards for strings to be converted.
"""
# Pass through bytestrings.
if isinstance(path, bytes):
return path
str_path = str(path)
# On Windows, remove the magic prefix added by `syspath`. This makes
# ``bytestring_path(syspath(X)) == X``, i.e., we can safely
# round-trip through `syspath`.
if os.path.__name__ == "ntpath" and str_path.startswith(
WINDOWS_MAGIC_PREFIX
):
str_path = str_path[len(WINDOWS_MAGIC_PREFIX) :]
return os.fsencode(str_path)
PATH_SEP: bytes = bytestring_path(os.sep)
def displayable_path(
path: PathLike | Iterable[PathLike], separator: str = "; "
) -> str:
"""Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user. If the `path` argument is a
list or a tuple, the elements are joined with `separator`.
"""
if isinstance(path, (list, tuple)):
return separator.join(displayable_path(p) for p in path)
elif isinstance(path, str):
return path
elif not isinstance(path, bytes):
# A non-string object: just get its unicode representation.
return str(path)
return os.fsdecode(path)
def syspath(path: PathLike, prefix: bool = True) -> str:
"""Convert a path for use by the operating system. In particular,
paths on Windows must receive a magic prefix and must be converted
to Unicode before they are sent to the OS. To disable the magic
prefix on Windows, set `prefix` to False---but only do this if you
*really* know what you're doing.
"""
str_path = os.fsdecode(path)
# Don't do anything if we're not on windows
if os.path.__name__ != "ntpath":
return str_path
# Add the magic prefix if it isn't already there.
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
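# For instance, "C:\music" becomes "\\?\C:\music", and the UNC path
# "\\server\share" becomes "\\?\UNC\server\share".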
if prefix and not str_path.startswith(WINDOWS_MAGIC_PREFIX):
if str_path.startswith("\\\\"):
# UNC path. Final path should look like \\?\UNC\...
str_path = f"UNC{str_path[1:]}"
str_path = f"{WINDOWS_MAGIC_PREFIX}{str_path}"
return str_path
def samefile(p1: bytes, p2: bytes) -> bool:
"""Safer equality for paths."""
if p1 == p2:
return True
with suppress(OSError):
return os.path.samefile(syspath(p1), syspath(p2))
return False
def remove(path: PathLike, soft: bool = True):
"""Remove the file. If `soft`, then no error will be raised if the
file does not exist.
"""
str_path = syspath(path)
if not str_path or (soft and not os.path.exists(str_path)):
return
try:
os.remove(str_path)
except OSError as exc:
raise FilesystemError(
exc, "delete", (str_path,), traceback.format_exc()
)
def copy(path: bytes, dest: bytes, replace: bool = False):
"""Copy a plain file. Permissions are not copied. If `dest` already
exists, raises a FilesystemError unless `replace` is True. Has no
effect if `path` is the same as `dest`. Paths are translated to
system paths before the syscall.
"""
if samefile(path, dest):
return
str_path = syspath(path)
str_dest = syspath(dest)
if not replace and os.path.exists(str_dest):
raise FilesystemError("file exists", "copy", (str_path, str_dest))
try:
shutil.copyfile(str_path, str_dest)
except OSError as exc:
raise FilesystemError(
exc, "copy", (str_path, str_dest), traceback.format_exc()
)
def move(path: bytes, dest: bytes, replace: bool = False):
"""Rename a file. `dest` may not be a directory. If `dest` already
exists, raises an OSError unless `replace` is True. Has no effect if
`path` is the same as `dest`. Paths are translated to system paths.
"""
if os.path.isdir(syspath(path)):
raise FilesystemError("source is directory", "move", (path, dest))
if os.path.isdir(syspath(dest)):
raise FilesystemError("destination is directory", "move", (path, dest))
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError("file exists", "rename", (path, dest))
# First, try renaming the file.
try:
os.replace(syspath(path), syspath(dest))
except OSError:
# Copy the file to a temporary destination.
basename = os.path.basename(bytestring_path(dest))
dirname = os.path.dirname(bytestring_path(dest))
tmp = tempfile.NamedTemporaryFile(
suffix=".beets",
prefix=f".{os.fsdecode(basename)}.",
dir=syspath(dirname),
delete=False,
)
try:
with open(syspath(path), "rb") as f:
# mypy bug:
# - https://github.com/python/mypy/issues/15031
# - https://github.com/python/mypy/issues/14943
# Fix not yet released:
# - https://github.com/python/mypy/pull/14975
shutil.copyfileobj(f, tmp) # type: ignore[misc]
finally:
tmp.close()
try:
# Copy file metadata
shutil.copystat(syspath(path), tmp.name)
except OSError:
# Ignore errors because it doesn't matter too much. We may be on a
# filesystem that doesn't support this.
pass
# Move the copied file into place.
tmp_filename = tmp.name
try:
os.replace(tmp_filename, syspath(dest))
tmp_filename = ""
os.remove(syspath(path))
except OSError as exc:
raise FilesystemError(
exc, "move", (path, dest), traceback.format_exc()
)
finally:
if tmp_filename:
os.remove(tmp_filename)
def link(path: bytes, dest: bytes, replace: bool = False):
"""Create a symbolic link from path to `dest`. Raises an OSError if
`dest` already exists, unless `replace` is True. Does nothing if
`path` == `dest`.
"""
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError("file exists", "rename", (path, dest))
try:
os.symlink(syspath(path), syspath(dest))
except NotImplementedError:
# raised on python >= 3.2 and Windows versions before Vista
raise FilesystemError(
"OS does not support symbolic links.link",
(path, dest),
traceback.format_exc(),
)
except OSError as exc:
raise FilesystemError(exc, "link", (path, dest), traceback.format_exc())
def hardlink(path: bytes, dest: bytes, replace: bool = False):
"""Create a hard link from path to `dest`. Raises an OSError if
`dest` already exists, unless `replace` is True. Does nothing if
`path` == `dest`.
"""
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError("file exists", "rename", (path, dest))
try:
os.link(syspath(path), syspath(dest))
except NotImplementedError:
raise FilesystemError(
"OS does not support hard links.link",
(path, dest),
traceback.format_exc(),
)
except OSError as exc:
if exc.errno == errno.EXDEV:
raise FilesystemError(
"Cannot hard link across devices.link",
(path, dest),
traceback.format_exc(),
)
else:
raise FilesystemError(
exc, "link", (path, dest), traceback.format_exc()
)
def reflink(
path: bytes,
dest: bytes,
replace: bool = False,
fallback: bool = False,
):
"""Create a reflink from `dest` to `path`.
Raise an `OSError` if `dest` already exists, unless `replace` is
True. If `path` == `dest`, then do nothing.
If `fallback` is enabled, ignore errors and copy the file instead.
Otherwise, errors are re-raised as FilesystemError with an explanation.
"""
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError("target exists", "rename", (path, dest))
if fallback:
with suppress(Exception):
return import_module("reflink").reflink(path, dest)
return copy(path, dest, replace)
try:
import_module("reflink").reflink(path, dest)
except (ImportError, OSError):
raise
except Exception as exc:
msg = {
"EXDEV": "Cannot reflink across devices",
"EOPNOTSUPP": "Device does not support reflinks",
}.get(str(exc), "OS does not support reflinks")
raise FilesystemError(
msg, "reflink", (path, dest), traceback.format_exc()
) from exc
def unique_path(path: bytes) -> bytes:
"""Returns a version of ``path`` that does not exist on the
filesystem. Specifically, if ``path`` itself already exists, then
something unique is appended to the path.
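For instance, if ``b'/music/track.mp3'`` and ``b'/music/track.1.mp3'``
both exist, something like ``b'/music/track.2.mp3'`` is returned
(illustrative paths).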
"""
if not os.path.exists(syspath(path)):
return path
base, ext = os.path.splitext(path)
match = re.search(rb"\.(\d)+$", base)
if match:
num = int(match.group(1))
base = base[: match.start()]
else:
num = 0
while True:
num += 1
suffix = f".{num}".encode() + ext
new_path = base + suffix
if not os.path.exists(new_path):
return new_path
# Note: The Windows "reserved characters" are, of course, allowed on
# Unix. They are forbidden here because they cause problems on Samba
# shares, which are sufficiently common as to cause frequent problems.
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
CHAR_REPLACE = [
(re.compile(r"[\\/]"), "_"), # / and \ -- forbidden everywhere.
(re.compile(r"^\."), "_"), # Leading dot (hidden files on Unix).
(re.compile(r"[\x00-\x1f]"), ""), # Control characters.
(re.compile(r'[<>:"\?\*\|]'), "_"), # Windows "reserved characters".
(re.compile(r"\.$"), "_"), # Trailing dots.
(re.compile(r"\s+$"), ""), # Trailing whitespace.
]
def sanitize_path(path: str, replacements: Replacements | None = None) -> str:
"""Takes a path (as a Unicode string) and makes sure that it is
legal. Returns a new path. Only works with fragments; won't work
reliably on Windows when a path begins with a drive letter. Path
separators (including altsep!) should already be cleaned from the
path components. If replacements is specified, it is used *instead*
of the default set of replacements; it must be a list of (compiled
regex, replacement string) pairs.
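For instance, on a POSIX system (illustrative input):
>>> sanitize_path('.hidden/na:me?')
'_hidden/na_me_'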
"""
replacements = replacements or CHAR_REPLACE
comps = components(path)
if not comps:
return ""
for i, comp in enumerate(comps):
for regex, repl in replacements:
comp = regex.sub(repl, comp)
comps[i] = comp
return os.path.join(*comps)
def truncate_str(s: str, length: int) -> str:
"""Truncate the string to the given byte length.
If we end up truncating a unicode character in the middle (rendering it invalid),
it is removed:
>>> s = "🎹🎶" # 8 bytes
>>> truncate_str(s, 6)
'🎹'
"""
return os.fsencode(s)[:length].decode(sys.getfilesystemencoding(), "ignore")
def truncate_path(str_path: str) -> str:
"""Truncate each path part to a legal length preserving the extension."""
max_length = get_max_filename_length()
path = Path(str_path)
parent_parts = [truncate_str(p, max_length) for p in path.parts[:-1]]
stem = truncate_str(path.stem, max_length - len(path.suffix))
return f"{Path(*parent_parts, stem)}{path.suffix}"
def _legalize_stage(
path: str, replacements: Replacements | None, extension: str
) -> tuple[str, bool]:
"""Perform a single round of path legalization steps
1. sanitation/replacement
2. appending the extension
3. truncation.
Return the path and whether truncation was required.
"""
# Perform an initial sanitization including user replacements.
path = sanitize_path(path, replacements)
# Preserve extension.
path += extension.lower()
# Truncate too-long components.
pre_truncate_path = path
path = truncate_path(path)
return path, path != pre_truncate_path
def legalize_path(
path: str, replacements: Replacements | None, extension: str
) -> tuple[str, bool]:
"""Given a path-like Unicode string, produce a legal path. Return the path
and a flag indicating whether some replacements had to be ignored (see
below).
This function uses `_legalize_stage` function to legalize the path, see its
documentation for the details of what this involves. It is called up to
three times in case truncation conflicts with replacements (as can happen
when truncation creates whitespace at the end of the string, for example).
The limited number of iterations avoids the possibility of an infinite loop
of sanitation and truncation operations, which could be caused by
replacement rules that make the string longer.
The flag returned from this function indicates that the path has to be
truncated twice (indicating that replacements made the string longer again
after it was truncated); the application should probably log some sort of
warning.
"""
suffix = as_string(extension)
first_stage, _ = os.path.splitext(
_legalize_stage(path, replacements, suffix)[0]
)
# Re-sanitize following truncation (including user replacements).
second_stage, truncated = _legalize_stage(first_stage, replacements, suffix)
if not truncated:
return second_stage, False
# If the path was truncated, discard user replacements
# and run through one last legalization stage.
return _legalize_stage(first_stage, None, suffix)[0], True
def str2bool(value: str) -> bool:
"""Returns a boolean reflecting a human-entered string."""
return value.lower() in ("yes", "1", "true", "t", "y")
def as_string(value: Any) -> str:
"""Convert a value to a Unicode object for matching with a query.
None becomes the empty string. Bytestrings are silently decoded.
"""
if value is None:
return ""
elif isinstance(value, memoryview):
return bytes(value).decode("utf-8", "ignore")
elif isinstance(value, bytes):
return value.decode("utf-8", "ignore")
else:
return str(value)
def plurality(objs: Iterable[T]) -> tuple[T, int]:
"""Given a sequence of hashble objects, returns the object that
is most common in the set and the its number of appearance. The
sequence must contain at least one object.
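For instance:
>>> plurality(["a", "b", "b"])
('b', 2)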
"""
c = Counter(objs)
if not c:
raise ValueError("sequence must be non-empty")
return c.most_common(1)[0]
def get_most_common_tags(
items: Sequence[Item],
) -> tuple[dict[str, Any], dict[str, Any]]:
"""Extract the likely current metadata for an album given a list of its
items. Return two dictionaries:
- The most common value for each field.
- Whether each field's value was unanimous (values are booleans).
"""
assert items # Must be nonempty.
likelies = {}
consensus = {}
fields = [
"artist",
"album",
"albumartist",
"year",
"disctotal",
"mb_albumid",
"label",
"barcode",
"catalognum",
"country",
"media",
"albumdisambig",
"data_source",
]
for field in fields:
values = [item.get(field) for item in items if item]
likelies[field], freq = plurality(values)
consensus[field] = freq == len(values)
# If there's an album artist consensus, use this for the artist.
if consensus["albumartist"] and likelies["albumartist"]:
likelies["artist"] = likelies["albumartist"]
return likelies, consensus
# stdout and stderr as bytes
class CommandOutput(NamedTuple):
stdout: bytes
stderr: bytes
def command_output(
cmd: list[str] | list[bytes], shell: bool = False
) -> CommandOutput:
"""Runs the command and returns its output after it has exited.
Returns a CommandOutput. The attributes ``stdout`` and ``stderr`` contain
byte strings of the respective output streams.
``cmd`` is a list of arguments starting with the command name. The
arguments are bytes on Unix and strings on Windows.
If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a
shell to execute.
If the process exits with a non-zero return code
``subprocess.CalledProcessError`` is raised. May also raise
``OSError``.
This replaces `subprocess.check_output` which can have problems if lots of
output is sent to stderr.
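For instance (illustrative, POSIX):
>>> command_output([b"echo", b"hi"]).stdout
b'hi\n'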
"""
devnull = subprocess.DEVNULL
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=devnull,
close_fds=platform.system() != "Windows",
shell=shell,
)
stdout, stderr = proc.communicate()
if proc.returncode:
raise subprocess.CalledProcessError(
returncode=proc.returncode,
cmd=" ".join(map(os.fsdecode, cmd)),
output=stdout + stderr,
)
return CommandOutput(stdout, stderr)
@cache
def get_max_filename_length() -> int:
"""Attempt to determine the maximum filename length for the
filesystem containing `path`. If the value is greater than `limit`,
then `limit` is used instead (to prevent errors when a filesystem
misreports its capacity). If it cannot be determined (e.g., on
Windows), return `limit`.
"""
if length := beets.config["max_filename_length"].get(int):
return length
limit = MAX_FILENAME_LENGTH
if hasattr(os, "statvfs"):
try:
res = os.statvfs(beets.config["directory"].as_str())
except OSError:
return limit
return min(res[9], limit)
else:
return limit
def open_anything() -> str:
"""Return the system command that dispatches execution to the correct
program.
"""
sys_name = platform.system()
if sys_name == "Darwin":
base_cmd = "open"
elif sys_name == "Windows":
base_cmd = "start"
else: # Assume Unix
base_cmd = "xdg-open"
return base_cmd
def editor_command() -> str:
"""Get a command for opening a text file.
First try environment variable `VISUAL` followed by `EDITOR`. As last resort
fall back to `open_anything()`, the platform-specific tool for opening files
in general.
"""
return (
os.environ.get("VISUAL") or os.environ.get("EDITOR") or open_anything()
)
def interactive_open(targets: Sequence[str], command: str):
"""Open the files in `targets` by `exec`ing a new `command`, given
as a Unicode string. (The new program takes over, and Python
execution ends: this does not fork a subprocess.)
Can raise `OSError`.
"""
assert command
# Split the command string into its arguments.
try:
args = shlex.split(command)
except ValueError: # Malformed shell tokens.
args = [command]
args.insert(0, args[0]) # for argv[0]
args += targets
return os.execlp(*args)
def case_sensitive(path: bytes) -> bool:
"""Check whether the filesystem at the given path is case sensitive.
To work best, the path should point to a file or a directory. If the path
does not exist, assume a case sensitive file system on every platform
except Windows.
Currently only used for absolute paths by beets; may have a trailing
path separator.
"""
# Look at parent paths until we find a path that actually exists, or
# reach the root.
while True:
head, tail = os.path.split(path)
if head == path:
# We have reached the root of the file system.
# By default, the case sensitivity depends on the platform.
return platform.system() != "Windows"
# Trailing path separator, or path does not exist.
if not tail or not os.path.exists(path):
path = head
continue
upper_tail = tail.upper()
lower_tail = tail.lower()
# In case we can't tell from the given path name, look at the
# parent directory.
if upper_tail == lower_tail:
path = head
continue
upper_sys = syspath(os.path.join(head, upper_tail))
lower_sys = syspath(os.path.join(head, lower_tail))
# If either the upper-cased or lower-cased path does not exist, the
# filesystem must be case-sensitive.
# (Otherwise, we have more work to do.)
if not os.path.exists(upper_sys) or not os.path.exists(lower_sys):
return True
# Original and both upper- and lower-cased versions of the path
# exist on the file system. Check whether they refer to different
# files by their inodes (or an alternative method on Windows).
return not os.path.samefile(lower_sys, upper_sys)
def asciify_path(path: str, sep_replace: str) -> str:
"""Decodes all unicode characters in a path into ASCII equivalents.
Substitutions are provided by the unidecode module. Path separators in the
input are preserved.
Keyword arguments:
path -- The path to be asciified.
sep_replace -- the string to be used to replace extraneous path separators.
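For instance, on a POSIX system (illustrative input):
>>> asciify_path('café/naïve', '_')
'cafe/naive'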
"""
# if this platform has an os.altsep, change it to os.sep.
if os.altsep:
path = path.replace(os.altsep, os.sep)
path_components: list[str] = path.split(os.sep)
for index, item in enumerate(path_components):
path_components[index] = unidecode(item).replace(os.sep, sep_replace)
if os.altsep:
path_components[index] = unidecode(item).replace(
os.altsep, sep_replace
)
return os.sep.join(path_components)
def par_map(transform: Callable[[T], Any], items: Sequence[T]) -> None:
"""Apply the function `transform` to all the elements in the
iterable `items`, like `map(transform, items)` but with no return
value.
The parallelism uses threads (not processes), so this is only useful
for IO-bound `transform`s.
"""
pool = ThreadPool()
pool.map(transform, items)
pool.close()
pool.join()
class cached_classproperty:
"""Descriptor implementing cached class properties.
Provides class-level dynamic property behavior where the getter function is
called once per class and the result is cached for subsequent access. Unlike
instance properties, this operates on the class rather than instances.
"""
cache: ClassVar[dict[tuple[Any, str], Any]] = {}
name: str
# Ideally, we would like to use `Callable[[type[T]], Any]` here,
# however, `mypy` is unable to see this as a **class** property, and thinks
# that this callable receives an **instance** of the object, failing the
# type check, for example:
# >>> class Album:
# >>> @cached_classproperty
# >>> def foo(cls):
# >>> reveal_type(cls) # mypy: revealed type is "Album"
# >>> return cls.bar
#
# Argument 1 to "cached_classproperty" has incompatible type
# "Callable[[Album], ...]"; expected "Callable[[type[Album]], ...]"
#
# Therefore, we just use `Any` here, which is not ideal, but works.
def __init__(self, getter: Callable[[Any], Any]) -> None:
"""Initialize the descriptor with the property getter function."""
self.getter = getter
def __set_name__(self, owner: Any, name: str) -> None:
"""Capture the attribute name this descriptor is assigned to."""
self.name = name
def __get__(self, instance: Any, owner: type[Any]) -> Any:
"""Compute and cache if needed, and return the property value."""
key = owner, self.name
if key not in self.cache:
self.cache[key] = self.getter(owner)
return self.cache[key]
class LazySharedInstance(Generic[T]):
"""A descriptor that provides access to a lazily-created shared instance of
the containing class, while calling the class constructor to construct a
new object works as usual.
```
ID: int = 0
class Foo:
def __init__(self):
global ID
self.id = ID
ID += 1
def func(self):
print(self.id)
shared: LazySharedInstance[Foo] = LazySharedInstance()
a0 = Foo()
a1 = Foo.shared
a2 = Foo()
a3 = Foo.shared
a0.func() # 0
a1.func() # 1
a2.func() # 2
a3.func() # 1
```
"""
_instance: T | None = None
def __get__(self, instance: T | None, owner: type[T]) -> T:
if instance is not None:
raise RuntimeError(
"shared instances must be obtained from the class property, "
"not an instance"
)
if self._instance is None:
self._instance = owner()
return self._instance
def get_module_tempdir(module: str) -> Path:
"""Return the temporary directory for the given module.
The directory is located within the `/tmp/beets/` directory on
Linux (or the equivalent temporary directory on other systems).
Dots in the module name are replaced by underscores.
"""
module = module.replace("beets.", "").replace(".", "_")
return Path(tempfile.gettempdir()) / "beets" / module
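# For example, on Linux get_module_tempdir("beets.util.artresizer") returns
# Path("/tmp/beets/util_artresizer").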
def clean_module_tempdir(module: str) -> None:
"""Clean the temporary directory for the given module."""
tempdir = get_module_tempdir(module)
shutil.rmtree(tempdir, ignore_errors=True)
with suppress(OSError):
# remove parent (/tmp/beets) directory if it is empty
tempdir.parent.rmdir()
def get_temp_filename(
module: str,
prefix: str = "",
path: PathLike | None = None,
suffix: str = "",
) -> bytes:
"""Return temporary filename for the given module and prefix.
The filename starts with the given `prefix`.
If 'suffix' is given, it is used as the file extension.
If 'path' is given and 'suffix' is not, the suffix of 'path' is reused.
"""
if not suffix and path:
suffix = Path(os.fsdecode(path)).suffix
tempdir = get_module_tempdir(module)
tempdir.mkdir(parents=True, exist_ok=True)
descriptor, filename = tempfile.mkstemp(
dir=tempdir, prefix=prefix, suffix=suffix
)
os.close(descriptor)
return bytestring_path(filename)
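# Illustrative call (hypothetical arguments; the random component is chosen
# by tempfile.mkstemp, and the suffix is taken from the given path):
# >>> get_temp_filename("beets.util.artresizer", "resize_", b"cover.png")
# b'/tmp/beets/util_artresizer/resize_abc123.png'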
def unique_list(elements: Iterable[T]) -> list[T]:
"""Return a list with unique elements in the original order."""
return list(dict.fromkeys(elements))
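# For example (dict.fromkeys preserves first-seen order on Python 3.7+):
# >>> unique_list([3, 1, 3, 2, 1])
# [3, 1, 2]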
def deprecate_imports(
old_module: str, new_module_by_name: dict[str, str], name: str, version: str
) -> Any:
"""Handle deprecated module imports by redirecting to new locations.
Facilitates gradual migration of module structure by intercepting import
attempts for relocated functionality. Issues deprecation warnings while
transparently providing access to the moved implementation, allowing
existing code to continue working during transition periods.
"""
if new_module := new_module_by_name.get(name):
warnings.warn(
(
f"'{old_module}.{name}' is deprecated and will be removed"
f" in {version}. Use '{new_module}.{name}' instead."
),
DeprecationWarning,
stacklevel=2,
)
return getattr(import_module(new_module), name)
raise AttributeError(f"module '{old_module}' has no attribute '{name}'")
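# Illustrative wiring in a deprecated module via a PEP 562 module-level
# __getattr__ (hypothetical mapping and version string; `deprecate_imports`
# itself is defined above):
#
#     def __getattr__(name: str):
#         return deprecate_imports(
#             __name__, {"bytestring_path": "beets.util"}, name, "3.0.0"
#         )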
beetbox-beets-c1877b7/beets/util/artresizer.py 0000664 0000000 0000000 00000067077 15073551743 0021464 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Fabrice Laporte
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Abstraction layer to resize images using PIL, ImageMagick, or a
public resizing proxy if neither is available.
"""
from __future__ import annotations
import os
import os.path
import platform
import re
import subprocess
from abc import ABC, abstractmethod
from enum import Enum
from itertools import chain
from typing import Any, ClassVar, Mapping
from urllib.parse import urlencode
from beets import logging, util
from beets.util import (
LazySharedInstance,
displayable_path,
get_temp_filename,
syspath,
)
PROXY_URL = "https://images.weserv.nl/"
log = logging.getLogger("beets")
def resize_url(url: str, maxwidth: int, quality: int = 0) -> str:
"""Return a proxied image URL that resizes the original image to
maxwidth (preserving aspect ratio).
"""
params = {
"url": url.replace("http://", ""),
"w": maxwidth,
}
if quality > 0:
params["q"] = quality
return f"{PROXY_URL}?{urlencode(params)}"
class LocalBackendNotAvailableError(Exception):
pass
# Singleton pattern that the typechecker understands:
# https://peps.python.org/pep-0484/#support-for-singleton-types-in-unions
class NotAvailable(Enum):
token = 0
_NOT_AVAILABLE = NotAvailable.token
class LocalBackend(ABC):
NAME: ClassVar[str]
@classmethod
@abstractmethod
def version(cls) -> Any:
"""Return the backend version if its dependencies are satisfied or
raise `LocalBackendNotAvailableError`.
"""
pass
@classmethod
def available(cls) -> bool:
"""Return `True` this backend's dependencies are satisfied and it can
be used, `False` otherwise."""
try:
cls.version()
return True
except LocalBackendNotAvailableError:
return False
@abstractmethod
def resize(
self,
maxwidth: int,
path_in: bytes,
path_out: bytes | None = None,
quality: int = 0,
max_filesize: int = 0,
) -> bytes:
"""Resize an image to the given width and return the output path.
On error, logs a warning and returns `path_in`.
"""
pass
@abstractmethod
def get_size(self, path_in: bytes) -> tuple[int, int] | None:
"""Return the (width, height) of the image or None if unavailable."""
pass
@abstractmethod
def deinterlace(
self,
path_in: bytes,
path_out: bytes | None = None,
) -> bytes:
"""Remove interlacing from an image and return the output path.
On error, logs a warning and returns `path_in`.
"""
pass
@abstractmethod
def get_format(self, path_in: bytes) -> str | None:
"""Return the image format (e.g., 'PNG') or None if undetectable."""
pass
@abstractmethod
def convert_format(
self,
source: bytes,
target: bytes,
deinterlaced: bool,
) -> bytes:
"""Convert an image to a new format and return the new file path.
On error, logs a warning and returns `source`.
"""
pass
@property
def can_compare(self) -> bool:
"""Indicate whether image comparison is supported by this backend."""
return False
def compare(
self,
im1: bytes,
im2: bytes,
compare_threshold: float,
) -> bool | None:
"""Compare two images and return `True` if they are similar enough, or
`None` if there is an error.
This must only be called if `self.can_compare()` returns `True`.
"""
# It is an error to call this when ArtResizer.can_compare is not True.
raise NotImplementedError()
@property
def can_write_metadata(self) -> bool:
"""Indicate whether writing metadata to images is supported."""
return False
def write_metadata(self, file: bytes, metadata: Mapping[str, str]) -> None:
"""Write key-value metadata into the image file.
This must only be called if `self.can_write_metadata()` returns `True`.
"""
# It is an error to call this when ArtResizer.can_write_metadata is not True.
raise NotImplementedError()
class IMBackend(LocalBackend):
NAME = "ImageMagick"
# These fields are used as a cache for `version()`. `_legacy` indicates
# whether the modern `magick` binary is available or whether to fall back
# to the old-style `convert`, `identify`, etc. commands.
_version: tuple[int, int, int] | NotAvailable | None = None
_legacy: bool | None = None
@classmethod
def version(cls) -> tuple[int, int, int]:
"""Obtain and cache ImageMagick version.
Raises `LocalBackendNotAvailableError` if not available.
"""
if cls._version is None:
for cmd_name, legacy in (("magick", False), ("convert", True)):
try:
out = util.command_output([cmd_name, "--version"]).stdout
except (subprocess.CalledProcessError, OSError) as exc:
log.debug("ImageMagick version check failed: {}", exc)
cls._version = _NOT_AVAILABLE
else:
if b"imagemagick" in out.lower():
pattern = rb".+ (\d+)\.(\d+)\.(\d+).*"
match = re.search(pattern, out)
if match:
cls._version = (
int(match.group(1)),
int(match.group(2)),
int(match.group(3)),
)
cls._legacy = legacy
break
# cls._version is never None here, but mypy doesn't get that
if cls._version is _NOT_AVAILABLE or cls._version is None:
raise LocalBackendNotAvailableError()
else:
return cls._version
convert_cmd: list[str]
identify_cmd: list[str]
compare_cmd: list[str]
def __init__(self) -> None:
"""Initialize a wrapper around ImageMagick for local image operations.
Stores the ImageMagick version and legacy flag. If ImageMagick is not
available, raise an Exception.
"""
self.version()
# Use ImageMagick's magick binary when it's available.
# If it's not, fall back to the older, separate convert
# and identify commands.
if self._legacy:
self.convert_cmd = ["convert"]
self.identify_cmd = ["identify"]
self.compare_cmd = ["compare"]
else:
self.convert_cmd = ["magick"]
self.identify_cmd = ["magick", "identify"]
self.compare_cmd = ["magick", "compare"]
def resize(
self,
maxwidth: int,
path_in: bytes,
path_out: bytes | None = None,
quality: int = 0,
max_filesize: int = 0,
) -> bytes:
"""Resize using ImageMagick.
Use the ``magick`` program or ``convert`` on older versions. Return
the output path of resized image.
"""
if not path_out:
path_out = get_temp_filename(__name__, "resize_IM_", path_in)
log.debug(
"artresizer: ImageMagick resizing {} to {}",
displayable_path(path_in),
displayable_path(path_out),
)
# "-resize WIDTHx>" shrinks images with the width larger
# than the given width while maintaining the aspect ratio
# with regards to the height.
# ImageMagick already seems to default to no interlace, but we include
# it here for the sake of explicitness.
cmd: list[str] = self.convert_cmd + [
syspath(path_in, prefix=False),
"-resize",
f"{maxwidth}x>",
"-interlace",
"none",
]
if quality > 0:
cmd += ["-quality", f"{quality}"]
# "-define jpeg:extent=SIZEb" sets the target filesize for imagemagick
# to SIZE in bytes.
if max_filesize > 0:
cmd += ["-define", f"jpeg:extent={max_filesize}b"]
cmd.append(syspath(path_out, prefix=False))
try:
util.command_output(cmd)
except subprocess.CalledProcessError:
log.warning(
"artresizer: IM convert failed for {}",
displayable_path(path_in),
)
return path_in
return path_out
def get_size(self, path_in: bytes) -> tuple[int, int] | None:
cmd: list[str] = self.identify_cmd + [
"-format",
"%w %h",
syspath(path_in, prefix=False),
]
try:
out = util.command_output(cmd).stdout
except subprocess.CalledProcessError as exc:
log.warning("ImageMagick size query failed")
log.debug(
"`convert` exited with (status {.returncode}) when "
"getting size with command {}:\n{}",
exc,
cmd,
exc.output.strip(),
)
return None
try:
size = tuple(map(int, out.split(b" ")))
except ValueError:
# int() raises ValueError when the output is not numeric.
log.warning("Could not understand IM output: {0!r}", out)
return None
if len(size) != 2:
log.warning("Could not understand IM output: {0!r}", out)
return None
return size
def deinterlace(
self,
path_in: bytes,
path_out: bytes | None = None,
) -> bytes:
if not path_out:
path_out = get_temp_filename(__name__, "deinterlace_IM_", path_in)
cmd = self.convert_cmd + [
syspath(path_in, prefix=False),
"-interlace",
"none",
syspath(path_out, prefix=False),
]
try:
util.command_output(cmd)
return path_out
except subprocess.CalledProcessError:
# FIXME: Should probably issue a warning?
return path_in
def get_format(self, path_in: bytes) -> str | None:
cmd = self.identify_cmd + ["-format", "%[magick]", syspath(path_in)]
try:
# Image formats should really only be ASCII strings such as "PNG",
# if anything else is returned, something is off and we return
# None for safety.
return util.command_output(cmd).stdout.decode("ascii", "strict")
except (subprocess.CalledProcessError, UnicodeError):
# FIXME: Should probably issue a warning?
return None
def convert_format(
self,
source: bytes,
target: bytes,
deinterlaced: bool,
) -> bytes:
cmd = self.convert_cmd + [
syspath(source),
*(["-interlace", "none"] if deinterlaced else []),
syspath(target),
]
try:
subprocess.check_call(
cmd, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL
)
return target
except subprocess.CalledProcessError:
# FIXME: Should probably issue a warning?
return source
@property
def can_compare(self) -> bool:
return self.version() > (6, 8, 7)
def compare(
self,
im1: bytes,
im2: bytes,
compare_threshold: float,
) -> bool | None:
is_windows = platform.system() == "Windows"
# Converting images to grayscale tends to minimize the weight
# of colors in the diff score. So we first convert both images
# to grayscale and then pipe them into the `compare` command.
# On Windows, ImageMagick doesn't support the magic \\?\ prefix
# on paths, so we pass `prefix=False` to `syspath`.
convert_cmd = self.convert_cmd + [
syspath(im2, prefix=False),
syspath(im1, prefix=False),
"-colorspace",
"gray",
"MIFF:-",
]
compare_cmd = self.compare_cmd + [
"-define",
"phash:colorspaces=sRGB,HCLp",
"-metric",
"PHASH",
"-",
"null:",
]
log.debug(
"comparing images with pipeline {} | {}", convert_cmd, compare_cmd
)
convert_proc = subprocess.Popen(
convert_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=not is_windows,
)
compare_proc = subprocess.Popen(
compare_cmd,
stdin=convert_proc.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=not is_windows,
)
# help out mypy
assert convert_proc.stdout is not None
assert convert_proc.stderr is not None
# Check the convert output. We're not interested in the
# standard output; that gets piped to the next stage.
convert_proc.stdout.close()
convert_stderr = convert_proc.stderr.read()
convert_proc.stderr.close()
convert_proc.wait()
if convert_proc.returncode:
log.debug(
"ImageMagick convert failed with status {.returncode}: {!r}",
convert_proc,
convert_stderr,
)
return None
# Check the compare output.
stdout, stderr = compare_proc.communicate()
if compare_proc.returncode:
if compare_proc.returncode != 1:
log.debug(
"ImageMagick compare failed: {}, {}",
displayable_path(im2),
displayable_path(im1),
)
return None
out_str = stderr
else:
out_str = stdout
# ImageMagick 7.1.1-44 outputs in a different format.
if b"(" in out_str and out_str.endswith(b")"):
# Extract diff from "... (diff)".
out_str = out_str[out_str.index(b"(") + 1 : -1]
try:
phash_diff = float(out_str)
except ValueError:
log.debug("IM output is not a number: {0!r}", out_str)
return None
log.debug("ImageMagick compare score: {}", phash_diff)
return phash_diff <= compare_threshold
@property
def can_write_metadata(self) -> bool:
return True
def write_metadata(self, file: bytes, metadata: Mapping[str, str]) -> None:
assignments = chain.from_iterable(
("-set", k, v) for k, v in metadata.items()
)
str_file = os.fsdecode(file)
command = self.convert_cmd + [str_file, *assignments, str_file]
util.command_output(command)
class PILBackend(LocalBackend):
NAME = "PIL"
@classmethod
def version(cls) -> None:
try:
__import__("PIL", fromlist=["Image"])
except ImportError:
raise LocalBackendNotAvailableError()
def __init__(self) -> None:
"""Initialize a wrapper around PIL for local image operations.
If PIL is not available, raise an Exception.
"""
self.version()
def resize(
self,
maxwidth: int,
path_in: bytes,
path_out: bytes | None = None,
quality: int = 0,
max_filesize: int = 0,
) -> bytes:
"""Resize using Python Imaging Library (PIL). Return the output path
of resized image.
"""
if not path_out:
path_out = get_temp_filename(__name__, "resize_PIL_", path_in)
from PIL import Image
log.debug(
"artresizer: PIL resizing {} to {}",
displayable_path(path_in),
displayable_path(path_out),
)
try:
im = Image.open(syspath(path_in))
size = maxwidth, maxwidth
im.thumbnail(size, Image.Resampling.LANCZOS)
if quality == 0:
# Use PIL's default quality.
quality = -1
# progressive=False only affects JPEGs and is the default,
# but we include it here for explicitness.
im.save(os.fsdecode(path_out), quality=quality, progressive=False)
if max_filesize > 0:
# If maximum filesize is set, we attempt to lower the quality of
# jpeg conversion by a proportional amount, making up to 5 attempts.
# First, set the starting quality to the provided value, or 95.
if quality > 0:
lower_qual = quality
else:
lower_qual = 95
for i in range(5):
# 5 attempts is an arbitrary choice
filesize = os.stat(syspath(path_out)).st_size
log.debug("PIL Pass {} : Output size: {}B", i, filesize)
if filesize <= max_filesize:
return path_out
# The relationship between filesize & quality will be
# image dependent.
lower_qual -= 10
# Restrict quality dropping below 10
if lower_qual < 10:
lower_qual = 10
# Use optimize flag to improve filesize decrease
im.save(
os.fsdecode(path_out),
quality=lower_qual,
optimize=True,
progressive=False,
)
log.warning(
"PIL Failed to resize file to below {}B", max_filesize
)
return path_out
else:
return path_out
except OSError:
log.error(
"PIL cannot create thumbnail for '{}'",
displayable_path(path_in),
)
return path_in
def get_size(self, path_in: bytes) -> tuple[int, int] | None:
from PIL import Image
try:
im = Image.open(syspath(path_in))
return im.size
except OSError as exc:
log.error(
"PIL could not read file {}: {}", displayable_path(path_in), exc
)
return None
def deinterlace(
self,
path_in: bytes,
path_out: bytes | None = None,
) -> bytes:
if not path_out:
path_out = get_temp_filename(__name__, "deinterlace_PIL_", path_in)
from PIL import Image
try:
im = Image.open(syspath(path_in))
im.save(os.fsdecode(path_out), progressive=False)
return path_out
except OSError:
# FIXME: Should probably issue a warning?
return path_in
def get_format(self, path_in: bytes) -> str | None:
from PIL import Image, UnidentifiedImageError
try:
with Image.open(syspath(path_in)) as im:
return im.format
except (
ValueError,
TypeError,
UnidentifiedImageError,
FileNotFoundError,
):
log.exception("failed to detect image format for {}", path_in)
return None
def convert_format(
self,
source: bytes,
target: bytes,
deinterlaced: bool,
) -> bytes:
from PIL import Image, UnidentifiedImageError
try:
with Image.open(syspath(source)) as im:
im.save(os.fsdecode(target), progressive=not deinterlaced)
return target
except (
ValueError,
TypeError,
UnidentifiedImageError,
FileNotFoundError,
OSError,
):
log.exception("failed to convert image {} -> {}", source, target)
return source
@property
def can_compare(self) -> bool:
return False
def compare(
self,
im1: bytes,
im2: bytes,
compare_threshold: float,
) -> bool | None:
# It is an error to call this when ArtResizer.can_compare is not True.
raise NotImplementedError()
@property
def can_write_metadata(self) -> bool:
return True
def write_metadata(self, file: bytes, metadata: Mapping[str, str]) -> None:
from PIL import Image, PngImagePlugin
# FIXME: Detect and handle other file types (currently, the only user
# is the thumbnails plugin, which generates PNG images).
im = Image.open(syspath(file))
meta = PngImagePlugin.PngInfo()
for k, v in metadata.items():
meta.add_text(k, v, zip=False)
im.save(os.fsdecode(file), "PNG", pnginfo=meta)
BACKEND_CLASSES: list[type[LocalBackend]] = [
IMBackend,
PILBackend,
]
class ArtResizer:
"""A class that dispatches image operations to an available backend."""
local_method: LocalBackend | None
def __init__(self) -> None:
"""Create a resizer object with an inferred method."""
# Check if a local backend is available, and store an instance of the
# backend class. Otherwise, fallback to the web proxy.
for backend_cls in BACKEND_CLASSES:
try:
self.local_method = backend_cls()
log.debug("artresizer: method is {.local_method.NAME}", self)
break
except LocalBackendNotAvailableError:
continue
else:
# FIXME: Turn WEBPROXY into a backend class as well to remove all
# the special casing. Then simply delegate all methods to the
# backends. (How does proxy_url fit in here, however?)
# Use an ABC (or maybe a typing Protocol?) for backend
# methods, such that both individual backends as well as
# ArtResizer implement it.
# It should probably be configurable which backends classes to
# consider, similar to fetchart or lyrics backends (i.e. a list
# of backends sorted by priority).
log.debug("artresizer: method is WEBPROXY")
self.local_method = None
shared: LazySharedInstance[ArtResizer] = LazySharedInstance()
@property
def method(self) -> str:
if self.local_method is not None:
return self.local_method.NAME
else:
return "WEBPROXY"
def resize(
self,
maxwidth: int,
path_in: bytes,
path_out: bytes | None = None,
quality: int = 0,
max_filesize: int = 0,
) -> bytes:
"""Manipulate an image file according to the method, returning a
new path. For the PIL and ImageMagick methods, resizes the image to a
temporary file and encodes with the specified quality level.
For WEBPROXY, returns `path_in` unmodified.
"""
if self.local_method is not None:
return self.local_method.resize(
maxwidth,
path_in,
path_out,
quality=quality,
max_filesize=max_filesize,
)
else:
# Handled by `proxy_url` already.
return path_in
def deinterlace(
self,
path_in: bytes,
path_out: bytes | None = None,
) -> bytes:
"""Deinterlace an image.
Only available locally.
"""
if self.local_method is not None:
return self.local_method.deinterlace(path_in, path_out)
else:
# FIXME: Should probably issue a warning?
return path_in
def proxy_url(self, maxwidth: int, url: str, quality: int = 0) -> str:
"""Modifies an image URL according the method, returning a new
URL. For WEBPROXY, a URL on the proxy server is returned.
Otherwise, the URL is returned unmodified.
"""
if self.local:
# Going to be handled by `resize()`.
return url
else:
return resize_url(url, maxwidth, quality)
@property
def local(self) -> bool:
"""A boolean indicating whether the resizing method is performed
locally (i.e., PIL or ImageMagick).
"""
return self.local_method is not None
def get_size(self, path_in: bytes) -> tuple[int, int] | None:
"""Return the size of an image file as an int couple (width, height)
in pixels.
Only available locally.
"""
if self.local_method is not None:
return self.local_method.get_size(path_in)
else:
raise RuntimeError(
"image cannot be obtained without artresizer backend"
)
def get_format(self, path_in: bytes) -> str | None:
"""Returns the format of the image as a string.
Only available locally.
"""
if self.local_method is not None:
return self.local_method.get_format(path_in)
else:
# FIXME: Should probably issue a warning?
return None
def reformat(
self,
path_in: bytes,
new_format: str,
deinterlaced: bool = True,
) -> bytes:
"""Converts image to desired format, updating its extension, but
keeping the same filename.
Only available locally.
"""
if self.local_method is None:
# FIXME: Should probably issue a warning?
return path_in
new_format = new_format.lower()
# A non-exhaustive map of image "types" to extension overrides
new_format = {
"jpeg": "jpg",
}.get(new_format, new_format)
fname, ext = os.path.splitext(path_in)
path_new = fname + b"." + new_format.encode("utf8")
# Allow the exception to propagate, but make sure the original file is
# removed when the conversion produced a new path.
result_path = path_in
try:
result_path = self.local_method.convert_format(
path_in, path_new, deinterlaced
)
finally:
if result_path != path_in:
os.unlink(path_in)
return result_path
@property
def can_compare(self) -> bool:
"""A boolean indicating whether image comparison is available"""
if self.local_method is not None:
return self.local_method.can_compare
else:
return False
def compare(
self,
im1: bytes,
im2: bytes,
compare_threshold: float,
) -> bool | None:
"""Return a boolean indicating whether two images are similar.
Only available locally.
"""
if self.local_method is not None:
return self.local_method.compare(im1, im2, compare_threshold)
else:
# FIXME: Should probably issue a warning?
return None
@property
def can_write_metadata(self) -> bool:
"""A boolean indicating whether writing image metadata is supported."""
if self.local_method is not None:
return self.local_method.can_write_metadata
else:
return False
def write_metadata(self, file: bytes, metadata: Mapping[str, str]) -> None:
"""Write key-value metadata to the image file.
Only available locally. Currently, expects the image to be a PNG file.
"""
if self.local_method is not None:
self.local_method.write_metadata(file, metadata)
else:
# FIXME: Should probably issue a warning?
pass
beetbox-beets-c1877b7/beets/util/bluelet.py 0000664 0000000 0000000 00000046721 15073551743 0020717 0 ustar 00root root 0000000 0000000 """Extremely simple pure-Python implementation of coroutine-style
asynchronous socket I/O. Inspired by, but inferior to, Eventlet.
Bluelet can also be thought of as a less-terrible replacement for
asyncore.
Bluelet: easy concurrency without all the messy parallelism.
"""
import collections
import errno
import select
import socket
import sys
import time
import traceback
import types
# Basic events used for thread scheduling.
class Event:
"""Just a base class identifying Bluelet events. An event is an
object yielded from a Bluelet thread coroutine to suspend operation
and communicate with the scheduler.
"""
pass
class WaitableEvent(Event):
"""A waitable event is one encapsulating an action that can be
waited for using a select() call. That is, it's an event with an
associated file descriptor.
"""
def waitables(self):
"""Return "waitable" objects to pass to select(). Should return
three iterables for input readiness, output readiness, and
exceptional conditions (i.e., the three lists passed to
select()).
"""
return (), (), ()
def fire(self):
"""Called when an associated file descriptor becomes ready
(i.e., is returned from a select() call).
"""
pass
class ValueEvent(Event):
"""An event that does nothing but return a fixed value."""
def __init__(self, value):
self.value = value
class ExceptionEvent(Event):
"""Raise an exception at the yield point. Used internally."""
def __init__(self, exc_info):
self.exc_info = exc_info
class SpawnEvent(Event):
"""Add a new coroutine thread to the scheduler."""
def __init__(self, coro):
self.spawned = coro
class JoinEvent(Event):
"""Suspend the thread until the specified child thread has
completed.
"""
def __init__(self, child):
self.child = child
class KillEvent(Event):
"""Unschedule a child thread."""
def __init__(self, child):
self.child = child
class DelegationEvent(Event):
"""Suspend execution of the current thread, start a new thread and,
once the child thread finished, return control to the parent
thread.
"""
def __init__(self, coro):
self.spawned = coro
class ReturnEvent(Event):
"""Return a value the current thread's delegator at the point of
delegation. Ends the current (delegate) thread.
"""
def __init__(self, value):
self.value = value
class SleepEvent(WaitableEvent):
"""Suspend the thread for a given duration."""
def __init__(self, duration):
self.wakeup_time = time.time() + duration
def time_left(self):
return max(self.wakeup_time - time.time(), 0.0)
class ReadEvent(WaitableEvent):
"""Reads from a file-like object."""
def __init__(self, fd, bufsize):
self.fd = fd
self.bufsize = bufsize
def waitables(self):
return (self.fd,), (), ()
def fire(self):
return self.fd.read(self.bufsize)
class WriteEvent(WaitableEvent):
"""Writes to a file-like object."""
def __init__(self, fd, data):
self.fd = fd
self.data = data
def waitables(self):
return (), (self.fd,), ()
def fire(self):
self.fd.write(self.data)
# Core logic for executing and scheduling threads.
def _event_select(events):
"""Perform a select() over all the Events provided, returning the
ones ready to be fired. Only WaitableEvents (including SleepEvents)
matter here; all other events are ignored (and thus postponed).
"""
# Gather waitables and wakeup times.
waitable_to_event = {}
rlist, wlist, xlist = [], [], []
earliest_wakeup = None
for event in events:
if isinstance(event, SleepEvent):
if not earliest_wakeup:
earliest_wakeup = event.wakeup_time
else:
earliest_wakeup = min(earliest_wakeup, event.wakeup_time)
elif isinstance(event, WaitableEvent):
r, w, x = event.waitables()
rlist += r
wlist += w
xlist += x
for waitable in r:
waitable_to_event[("r", waitable)] = event
for waitable in w:
waitable_to_event[("w", waitable)] = event
for waitable in x:
waitable_to_event[("x", waitable)] = event
# If we have any sleeping threads, determine how long to sleep.
if earliest_wakeup:
timeout = max(earliest_wakeup - time.time(), 0.0)
else:
timeout = None
# Perform select() if we have any waitables.
if rlist or wlist or xlist:
rready, wready, xready = select.select(rlist, wlist, xlist, timeout)
else:
rready, wready, xready = (), (), ()
if timeout:
time.sleep(timeout)
# Gather ready events corresponding to the ready waitables.
ready_events = set()
for ready in rready:
ready_events.add(waitable_to_event[("r", ready)])
for ready in wready:
ready_events.add(waitable_to_event[("w", ready)])
for ready in xready:
ready_events.add(waitable_to_event[("x", ready)])
# Gather any finished sleeps.
for event in events:
if isinstance(event, SleepEvent) and event.time_left() == 0.0:
ready_events.add(event)
return ready_events
class ThreadError(Exception):
def __init__(self, coro, exc_info):
self.coro = coro
self.exc_info = exc_info
def reraise(self):
raise self.exc_info[1].with_traceback(self.exc_info[2])
SUSPENDED = Event() # Special sentinel placeholder for suspended threads.
class Delegated(Event):
"""Placeholder indicating that a thread has delegated execution to a
different thread.
"""
def __init__(self, child):
self.child = child
def run(root_coro):
"""Schedules a coroutine, running it to completion. This
encapsulates the Bluelet scheduler, which the root coroutine can
add to by spawning new coroutines.
"""
# The "threads" dictionary keeps track of all the currently-
# executing and suspended coroutines. It maps coroutines to their
# currently "blocking" event. The event value may be SUSPENDED if
# the coroutine is waiting on some other condition: namely, a
# delegated coroutine or a joined coroutine. In this case, the
# coroutine should *also* appear as a value in one of the below
# dictionaries `delegators` or `joiners`.
threads = {root_coro: ValueEvent(None)}
# Maps child coroutines to delegating parents.
delegators = {}
# Maps child coroutines to joining (exit-waiting) parents.
joiners = collections.defaultdict(list)
def complete_thread(coro, return_value):
"""Remove a coroutine from the scheduling pool, awaking
delegators and joiners as necessary and returning the specified
value to any delegating parent.
"""
del threads[coro]
# Resume delegator.
if coro in delegators:
threads[delegators[coro]] = ValueEvent(return_value)
del delegators[coro]
# Resume joiners.
if coro in joiners:
for parent in joiners[coro]:
threads[parent] = ValueEvent(None)
del joiners[coro]
def advance_thread(coro, value, is_exc=False):
"""After an event is fired, run a given coroutine associated with
it in the threads dict until it yields again. If the coroutine
exits, then the thread is removed from the pool. If the coroutine
raises an exception, it is reraised in a ThreadError. If
is_exc is True, then the value must be an exc_info tuple and the
exception is thrown into the coroutine.
"""
try:
if is_exc:
next_event = coro.throw(*value)
else:
next_event = coro.send(value)
except StopIteration:
# Thread is done.
complete_thread(coro, None)
except BaseException:
# Thread raised some other exception.
del threads[coro]
raise ThreadError(coro, sys.exc_info())
else:
if isinstance(next_event, types.GeneratorType):
# Automatically invoke sub-coroutines. (Shorthand for
# explicit bluelet.call().)
next_event = DelegationEvent(next_event)
threads[coro] = next_event
def kill_thread(coro):
"""Unschedule this thread and its (recursive) delegates."""
# Collect all coroutines in the delegation stack.
coros = [coro]
while isinstance(threads[coro], Delegated):
coro = threads[coro].child
coros.append(coro)
# Complete each coroutine from the top to the bottom of the
# stack.
for coro in reversed(coros):
complete_thread(coro, None)
# Continue advancing threads until root thread exits.
exit_te = None
while threads:
try:
# Look for events that can be run immediately. Continue
# running immediate events until nothing is ready.
while True:
have_ready = False
for coro, event in list(threads.items()):
if isinstance(event, SpawnEvent):
threads[event.spawned] = ValueEvent(None) # Spawn.
advance_thread(coro, None)
have_ready = True
elif isinstance(event, ValueEvent):
advance_thread(coro, event.value)
have_ready = True
elif isinstance(event, ExceptionEvent):
advance_thread(coro, event.exc_info, True)
have_ready = True
elif isinstance(event, DelegationEvent):
threads[coro] = Delegated(event.spawned) # Suspend.
threads[event.spawned] = ValueEvent(None) # Spawn.
delegators[event.spawned] = coro
have_ready = True
elif isinstance(event, ReturnEvent):
# Thread is done.
complete_thread(coro, event.value)
have_ready = True
elif isinstance(event, JoinEvent):
threads[coro] = SUSPENDED # Suspend.
joiners[event.child].append(coro)
have_ready = True
elif isinstance(event, KillEvent):
threads[coro] = ValueEvent(None)
kill_thread(event.child)
have_ready = True
# Only start the select when nothing else is ready.
if not have_ready:
break
# Wait and fire.
event2coro = {v: k for k, v in threads.items()}
for event in _event_select(threads.values()):
# Run the IO operation, but catch socket errors.
try:
value = event.fire()
except OSError as exc:
if (
isinstance(exc.args, tuple)
and exc.args[0] == errno.EPIPE
):
# Broken pipe. Remote host disconnected.
pass
elif (
isinstance(exc.args, tuple)
and exc.args[0] == errno.ECONNRESET
):
# Connection was reset by peer.
pass
else:
traceback.print_exc()
# Abort the coroutine.
threads[event2coro[event]] = ReturnEvent(None)
else:
advance_thread(event2coro[event], value)
except ThreadError as te:
# Exception raised from inside a thread.
event = ExceptionEvent(te.exc_info)
if te.coro in delegators:
# The thread is a delegate. Raise exception in its
# delegator.
threads[delegators[te.coro]] = event
del delegators[te.coro]
else:
# The thread is root-level. Raise in client code.
exit_te = te
break
except BaseException:
# For instance, KeyboardInterrupt during select(). Raise
# into root thread and terminate others.
threads = {root_coro: ExceptionEvent(sys.exc_info())}
# If any threads still remain, kill them.
for coro in threads:
coro.close()
# If we're exiting with an exception, raise it in the client.
if exit_te:
exit_te.reraise()
# Sockets and their associated events.
class SocketClosedError(Exception):
pass
class Listener:
"""A socket wrapper object for listening sockets."""
def __init__(self, host, port):
"""Create a listening socket on the given hostname and port."""
self._closed = False
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((host, port))
self.sock.listen(5)
def accept(self):
"""An event that waits for a connection on the listening socket.
When a connection is made, the event returns a Connection
object.
"""
if self._closed:
raise SocketClosedError()
return AcceptEvent(self)
def close(self):
"""Immediately close the listening socket. (Not an event.)"""
self._closed = True
self.sock.close()
class Connection:
"""A socket wrapper object for connected sockets."""
def __init__(self, sock, addr):
self.sock = sock
self.addr = addr
self._buf = b""
self._closed = False
def close(self):
"""Close the connection."""
self._closed = True
self.sock.close()
def recv(self, size):
"""Read at most size bytes of data from the socket."""
if self._closed:
raise SocketClosedError()
if self._buf:
# We already have data read previously.
out = self._buf[:size]
self._buf = self._buf[size:]
return ValueEvent(out)
else:
return ReceiveEvent(self, size)
def send(self, data):
"""Sends data on the socket, returning the number of bytes
successfully sent.
"""
if self._closed:
raise SocketClosedError()
return SendEvent(self, data)
def sendall(self, data):
"""Send all of data on the socket."""
if self._closed:
raise SocketClosedError()
return SendEvent(self, data, True)
def readline(self, terminator=b"\n", bufsize=1024):
"""Reads a line (delimited by terminator) from the socket."""
if self._closed:
raise SocketClosedError()
while True:
if terminator in self._buf:
line, self._buf = self._buf.split(terminator, 1)
line += terminator
yield ReturnEvent(line)
break
data = yield ReceiveEvent(self, bufsize)
if data:
self._buf += data
else:
line = self._buf
self._buf = b""
yield ReturnEvent(line)
break
class AcceptEvent(WaitableEvent):
"""An event for Listener objects (listening sockets) that suspends
execution until the socket gets a connection.
"""
def __init__(self, listener):
self.listener = listener
def waitables(self):
return (self.listener.sock,), (), ()
def fire(self):
sock, addr = self.listener.sock.accept()
return Connection(sock, addr)
class ReceiveEvent(WaitableEvent):
"""An event for Connection objects (connected sockets) for
asynchronously reading data.
"""
def __init__(self, conn, bufsize):
self.conn = conn
self.bufsize = bufsize
def waitables(self):
return (self.conn.sock,), (), ()
def fire(self):
return self.conn.sock.recv(self.bufsize)
class SendEvent(WaitableEvent):
"""An event for Connection objects (connected sockets) for
asynchronously writing data.
"""
def __init__(self, conn, data, sendall=False):
self.conn = conn
self.data = data
self.sendall = sendall
def waitables(self):
return (), (self.conn.sock,), ()
def fire(self):
if self.sendall:
return self.conn.sock.sendall(self.data)
else:
return self.conn.sock.send(self.data)
# Public interface for threads; each returns an event object that
# can immediately be "yield"ed.
def null():
"""Event: yield to the scheduler without doing anything special."""
return ValueEvent(None)
def spawn(coro):
"""Event: add another coroutine to the scheduler. Both the parent
and child coroutines run concurrently.
"""
if not isinstance(coro, types.GeneratorType):
raise ValueError(f"{coro} is not a coroutine")
return SpawnEvent(coro)
def call(coro):
"""Event: delegate to another coroutine. The current coroutine
is resumed once the sub-coroutine finishes. If the sub-coroutine
returns a value using end(), then this event returns that value.
"""
if not isinstance(coro, types.GeneratorType):
raise ValueError(f"{coro} is not a coroutine")
return DelegationEvent(coro)
def end(value=None):
"""Event: ends the coroutine and returns a value to its
delegator.
"""
return ReturnEvent(value)
def read(fd, bufsize=None):
"""Event: read from a file descriptor asynchronously."""
if bufsize is None:
# Read all.
def reader():
buf = []
while True:
data = yield read(fd, 1024)
if not data:
break
buf.append(data)
yield ReturnEvent("".join(buf))
return DelegationEvent(reader())
else:
return ReadEvent(fd, bufsize)
def write(fd, data):
"""Event: write to a file descriptor asynchronously."""
return WriteEvent(fd, data)
def connect(host, port):
"""Event: connect to a network address and return a Connection
object for communicating on the socket.
"""
addr = (host, port)
sock = socket.create_connection(addr)
return ValueEvent(Connection(sock, addr))
def sleep(duration):
"""Event: suspend the thread for ``duration`` seconds."""
return SleepEvent(duration)
def join(coro):
"""Suspend the thread until another, previously `spawn`ed thread
completes.
"""
return JoinEvent(coro)
def kill(coro):
"""Halt the execution of a different `spawn`ed thread."""
return KillEvent(coro)
# Convenience function for running socket servers.
def server(host, port, func):
"""A coroutine that runs a network server. Host and port specify the
listening address. func should be a coroutine that takes a single
parameter, a Connection object. The coroutine is invoked for every
incoming connection on the listening socket.
"""
def handler(conn):
try:
yield func(conn)
finally:
conn.close()
listener = Listener(host, port)
try:
while True:
conn = yield listener.accept()
yield spawn(handler(conn))
except KeyboardInterrupt:
pass
finally:
listener.close()
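# Illustrative echo server built on the primitives above (hypothetical
# coroutine and port number; everything else is this module's public
# interface):
#
#     def echoer(conn):
#         while True:
#             data = yield conn.recv(1024)
#             if not data:
#                 break
#             yield conn.sendall(data)
#
#     run(server("127.0.0.1", 4915, echoer))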
beetbox-beets-c1877b7/beets/util/config.py 0000664 0000000 0000000 00000004302 15073551743 0020515 0 ustar 00root root 0000000 0000000 from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Collection, Sequence
def sanitize_choices(
choices: Sequence[str], choices_all: Collection[str]
) -> list[str]:
"""Clean up a stringlist configuration attribute: keep only choices
elements present in choices_all, remove duplicate elements, expand '*'
wildcard while keeping original stringlist order.
"""
seen: set[str] = set()
others = [x for x in choices_all if x not in choices]
res: list[str] = []
for s in choices:
if s not in seen:
if s in list(choices_all):
res.append(s)
elif s == "*":
res.extend(others)
seen.add(s)
return res
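# For example, the '*' wildcard expands to the not-yet-listed choices and
# duplicates are dropped:
# >>> sanitize_choices(["a", "*", "a"], ["a", "b", "c"])
# ['a', 'b', 'c']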
def sanitize_pairs(
pairs: Sequence[tuple[str, str]], pairs_all: Sequence[tuple[str, str]]
) -> list[tuple[str, str]]:
"""Clean up a single-element mapping configuration attribute as returned
by Confuse's `Pairs` template: keep only two-element tuples present in
pairs_all, remove duplicate elements, expand ('str', '*') and ('*', '*')
wildcards while keeping the original order. Note that ('*', '*') and
('*', 'whatever') have the same effect.
For example,
>>> sanitize_pairs(
... [('foo', 'baz bar'), ('key', '*'), ('*', '*')],
... [('foo', 'bar'), ('foo', 'baz'), ('foo', 'foobar'),
... ('key', 'value')]
... )
[('foo', 'baz'), ('foo', 'bar'), ('key', 'value'), ('foo', 'foobar')]
"""
pairs_all = list(pairs_all)
seen: set[tuple[str, str]] = set()
others = [x for x in pairs_all if x not in pairs]
res: list[tuple[str, str]] = []
for k, values in pairs:
for v in values.split():
x = (k, v)
if x in pairs_all:
if x not in seen:
seen.add(x)
res.append(x)
elif k == "*":
new = [o for o in others if o not in seen]
seen.update(new)
res.extend(new)
elif v == "*":
new = [o for o in others if o not in seen and o[0] == k]
seen.update(new)
res.extend(new)
return res
beetbox-beets-c1877b7/beets/util/functemplate.py 0000664 0000000 0000000 00000045661 15073551743 0021754 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module implements a string formatter based on the standard PEP
292 string.Template class extended with function calls. Variables, as
with string.Template, are indicated with $ and functions are delimited
with %.
This module assumes that everything is Unicode: the template and the
substitution values. Bytestrings are not supported. Also, the templates
always behave like the ``safe_substitute`` method in the standard
library: unknown symbols are left intact.
This is sort of like a tiny, horrible degeneration of a real templating
engine like Jinja2 or Mustache.
"""
import ast
import dis
import functools
import re
import types
SYMBOL_DELIM = "$"
FUNC_DELIM = "%"
GROUP_OPEN = "{"
GROUP_CLOSE = "}"
ARG_SEP = ","
ESCAPE_CHAR = "$"
VARIABLE_PREFIX = "__var_"
FUNCTION_PREFIX = "__func_"
class Environment:
"""Contains the values and functions to be substituted into a
template.
"""
def __init__(self, values, functions):
self.values = values
self.functions = functions
# Code generation helpers.
def ex_rvalue(name):
"""A variable store expression."""
return ast.Name(name, ast.Load())
def ex_literal(val):
"""An int, float, long, bool, string, or None literal with the given
value.
"""
return ast.Constant(val)
def ex_call(func, args):
"""A function-call expression with only positional parameters. The
function may be an expression or the name of a function. Each
argument may be an expression or a value to be used as a literal.
"""
if isinstance(func, str):
func = ex_rvalue(func)
args = list(args)
for i in range(len(args)):
if not isinstance(args[i], ast.expr):
args[i] = ex_literal(args[i])
return ast.Call(func, args, [])
def compile_func(arg_names, statements, name="_the_func", debug=False):
"""Compile a list of statements as the body of a function and return
the resulting Python function. If `debug`, then print out the
bytecode of the compiled function.
"""
args_fields = {
"args": [ast.arg(arg=n, annotation=None) for n in arg_names],
"kwonlyargs": [],
"kw_defaults": [],
"defaults": [ex_literal(None) for _ in arg_names],
}
args_fields["posonlyargs"] = []
args = ast.arguments(**args_fields)
func_def = ast.FunctionDef(
name=name,
args=args,
body=statements,
decorator_list=[],
)
# The ast.Module signature changed in 3.8 to accept a list of types to
# ignore.
mod = ast.Module([func_def], [])
ast.fix_missing_locations(mod)
prog = compile(mod, "", "exec")
# Debug: show bytecode.
if debug:
dis.dis(prog)
for const in prog.co_consts:
if isinstance(const, types.CodeType):
dis.dis(const)
the_locals = {}
exec(prog, {}, the_locals)
return the_locals[name]
# AST nodes for the template language.
class Symbol:
"""A variable-substitution symbol in a template."""
def __init__(self, ident, original):
self.ident = ident
self.original = original
def __repr__(self):
return f"Symbol({self.ident!r})"
def evaluate(self, env):
"""Evaluate the symbol in the environment, returning a Unicode
string.
"""
if self.ident in env.values:
# Substitute for a value.
return env.values[self.ident]
else:
# Keep original text.
return self.original
def translate(self):
"""Compile the variable lookup."""
ident = self.ident
expr = ex_rvalue(f"{VARIABLE_PREFIX}{ident}")
return [expr], {ident}, set()
class Call:
"""A function call in a template."""
def __init__(self, ident, args, original):
self.ident = ident
self.args = args
self.original = original
def __repr__(self):
return f"Call({self.ident!r}, {self.args!r}, {self.original!r})"
def evaluate(self, env):
"""Evaluate the function call in the environment, returning a
Unicode string.
"""
if self.ident in env.functions:
arg_vals = [expr.evaluate(env) for expr in self.args]
try:
out = env.functions[self.ident](*arg_vals)
except Exception as exc:
# Function raised exception! Maybe inlining the name of
# the exception will help debug.
return f"<{exc}>"
return str(out)
else:
return self.original
def translate(self):
"""Compile the function call."""
varnames = set()
funcnames = {self.ident}
arg_exprs = []
for arg in self.args:
subexprs, subvars, subfuncs = arg.translate()
varnames.update(subvars)
funcnames.update(subfuncs)
# Create a subexpression that joins the result components of
# the arguments.
arg_exprs.append(
ex_call(
ast.Attribute(ex_literal(""), "join", ast.Load()),
[
ex_call(
"map",
[
ex_rvalue(str.__name__),
ast.List(subexprs, ast.Load()),
],
)
],
)
)
subexpr_call = ex_call(f"{FUNCTION_PREFIX}{self.ident}", arg_exprs)
return [subexpr_call], varnames, funcnames
class Expression:
"""Top-level template construct: contains a list of text blobs,
Symbols, and Calls.
"""
def __init__(self, parts):
self.parts = parts
def __repr__(self):
return f"Expression({self.parts!r})"
def evaluate(self, env):
"""Evaluate the entire expression in the environment, returning
a Unicode string.
"""
out = []
for part in self.parts:
if isinstance(part, str):
out.append(part)
else:
out.append(part.evaluate(env))
return "".join(map(str, out))
def translate(self):
"""Compile the expression to a list of Python AST expressions, a
set of variable names used, and a set of function names.
"""
expressions = []
varnames = set()
funcnames = set()
for part in self.parts:
if isinstance(part, str):
expressions.append(ex_literal(part))
else:
e, v, f = part.translate()
expressions.extend(e)
varnames.update(v)
funcnames.update(f)
return expressions, varnames, funcnames
# Parser.
class ParseError(Exception):
pass
class Parser:
"""Parses a template expression string. Instantiate the class with
the template source and call ``parse_expression``. The ``pos`` field
will indicate the character after the expression finished and
``parts`` will contain a list of Unicode strings, Symbols, and Calls
reflecting the concatenated portions of the expression.
This is a terrible, ad-hoc parser implementation based on a
left-to-right scan with no lexing step to speak of; it's probably
both inefficient and incorrect. Maybe this should eventually be
replaced with a real, accepted parsing technique (PEG, parser
generator, etc.).
"""
def __init__(self, string, in_argument=False):
"""Create a new parser.
:param in_argument: boolean that indicates the parser is to be
used for parsing function arguments, i.e. considering commas
(`ARG_SEP`) a special character
"""
self.string = string
self.in_argument = in_argument
self.pos = 0
self.parts = []
# Common parsing resources.
special_chars = (
SYMBOL_DELIM,
FUNC_DELIM,
GROUP_OPEN,
GROUP_CLOSE,
ESCAPE_CHAR,
)
escapable_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP)
terminator_chars = (GROUP_CLOSE,)
def parse_expression(self):
"""Parse a template expression starting at ``pos``. Resulting
components (Unicode strings, Symbols, and Calls) are added to
the ``parts`` field, a list. The ``pos`` field is updated to be
the next character after the expression.
"""
# Append comma (ARG_SEP) to the list of special characters only when
# parsing function arguments.
extra_special_chars = (ARG_SEP,) if self.in_argument else ()
special_chars = (*self.special_chars, *extra_special_chars)
special_char_re = re.compile(
rf"[{''.join(map(re.escape, special_chars))}]|\Z"
)
text_parts = []
while self.pos < len(self.string):
char = self.string[self.pos]
if char not in special_chars:
# A non-special character. Skip to the next special
# character, treating the interstice as literal text.
next_pos = (
special_char_re.search(self.string[self.pos :]).start()
+ self.pos
)
text_parts.append(self.string[self.pos : next_pos])
self.pos = next_pos
continue
if self.pos == len(self.string) - 1:
# The last character can never begin a structure, so we
# just interpret it as a literal character (unless it
# terminates the expression, as with , and }).
if char not in self.terminator_chars + extra_special_chars:
text_parts.append(char)
self.pos += 1
break
next_char = self.string[self.pos + 1]
if char == ESCAPE_CHAR and next_char in (
self.escapable_chars + extra_special_chars
):
# An escaped special character ($$, $}, etc.). Note that
# ${ is not an escape sequence: this is ambiguous with
# the start of a symbol and it's not necessary (just
# using { suffices in all cases).
text_parts.append(next_char)
self.pos += 2 # Skip the next character.
continue
# Shift all characters collected so far into a single string.
if text_parts:
self.parts.append("".join(text_parts))
text_parts = []
if char == SYMBOL_DELIM:
# Parse a symbol.
self.parse_symbol()
elif char == FUNC_DELIM:
# Parse a function call.
self.parse_call()
elif char in self.terminator_chars + extra_special_chars:
# Template terminated.
break
elif char == GROUP_OPEN:
# Start of a group has no meaning here; just pass
# through the character.
text_parts.append(char)
self.pos += 1
else:
assert False
# If any parsed characters remain, shift them into a string.
if text_parts:
self.parts.append("".join(text_parts))
def parse_symbol(self):
"""Parse a variable reference (like ``$foo`` or ``${foo}``)
starting at ``pos``. Possibly appends a Symbol object (or,
failing that, text) to the ``parts`` field and updates ``pos``.
The character at ``pos`` must, as a precondition, be ``$``.
"""
assert self.pos < len(self.string)
assert self.string[self.pos] == SYMBOL_DELIM
if self.pos == len(self.string) - 1:
# Last character.
self.parts.append(SYMBOL_DELIM)
self.pos += 1
return
next_char = self.string[self.pos + 1]
start_pos = self.pos
self.pos += 1
if next_char == GROUP_OPEN:
# A symbol like ${this}.
self.pos += 1 # Skip opening.
closer = self.string.find(GROUP_CLOSE, self.pos)
if closer == -1 or closer == self.pos:
# No closing brace found or identifier is empty.
self.parts.append(self.string[start_pos : self.pos])
else:
# Closer found.
ident = self.string[self.pos : closer]
self.pos = closer + 1
self.parts.append(
Symbol(ident, self.string[start_pos : self.pos])
)
else:
# A bare-word symbol.
ident = self._parse_ident()
if ident:
# Found a real symbol.
self.parts.append(
Symbol(ident, self.string[start_pos : self.pos])
)
else:
# A standalone $.
self.parts.append(SYMBOL_DELIM)
def parse_call(self):
"""Parse a function call (like ``%foo{bar,baz}``) starting at
``pos``. Possibly appends a Call object to ``parts`` and update
``pos``. The character at ``pos`` must be ``%``.
"""
assert self.pos < len(self.string)
assert self.string[self.pos] == FUNC_DELIM
start_pos = self.pos
self.pos += 1
ident = self._parse_ident()
if not ident:
# No function name.
self.parts.append(FUNC_DELIM)
return
if self.pos >= len(self.string):
# Identifier terminates string.
self.parts.append(self.string[start_pos : self.pos])
return
if self.string[self.pos] != GROUP_OPEN:
# Argument list not opened.
self.parts.append(self.string[start_pos : self.pos])
return
# Skip past opening brace and try to parse an argument list.
self.pos += 1
args = self.parse_argument_list()
if self.pos >= len(self.string) or self.string[self.pos] != GROUP_CLOSE:
# Arguments unclosed.
self.parts.append(self.string[start_pos : self.pos])
return
self.pos += 1 # Move past closing brace.
self.parts.append(Call(ident, args, self.string[start_pos : self.pos]))
def parse_argument_list(self):
"""Parse a list of arguments starting at ``pos``, returning a
list of Expression objects. Does not modify ``parts``. Should
leave ``pos`` pointing to a } character or the end of the
string.
"""
# Try to parse a subexpression in a subparser.
expressions = []
while self.pos < len(self.string):
subparser = Parser(self.string[self.pos :], in_argument=True)
subparser.parse_expression()
# Extract and advance past the parsed expression.
expressions.append(Expression(subparser.parts))
self.pos += subparser.pos
if (
self.pos >= len(self.string)
or self.string[self.pos] == GROUP_CLOSE
):
# Argument list terminated by EOF or closing brace.
break
# Only other way to terminate an expression is with ,.
# Continue to the next argument.
assert self.string[self.pos] == ARG_SEP
self.pos += 1
return expressions
def _parse_ident(self):
"""Parse an identifier and return it (possibly an empty string).
Updates ``pos``.
"""
remainder = self.string[self.pos :]
ident = re.match(r"\w*", remainder).group(0)
self.pos += len(ident)
return ident
def _parse(template):
"""Parse a top-level template string Expression. Any extraneous text
is considered literal text.
"""
parser = Parser(template)
parser.parse_expression()
parts = parser.parts
remainder = parser.string[parser.pos :]
if remainder:
parts.append(remainder)
return Expression(parts)
@functools.lru_cache(maxsize=128)
def template(fmt):
return Template(fmt)
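# Illustrative use of the cached factory (hypothetical values and
# functions; `$name` is a variable substitution, `%upper{...}` a call):
# >>> template("Hi $name, %upper{hello}").substitute(
# ...     {"name": "Ada"}, {"upper": str.upper}
# ... )
# 'Hi Ada, HELLO'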
# External interface.
class Template:
"""A string template, including text, Symbols, and Calls."""
def __init__(self, template):
self.expr = _parse(template)
self.original = template
self.compiled = self.translate()
def __eq__(self, other):
return self.original == other.original
def interpret(self, values={}, functions={}):
"""Like `substitute`, but forces the interpreter (rather than
the compiled version) to be used. The interpreter includes
exception-handling code for missing variables and buggy template
functions but is much slower.
"""
return self.expr.evaluate(Environment(values, functions))
def substitute(self, values={}, functions={}):
"""Evaluate the template given the values and functions."""
try:
res = self.compiled(values, functions)
except Exception: # Handle any exceptions thrown by compiled version.
res = self.interpret(values, functions)
return res
def translate(self):
"""Compile the template to a Python function."""
expressions, varnames, funcnames = self.expr.translate()
argnames = []
for varname in varnames:
argnames.append(f"{VARIABLE_PREFIX}{varname}")
for funcname in funcnames:
argnames.append(f"{FUNCTION_PREFIX}{funcname}")
func = compile_func(
argnames,
[ast.Return(ast.List(expressions, ast.Load()))],
)
def wrapper_func(values={}, functions={}):
args = {}
for varname in varnames:
args[f"{VARIABLE_PREFIX}{varname}"] = values[varname]
for funcname in funcnames:
args[f"{FUNCTION_PREFIX}{funcname}"] = functions[funcname]
parts = func(**args)
return "".join(parts)
return wrapper_func
# Performance tests.
if __name__ == "__main__":
import timeit
_tmpl = Template("foo $bar %baz{foozle $bar barzle} $bar")
_vars = {"bar": "qux"}
_funcs = {"baz": str.upper}
interp_time = timeit.timeit(
"_tmpl.interpret(_vars, _funcs)",
"from __main__ import _tmpl, _vars, _funcs",
number=10000,
)
print(interp_time)
comp_time = timeit.timeit(
"_tmpl.substitute(_vars, _funcs)",
"from __main__ import _tmpl, _vars, _funcs",
number=10000,
)
print(comp_time)
print("Speedup:", interp_time / comp_time)
beetbox-beets-c1877b7/beets/util/hidden.py 0000664 0000000 0000000 00000004076 15073551743 0020513 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
# Copyright 2024, Arav K.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Simple library to work out if a file is hidden on different platforms."""
import ctypes
import os
import stat
import sys
from pathlib import Path
from typing import Union
def is_hidden(path: Union[bytes, Path]) -> bool:
"""
Determine whether the given path is treated as a 'hidden file' by the OS.
"""
if isinstance(path, bytes):
path = Path(os.fsdecode(path))
# TODO: Avoid doing a platform check on every invocation of the function.
# TODO: Stop supporting 'bytes' inputs once 'pathlib' is fully integrated.
if sys.platform == "win32":
# On Windows, we check for an FS-provided attribute.
# FILE_ATTRIBUTE_HIDDEN = 2 (0x2) from GetFileAttributes documentation.
hidden_mask = 2
# Retrieve the attributes for the file.
attrs = ctypes.windll.kernel32.GetFileAttributesW(str(path))
# Ensure the attribute mask is valid.
if attrs < 0:
return False
# Check for the hidden attribute.
        return bool(attrs & hidden_mask)
# On OS X, we check for an FS-provided attribute.
if sys.platform == "darwin":
if hasattr(os.stat_result, "st_flags") and hasattr(stat, "UF_HIDDEN"):
if path.lstat().st_flags & stat.UF_HIDDEN:
return True
# On all non-Windows platforms, we check for a '.'-prefixed file name.
if path.name.startswith("."):
return True
return False
beetbox-beets-c1877b7/beets/util/id_extractors.py 0000664 0000000 0000000 00000005152 15073551743 0022126 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Helpers around the extraction of album/track ID's from metadata sources."""
from __future__ import annotations
import re
from beets import logging
log = logging.getLogger("beets")
PATTERN_BY_SOURCE = {
"spotify": re.compile(r"(?:^|open\.spotify\.com/[^/]+/)([0-9A-Za-z]{22})"),
"deezer": re.compile(r"(?:^|deezer\.com/)(?:[a-z]*/)?(?:[^/]+/)?(\d+)"),
"beatport": re.compile(r"(?:^|beatport\.com/release/.+/)(\d+)$"),
"musicbrainz": re.compile(r"(\w{8}(?:-\w{4}){3}-\w{12})"),
# - plain integer, optionally wrapped in brackets and prefixed by an
# 'r', as this is how discogs displays the release ID on its webpage.
    # - legacy url format: discogs.com/<name of artist>/release/<id>
    # - legacy url short format: discogs.com/release/<id>
    # - current url format: discogs.com/release/<id>-<name of album>
# See #291, #4080 and #4085 for the discussions leading up to these
# patterns.
"discogs": re.compile(
r"(?:^|\[?r|discogs\.com/(?:[^/]+/)?release/)(\d+)\b"
),
    # There is no such thing as a Bandcamp album or artist ID; the URL is
    # used as the identifier. The Bandcamp metadata source plugin works that way
# - https://github.com/snejus/beetcamp. Bandcamp album URLs usually look
# like: https://nameofartist.bandcamp.com/album/nameofalbum
"bandcamp": re.compile(r"(.+)"),
"tidal": re.compile(r"([^/]+)$"),
}
def extract_release_id(source: str, id_: str) -> str | None:
"""Extract the release ID from a given source and ID.
Normally, the `id_` is a url string which contains the ID of the
release. This function extracts the ID from the URL based on the
`source` provided.
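
    For example (the MusicBrainz UUID here is illustrative; it is returned
    as-is, while a Discogs URL is reduced to its numeric ID):

    >>> extract_release_id("musicbrainz", "183749e8-a295-4b94-9ab8-aaf7a9e0023c")
    '183749e8-a295-4b94-9ab8-aaf7a9e0023c'
    >>> extract_release_id("discogs", "https://www.discogs.com/release/123456-Some-Album")
    '123456'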
"""
try:
source_pattern = PATTERN_BY_SOURCE[source.lower()]
except KeyError:
log.debug(
"Unknown source '{}' for ID extraction. Returning id/url as-is.",
source,
)
return id_
if m := source_pattern.search(str(id_)):
return m[1]
return None
beetbox-beets-c1877b7/beets/util/m3u.py 0000664 0000000 0000000 00000007063 15073551743 0017763 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2022, J0J0 Todos.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides utilities to read, write and manipulate m3u playlist files."""
import traceback
from beets.util import FilesystemError, mkdirall, normpath, syspath
class EmptyPlaylistError(Exception):
"""Raised when a playlist file without media files is saved or loaded."""
pass
class M3UFile:
"""Reads and writes m3u or m3u8 playlist files."""
def __init__(self, path):
"""``path`` is the absolute path to the playlist file.
        The playlist file type, m3u or m3u8, is determined by 1) the ending
being m3u8 and 2) the file paths contained in the list being utf-8
encoded. Since the list is passed from the outside, this is currently
out of control of this class.
"""
self.path = path
self.extm3u = False
self.media_list = []
def load(self):
"""Reads the m3u file from disk and sets the object's attributes."""
pl_normpath = normpath(self.path)
try:
with open(syspath(pl_normpath), "rb") as pl_file:
raw_contents = pl_file.readlines()
except OSError as exc:
raise FilesystemError(
exc, "read", (pl_normpath,), traceback.format_exc()
)
        self.extm3u = raw_contents[0].rstrip() == b"#EXTM3U"
for line in raw_contents[1:]:
if line.startswith(b"#"):
# Support for specific EXTM3U comments could be added here.
continue
self.media_list.append(normpath(line.rstrip()))
if not self.media_list:
raise EmptyPlaylistError
def set_contents(self, media_list, extm3u=True):
"""Sets self.media_list to a list of media file paths.
Also sets additional flags, changing the final m3u-file's format.
``media_list`` is a list of paths to media files that should be added
        to the playlist (relative or absolute; choosing which is the
        caller's responsibility). By default the ``extm3u`` flag is set,
        so that a save operation writes an extended m3u playlist (with the
        comment "#EXTM3U" at the top of the file).
"""
self.media_list = media_list
self.extm3u = extm3u
def write(self):
"""Writes the m3u file to disk.
Handles the creation of potential parent directories.
"""
header = [b"#EXTM3U"] if self.extm3u else []
if not self.media_list:
raise EmptyPlaylistError
contents = header + self.media_list
pl_normpath = normpath(self.path)
mkdirall(pl_normpath)
try:
with open(syspath(pl_normpath), "wb") as pl_file:
for line in contents:
pl_file.write(line + b"\n")
pl_file.write(b"\n") # Final linefeed to prevent noeol file.
except OSError as exc:
raise FilesystemError(
exc, "create", (pl_normpath,), traceback.format_exc()
)
beetbox-beets-c1877b7/beets/util/pipeline.py 0000664 0000000 0000000 00000036051 15073551743 0021063 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Simple but robust implementation of generator/coroutine-based
pipelines in Python. The pipelines may be run either sequentially
(single-threaded) or in parallel (one thread per pipeline stage).
This implementation supports pipeline bubbles (indications that the
processing for a certain item should abort). To use them, yield the
BUBBLE constant from any stage coroutine except the last.
In the parallel case, the implementation transparently handles thread
shutdown when the processing is complete and when a stage raises an
exception. KeyboardInterrupts (^C) are also handled.
When running a parallel pipeline, it is also possible to use
multiple coroutines for the same pipeline stage; this lets you speed
up a bottleneck stage by dividing its work among multiple threads.
To do so, pass an iterable of coroutines to the Pipeline constructor
in place of any single coroutine.
"""
from __future__ import annotations
import queue
import sys
from threading import Lock, Thread
from typing import Callable, Generator, TypeVar
from typing_extensions import TypeVarTuple, Unpack
BUBBLE = "__PIPELINE_BUBBLE__"
POISON = "__PIPELINE_POISON__"
DEFAULT_QUEUE_SIZE = 16
Tq = TypeVar("Tq")
def _invalidate_queue(q, val=None, sync=True):
"""Breaks a Queue such that it never blocks, always has size 1,
and has no maximum size. get()ing from the queue returns `val`,
which defaults to None. `sync` controls whether a lock is
required (because it's not reentrant!).
"""
def _qsize(len=len):
return 1
def _put(item):
pass
def _get():
return val
if sync:
q.mutex.acquire()
try:
# Originally, we set `maxsize` to 0 here, which is supposed to mean
# an unlimited queue size. However, there is a race condition since
# Python 3.2 when this attribute is changed while another thread is
# waiting in put()/get() due to a full/empty queue.
# Setting it to 2 is still hacky because Python does not give any
# guarantee what happens if Queue methods/attributes are overwritten
# when it is already in use. However, because of our dummy _put()
# and _get() methods, it provides a workaround to let the queue appear
# to be never empty or full.
# See issue https://github.com/beetbox/beets/issues/2078
q.maxsize = 2
q._qsize = _qsize
q._put = _put
q._get = _get
q.not_empty.notify_all()
q.not_full.notify_all()
finally:
if sync:
q.mutex.release()
class CountedQueue(queue.Queue[Tq]):
"""A queue that keeps track of the number of threads that are
still feeding into it. The queue is poisoned when all threads are
finished with the queue.
"""
def __init__(self, maxsize=0):
queue.Queue.__init__(self, maxsize)
self.nthreads = 0
self.poisoned = False
def acquire(self):
"""Indicate that a thread will start putting into this queue.
Should not be called after the queue is already poisoned.
"""
with self.mutex:
assert not self.poisoned
assert self.nthreads >= 0
self.nthreads += 1
def release(self):
"""Indicate that a thread that was putting into this queue has
exited. If this is the last thread using the queue, the queue
is poisoned.
"""
with self.mutex:
self.nthreads -= 1
assert self.nthreads >= 0
if self.nthreads == 0:
# All threads are done adding to this queue. Poison it
# when it becomes empty.
self.poisoned = True
# Replacement _get invalidates when no items remain.
_old_get = self._get
def _get():
out = _old_get()
if not self.queue:
_invalidate_queue(self, POISON, False)
return out
if self.queue:
# Items remain.
self._get = _get
else:
# No items. Invalidate immediately.
_invalidate_queue(self, POISON, False)
class MultiMessage:
"""A message yielded by a pipeline stage encapsulating multiple
values to be sent to the next stage.
"""
def __init__(self, messages):
self.messages = messages
def multiple(messages):
"""Yield multiple([message, ..]) from a pipeline stage to send
multiple values to the next pipeline stage.
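
    For example, a stage that fans each message out into two:

    >>> @stage
    ... def dup(task):
    ...     return multiple([task, task])
    >>> list(Pipeline([iter([1, 2]), dup()]).pull())
    [1, 1, 2, 2]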
"""
return MultiMessage(messages)
A = TypeVarTuple("A") # Arguments of a function (omitting the task)
T = TypeVar("T") # Type of the task
# Normally these are concatenated i.e. (*args, task)
# Return type of the function (this should normally be the task type,
# but sadly we can't enforce it with the current stage functions
# without a refactor)
R = TypeVar("R")
def stage(
func: Callable[
[Unpack[A], T],
R | None,
],
):
"""Decorate a function to become a simple stage.
>>> @stage
... def add(n, i):
... return i + n
>>> pipe = Pipeline([
... iter([1, 2, 3]),
... add(2),
... ])
>>> list(pipe.pull())
[3, 4, 5]
"""
def coro(*args: Unpack[A]) -> Generator[R | T | None, T, None]:
task: R | T | None = None
while True:
task = yield task
task = func(*(args + (task,)))
return coro
def mutator_stage(func: Callable[[Unpack[A], T], R]):
"""Decorate a function that manipulates items in a coroutine to
become a simple stage.
>>> @mutator_stage
... def setkey(key, item):
... item[key] = True
>>> pipe = Pipeline([
... iter([{'x': False}, {'a': False}]),
... setkey('x'),
... ])
>>> list(pipe.pull())
[{'x': True}, {'a': False, 'x': True}]
"""
def coro(*args: Unpack[A]) -> Generator[T | None, T, None]:
task = None
while True:
task = yield task
func(*(args + (task,)))
return coro
def _allmsgs(obj):
"""Returns a list of all the messages encapsulated in obj. If obj
is a MultiMessage, returns its enclosed messages. If obj is BUBBLE,
returns an empty list. Otherwise, returns a list containing obj.
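
    >>> _allmsgs(multiple(["a", "b"]))
    ['a', 'b']
    >>> _allmsgs(BUBBLE)
    []
    >>> _allmsgs("a")
    ['a']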
"""
if isinstance(obj, MultiMessage):
return obj.messages
elif obj == BUBBLE:
return []
else:
return [obj]
class PipelineThread(Thread):
"""Abstract base class for pipeline-stage threads."""
def __init__(self, all_threads):
super().__init__()
self.abort_lock = Lock()
self.abort_flag = False
self.all_threads = all_threads
self.exc_info = None
def abort(self):
"""Shut down the thread at the next chance possible."""
with self.abort_lock:
self.abort_flag = True
# Ensure that we are not blocking on a queue read or write.
if hasattr(self, "in_queue"):
_invalidate_queue(self.in_queue, POISON)
if hasattr(self, "out_queue"):
_invalidate_queue(self.out_queue, POISON)
def abort_all(self, exc_info):
"""Abort all other threads in the system for an exception."""
self.exc_info = exc_info
for thread in self.all_threads:
thread.abort()
class FirstPipelineThread(PipelineThread):
"""The thread running the first stage in a parallel pipeline setup.
The coroutine should just be a generator.
"""
def __init__(self, coro, out_queue, all_threads):
super().__init__(all_threads)
self.coro = coro
self.out_queue = out_queue
self.out_queue.acquire()
def run(self):
try:
while True:
with self.abort_lock:
if self.abort_flag:
return
# Get the value from the generator.
try:
msg = next(self.coro)
except StopIteration:
break
# Send messages to the next stage.
for msg in _allmsgs(msg):
with self.abort_lock:
if self.abort_flag:
return
self.out_queue.put(msg)
except BaseException:
self.abort_all(sys.exc_info())
return
# Generator finished; shut down the pipeline.
self.out_queue.release()
class MiddlePipelineThread(PipelineThread):
"""A thread running any stage in the pipeline except the first or
last.
"""
def __init__(self, coro, in_queue, out_queue, all_threads):
super().__init__(all_threads)
self.coro = coro
self.in_queue = in_queue
self.out_queue = out_queue
self.out_queue.acquire()
def run(self):
try:
# Prime the coroutine.
next(self.coro)
while True:
with self.abort_lock:
if self.abort_flag:
return
# Get the message from the previous stage.
msg = self.in_queue.get()
if msg is POISON:
break
with self.abort_lock:
if self.abort_flag:
return
# Invoke the current stage.
out = self.coro.send(msg)
# Send messages to next stage.
for msg in _allmsgs(out):
with self.abort_lock:
if self.abort_flag:
return
self.out_queue.put(msg)
except BaseException:
self.abort_all(sys.exc_info())
return
# Pipeline is shutting down normally.
self.out_queue.release()
class LastPipelineThread(PipelineThread):
"""A thread running the last stage in a pipeline. The coroutine
should yield nothing.
"""
def __init__(self, coro, in_queue, all_threads):
super().__init__(all_threads)
self.coro = coro
self.in_queue = in_queue
def run(self):
# Prime the coroutine.
next(self.coro)
try:
while True:
with self.abort_lock:
if self.abort_flag:
return
# Get the message from the previous stage.
msg = self.in_queue.get()
if msg is POISON:
break
with self.abort_lock:
if self.abort_flag:
return
# Send to consumer.
self.coro.send(msg)
except BaseException:
self.abort_all(sys.exc_info())
return
class Pipeline:
"""Represents a staged pattern of work. Each stage in the pipeline
is a coroutine that receives messages from the previous stage and
yields messages to be sent to the next stage.
"""
def __init__(self, stages):
"""Makes a new pipeline from a list of coroutines. There must
be at least two stages.
"""
if len(stages) < 2:
raise ValueError("pipeline must have at least two stages")
self.stages = []
for stage in stages:
if isinstance(stage, (list, tuple)):
self.stages.append(stage)
else:
# Default to one thread per stage.
self.stages.append((stage,))
def run_sequential(self):
"""Run the pipeline sequentially in the current thread. The
stages are run one after the other. Only the first coroutine
in each stage is used.
"""
list(self.pull())
def run_parallel(self, queue_size=DEFAULT_QUEUE_SIZE):
"""Run the pipeline in parallel using one thread per stage. The
messages between the stages are stored in queues of the given
size.
"""
queue_count = len(self.stages) - 1
queues = [CountedQueue(queue_size) for i in range(queue_count)]
threads = []
# Set up first stage.
for coro in self.stages[0]:
threads.append(FirstPipelineThread(coro, queues[0], threads))
# Middle stages.
for i in range(1, queue_count):
for coro in self.stages[i]:
threads.append(
MiddlePipelineThread(
coro, queues[i - 1], queues[i], threads
)
)
# Last stage.
for coro in self.stages[-1]:
threads.append(LastPipelineThread(coro, queues[-1], threads))
# Start threads.
for thread in threads:
thread.start()
# Wait for termination. The final thread lasts the longest.
try:
# Using a timeout allows us to receive KeyboardInterrupt
# exceptions during the join().
while threads[-1].is_alive():
threads[-1].join(1)
except BaseException:
# Stop all the threads immediately.
for thread in threads:
thread.abort()
raise
finally:
# Make completely sure that all the threads have finished
# before we return. They should already be either finished,
# in normal operation, or aborted, in case of an exception.
for thread in threads[:-1]:
thread.join()
for thread in threads:
exc_info = thread.exc_info
if exc_info:
# Make the exception appear as it was raised originally.
raise exc_info[1].with_traceback(exc_info[2])
def pull(self):
"""Yield elements from the end of the pipeline. Runs the stages
sequentially until the last yields some messages. Each of the messages
        is then yielded by the returned generator. If the pipeline has a
        consumer, that is, if the last stage does not yield any messages,
        then ``pull`` will not yield any messages. Only the first
        coroutine in each stage is used.
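
        For example, pulling through a single transforming stage:

        >>> pipe = Pipeline([iter(["a", "b"]), stage(str.upper)()])
        >>> list(pipe.pull())
        ['A', 'B']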
"""
coros = [stage[0] for stage in self.stages]
# "Prime" the coroutines.
for coro in coros[1:]:
next(coro)
# Begin the pipeline.
for out in coros[0]:
msgs = _allmsgs(out)
for coro in coros[1:]:
next_msgs = []
for msg in msgs:
out = coro.send(msg)
next_msgs.extend(_allmsgs(out))
msgs = next_msgs
for msg in msgs:
yield msg
beetbox-beets-c1877b7/beets/util/units.py 0000664 0000000 0000000 00000003205 15073551743 0020413 0 ustar 00root root 0000000 0000000 import re
def raw_seconds_short(string: str) -> float:
"""Formats a human-readable M:SS string as a float (number of seconds).
Raises ValueError if the conversion cannot take place due to `string` not
being in the right format.
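
    For example:

    >>> raw_seconds_short("3:05")
    185.0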
"""
match = re.match(r"^(\d+):([0-5]\d)$", string)
if not match:
raise ValueError("String not in M:SS format")
minutes, seconds = map(int, match.groups())
return float(minutes * 60 + seconds)
def human_seconds_short(interval):
"""Formats a number of seconds as a short human-readable M:SS
string.
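
    For example:

    >>> human_seconds_short(90)
    '1:30'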
"""
interval = int(interval)
return f"{interval // 60}:{interval % 60:02d}"
def human_bytes(size):
"""Formats size, a number of bytes, in a human-readable way."""
powers = ["", "K", "M", "G", "T", "P", "E", "Z", "Y", "H"]
unit = "B"
for power in powers:
if size < 1024:
return f"{size:3.1f} {power}{unit}"
size /= 1024.0
unit = "iB"
return "big"
def human_seconds(interval):
"""Formats interval, a number of seconds, as a human-readable time
interval using English words.
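
    For example:

    >>> human_seconds(7200)
    '2.0 hours'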
"""
units = [
(1, "second"),
(60, "minute"),
(60, "hour"),
(24, "day"),
(7, "week"),
(52, "year"),
(10, "decade"),
]
for i in range(len(units) - 1):
increment, suffix = units[i]
next_increment, _ = units[i + 1]
interval /= float(increment)
if interval < next_increment:
break
else:
# Last unit.
increment, suffix = units[-1]
interval /= float(increment)
return f"{interval:3.1f} {suffix}s"
beetbox-beets-c1877b7/beetsplug/ 0000775 0000000 0000000 00000000000 15073551743 0016612 5 ustar 00root root 0000000 0000000 beetbox-beets-c1877b7/beetsplug/_typing.py 0000664 0000000 0000000 00000007061 15073551743 0020641 0 ustar 00root root 0000000 0000000 from __future__ import annotations
from typing import Any
from typing_extensions import NotRequired, TypedDict
JSONDict = dict[str, Any]
class LRCLibAPI:
class Item(TypedDict):
"""Lyrics data item returned by the LRCLib API."""
id: int
name: str
trackName: str
artistName: str
albumName: str
duration: float | None
instrumental: bool
plainLyrics: str
syncedLyrics: str | None
class GeniusAPI:
"""Genius API data types.
This documents *only* the fields that are used in the plugin.
:attr:`SearchResult` is an exception, since I thought some of the other
fields might be useful in the future.
"""
class DateComponents(TypedDict):
year: int
month: int
day: int
class Artist(TypedDict):
api_path: str
header_image_url: str
id: int
image_url: str
is_meme_verified: bool
is_verified: bool
name: str
url: str
class Stats(TypedDict):
unreviewed_annotations: int
hot: bool
class SearchResult(TypedDict):
annotation_count: int
api_path: str
artist_names: str
full_title: str
header_image_thumbnail_url: str
header_image_url: str
id: int
lyrics_owner_id: int
lyrics_state: str
path: str
primary_artist_names: str
pyongs_count: int | None
relationships_index_url: str
release_date_components: GeniusAPI.DateComponents
release_date_for_display: str
release_date_with_abbreviated_month_for_display: str
song_art_image_thumbnail_url: str
song_art_image_url: str
stats: GeniusAPI.Stats
title: str
title_with_featured: str
url: str
featured_artists: list[GeniusAPI.Artist]
primary_artist: GeniusAPI.Artist
primary_artists: list[GeniusAPI.Artist]
class SearchHit(TypedDict):
result: GeniusAPI.SearchResult
class SearchResponse(TypedDict):
hits: list[GeniusAPI.SearchHit]
class Search(TypedDict):
response: GeniusAPI.SearchResponse
class GoogleCustomSearchAPI:
class Response(TypedDict):
"""Search response from the Google Custom Search API.
If the search returns no results, the :attr:`items` field is not found.
"""
items: NotRequired[list[GoogleCustomSearchAPI.Item]]
class Item(TypedDict):
"""A Google Custom Search API result item.
        The :attr:`title` field is shown to the user in the search
        interface, so it gets truncated with an ellipsis for longer
        queries. For most results, the full title is available as the
        ``og:title`` metatag found under the :attr:`pagemap` field. Note
        that neither this metatag nor the ``pagemap`` field is guaranteed
        to be present in the data.
"""
title: str
link: str
pagemap: NotRequired[GoogleCustomSearchAPI.Pagemap]
class Pagemap(TypedDict):
"""Pagemap data with a single meta tags dict in a list."""
metatags: list[JSONDict]
class TranslatorAPI:
class Language(TypedDict):
"""Language data returned by the translator API."""
language: str
score: float
class Translation(TypedDict):
"""Translation data returned by the translator API."""
text: str
to: str
class Response(TypedDict):
"""Response from the translator API."""
detectedLanguage: TranslatorAPI.Language
translations: list[TranslatorAPI.Translation]
beetbox-beets-c1877b7/beetsplug/_utils/ 0000775 0000000 0000000 00000000000 15073551743 0020111 5 ustar 00root root 0000000 0000000 beetbox-beets-c1877b7/beetsplug/_utils/__init__.py 0000664 0000000 0000000 00000000061 15073551743 0022217 0 ustar 00root root 0000000 0000000 from . import art, vfs
__all__ = ["art", "vfs"]
beetbox-beets-c1877b7/beetsplug/_utils/art.py 0000664 0000000 0000000 00000013437 15073551743 0021261 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""High-level utilities for manipulating image files associated with
music and items' embedded album art.
"""
import os
from tempfile import NamedTemporaryFile
import mediafile
from beets.util import bytestring_path, displayable_path, syspath
from beets.util.artresizer import ArtResizer
def mediafile_image(image_path, maxwidth=None):
"""Return a `mediafile.Image` object for the path."""
with open(syspath(image_path), "rb") as f:
data = f.read()
return mediafile.Image(data, type=mediafile.ImageType.front)
def get_art(log, item):
# Extract the art.
try:
mf = mediafile.MediaFile(syspath(item.path))
except mediafile.UnreadableFileError as exc:
log.warning("Could not extract art from {.filepath}: {}", item, exc)
return
return mf.art
def embed_item(
log,
item,
imagepath,
maxwidth=None,
itempath=None,
compare_threshold=0,
ifempty=False,
as_album=False,
id3v23=None,
quality=0,
):
"""Embed an image into the item's media file."""
# Conditions.
if compare_threshold:
is_similar = check_art_similarity(
log, item, imagepath, compare_threshold
)
if is_similar is None:
log.warning("Error while checking art similarity; skipping.")
return
elif not is_similar:
log.info("Image not similar; skipping.")
return
if ifempty and get_art(log, item):
log.info("media file already contained art")
return
# Filters.
if maxwidth and not as_album:
imagepath = resize_image(log, imagepath, maxwidth, quality)
# Get the `Image` object from the file.
try:
log.debug("embedding {}", displayable_path(imagepath))
image = mediafile_image(imagepath, maxwidth)
except OSError as exc:
log.warning("could not read image file: {}", exc)
return
# Make sure the image kind is safe (some formats only support PNG
# and JPEG).
if image.mime_type not in ("image/jpeg", "image/png"):
log.info("not embedding image of unsupported type: {.mime_type}", image)
return
item.try_write(path=itempath, tags={"images": [image]}, id3v23=id3v23)
def embed_album(
log,
album,
maxwidth=None,
quiet=False,
compare_threshold=0,
ifempty=False,
quality=0,
):
"""Embed album art into all of the album's items."""
imagepath = album.artpath
if not imagepath:
log.info("No album art present for {}", album)
return
if not os.path.isfile(syspath(imagepath)):
log.info(
"Album art not found at {} for {}",
displayable_path(imagepath),
album,
)
return
if maxwidth:
imagepath = resize_image(log, imagepath, maxwidth, quality)
log.info("Embedding album art into {}", album)
for item in album.items():
embed_item(
log,
item,
imagepath,
maxwidth,
None,
compare_threshold,
ifempty,
as_album=True,
quality=quality,
)
def resize_image(log, imagepath, maxwidth, quality):
"""Returns path to an image resized to maxwidth and encoded with the
specified quality level.
"""
log.debug(
"Resizing album art to {} pixels wide and encoding at quality level {}",
maxwidth,
quality,
)
imagepath = ArtResizer.shared.resize(
maxwidth, syspath(imagepath), quality=quality
)
return imagepath
def check_art_similarity(
log,
item,
imagepath,
compare_threshold,
artresizer=None,
):
"""A boolean indicating if an image is similar to embedded item art.
If no embedded art exists, always return `True`. If the comparison fails
for some reason, the return value is `None`.
This must only be called if `ArtResizer.shared.can_compare` is `True`.
"""
with NamedTemporaryFile(delete=True) as f:
art = extract(log, f.name, item)
if not art:
return True
if artresizer is None:
artresizer = ArtResizer.shared
return artresizer.compare(art, imagepath, compare_threshold)
def extract(log, outpath, item):
art = get_art(log, item)
outpath = bytestring_path(outpath)
if not art:
log.info("No album art present in {}, skipping.", item)
return
# Add an extension to the filename.
ext = mediafile.image_extension(art)
if not ext:
log.warning("Unknown image type in {.filepath}.", item)
return
outpath += bytestring_path(f".{ext}")
log.info(
"Extracting album art from: {} to: {}",
item,
displayable_path(outpath),
)
with open(syspath(outpath), "wb") as f:
f.write(art)
return outpath
def extract_first(log, outpath, items):
for item in items:
real_path = extract(log, outpath, item)
if real_path:
return real_path
def clear(log, lib, query):
items = lib.items(query)
log.info("Clearing album art from {} items", len(items))
for item in items:
log.debug("Clearing art for {}", item)
item.try_write(tags={"images": None})
beetbox-beets-c1877b7/beetsplug/_utils/vfs.py 0000664 0000000 0000000 00000003755 15073551743 0021273 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A simple utility for constructing filesystem-like trees from beets
libraries.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, NamedTuple
from beets import util
if TYPE_CHECKING:
from beets.library import Library
class Node(NamedTuple):
files: dict[str, int]
# Maps filenames to Item ids.
dirs: dict[str, Node]
# Maps directory names to child nodes.
def _insert(node: Node, path: list[str], itemid: int):
"""Insert an item into a virtual filesystem node."""
if len(path) == 1:
# Last component. Insert file.
node.files[path[0]] = itemid
else:
# In a directory.
dirname = path[0]
rest = path[1:]
if dirname not in node.dirs:
node.dirs[dirname] = Node({}, {})
_insert(node.dirs[dirname], rest, itemid)
def libtree(lib: Library) -> Node:
"""Generates a filesystem-like directory tree for the files
contained in `lib`. Filesystem nodes are (files, dirs) named
tuples in which both components are dictionaries. The first
maps filenames to Item ids. The second maps directory names to
child node tuples.
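
    A sketch of the resulting structure, using the private helper:

    >>> root = Node({}, {})
    >>> _insert(root, ["Artist", "Album", "01.mp3"], 42)
    >>> root.dirs["Artist"].dirs["Album"].files["01.mp3"]
    42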
"""
root = Node({}, {})
for item in lib.items():
dest = item.destination(relative_to_libdir=True)
parts = util.components(util.as_string(dest))
_insert(root, parts, item.id)
return root
beetbox-beets-c1877b7/beetsplug/absubmit.py 0000664 0000000 0000000 00000017010 15073551743 0020771 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Pieter Mulder.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Calculate acoustic information and submit to AcousticBrainz."""
import errno
import hashlib
import json
import os
import shutil
import subprocess
import tempfile
import requests
from beets import plugins, ui, util
# We use this field to check whether AcousticBrainz info is present.
PROBE_FIELD = "mood_acoustic"
class ABSubmitError(Exception):
"""Raised when failing to analyse file with extractor."""
def call(args):
"""Execute the command and return its output.
    Raise an ABSubmitError on failure.
"""
try:
return util.command_output(args).stdout
except subprocess.CalledProcessError as e:
raise ABSubmitError(f"{args[0]} exited with status {e.returncode}")
class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
self._log.warning("This plugin is deprecated.")
self.config.add(
{"extractor": "", "force": False, "pretend": False, "base_url": ""}
)
self.extractor = self.config["extractor"].as_str()
if self.extractor:
self.extractor = util.normpath(self.extractor)
# Explicit path to extractor
if not os.path.isfile(self.extractor):
raise ui.UserError(
f"Extractor command does not exist: {self.extractor}."
)
else:
# Implicit path to extractor, search for it in path
self.extractor = "streaming_extractor_music"
try:
call([self.extractor])
except OSError:
raise ui.UserError(
"No extractor command found: please install the extractor"
" binary from https://essentia.upf.edu/"
)
except ABSubmitError:
# Extractor found, will exit with an error if not called with
# the correct amount of arguments.
pass
# Get the executable location on the system, which we need
# to calculate the SHA-1 hash.
self.extractor = shutil.which(self.extractor)
# Calculate extractor hash.
self.extractor_sha = hashlib.sha1()
with open(self.extractor, "rb") as extractor:
self.extractor_sha.update(extractor.read())
self.extractor_sha = self.extractor_sha.hexdigest()
self.url = ""
base_url = self.config["base_url"].as_str()
if base_url:
if not base_url.startswith("http"):
raise ui.UserError(
"AcousticBrainz server base URL must start "
"with an HTTP scheme"
)
elif base_url[-1] != "/":
base_url = f"{base_url}/"
self.url = f"{base_url}{{mbid}}/low-level"
def commands(self):
cmd = ui.Subcommand(
"absubmit", help="calculate and submit AcousticBrainz analysis"
)
cmd.parser.add_option(
"-f",
"--force",
dest="force_refetch",
action="store_true",
default=False,
help="re-download data when already present",
)
cmd.parser.add_option(
"-p",
"--pretend",
dest="pretend_fetch",
action="store_true",
default=False,
help=(
"pretend to perform action, but show only files which would be"
" processed"
),
)
cmd.func = self.command
return [cmd]
def command(self, lib, opts, args):
if not self.url:
raise ui.UserError(
"This plugin is deprecated since AcousticBrainz no longer "
"accepts new submissions. See the base_url configuration "
"option."
)
else:
# Get items from arguments
items = lib.items(args)
self.opts = opts
util.par_map(self.analyze_submit, items)
def analyze_submit(self, item):
analysis = self._get_analysis(item)
if analysis:
self._submit_data(item, analysis)
def _get_analysis(self, item):
mbid = item["mb_trackid"]
# Avoid re-analyzing files that already have AB data.
if not self.opts.force_refetch and not self.config["force"]:
if item.get(PROBE_FIELD):
return None
# If file has no MBID, skip it.
if not mbid:
self._log.info(
"Not analysing {}, missing musicbrainz track id.", item
)
return None
if self.opts.pretend_fetch or self.config["pretend"]:
self._log.info("pretend action - extract item: {}", item)
return None
        # Temporary file to save the extractor output to; the extractor
        # only works if an output file is given. We load the data from the
        # temporary file into a Python object and then remove the file
        # from the system.
tmp_file, filename = tempfile.mkstemp(suffix=".json")
try:
# Close the file, so the extractor can overwrite it.
os.close(tmp_file)
try:
call([self.extractor, util.syspath(item.path), filename])
except ABSubmitError as e:
self._log.warning(
"Failed to analyse {item} for AcousticBrainz: {error}",
item=item,
error=e,
)
return None
with open(filename) as tmp_file:
analysis = json.load(tmp_file)
# Add the hash to the output.
analysis["metadata"]["version"]["essentia_build_sha"] = (
self.extractor_sha
)
return analysis
finally:
try:
os.remove(filename)
except OSError as e:
# ENOENT means file does not exist, just ignore this error.
if e.errno != errno.ENOENT:
raise
def _submit_data(self, item, data):
mbid = item["mb_trackid"]
headers = {"Content-Type": "application/json"}
response = requests.post(
self.url.format(mbid=mbid),
json=data,
headers=headers,
timeout=10,
)
# Test that request was successful and raise an error on failure.
if response.status_code != 200:
try:
message = response.json()["message"]
except (ValueError, KeyError) as e:
message = f"unable to get error message: {e}"
self._log.error(
"Failed to submit AcousticBrainz analysis of {item}: "
"{message}).",
item=item,
message=message,
)
else:
self._log.debug(
"Successfully submitted AcousticBrainz analysis for {}.",
item,
)
beetbox-beets-c1877b7/beetsplug/acousticbrainz.py 0000664 0000000 0000000 00000026712 15073551743 0022214 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2015-2016, Ohm Patel.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetch various AcousticBrainz metadata using MBID."""
from collections import defaultdict
import requests
from beets import plugins, ui
from beets.dbcore import types
LEVELS = ["/low-level", "/high-level"]
ABSCHEME = {
"highlevel": {
"danceability": {"all": {"danceable": "danceable"}},
"gender": {"value": "gender"},
"genre_rosamerica": {"value": "genre_rosamerica"},
"mood_acoustic": {"all": {"acoustic": "mood_acoustic"}},
"mood_aggressive": {"all": {"aggressive": "mood_aggressive"}},
"mood_electronic": {"all": {"electronic": "mood_electronic"}},
"mood_happy": {"all": {"happy": "mood_happy"}},
"mood_party": {"all": {"party": "mood_party"}},
"mood_relaxed": {"all": {"relaxed": "mood_relaxed"}},
"mood_sad": {"all": {"sad": "mood_sad"}},
"moods_mirex": {"value": "moods_mirex"},
"ismir04_rhythm": {"value": "rhythm"},
"tonal_atonal": {"all": {"tonal": "tonal"}},
"timbre": {"value": "timbre"},
"voice_instrumental": {"value": "voice_instrumental"},
},
"lowlevel": {"average_loudness": "average_loudness"},
"rhythm": {"bpm": "bpm"},
"tonal": {
"chords_changes_rate": "chords_changes_rate",
"chords_key": "chords_key",
"chords_number_rate": "chords_number_rate",
"chords_scale": "chords_scale",
"key_key": ("initial_key", 0),
"key_scale": ("initial_key", 1),
"key_strength": "key_strength",
},
}
class AcousticPlugin(plugins.BeetsPlugin):
item_types = {
"average_loudness": types.Float(6),
"chords_changes_rate": types.Float(6),
"chords_key": types.STRING,
"chords_number_rate": types.Float(6),
"chords_scale": types.STRING,
"danceable": types.Float(6),
"gender": types.STRING,
"genre_rosamerica": types.STRING,
"initial_key": types.STRING,
"key_strength": types.Float(6),
"mood_acoustic": types.Float(6),
"mood_aggressive": types.Float(6),
"mood_electronic": types.Float(6),
"mood_happy": types.Float(6),
"mood_party": types.Float(6),
"mood_relaxed": types.Float(6),
"mood_sad": types.Float(6),
"moods_mirex": types.STRING,
"rhythm": types.Float(6),
"timbre": types.STRING,
"tonal": types.Float(6),
"voice_instrumental": types.STRING,
}
def __init__(self):
super().__init__()
self._log.warning("This plugin is deprecated.")
self.config.add(
{"auto": True, "force": False, "tags": [], "base_url": ""}
)
self.base_url = self.config["base_url"].as_str()
if self.base_url:
if not self.base_url.startswith("http"):
raise ui.UserError(
"AcousticBrainz server base URL must start "
"with an HTTP scheme"
)
elif self.base_url[-1] != "/":
self.base_url = f"{self.base_url}/"
if self.config["auto"]:
self.register_listener("import_task_files", self.import_task_files)
def commands(self):
cmd = ui.Subcommand(
"acousticbrainz", help="fetch metadata from AcousticBrainz"
)
cmd.parser.add_option(
"-f",
"--force",
dest="force_refetch",
action="store_true",
default=False,
help="re-download data when already present",
)
def func(lib, opts, args):
items = lib.items(args)
self._fetch_info(
items,
ui.should_write(),
opts.force_refetch or self.config["force"],
)
cmd.func = func
return [cmd]
def import_task_files(self, session, task):
"""Function is called upon beet import."""
self._fetch_info(task.imported_items(), False, True)
def _get_data(self, mbid):
if not self.base_url:
raise ui.UserError(
"This plugin is deprecated since AcousticBrainz has shut "
"down. See the base_url configuration option."
)
data = {}
for url in _generate_urls(self.base_url, mbid):
self._log.debug("fetching URL: {}", url)
try:
res = requests.get(url, timeout=10)
except requests.RequestException as exc:
self._log.info("request error: {}", exc)
return {}
if res.status_code == 404:
self._log.info("recording ID {} not found", mbid)
return {}
try:
data.update(res.json())
except ValueError:
self._log.debug("Invalid Response: {.text}", res)
return {}
return data
def _fetch_info(self, items, write, force):
"""Fetch additional information from AcousticBrainz for the `item`s."""
tags = self.config["tags"].as_str_seq()
for item in items:
# If we're not forcing re-downloading for all tracks, check
# whether the data is already present. We use one
# representative field name to check for previously fetched
# data.
if not force:
mood_str = item.get("mood_acoustic", "")
if mood_str:
self._log.info("data already present for: {}", item)
continue
# We can only fetch data for tracks with MBIDs.
if not item.mb_trackid:
continue
self._log.info("getting data for: {}", item)
data = self._get_data(item.mb_trackid)
if data:
for attr, val in self._map_data_to_scheme(data, ABSCHEME):
if not tags or attr in tags:
self._log.debug(
"attribute {} of {} set to {}", attr, item, val
)
setattr(item, attr, val)
else:
self._log.debug(
"skipping attribute {} of {}"
" (value {}) due to config",
attr,
item,
val,
)
item.store()
if write:
item.try_write()
def _map_data_to_scheme(self, data, scheme):
"""Given `data` as a structure of nested dictionaries, and
        `scheme` as a structure of nested dictionaries, `yield` tuples
`(attr, val)` where `attr` and `val` are corresponding leaf
nodes in `scheme` and `data`.
As its name indicates, `scheme` defines how the data is structured,
so this function tries to find leaf nodes in `data` that correspond
        to the leaf nodes of `scheme`, and not the other way around.
Leaf nodes of `data` that do not exist in the `scheme` do not matter.
If a leaf node of `scheme` is not present in `data`,
no value is yielded for that attribute and a simple warning is issued.
Finally, to account for attributes of which the value is split between
several leaf nodes in `data`, leaf nodes of `scheme` can be tuples
`(attr, order)` where `attr` is the attribute to which the leaf node
belongs, and `order` is the place at which it should appear in the
value. The different `value`s belonging to the same `attr` are simply
joined with `' '`. This is hardcoded and not very flexible, but it gets
the job done.
For example:
>>> scheme = {
'key1': 'attribute',
'key group': {
'subkey1': 'subattribute',
'subkey2': ('composite attribute', 0)
},
'key2': ('composite attribute', 1)
}
>>> data = {
'key1': 'value',
'key group': {
'subkey1': 'subvalue',
'subkey2': 'part 1 of composite attr'
},
'key2': 'part 2'
}
>>> print(list(_map_data_to_scheme(data, scheme)))
[('subattribute', 'subvalue'),
('attribute', 'value'),
('composite attribute', 'part 1 of composite attr part 2')]
"""
        # First, we traverse `scheme` and `data`, `yield`ing all the
        # non-composite attributes straight away and populating the dictionary
# `composites` with the composite attributes.
# When we are finished traversing `scheme`, `composites` should
# map each composite attribute to an ordered list of the values
# belonging to the attribute, for example:
# `composites = {'initial_key': ['B', 'minor']}`.
# The recursive traversal.
composites = defaultdict(list)
yield from self._data_to_scheme_child(data, scheme, composites)
# When composites has been populated, yield the composite attributes
# by joining their parts.
for composite_attr, value_parts in composites.items():
yield composite_attr, " ".join(value_parts)
def _data_to_scheme_child(self, subdata, subscheme, composites):
"""The recursive business logic of :meth:`_map_data_to_scheme`:
Traverse two structures of nested dictionaries in parallel and `yield`
tuples of corresponding leaf nodes.
If a leaf node belongs to a composite attribute (is a `tuple`),
populate `composites` rather than yielding straight away.
All the child functions for a single traversal share the same
`composites` instance, which is passed along.
"""
for k, v in subscheme.items():
if k in subdata:
if isinstance(v, dict):
yield from self._data_to_scheme_child(
subdata[k], v, composites
)
elif isinstance(v, tuple):
composite_attribute, part_number = v
attribute_parts = composites[composite_attribute]
# Parts are not guaranteed to be inserted in order
while len(attribute_parts) <= part_number:
attribute_parts.append("")
attribute_parts[part_number] = subdata[k]
else:
yield v, subdata[k]
else:
self._log.warning(
"Acousticbrainz did not provide info about {}", k
)
self._log.debug(
"Data {} could not be mapped to scheme {} "
"because key {} was not found",
subdata,
v,
k,
)
def _generate_urls(base_url, mbid):
"""Generates AcousticBrainz end point urls for given `mbid`."""
for level in LEVELS:
yield f"{base_url}{mbid}{level}"
beetbox-beets-c1877b7/beetsplug/advancedrewrite.py 0000664 0000000 0000000 00000015617 15073551743 0022345 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2023, Max Rumpf.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Plugin to rewrite fields based on a given query."""
import re
import shlex
from collections import defaultdict
import confuse
from beets.dbcore import AndQuery, query_from_strings
from beets.dbcore.types import MULTI_VALUE_DSV
from beets.library import Album, Item
from beets.plugins import BeetsPlugin
from beets.ui import UserError
def rewriter(field, simple_rules, advanced_rules):
"""Template field function factory.
Create a template field function that rewrites the given field
with the given rewriting rules.
``simple_rules`` must be a list of (pattern, replacement) pairs.
``advanced_rules`` must be a list of (query, replacement) pairs.
"""
def fieldfunc(item):
value = item._values_fixed[field]
for pattern, replacement in simple_rules:
if pattern.match(value.lower()):
# Rewrite activated.
return replacement
for query, replacement in advanced_rules:
if query.match(item):
# Rewrite activated.
return replacement
# Not activated; return original value.
return value
return fieldfunc
class AdvancedRewritePlugin(BeetsPlugin):
"""Plugin to rewrite fields based on a given query."""
def __init__(self):
"""Parse configuration and register template fields for rewriting."""
super().__init__()
self.register_listener("pluginload", self.loaded)
def loaded(self):
template = confuse.Sequence(
confuse.OneOf(
[
confuse.MappingValues(str),
{
"match": str,
"replacements": confuse.MappingValues(
confuse.OneOf([str, confuse.Sequence(str)]),
),
},
]
)
)
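        # The template above accepts configuration like the following
        # (values purely illustrative): one rule in the simple one-line
        # syntax, and one in the advanced query syntax:
        #
        #   advancedrewrite:
        #     - artist Prince: The Artist
        #     - match: "albumartist:Beatles year:1969"
        #       replacements:
        #         albumartist: The Beatles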
# Used to apply the same rewrite to the corresponding album field.
corresponding_album_fields = {
"artist": "albumartist",
"artists": "albumartists",
"artist_sort": "albumartist_sort",
"artists_sort": "albumartists_sort",
}
# Gather all the rewrite rules for each field.
class RulesContainer:
def __init__(self):
self.simple = []
self.advanced = []
rules = defaultdict(RulesContainer)
for rule in self.config.get(template):
if "match" not in rule:
# Simple syntax
if len(rule) != 1:
raise UserError(
"Simple rewrites must have only one rule, "
"but found multiple entries. "
"Did you forget to prepend a dash (-)?"
)
key, value = next(iter(rule.items()))
try:
fieldname, pattern = key.split(None, 1)
except ValueError:
raise UserError(
f"Invalid simple rewrite specification {key}"
)
if fieldname not in Item._fields:
raise UserError(
f"invalid field name {fieldname} in rewriter"
)
self._log.debug(
f"adding simple rewrite '{pattern}' → '{value}' "
f"for field {fieldname}"
)
pattern = re.compile(pattern.lower())
rules[fieldname].simple.append((pattern, value))
# Apply the same rewrite to the corresponding album field.
if fieldname in corresponding_album_fields:
album_fieldname = corresponding_album_fields[fieldname]
rules[album_fieldname].simple.append((pattern, value))
else:
# Advanced syntax
match = rule["match"]
replacements = rule["replacements"]
if len(replacements) == 0:
raise UserError(
"Advanced rewrites must have at least one replacement"
)
query = query_from_strings(
AndQuery,
Item,
prefixes={},
query_parts=shlex.split(match),
)
for fieldname, replacement in replacements.items():
if fieldname not in Item._fields:
raise UserError(
f"Invalid field name {fieldname} in rewriter"
)
self._log.debug(
f"adding advanced rewrite to '{replacement}' "
f"for field {fieldname}"
)
if isinstance(replacement, list):
if Item._fields[fieldname] is not MULTI_VALUE_DSV:
raise UserError(
f"Field {fieldname} is not a multi-valued field "
f"but a list was given: {', '.join(replacement)}"
)
elif isinstance(replacement, str):
if Item._fields[fieldname] is MULTI_VALUE_DSV:
replacement = [replacement]
else:
raise UserError(
f"Invalid type of replacement {replacement} "
f"for field {fieldname}"
)
rules[fieldname].advanced.append((query, replacement))
# Apply the same rewrite to the corresponding album field.
if fieldname in corresponding_album_fields:
album_fieldname = corresponding_album_fields[fieldname]
rules[album_fieldname].advanced.append(
(query, replacement)
)
# Replace each template field with the new rewriter function.
for fieldname, fieldrules in rules.items():
getter = rewriter(fieldname, fieldrules.simple, fieldrules.advanced)
self.template_fields[fieldname] = getter
if fieldname in Album._fields:
self.album_template_fields[fieldname] = getter
beetbox-beets-c1877b7/beetsplug/albumtypes.py 0000664 0000000 0000000 00000004471 15073551743 0021357 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2021, Edgars Supe.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds an album template field for formatted album types."""
from beets.library import Album
from beets.plugins import BeetsPlugin
from .musicbrainz import VARIOUS_ARTISTS_ID
class AlbumTypesPlugin(BeetsPlugin):
"""Adds an album template field for formatted album types."""
def __init__(self):
"""Init AlbumTypesPlugin."""
super().__init__()
self.album_template_fields["atypes"] = self._atypes
self.config.add(
{
"types": [
("ep", "EP"),
("single", "Single"),
("soundtrack", "OST"),
("live", "Live"),
("compilation", "Anthology"),
("remix", "Remix"),
],
"ignore_va": ["compilation"],
"bracket": "[]",
}
)
def _atypes(self, item: Album):
"""Returns a formatted string based on album's types."""
types = self.config["types"].as_pairs()
ignore_va = self.config["ignore_va"].as_str_seq()
bracket = self.config["bracket"].as_str()
# Assign a left and right bracket or leave blank if argument is empty.
if len(bracket) == 2:
bracket_l = bracket[0]
bracket_r = bracket[1]
else:
bracket_l = ""
bracket_r = ""
res = ""
albumtypes = item.albumtypes
is_va = item.mb_albumartistid == VARIOUS_ARTISTS_ID
for type in types:
if type[0] in albumtypes and type[1]:
                if not is_va or type[0] not in ignore_va:
res += f"{bracket_l}{type[1]}{bracket_r}"
return res
beetbox-beets-c1877b7/beetsplug/aura.py 0000664 0000000 0000000 00000100254 15073551743 0020116 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2020, Callum Brown.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""An AURA server using Flask."""
import os
import re
from collections.abc import Mapping
from dataclasses import dataclass
from mimetypes import guess_type
from typing import ClassVar
from flask import (
Blueprint,
Flask,
current_app,
make_response,
request,
send_file,
)
from typing_extensions import Self
from beets import config
from beets.dbcore.query import (
AndQuery,
FixedFieldSort,
MatchQuery,
MultipleSort,
NotQuery,
RegexpQuery,
SlowFieldSort,
SQLiteType,
)
from beets.library import Album, Item, LibModel, Library
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, _open_library
# Constants
# AURA server information
# TODO: Add version information
SERVER_INFO = {
"aura-version": "0",
"server": "beets-aura",
"server-version": "0.1",
"auth-required": False,
"features": ["albums", "artists", "images"],
}
# Maps AURA Track attribute to beets Item attribute
TRACK_ATTR_MAP = {
# Required
"title": "title",
"artist": "artist",
# Optional
"album": "album",
"track": "track", # Track number on album
"tracktotal": "tracktotal",
"disc": "disc",
"disctotal": "disctotal",
"year": "year",
"month": "month",
"day": "day",
"bpm": "bpm",
"genre": "genre",
"recording-mbid": "mb_trackid", # beets trackid is MB recording
"track-mbid": "mb_releasetrackid",
"composer": "composer",
"albumartist": "albumartist",
"comments": "comments",
# Optional for Audio Metadata
# TODO: Support the mimetype attribute, format != mime type
# "mimetype": track.format,
"duration": "length",
"framerate": "samplerate",
# I don't think beets has a framecount field
# "framecount": ???,
"channels": "channels",
"bitrate": "bitrate",
"bitdepth": "bitdepth",
"size": "filesize",
}
# Maps AURA Album attribute to beets Album attribute
ALBUM_ATTR_MAP = {
# Required
"title": "album",
"artist": "albumartist",
# Optional
"tracktotal": "albumtotal",
"disctotal": "disctotal",
"year": "year",
"month": "month",
"day": "day",
"genre": "genre",
"release-mbid": "mb_albumid",
"release-group-mbid": "mb_releasegroupid",
}
# Maps AURA Artist attribute to beets Item field
# Artists are not first-class in beets, so information is extracted from
# beets Items.
ARTIST_ATTR_MAP = {
# Required
"name": "artist",
# Optional
"artist-mbid": "mb_artistid",
}
@dataclass
class AURADocument:
"""Base class for building AURA documents."""
model_cls: ClassVar[type[LibModel]]
lib: Library
args: Mapping[str, str]
@classmethod
def from_app(cls) -> Self:
"""Initialise the document using the global app and request."""
return cls(current_app.config["lib"], request.args)
@staticmethod
def error(status, title, detail):
"""Make a response for an error following the JSON:API spec.
Args:
status: An HTTP status code string, e.g. "404 Not Found".
title: A short, human-readable summary of the problem.
detail: A human-readable explanation specific to this
occurrence of the problem.
"""
document = {
"errors": [{"status": status, "title": title, "detail": detail}]
}
return make_response(document, status)
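    # A sketch of the document shape error() produces (the values are
    # illustrative, not from a real response):
    #
    # {
    #     "errors": [
    #         {
    #             "status": "404 Not Found",
    #             "title": "No track with the requested id.",
    #             "detail": "There is no track with an id of 1 in the library."
    #         }
    #     ]
    # }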
@classmethod
def get_attribute_converter(cls, beets_attr: str) -> type[SQLiteType]:
"""Work out what data type an attribute should be for beets.
Args:
beets_attr: The name of the beets attribute, e.g. "title".
"""
try:
# Look for field in list of Album fields
# and get python type of database type.
# See beets.library.Album and beets.dbcore.types
return cls.model_cls._fields[beets_attr].model_type
except KeyError:
# Fall back to string (NOTE: probably not good)
return str
def translate_filters(self):
"""Translate filters from request arguments to a beets Query."""
        # The format of each filter key in the request parameter is:
        # filter[<attribute>]. This regex extracts <attribute>.
        pattern = re.compile(r"filter\[(?P<attribute>[a-zA-Z0-9_-]+)\]")
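        # For example, a (hypothetical) request query string like
        #     ?filter[genre]=Rock
        # matches here and ultimately yields MatchQuery("genre", "Rock").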
queries = []
for key, value in self.args.items():
match = pattern.match(key)
if match:
# Extract attribute name from key
aura_attr = match.group("attribute")
# Get the beets version of the attribute name
beets_attr = self.attribute_map.get(aura_attr, aura_attr)
converter = self.get_attribute_converter(beets_attr)
value = converter(value)
# Add exact match query to list
# Use a slow query so it works with all fields
queries.append(
self.model_cls.field_query(beets_attr, value, MatchQuery)
)
# NOTE: AURA doesn't officially support multiple queries
return AndQuery(queries)
def translate_sorts(self, sort_arg):
"""Translate an AURA sort parameter into a beets Sort.
Args:
sort_arg: The value of the 'sort' query parameter; a comma
separated list of fields to sort by, in order.
E.g. "-year,title".
"""
# Change HTTP query parameter to a list
aura_sorts = sort_arg.strip(",").split(",")
sorts = []
for aura_attr in aura_sorts:
if aura_attr[0] == "-":
ascending = False
# Remove leading "-"
aura_attr = aura_attr[1:]
else:
# JSON:API default
ascending = True
# Get the beets version of the attribute name
beets_attr = self.attribute_map.get(aura_attr, aura_attr)
# Use slow sort so it works with all fields (inc. computed)
sorts.append(SlowFieldSort(beets_attr, ascending=ascending))
return MultipleSort(sorts)
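        # E.g. a sort_arg of "-year,title" becomes a MultipleSort over
        # SlowFieldSort("year", ascending=False) and
        # SlowFieldSort("title", ascending=True).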
def paginate(self, collection):
"""Get a page of the collection and the URL to the next page.
Args:
collection: The raw data from which resource objects can be
built. Could be an sqlite3.Cursor object (tracks and
albums) or a list of strings (artists).
"""
# Pages start from zero
page = self.args.get("page", 0, int)
# Use page limit defined in config by default.
default_limit = config["aura"]["page_limit"].get(int)
limit = self.args.get("limit", default_limit, int)
# start = offset of first item to return
start = page * limit
# end = offset of last item + 1
end = start + limit
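        # E.g. with the default limit of 500, a (hypothetical) page=2 gives
        # start=1000 and end=1500, i.e. items 1000..1499.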
if end > len(collection):
end = len(collection)
next_url = None
else:
# Not the last page so work out links.next url
if not self.args:
# No existing arguments, so current page is 0
next_url = f"{request.url}?page=1"
elif not self.args.get("page", None):
# No existing page argument, so add one to the end
next_url = f"{request.url}&page=1"
else:
# Increment page token by 1
next_url = request.url.replace(
f"page={page}", f"page={page + 1}"
)
# Get only the items in the page range
data = [
self.get_resource_object(self.lib, collection[i])
for i in range(start, end)
]
return data, next_url
def get_included(self, data, include_str):
"""Build a list of resource objects for inclusion.
Args:
data: An array of dicts in the form of resource objects.
include_str: A comma separated list of resource types to
include. E.g. "tracks,images".
"""
# Change HTTP query parameter to a list
to_include = include_str.strip(",").split(",")
# Build a list of unique type and id combinations
        # For each resource object in the primary data, iterate over its
# relationships. If a relationship matches one of the types
# requested for inclusion (e.g. "albums") then add each type-id pair
# under the "data" key to unique_identifiers, checking first that
# it has not already been added. This ensures that no resources are
# included more than once.
unique_identifiers = []
for res_obj in data:
for rel_name, rel_obj in res_obj["relationships"].items():
if rel_name in to_include:
# NOTE: Assumes relationship is to-many
for identifier in rel_obj["data"]:
if identifier not in unique_identifiers:
unique_identifiers.append(identifier)
# TODO: I think this could be improved
included = []
for identifier in unique_identifiers:
res_type = identifier["type"]
if res_type == "track":
track_id = int(identifier["id"])
track = self.lib.get_item(track_id)
included.append(
TrackDocument.get_resource_object(self.lib, track)
)
elif res_type == "album":
album_id = int(identifier["id"])
album = self.lib.get_album(album_id)
included.append(
AlbumDocument.get_resource_object(self.lib, album)
)
elif res_type == "artist":
artist_id = identifier["id"]
included.append(
ArtistDocument.get_resource_object(self.lib, artist_id)
)
elif res_type == "image":
image_id = identifier["id"]
included.append(
ImageDocument.get_resource_object(self.lib, image_id)
)
else:
raise ValueError(f"Invalid resource type: {res_type}")
return included
def all_resources(self):
"""Build document for /tracks, /albums or /artists."""
query = self.translate_filters()
sort_arg = self.args.get("sort", None)
if sort_arg:
sort = self.translate_sorts(sort_arg)
# For each sort field add a query which ensures all results
# have a non-empty, non-zero value for that field.
query.subqueries.extend(
NotQuery(
self.model_cls.field_query(s.field, "(^$|^0$)", RegexpQuery)
)
for s in sort.sorts
)
else:
sort = None
# Get information from the library
collection = self.get_collection(query=query, sort=sort)
# Convert info to AURA form and paginate it
data, next_url = self.paginate(collection)
document = {"data": data}
# If there are more pages then provide a way to access them
if next_url:
document["links"] = {"next": next_url}
# Include related resources for each element in "data"
include_str = self.args.get("include", None)
if include_str:
document["included"] = self.get_included(data, include_str)
return document
def single_resource_document(self, resource_object):
"""Build document for a specific requested resource.
Args:
resource_object: A dictionary in the form of a JSON:API
resource object.
"""
document = {"data": resource_object}
include_str = self.args.get("include", None)
if include_str:
            # Wrap document["data"] in a list because get_included()
            # expects a list of resource objects.
document["included"] = self.get_included(
[document["data"]], include_str
)
return document
class TrackDocument(AURADocument):
"""Class for building documents for /tracks endpoints."""
model_cls = Item
attribute_map = TRACK_ATTR_MAP
def get_collection(self, query=None, sort=None):
"""Get Item objects from the library.
Args:
query: A beets Query object or a beets query string.
sort: A beets Sort object.
"""
return self.lib.items(query, sort)
@classmethod
def get_attribute_converter(cls, beets_attr: str) -> type[SQLiteType]:
"""Work out what data type an attribute should be for beets.
Args:
beets_attr: The name of the beets attribute, e.g. "title".
"""
# filesize is a special field (read from disk not db?)
if beets_attr == "filesize":
return int
return super().get_attribute_converter(beets_attr)
@staticmethod
def get_resource_object(lib: Library, track):
"""Construct a JSON:API resource object from a beets Item.
Args:
track: A beets Item object.
"""
attributes = {}
# Use aura => beets attribute map, e.g. size => filesize
for aura_attr, beets_attr in TRACK_ATTR_MAP.items():
a = getattr(track, beets_attr)
# Only set attribute if it's not None, 0, "", etc.
# NOTE: This could result in required attributes not being set
if a:
attributes[aura_attr] = a
# JSON:API one-to-many relationship to parent album
relationships = {
"artists": {"data": [{"type": "artist", "id": track.artist}]}
}
# Only add album relationship if not singleton
if not track.singleton:
relationships["albums"] = {
"data": [{"type": "album", "id": str(track.album_id)}]
}
return {
"type": "track",
"id": str(track.id),
"attributes": attributes,
"relationships": relationships,
}
def single_resource(self, track_id):
"""Get track from the library and build a document.
Args:
track_id: The beets id of the track (integer).
"""
track = self.lib.get_item(track_id)
if not track:
return self.error(
"404 Not Found",
"No track with the requested id.",
f"There is no track with an id of {track_id} in the library.",
)
return self.single_resource_document(
self.get_resource_object(self.lib, track)
)
class AlbumDocument(AURADocument):
"""Class for building documents for /albums endpoints."""
model_cls = Album
attribute_map = ALBUM_ATTR_MAP
def get_collection(self, query=None, sort=None):
"""Get Album objects from the library.
Args:
query: A beets Query object or a beets query string.
sort: A beets Sort object.
"""
return self.lib.albums(query, sort)
@staticmethod
def get_resource_object(lib: Library, album):
"""Construct a JSON:API resource object from a beets Album.
Args:
album: A beets Album object.
"""
attributes = {}
# Use aura => beets attribute name map
for aura_attr, beets_attr in ALBUM_ATTR_MAP.items():
a = getattr(album, beets_attr)
# Only set attribute if it's not None, 0, "", etc.
# NOTE: This could mean required attributes are not set
if a:
attributes[aura_attr] = a
# Get beets Item objects for all tracks in the album sorted by
# track number. Sorting is not required but it's nice.
query = MatchQuery("album_id", album.id)
sort = FixedFieldSort("track", ascending=True)
tracks = lib.items(query, sort)
# JSON:API one-to-many relationship to tracks on the album
relationships = {
"tracks": {
"data": [{"type": "track", "id": str(t.id)} for t in tracks]
}
}
# Add images relationship if album has associated images
if album.artpath:
path = os.fsdecode(album.artpath)
filename = path.split("/")[-1]
image_id = f"album-{album.id}-{filename}"
relationships["images"] = {
"data": [{"type": "image", "id": image_id}]
}
        # Add an artist relationship if the album artist's name matches a
        # track's artist. Artists are defined from tracks rather than from
        # albumartist, and all tracks are checked in case some of them
        # have featured artists.
if album.albumartist in [t.artist for t in tracks]:
relationships["artists"] = {
"data": [{"type": "artist", "id": album.albumartist}]
}
return {
"type": "album",
"id": str(album.id),
"attributes": attributes,
"relationships": relationships,
}
def single_resource(self, album_id):
"""Get album from the library and build a document.
Args:
album_id: The beets id of the album (integer).
"""
album = self.lib.get_album(album_id)
if not album:
return self.error(
"404 Not Found",
"No album with the requested id.",
f"There is no album with an id of {album_id} in the library.",
)
return self.single_resource_document(
self.get_resource_object(self.lib, album)
)
class ArtistDocument(AURADocument):
"""Class for building documents for /artists endpoints."""
model_cls = Item
attribute_map = ARTIST_ATTR_MAP
def get_collection(self, query=None, sort=None):
"""Get a list of artist names from the library.
Args:
query: A beets Query object or a beets query string.
sort: A beets Sort object.
"""
# Gets only tracks with matching artist information
tracks = self.lib.items(query, sort)
collection = []
for track in tracks:
# Do not add duplicates
if track.artist not in collection:
collection.append(track.artist)
return collection
@staticmethod
def get_resource_object(lib: Library, artist_id):
"""Construct a JSON:API resource object for the given artist.
Args:
artist_id: A string which is the artist's name.
"""
# Get tracks where artist field exactly matches artist_id
query = MatchQuery("artist", artist_id)
tracks = lib.items(query)
if not tracks:
return None
# Get artist information from the first track
# NOTE: It could be that the first track doesn't have a
# MusicBrainz id but later tracks do, which isn't ideal.
attributes = {}
# Use aura => beets attribute map, e.g. artist => name
for aura_attr, beets_attr in ARTIST_ATTR_MAP.items():
a = getattr(tracks[0], beets_attr)
# Only set attribute if it's not None, 0, "", etc.
# NOTE: This could mean required attributes are not set
if a:
attributes[aura_attr] = a
relationships = {
"tracks": {
"data": [{"type": "track", "id": str(t.id)} for t in tracks]
}
}
album_query = MatchQuery("albumartist", artist_id)
albums = lib.albums(query=album_query)
if len(albums) != 0:
relationships["albums"] = {
"data": [{"type": "album", "id": str(a.id)} for a in albums]
}
return {
"type": "artist",
"id": artist_id,
"attributes": attributes,
"relationships": relationships,
}
def single_resource(self, artist_id):
"""Get info for the requested artist and build a document.
Args:
artist_id: A string which is the artist's name.
"""
artist_resource = self.get_resource_object(self.lib, artist_id)
if not artist_resource:
return self.error(
"404 Not Found",
"No artist with the requested id.",
f"There is no artist with an id of {artist_id} in the library.",
)
return self.single_resource_document(artist_resource)
def safe_filename(fn):
"""Check whether a string is a simple (non-path) filename.
For example, `foo.txt` is safe because it is a "plain" filename. But
    `foo/bar.txt` and `../foo.txt` and `.` are all unsafe because they
    can refer to directories other than the current one.
"""
# Rule out any directories.
if os.path.basename(fn) != fn:
return False
# In single names, rule out Unix directory traversal names.
if fn in (".", ".."):
return False
return True
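# A few illustrative cases (assuming POSIX-style paths):
#     safe_filename("cover.jpg")     -> True
#     safe_filename("foo/bar.jpg")   -> False (contains a directory)
#     safe_filename("..")            -> False (directory traversal)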
class ImageDocument(AURADocument):
"""Class for building documents for /images/(id) endpoints."""
model_cls = Album
@staticmethod
def get_image_path(lib: Library, image_id):
"""Works out the full path to the image with the given id.
Returns None if there is no such image.
Args:
image_id: A string in the form
"--".
"""
# Split image_id into its constituent parts
id_split = image_id.split("-")
if len(id_split) < 3:
# image_id is not in the required format
return None
parent_type = id_split[0]
parent_id = id_split[1]
img_filename = "-".join(id_split[2:])
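        # E.g. "album-13-cover.jpg" splits into parent_type "album",
        # parent_id "13" and img_filename "cover.jpg" (hyphens inside the
        # filename are re-joined above).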
if not safe_filename(img_filename):
return None
# Get the path to the directory parent's images are in
if parent_type == "album":
album = lib.get_album(int(parent_id))
if not album or not album.artpath:
return None
# Cut the filename off of artpath
# This is in preparation for supporting images in the same
# directory that are not tracked by beets.
artpath = os.fsdecode(album.artpath)
dir_path = "/".join(artpath.split("/")[:-1])
else:
# Images for other resource types are not supported
return None
img_path = os.path.join(dir_path, img_filename)
# Check the image actually exists
if os.path.isfile(img_path):
return img_path
else:
return None
@staticmethod
def get_resource_object(lib: Library, image_id):
"""Construct a JSON:API resource object for the given image.
Args:
image_id: A string in the form
"--".
"""
# Could be called as a static method, so can't use
# self.get_image_path()
image_path = ImageDocument.get_image_path(lib, image_id)
if not image_path:
return None
attributes = {
"role": "cover",
"mimetype": guess_type(image_path)[0],
"size": os.path.getsize(image_path),
}
try:
from PIL import Image
except ImportError:
pass
else:
im = Image.open(image_path)
attributes["width"] = im.width
attributes["height"] = im.height
relationships = {}
# Split id into [parent_type, parent_id, filename]
id_split = image_id.split("-")
relationships[f"{id_split[0]}s"] = {
"data": [{"type": id_split[0], "id": id_split[1]}]
}
return {
"id": image_id,
"type": "image",
# Remove attributes that are None, 0, "", etc.
"attributes": {k: v for k, v in attributes.items() if v},
"relationships": relationships,
}
def single_resource(self, image_id):
"""Get info for the requested image and build a document.
Args:
image_id: A string in the form
"--".
"""
image_resource = self.get_resource_object(self.lib, image_id)
if not image_resource:
return self.error(
"404 Not Found",
"No image with the requested id.",
f"There is no image with an id of {image_id} in the library.",
)
return self.single_resource_document(image_resource)
# Initialise flask blueprint
aura_bp = Blueprint("aura_bp", __name__)
@aura_bp.route("/server")
def server_info():
"""Respond with info about the server."""
return {"data": {"type": "server", "id": "0", "attributes": SERVER_INFO}}
# Track endpoints
@aura_bp.route("/tracks")
def all_tracks():
"""Respond with a list of all tracks and related information."""
return TrackDocument.from_app().all_resources()
@aura_bp.route("/tracks/")
def single_track(track_id):
"""Respond with info about the specified track.
Args:
track_id: The id of the track provided in the URL (integer).
"""
return TrackDocument.from_app().single_resource(track_id)
@aura_bp.route("/tracks//audio")
def audio_file(track_id):
"""Supply an audio file for the specified track.
Args:
track_id: The id of the track provided in the URL (integer).
"""
track = current_app.config["lib"].get_item(track_id)
if not track:
return AURADocument.error(
"404 Not Found",
"No track with the requested id.",
f"There is no track with an id of {track_id} in the library.",
)
path = os.fsdecode(track.path)
if not os.path.isfile(path):
return AURADocument.error(
"404 Not Found",
"No audio file for the requested track.",
f"There is no audio file for track {track_id} at the expected"
" location",
)
file_mimetype = guess_type(path)[0]
if not file_mimetype:
return AURADocument.error(
"500 Internal Server Error",
"Requested audio file has an unknown mimetype.",
f"The audio file for track {track_id} has an unknown mimetype. "
f"Its file extension is {path.split('.')[-1]}.",
)
# Check that the Accept header contains the file's mimetype
# Takes into account */* and audio/*
# Adding support for the bitrate parameter would require some effort so I
# left it out. This means the client could be sent an error even if the
# audio doesn't need transcoding.
if not request.accept_mimetypes.best_match([file_mimetype]):
return AURADocument.error(
"406 Not Acceptable",
"Unsupported MIME type or bitrate parameter in Accept header.",
f"The audio file for track {track_id} is only available as"
f" {file_mimetype} and bitrate parameters are not supported.",
)
return send_file(
path,
mimetype=file_mimetype,
# Handles filename in Content-Disposition header
as_attachment=True,
# Tries to upgrade the stream to support range requests
conditional=True,
)
# Album endpoints
@aura_bp.route("/albums")
def all_albums():
"""Respond with a list of all albums and related information."""
return AlbumDocument.from_app().all_resources()
@aura_bp.route("/albums/")
def single_album(album_id):
"""Respond with info about the specified album.
Args:
album_id: The id of the album provided in the URL (integer).
"""
return AlbumDocument.from_app().single_resource(album_id)
# Artist endpoints
# Artist ids are their names
@aura_bp.route("/artists")
def all_artists():
"""Respond with a list of all artists and related information."""
return ArtistDocument.from_app().all_resources()
# Using the path converter allows slashes in artist_id
@aura_bp.route("/artists/")
def single_artist(artist_id):
"""Respond with info about the specified artist.
Args:
artist_id: The id of the artist provided in the URL. A string
which is the artist's name.
"""
return ArtistDocument.from_app().single_resource(artist_id)
# Image endpoints
# Image ids are in the form <parent_type>-<parent_id>-<img_filename>
# For example: album-13-cover.jpg
@aura_bp.route("/images/")
def single_image(image_id):
"""Respond with info about the specified image.
Args:
image_id: The id of the image provided in the URL. A string in
the form "--".
"""
return ImageDocument.from_app().single_resource(image_id)
@aura_bp.route("/images//file")
def image_file(image_id):
"""Supply an image file for the specified image.
Args:
image_id: The id of the image provided in the URL. A string in
the form "--".
"""
img_path = ImageDocument.get_image_path(current_app.config["lib"], image_id)
if not img_path:
return AURADocument.error(
"404 Not Found",
"No image with the requested id.",
f"There is no image with an id of {image_id} in the library",
)
return send_file(img_path)
# WSGI app
def create_app():
"""An application factory for use by a WSGI server."""
config["aura"].add(
{
"host": "127.0.0.1",
"port": 8337,
"cors": [],
"cors_supports_credentials": False,
"page_limit": 500,
}
)
app = Flask(__name__)
# Register AURA blueprint view functions under a URL prefix
app.register_blueprint(aura_bp, url_prefix="/aura")
# AURA specifies mimetype MUST be this
app.config["JSONIFY_MIMETYPE"] = "application/vnd.api+json"
# Disable auto-sorting of JSON keys
app.config["JSON_SORT_KEYS"] = False
# Provide a way to access the beets library
# The normal method of using the Library and config provided in the
# command function is not used because create_app() could be called
# by an external WSGI server.
# NOTE: this uses a 'private' function from beets.ui.__init__
app.config["lib"] = _open_library(config)
# Enable CORS if required
cors = config["aura"]["cors"].as_str_seq(list)
if cors:
from flask_cors import CORS
# "Accept" is the only header clients use
app.config["CORS_ALLOW_HEADERS"] = "Accept"
app.config["CORS_RESOURCES"] = {r"/aura/*": {"origins": cors}}
app.config["CORS_SUPPORTS_CREDENTIALS"] = config["aura"][
"cors_supports_credentials"
].get(bool)
CORS(app)
return app
# Beets Plugin Hook
class AURAPlugin(BeetsPlugin):
"""The BeetsPlugin subclass for the AURA server plugin."""
def __init__(self):
"""Add configuration options for the AURA plugin."""
super().__init__()
def commands(self):
"""Add subcommand used to run the AURA server."""
def run_aura(lib, opts, args):
"""Run the application using Flask's built in-server.
Args:
lib: A beets Library object (not used).
opts: Command line options. An optparse.Values object.
args: The list of arguments to process (not used).
"""
app = create_app()
# Start the built-in server (not intended for production)
app.run(
host=self.config["host"].get(str),
port=self.config["port"].get(int),
debug=opts.debug,
threaded=True,
)
run_aura_cmd = Subcommand("aura", help="run an AURA server")
run_aura_cmd.parser.add_option(
"-d",
"--debug",
action="store_true",
default=False,
help="use Flask debug mode",
)
run_aura_cmd.func = run_aura
return [run_aura_cmd]
beetbox-beets-c1877b7/beetsplug/autobpm.py 0000664 0000000 0000000 00000005512 15073551743 0020636 0 ustar 00root root 0000000 0000000 # This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Uses Librosa to calculate the `bpm` field."""
from __future__ import annotations
from typing import TYPE_CHECKING
import librosa
import numpy as np
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, should_write
if TYPE_CHECKING:
from beets.importer import ImportTask
from beets.library import Item, Library
class AutoBPMPlugin(BeetsPlugin):
def __init__(self) -> None:
super().__init__()
self.config.add(
{
"auto": True,
"overwrite": False,
"beat_track_kwargs": {},
}
)
if self.config["auto"]:
self.import_stages = [self.imported]
def commands(self) -> list[Subcommand]:
cmd = Subcommand(
"autobpm", help="detect and add bpm from audio using Librosa"
)
cmd.func = self.command
return [cmd]
def command(self, lib: Library, _, args: list[str]) -> None:
self.calculate_bpm(list(lib.items(args)), write=should_write())
def imported(self, _, task: ImportTask) -> None:
self.calculate_bpm(task.imported_items())
def calculate_bpm(self, items: list[Item], write: bool = False) -> None:
for item in items:
path = item.filepath
if bpm := item.bpm:
self._log.info("BPM for {} already exists: {}", path, bpm)
if not self.config["overwrite"]:
continue
try:
y, sr = librosa.load(item.filepath, res_type="kaiser_fast")
except Exception as exc:
self._log.error("Failed to load {}: {}", path, exc)
continue
kwargs = self.config["beat_track_kwargs"].flatten()
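            # These are passed straight through to librosa below; e.g. a
            # (hypothetical) config of  beat_track_kwargs: {start_bpm: 120}
            # becomes librosa.beat.beat_track(y=y, sr=sr, start_bpm=120).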
try:
tempo, _ = librosa.beat.beat_track(y=y, sr=sr, **kwargs)
except Exception as exc:
self._log.error("Failed to measure BPM for {}: {}", path, exc)
continue
bpm = round(
float(tempo[0] if isinstance(tempo, np.ndarray) else tempo)
)
item["bpm"] = bpm
self._log.info("Computed BPM for {}: {}", path, bpm)
if write:
item.try_write()
item.store()
beetbox-beets-c1877b7/beetsplug/badfiles.py 0000664 0000000 0000000 00000016200 15073551743 0020734 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, François-Xavier Thomas.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Use command-line tools to check for audio file corruption."""
import errno
import os
import shlex
import sys
from subprocess import STDOUT, CalledProcessError, check_output, list2cmdline
import confuse
from beets import importer, ui
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets.util import displayable_path, par_map
class CheckerCommandError(Exception):
"""Raised when running a checker failed.
Attributes:
checker: Checker command name.
path: Path to the file being validated.
errno: Error number from the checker execution error.
msg: Message from the checker execution error.
"""
def __init__(self, cmd, oserror):
self.checker = cmd[0]
self.path = cmd[-1]
self.errno = oserror.errno
self.msg = str(oserror)
class BadFiles(BeetsPlugin):
def __init__(self):
super().__init__()
self.verbose = False
self.register_listener("import_task_start", self.on_import_task_start)
self.register_listener(
"import_task_before_choice", self.on_import_task_before_choice
)
def run_command(self, cmd):
self._log.debug(
"running command: {}", displayable_path(list2cmdline(cmd))
)
try:
output = check_output(cmd, stderr=STDOUT)
errors = 0
status = 0
except CalledProcessError as e:
output = e.output
errors = 1
status = e.returncode
except OSError as e:
raise CheckerCommandError(cmd, e)
output = output.decode(sys.getdefaultencoding(), "replace")
return status, errors, [line for line in output.split("\n") if line]
def check_mp3val(self, path):
status, errors, output = self.run_command(["mp3val", path])
if status == 0:
output = [line for line in output if line.startswith("WARNING:")]
errors = len(output)
return status, errors, output
def check_flac(self, path):
return self.run_command(["flac", "-wst", path])
def check_custom(self, command):
def checker(path):
cmd = shlex.split(command)
cmd.append(path)
return self.run_command(cmd)
return checker
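    # E.g. with a (hypothetical) config of
    #     badfiles:
    #         commands:
    #             ogg: oggz-validate
    # get_checker("ogg") returns a checker that runs `oggz-validate <path>`.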
def get_checker(self, ext):
ext = ext.lower()
try:
command = self.config["commands"].get(dict).get(ext)
except confuse.NotFoundError:
command = None
if command:
return self.check_custom(command)
if ext == "mp3":
return self.check_mp3val
if ext == "flac":
return self.check_flac
def check_item(self, item):
# First, check whether the path exists. If not, the user
        # should probably run `beet update` to clean up their library.
dpath = displayable_path(item.path)
self._log.debug("checking path: {}", dpath)
if not os.path.exists(item.path):
ui.print_(
f"{ui.colorize('text_error', dpath)}: file does not exist"
)
# Run the checker against the file if one is found
ext = os.path.splitext(item.path)[1][1:].decode("utf8", "ignore")
checker = self.get_checker(ext)
if not checker:
self._log.error("no checker specified in the config for {}", ext)
return []
path = item.path
if not isinstance(path, str):
path = item.path.decode(sys.getfilesystemencoding())
try:
status, errors, output = checker(path)
except CheckerCommandError as e:
if e.errno == errno.ENOENT:
self._log.error(
"command not found: {0.checker} when validating file: {0.path}",
e,
)
else:
self._log.error("error invoking {0.checker}: {0.msg}", e)
return []
error_lines = []
if status > 0:
error_lines.append(
f"{ui.colorize('text_error', dpath)}: checker exited with"
f" status {status}"
)
for line in output:
error_lines.append(f" {line}")
elif errors > 0:
error_lines.append(
f"{ui.colorize('text_warning', dpath)}: checker found"
f" {status} errors or warnings"
)
for line in output:
error_lines.append(f" {line}")
elif self.verbose:
error_lines.append(f"{ui.colorize('text_success', dpath)}: ok")
return error_lines
def on_import_task_start(self, task, session):
if not self.config["check_on_import"].get(False):
return
checks_failed = []
for item in task.items:
error_lines = self.check_item(item)
if error_lines:
checks_failed.append(error_lines)
if checks_failed:
task._badfiles_checks_failed = checks_failed
def on_import_task_before_choice(self, task, session):
if hasattr(task, "_badfiles_checks_failed"):
ui.print_(
f"{ui.colorize('text_warning', 'BAD')} one or more files failed"
" checks:"
)
for error in task._badfiles_checks_failed:
for error_line in error:
ui.print_(error_line)
ui.print_()
ui.print_("What would you like to do?")
sel = ui.input_options(["aBort", "skip", "continue"])
if sel == "s":
return importer.Action.SKIP
elif sel == "c":
return None
elif sel == "b":
raise importer.ImportAbortError()
else:
raise Exception(f"Unexpected selection: {sel}")
def command(self, lib, opts, args):
# Get items from arguments
items = lib.items(args)
self.verbose = opts.verbose
def check_and_print(item):
for error_line in self.check_item(item):
ui.print_(error_line)
par_map(check_and_print, items)
def commands(self):
bad_command = Subcommand(
"bad", help="check for corrupt or missing files"
)
bad_command.parser.add_option(
"-v",
"--verbose",
action="store_true",
default=False,
dest="verbose",
help="view results for both the bad and uncorrupted files",
)
bad_command.func = self.command
return [bad_command]
beetbox-beets-c1877b7/beetsplug/bareasc.py 0000664 0000000 0000000 00000006104 15073551743 0020565 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Philippe Mongeau.
# Copyright 2021, Graham R. Cobb.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# This module is adapted from Fuzzy in accordance with the licence of
# that module
"""Provides a bare-ASCII matching query."""
from unidecode import unidecode
from beets import ui
from beets.dbcore.query import StringFieldQuery
from beets.plugins import BeetsPlugin
from beets.ui import print_
class BareascQuery(StringFieldQuery[str]):
"""Compare items using bare ASCII, without accents etc."""
@classmethod
def string_match(cls, pattern, val):
"""Convert both pattern and string to plain ASCII before matching.
If pattern is all lower case, also convert string to lower case so
match is also case insensitive
"""
# smartcase
if pattern.islower():
val = val.lower()
pattern = unidecode(pattern)
val = unidecode(val)
return pattern in val
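    # For example, string_match("beyonce", "Beyoncé") is True: the pattern
    # is all lower case, so the value is lowered and both are unidecoded
    # to "beyonce" before the substring test.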
def col_clause(self):
"""Compare ascii version of the pattern."""
clause = f"unidecode({self.field})"
if self.pattern.islower():
clause = f"lower({clause})"
return rf"{clause} LIKE ? ESCAPE '\'", [f"%{unidecode(self.pattern)}%"]
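    # E.g. for field "artist" and the lower-case pattern "beyonce", this
    # builds (sketch):  "lower(unidecode(artist)) LIKE ? ESCAPE '\'"
    # with the single parameter "%beyonce%".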
class BareascPlugin(BeetsPlugin):
"""Plugin to provide bare-ASCII option for beets matching."""
def __init__(self):
"""Default prefix for selecting bare-ASCII matching is #."""
super().__init__()
self.config.add(
{
"prefix": "#",
}
)
def queries(self):
"""Register bare-ASCII matching."""
prefix = self.config["prefix"].as_str()
return {prefix: BareascQuery}
def commands(self):
"""Add bareasc command as unidecode version of 'list'."""
cmd = ui.Subcommand(
"bareasc", help="unidecode version of beet list command"
)
cmd.parser.usage += (
"\nExample: %prog -f '$album: $title' artist:beatles"
)
cmd.parser.add_all_common_options()
cmd.func = self.unidecode_list
return [cmd]
def unidecode_list(self, lib, opts, args):
"""Emulate normal 'list' command but with unidecode output."""
album = opts.album
# Copied from commands.py - list_items
if album:
for album in lib.albums(args):
bare = unidecode(str(album))
print_(bare)
else:
for item in lib.items(args):
bare = unidecode(str(item))
print_(bare)
beetbox-beets-c1877b7/beetsplug/beatport.py 0000664 0000000 0000000 00000044730 15073551743 0021014 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Beatport release and track search support to the autotagger"""
from __future__ import annotations
import json
import re
from datetime import datetime, timedelta
from typing import (
TYPE_CHECKING,
Iterable,
Iterator,
Literal,
Sequence,
overload,
)
import confuse
from requests_oauthlib import OAuth1Session
from requests_oauthlib.oauth1_session import (
TokenMissing,
TokenRequestDenied,
VerifierMissing,
)
import beets
import beets.ui
from beets.autotag.hooks import AlbumInfo, TrackInfo
from beets.metadata_plugins import MetadataSourcePlugin
if TYPE_CHECKING:
from beets.importer import ImportSession
from beets.library import Item
from ._typing import JSONDict
AUTH_ERRORS = (TokenRequestDenied, TokenMissing, VerifierMissing)
USER_AGENT = f"beets/{beets.__version__} +https://beets.io/"
class BeatportAPIError(Exception):
pass
class BeatportClient:
_api_base = "https://oauth-api.beatport.com"
def __init__(self, c_key, c_secret, auth_key=None, auth_secret=None):
"""Initiate the client with OAuth information.
For the initial authentication with the backend `auth_key` and
`auth_secret` can be `None`. Use `get_authorize_url` and
`get_access_token` to obtain them for subsequent uses of the API.
:param c_key: OAuth1 client key
:param c_secret: OAuth1 client secret
:param auth_key: OAuth1 resource owner key
:param auth_secret: OAuth1 resource owner secret
"""
self.api = OAuth1Session(
client_key=c_key,
client_secret=c_secret,
resource_owner_key=auth_key,
resource_owner_secret=auth_secret,
callback_uri="oob",
)
self.api.headers = {"User-Agent": USER_AGENT}
def get_authorize_url(self) -> str:
"""Generate the URL for the user to authorize the application.
Retrieves a request token from the Beatport API and returns the
corresponding authorization URL on their end that the user has
to visit.
This is the first step of the initial authorization process with the
API. Once the user has visited the URL, call
        :py:meth:`get_access_token` with the displayed data to complete
the process.
:returns: Authorization URL for the user to visit
:rtype: unicode
"""
self.api.fetch_request_token(
self._make_url("/identity/1/oauth/request-token")
)
return self.api.authorization_url(
self._make_url("/identity/1/oauth/authorize")
)
def get_access_token(self, auth_data: str) -> tuple[str, str]:
"""Obtain the final access token and secret for the API.
:param auth_data: URL-encoded authorization data as displayed at
the authorization url (obtained via
:py:meth:`get_authorize_url`) after signing in
:returns: OAuth resource owner key and secret as unicode
"""
self.api.parse_authorization_response(
f"https://beets.io/auth?{auth_data}"
)
access_data = self.api.fetch_access_token(
self._make_url("/identity/1/oauth/access-token")
)
return access_data["oauth_token"], access_data["oauth_token_secret"]
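    # Typical first-run flow (a sketch; the auth_data value is a
    # placeholder for whatever the authorization page displays):
    #
    #     client = BeatportClient(c_key, c_secret)
    #     print(client.get_authorize_url())  # visit this URL and sign in
    #     token, secret = client.get_access_token(
    #         "oauth_token=...&oauth_verifier=..."
    #     )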
@overload
def search(
self,
query: str,
release_type: Literal["release"],
details: bool = True,
) -> Iterator[BeatportRelease]: ...
@overload
def search(
self,
query: str,
release_type: Literal["track"],
details: bool = True,
) -> Iterator[BeatportTrack]: ...
def search(
self,
query: str,
release_type: Literal["release", "track"],
details=True,
) -> Iterator[BeatportRelease | BeatportTrack]:
"""Perform a search of the Beatport catalogue.
:param query: Query string
:param release_type: Type of releases to search for.
:param details: Retrieve additional information about the
search results. Currently this will fetch
the tracklist for releases and do nothing for
tracks
:returns: Search results
"""
response = self._get(
"catalog/3/search",
query=query,
perPage=5,
facets=[f"fieldType:{release_type}"],
)
for item in response:
if release_type == "release":
release = BeatportRelease(item)
if details:
release.tracks = self.get_release_tracks(item["id"])
yield release
elif release_type == "track":
yield BeatportTrack(item)
def get_release(self, beatport_id: str) -> BeatportRelease | None:
"""Get information about a single release.
:param beatport_id: Beatport ID of the release
:returns: The matching release
"""
response = self._get("/catalog/3/releases", id=beatport_id)
if response:
release = BeatportRelease(response[0])
release.tracks = self.get_release_tracks(beatport_id)
return release
return None
def get_release_tracks(self, beatport_id: str) -> list[BeatportTrack]:
"""Get all tracks for a given release.
:param beatport_id: Beatport ID of the release
:returns: Tracks in the matching release
"""
response = self._get(
"/catalog/3/tracks", releaseId=beatport_id, perPage=100
)
return [BeatportTrack(t) for t in response]
def get_track(self, beatport_id: str) -> BeatportTrack:
"""Get information about a single track.
:param beatport_id: Beatport ID of the track
:returns: The matching track
"""
response = self._get("/catalog/3/tracks", id=beatport_id)
return BeatportTrack(response[0])
def _make_url(self, endpoint: str) -> str:
"""Get complete URL for a given API endpoint."""
if not endpoint.startswith("/"):
endpoint = f"/{endpoint}"
return f"{self._api_base}{endpoint}"
def _get(self, endpoint: str, **kwargs) -> list[JSONDict]:
"""Perform a GET request on a given API endpoint.
Automatically extracts result data from the response and converts HTTP
exceptions into :py:class:`BeatportAPIError` objects.
"""
try:
response = self.api.get(self._make_url(endpoint), params=kwargs)
except Exception as e:
raise BeatportAPIError(f"Error connecting to Beatport API: {e}")
if not response:
raise BeatportAPIError(
f"Error {response.status_code} for '{response.request.path_url}"
)
return response.json()["results"]
class BeatportObject:
beatport_id: str
name: str
release_date: datetime | None = None
artists: list[tuple[str, str]] | None = None
# tuple of artist id and artist name
def __init__(self, data: JSONDict):
self.beatport_id = str(data["id"]) # given as int in the response
self.name = str(data["name"])
if "releaseDate" in data:
self.release_date = datetime.strptime(
data["releaseDate"], "%Y-%m-%d"
)
if "artists" in data:
self.artists = [(x["id"], str(x["name"])) for x in data["artists"]]
if "genres" in data:
self.genres = [str(x["name"]) for x in data["genres"]]
def artists_str(self) -> str | None:
if self.artists is not None:
if len(self.artists) < 4:
artist_str = ", ".join(x[1] for x in self.artists)
else:
artist_str = "Various Artists"
else:
artist_str = None
return artist_str
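    # E.g. artists [("1", "A"), ("2", "B")] render as "A, B", while four
    # or more artists collapse to "Various Artists".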
class BeatportRelease(BeatportObject):
catalog_number: str | None
label_name: str | None
category: str | None
url: str | None
genre: str | None
tracks: list[BeatportTrack] | None = None
def __init__(self, data: JSONDict):
super().__init__(data)
self.catalog_number = data.get("catalogNumber")
self.label_name = data.get("label", {}).get("name")
self.category = data.get("category")
self.genre = data.get("genre")
if "slug" in data:
self.url = (
f"https://beatport.com/release/{data['slug']}/{data['id']}"
)
    def __str__(self) -> str:
        return (
            f"<BeatportRelease: {self.artists_str()} - {self.name}"
            f" ({self.catalog_number})>"
        )
class BeatportTrack(BeatportObject):
title: str | None
mix_name: str | None
length: timedelta
url: str | None
track_number: int | None
bpm: str | None
initial_key: str | None
genre: str | None
def __init__(self, data: JSONDict):
super().__init__(data)
if "title" in data:
self.title = str(data["title"])
if "mixName" in data:
self.mix_name = str(data["mixName"])
self.length = timedelta(milliseconds=data.get("lengthMs", 0) or 0)
if not self.length:
try:
min, sec = data.get("length", "0:0").split(":")
self.length = timedelta(minutes=int(min), seconds=int(sec))
except ValueError:
pass
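        # E.g. a lengthMs of 215000 gives a 215-second timedelta; failing
        # that, a "length" value like "3:35" (hypothetical) is parsed by
        # the fallback above.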
if "slug" in data:
self.url = f"https://beatport.com/track/{data['slug']}/{data['id']}"
self.track_number = data.get("trackNumber")
self.bpm = data.get("bpm")
self.initial_key = str((data.get("key") or {}).get("shortName"))
# Use 'subgenre' and if not present, 'genre' as a fallback.
if data.get("subGenres"):
self.genre = str(data["subGenres"][0].get("name"))
elif data.get("genres"):
self.genre = str(data["genres"][0].get("name"))
class BeatportPlugin(MetadataSourcePlugin):
_client: BeatportClient | None = None
def __init__(self):
super().__init__()
self.config.add(
{
"apikey": "57713c3906af6f5def151b33601389176b37b429",
"apisecret": "b3fe08c93c80aefd749fe871a16cd2bb32e2b954",
"tokenfile": "beatport_token.json",
}
)
self.config["apikey"].redact = True
self.config["apisecret"].redact = True
self.register_listener("import_begin", self.setup)
@property
def client(self) -> BeatportClient:
if self._client is None:
raise ValueError(
"Beatport client not initialized. Call setup() first."
)
return self._client
def setup(self, session: ImportSession):
c_key: str = self.config["apikey"].as_str()
c_secret: str = self.config["apisecret"].as_str()
# Get the OAuth token from a file or log in.
try:
with open(self._tokenfile()) as f:
tokendata = json.load(f)
except OSError:
# No token yet. Generate one.
token, secret = self.authenticate(c_key, c_secret)
else:
token = tokendata["token"]
secret = tokendata["secret"]
self._client = BeatportClient(c_key, c_secret, token, secret)
def authenticate(self, c_key: str, c_secret: str) -> tuple[str, str]:
# Get the link for the OAuth page.
auth_client = BeatportClient(c_key, c_secret)
try:
url = auth_client.get_authorize_url()
except AUTH_ERRORS as e:
self._log.debug("authentication error: {}", e)
raise beets.ui.UserError("communication with Beatport failed")
beets.ui.print_("To authenticate with Beatport, visit:")
beets.ui.print_(url)
# Ask for the verifier data and validate it.
data = beets.ui.input_("Enter the string displayed in your browser:")
try:
token, secret = auth_client.get_access_token(data)
except AUTH_ERRORS as e:
self._log.debug("authentication error: {}", e)
raise beets.ui.UserError("Beatport token request failed")
# Save the token for later use.
self._log.debug("Beatport token {}, secret {}", token, secret)
with open(self._tokenfile(), "w") as f:
json.dump({"token": token, "secret": secret}, f)
return token, secret
def _tokenfile(self) -> str:
"""Get the path to the JSON file for storing the OAuth token."""
return self.config["tokenfile"].get(confuse.Filename(in_app_dir=True))
def candidates(
self,
items: Sequence[Item],
artist: str,
album: str,
va_likely: bool,
) -> Iterator[AlbumInfo]:
if va_likely:
query = album
else:
query = f"{artist} {album}"
try:
yield from self._get_releases(query)
except BeatportAPIError as e:
self._log.debug("API Error: {} (query: {})", e, query)
return
def item_candidates(
self, item: Item, artist: str, title: str
) -> Iterable[TrackInfo]:
query = f"{artist} {title}"
try:
return self._get_tracks(query)
except BeatportAPIError as e:
self._log.debug("API Error: {} (query: {})", e, query)
return []
def album_for_id(self, album_id: str):
"""Fetches a release by its Beatport ID and returns an AlbumInfo object
or None if the query is not a valid ID or release is not found.
"""
self._log.debug("Searching for release {}", album_id)
if not (release_id := self._extract_id(album_id)):
self._log.debug("Not a valid Beatport release ID.")
return None
release = self.client.get_release(release_id)
if release:
return self._get_album_info(release)
return None
def track_for_id(self, track_id: str):
"""Fetches a track by its Beatport ID and returns a TrackInfo object
or None if the track is not a valid Beatport ID or track is not found.
"""
self._log.debug("Searching for track {}", track_id)
# TODO: move to extractor
match = re.search(r"(^|beatport\.com/track/.+/)(\d+)$", track_id)
if not match:
self._log.debug("Not a valid Beatport track ID.")
return None
bp_track = self.client.get_track(match.group(2))
if bp_track is not None:
return self._get_track_info(bp_track)
return None
def _get_releases(self, query: str) -> Iterator[AlbumInfo]:
"""Returns a list of AlbumInfo objects for a beatport search query."""
# Strip non-word characters from query. Things like "!" and "-" can
# cause a query to return no results, even if they match the artist or
# album title. Use `re.UNICODE` flag to avoid stripping non-english
# word characters.
query = re.sub(r"\W+", " ", query, flags=re.UNICODE)
        # Strip medium information from the query. Things like "CD1" and
        # "disc 1"
# can also negate an otherwise positive result.
query = re.sub(r"\b(CD|disc)\s*\d+", "", query, flags=re.I)
for beatport_release in self.client.search(query, "release"):
if beatport_release is None:
continue
yield self._get_album_info(beatport_release)
def _get_album_info(self, release: BeatportRelease) -> AlbumInfo:
"""Returns an AlbumInfo object for a Beatport Release object."""
va = release.artists is not None and len(release.artists) > 3
artist, artist_id = self._get_artist(release.artists)
if va:
artist = "Various Artists"
tracks: list[TrackInfo] = []
if release.tracks is not None:
tracks = [self._get_track_info(x) for x in release.tracks]
release_date = release.release_date
return AlbumInfo(
album=release.name,
album_id=release.beatport_id,
beatport_album_id=release.beatport_id,
artist=artist,
artist_id=artist_id,
tracks=tracks,
albumtype=release.category,
va=va,
label=release.label_name,
catalognum=release.catalog_number,
media="Digital",
data_source=self.data_source,
data_url=release.url,
genre=release.genre,
year=release_date.year if release_date else None,
month=release_date.month if release_date else None,
day=release_date.day if release_date else None,
)
def _get_track_info(self, track: BeatportTrack) -> TrackInfo:
"""Returns a TrackInfo object for a Beatport Track object."""
title = track.name
if track.mix_name != "Original Mix":
title += f" ({track.mix_name})"
artist, artist_id = self._get_artist(track.artists)
length = track.length.total_seconds()
return TrackInfo(
title=title,
track_id=track.beatport_id,
artist=artist,
artist_id=artist_id,
length=length,
index=track.track_number,
medium_index=track.track_number,
data_source=self.data_source,
data_url=track.url,
bpm=track.bpm,
initial_key=track.initial_key,
genre=track.genre,
)
def _get_artist(self, artists):
"""Returns an artist string (all artists) and an artist_id (the main
artist) for a list of Beatport release or track artists.
"""
return self.get_artist(artists=artists, id_key=0, name_key=1)
def _get_tracks(self, query):
"""Returns a list of TrackInfo objects for a Beatport query."""
bp_tracks = self.client.search(query, release_type="track")
tracks = [self._get_track_info(x) for x in bp_tracks]
return tracks
beetbox-beets-c1877b7/beetsplug/bench.py 0000664 0000000 0000000 00000010004 15073551743 0020236 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Some simple performance benchmarks for beets."""
import cProfile
import timeit
from beets import importer, library, plugins, ui
from beets.autotag import match
from beets.plugins import BeetsPlugin
from beets.util.functemplate import Template
from beetsplug._utils import vfs
def aunique_benchmark(lib, prof):
def _build_tree():
vfs.libtree(lib)
# Measure path generation performance with %aunique{} included.
lib.path_formats = [
(
library.PF_KEY_DEFAULT,
Template("$albumartist/$album%aunique{}/$track $title"),
),
]
if prof:
cProfile.runctx(
"_build_tree()",
{},
{"_build_tree": _build_tree},
"paths.withaunique.prof",
)
else:
interval = timeit.timeit(_build_tree, number=1)
print("With %aunique:", interval)
    # And with %aunique replaced with a "cheap" no-op function.
lib.path_formats = [
(
library.PF_KEY_DEFAULT,
Template("$albumartist/$album%lower{}/$track $title"),
),
]
if prof:
cProfile.runctx(
"_build_tree()",
{},
{"_build_tree": _build_tree},
"paths.withoutaunique.prof",
)
else:
interval = timeit.timeit(_build_tree, number=1)
print("Without %aunique:", interval)
def match_benchmark(lib, prof, query=None, album_id=None):
# If no album ID is provided, we'll match against a suitably huge
# album.
if not album_id:
album_id = "9c5c043e-bc69-4edb-81a4-1aaf9c81e6dc"
# Get an album from the library to use as the source for the match.
items = lib.albums(query).get().items()
# Ensure fingerprinting is invoked (if enabled).
plugins.send(
"import_task_start",
task=importer.ImportTask(None, None, items),
session=importer.ImportSession(lib, None, None, None),
)
# Run the match.
def _run_match():
match.tag_album(items, search_ids=[album_id])
if prof:
cProfile.runctx(
"_run_match()", {}, {"_run_match": _run_match}, "match.prof"
)
else:
interval = timeit.timeit(_run_match, number=1)
print("match duration:", interval)
class BenchmarkPlugin(BeetsPlugin):
"""A plugin for performing some simple performance benchmarks."""
def commands(self):
aunique_bench_cmd = ui.Subcommand(
"bench_aunique", help="benchmark for %aunique{}"
)
aunique_bench_cmd.parser.add_option(
"-p",
"--profile",
action="store_true",
default=False,
help="performance profiling",
)
aunique_bench_cmd.func = lambda lib, opts, args: aunique_benchmark(
lib, opts.profile
)
match_bench_cmd = ui.Subcommand(
"bench_match", help="benchmark for track matching"
)
match_bench_cmd.parser.add_option(
"-p",
"--profile",
action="store_true",
default=False,
help="performance profiling",
)
match_bench_cmd.parser.add_option(
"-i", "--id", default=None, help="album ID to match against"
)
match_bench_cmd.func = lambda lib, opts, args: match_benchmark(
lib, opts.profile, args, opts.id
)
return [aunique_bench_cmd, match_bench_cmd]
beetbox-beets-c1877b7/beetsplug/bpd/ 0000775 0000000 0000000 00000000000 15073551743 0017357 5 ustar 00root root 0000000 0000000 beetbox-beets-c1877b7/beetsplug/bpd/__init__.py 0000664 0000000 0000000 00000156163 15073551743 0021504 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A clone of the Music Player Daemon (MPD) that plays music from a
Beets library. Attempts to implement a compatible protocol to allow
use of the wide range of MPD clients.
"""
import inspect
import math
import random
import re
import socket
import sys
import time
import traceback
from string import Template
from typing import TYPE_CHECKING
import beets
import beets.ui
from beets import dbcore, logging
from beets.library import Item
from beets.plugins import BeetsPlugin
from beets.util import as_string, bluelet
from beetsplug._utils import vfs
if TYPE_CHECKING:
from beets.dbcore.query import Query
log = logging.getLogger(__name__)
try:
from . import gstplayer
except ImportError as e:
raise ImportError(
"Gstreamer Python bindings not found."
' Install "gstreamer1.0" and "python-gi" or similar package to use BPD.'
) from e
PROTOCOL_VERSION = "0.16.0"
BUFSIZE = 1024
HELLO = f"OK MPD {PROTOCOL_VERSION}"
CLIST_BEGIN = "command_list_begin"
CLIST_VERBOSE_BEGIN = "command_list_ok_begin"
CLIST_END = "command_list_end"
RESP_OK = "OK"
RESP_CLIST_VERBOSE = "list_OK"
RESP_ERR = "ACK"
NEWLINE = "\n"
ERROR_NOT_LIST = 1
ERROR_ARG = 2
ERROR_PASSWORD = 3
ERROR_PERMISSION = 4
ERROR_UNKNOWN = 5
ERROR_NO_EXIST = 50
ERROR_PLAYLIST_MAX = 51
ERROR_SYSTEM = 52
ERROR_PLAYLIST_LOAD = 53
ERROR_UPDATE_ALREADY = 54
ERROR_PLAYER_SYNC = 55
ERROR_EXIST = 56
VOLUME_MIN = 0
VOLUME_MAX = 100
SAFE_COMMANDS = (
# Commands that are available when unauthenticated.
"close",
"commands",
"notcommands",
"password",
"ping",
)
# List of subsystems/events used by the `idle` command.
SUBSYSTEMS = [
"update",
"player",
"mixer",
"options",
"playlist",
"database",
# Related to unsupported commands:
"stored_playlist",
"output",
"subscription",
"sticker",
"message",
"partition",
]
# Error-handling, exceptions, parameter parsing.
class BPDError(Exception):
"""An error that should be exposed to the client to the BPD
server.
"""
def __init__(self, code, message, cmd_name="", index=0):
self.code = code
self.message = message
self.cmd_name = cmd_name
self.index = index
template = Template("$resp [$code@$index] {$cmd_name} $message")
def response(self):
"""Returns a string to be used as the response code for the
erring command.
"""
return self.template.substitute(
{
"resp": RESP_ERR,
"code": self.code,
"index": self.index,
"cmd_name": self.cmd_name,
"message": self.message,
}
)
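    # For instance (illustrative values),
    #     BPDError(ERROR_ARG, "invalid type", "play").response()
    # produces:  ACK [2@0] {play} invalid type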
def make_bpd_error(s_code, s_message):
"""Create a BPDError subclass for a static code and message."""
class NewBPDError(BPDError):
code = s_code
message = s_message
cmd_name = ""
index = 0
def __init__(self):
pass
return NewBPDError
ArgumentTypeError = make_bpd_error(ERROR_ARG, "invalid type for argument")
ArgumentIndexError = make_bpd_error(ERROR_ARG, "argument out of range")
ArgumentNotFoundError = make_bpd_error(ERROR_NO_EXIST, "argument not found")
def cast_arg(t, val):
"""Attempts to call t on val, raising a ArgumentTypeError
on ValueError.
If 't' is the special string 'intbool', attempts to cast first
to an int and then to a bool (i.e., 1=True, 0=False).
"""
if t == "intbool":
return cast_arg(bool, cast_arg(int, val))
else:
try:
return t(val)
except ValueError:
raise ArgumentTypeError()
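# E.g. cast_arg(int, "5") -> 5 and cast_arg("intbool", "1") -> True, while
# cast_arg(int, "five") raises ArgumentTypeError.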
class BPDCloseError(Exception):
"""Raised by a command invocation to indicate that the connection
should be closed.
"""
class BPDIdleError(Exception):
"""Raised by a command to indicate the client wants to enter the idle state
and should be notified when a relevant event happens.
"""
def __init__(self, subsystems):
super().__init__()
self.subsystems = set(subsystems)
# Generic server infrastructure, implementing the basic protocol.
class BaseServer:
"""A MPD-compatible music player server.
The functions with the `cmd_` prefix are invoked in response to
client commands. For instance, if the client says `status`,
`cmd_status` will be invoked. The arguments to the client's commands
are used as function arguments following the connection issuing the
command. The functions may send data on the connection. They may
also raise BPDError exceptions to report errors.
This is a generic superclass and doesn't support many commands.
"""
def __init__(self, host, port, password, ctrl_port, log, ctrl_host=None):
"""Create a new server bound to address `host` and listening
on port `port`. If `password` is given, it is required to do
anything significant on the server.
A separate control socket is established, listening on `ctrl_host` at
port `ctrl_port`; it forwards notifications from the player and accepts
debug commands (e.g. sent using netcat).
"""
self.host, self.port, self.password = host, port, password
self.ctrl_host, self.ctrl_port = ctrl_host or host, ctrl_port
self.ctrl_sock = None
self._log = log
# Default server values.
self.random = False
self.repeat = False
self.consume = False
self.single = False
self.volume = VOLUME_MAX
self.crossfade = 0
self.mixrampdb = 0.0
self.mixrampdelay = float("nan")
self.replay_gain_mode = "off"
self.playlist = []
self.playlist_version = 0
self.current_index = -1
self.paused = False
self.error = None
# Current connections
self.connections = set()
# Object for random numbers generation
self.random_obj = random.Random()
def connect(self, conn):
"""A new client has connected."""
self.connections.add(conn)
def disconnect(self, conn):
"""Client has disconnected; clean up residual state."""
self.connections.remove(conn)
def run(self):
"""Block and start listening for connections from clients. An
interrupt (^C) closes the server.
"""
self.startup_time = time.time()
def start():
yield bluelet.spawn(
bluelet.server(
self.ctrl_host,
self.ctrl_port,
ControlConnection.handler(self),
)
)
yield bluelet.server(
self.host, self.port, MPDConnection.handler(self)
)
bluelet.run(start())
def dispatch_events(self):
"""If any clients have idle events ready, send them."""
# We need a copy of `self.connections` here since clients might
# disconnect once we try and send to them, changing `self.connections`.
for conn in list(self.connections):
yield bluelet.spawn(conn.send_notifications())
def _ctrl_send(self, message):
"""Send some data over the control socket.
If it's our first time, open the socket. The message should be a
string without a terminal newline.
"""
if not self.ctrl_sock:
self.ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.ctrl_sock.connect((self.ctrl_host, self.ctrl_port))
self.ctrl_sock.sendall((f"{message}\n").encode("utf-8"))
def _send_event(self, event):
"""Notify subscribed connections of an event."""
for conn in self.connections:
conn.notify(event)
def _item_info(self, item):
"""An abstract method that should response lines containing a
single song's metadata.
"""
raise NotImplementedError
def _item_id(self, item):
"""An abstract method returning the integer id for an item."""
raise NotImplementedError
def _id_to_index(self, track_id):
"""Searches the playlist for a song with the given id and
returns its index in the playlist.
"""
track_id = cast_arg(int, track_id)
for index, track in enumerate(self.playlist):
if self._item_id(track) == track_id:
return index
# Loop finished with no track found.
raise ArgumentNotFoundError()
def _random_idx(self):
"""Returns a random index different from the current one.
If there are no songs in the playlist it returns -1.
If there is only one song in the playlist it returns 0.
"""
if len(self.playlist) < 2:
return len(self.playlist) - 1
new_index = self.random_obj.randint(0, len(self.playlist) - 1)
while new_index == self.current_index:
new_index = self.random_obj.randint(0, len(self.playlist) - 1)
return new_index
def _succ_idx(self):
"""Returns the index for the next song to play.
It also considers random, single and repeat flags.
No boundaries are checked.
"""
if self.repeat and self.single:
return self.current_index
if self.random:
return self._random_idx()
return self.current_index + 1
def _prev_idx(self):
"""Returns the index for the previous song to play.
It also considers random and repeat flags.
No boundaries are checked.
"""
if self.repeat and self.single:
return self.current_index
if self.random:
return self._random_idx()
return self.current_index - 1
def cmd_ping(self, conn):
"""Succeeds."""
pass
def cmd_idle(self, conn, *subsystems):
subsystems = subsystems or SUBSYSTEMS
for system in subsystems:
if system not in SUBSYSTEMS:
raise BPDError(ERROR_ARG, f"Unrecognised idle event: {system}")
raise BPDIdleError(subsystems) # put the connection into idle mode
def cmd_kill(self, conn):
"""Exits the server process."""
sys.exit(0)
def cmd_close(self, conn):
"""Closes the connection."""
raise BPDCloseError()
def cmd_password(self, conn, password):
"""Attempts password authentication."""
if password == self.password:
conn.authenticated = True
else:
conn.authenticated = False
raise BPDError(ERROR_PASSWORD, "incorrect password")
def cmd_commands(self, conn):
"""Lists the commands available to the user."""
if self.password and not conn.authenticated:
# Not authenticated. Show limited list of commands.
for cmd in SAFE_COMMANDS:
yield f"command: {cmd}"
else:
# Authenticated. Show all commands.
for func in dir(self):
if func.startswith("cmd_"):
yield f"command: {func[4:]}"
def cmd_notcommands(self, conn):
"""Lists all unavailable commands."""
if self.password and not conn.authenticated:
# Not authenticated. Show privileged commands.
for func in dir(self):
if func.startswith("cmd_"):
cmd = func[4:]
if cmd not in SAFE_COMMANDS:
yield f"command: {cmd}"
else:
# Authenticated. No commands are unavailable.
pass
def cmd_status(self, conn):
"""Returns some status information for use with an
implementation of cmd_status.
Gives a list of response-lines for: volume, repeat, random,
playlist, playlistlength, and xfade.
"""
yield (
f"repeat: {int(self.repeat)}",
f"random: {int(self.random)}",
f"consume: {int(self.consume)}",
f"single: {int(self.single)}",
f"playlist: {self.playlist_version}",
f"playlistlength: {len(self.playlist)}",
f"mixrampdb: {self.mixrampdb}",
)
if self.volume > 0:
yield f"volume: {self.volume}"
if not math.isnan(self.mixrampdelay):
yield f"mixrampdelay: {self.mixrampdelay}"
if self.crossfade > 0:
yield f"xfade: {self.crossfade}"
if self.current_index == -1:
state = "stop"
elif self.paused:
state = "pause"
else:
state = "play"
yield f"state: {state}"
if self.current_index != -1: # i.e., paused or playing
current_id = self._item_id(self.playlist[self.current_index])
yield f"song: {self.current_index}"
yield f"songid: {current_id}"
if len(self.playlist) > self.current_index + 1:
# If there's a next song, report its index too.
next_id = self._item_id(self.playlist[self.current_index + 1])
yield f"nextsong: {self.current_index + 1}"
yield f"nextsongid: {next_id}"
if self.error:
yield f"error: {self.error}"
def cmd_clearerror(self, conn):
"""Removes the persistent error state of the server. This
error is set when a problem arises not in response to a
command (for instance, when playing a file).
"""
self.error = None
def cmd_random(self, conn, state):
"""Set or unset random (shuffle) mode."""
self.random = cast_arg("intbool", state)
self._send_event("options")
def cmd_repeat(self, conn, state):
"""Set or unset repeat mode."""
self.repeat = cast_arg("intbool", state)
self._send_event("options")
def cmd_consume(self, conn, state):
"""Set or unset consume mode."""
self.consume = cast_arg("intbool", state)
self._send_event("options")
def cmd_single(self, conn, state):
"""Set or unset single mode."""
# TODO support oneshot in addition to 0 and 1 [MPD 0.20]
self.single = cast_arg("intbool", state)
self._send_event("options")
def cmd_setvol(self, conn, vol):
"""Set the player's volume level (0-100)."""
vol = cast_arg(int, vol)
if vol < VOLUME_MIN or vol > VOLUME_MAX:
raise BPDError(ERROR_ARG, "volume out of range")
self.volume = vol
self._send_event("mixer")
def cmd_volume(self, conn, vol_delta):
"""Deprecated command to change the volume by a relative amount."""
vol_delta = cast_arg(int, vol_delta)
return self.cmd_setvol(conn, self.volume + vol_delta)
def cmd_crossfade(self, conn, crossfade):
"""Set the number of seconds of crossfading."""
crossfade = cast_arg(int, crossfade)
if crossfade < 0:
raise BPDError(ERROR_ARG, "crossfade time must be nonnegative")
self._log.warning("crossfade is not implemented in bpd")
self.crossfade = crossfade
self._send_event("options")
def cmd_mixrampdb(self, conn, db):
"""Set the mixramp normalised max volume in dB."""
db = cast_arg(float, db)
if db > 0:
raise BPDError(ERROR_ARG, "mixrampdb time must be negative")
self._log.warning("mixramp is not implemented in bpd")
self.mixrampdb = db
self._send_event("options")
def cmd_mixrampdelay(self, conn, delay):
"""Set the mixramp delay in seconds."""
delay = cast_arg(float, delay)
if delay < 0:
raise BPDError(ERROR_ARG, "mixrampdelay time must be nonnegative")
self._log.warning("mixramp is not implemented in bpd")
self.mixrampdelay = delay
self._send_event("options")
def cmd_replay_gain_mode(self, conn, mode):
"""Set the replay gain mode."""
if mode not in ["off", "track", "album", "auto"]:
raise BPDError(ERROR_ARG, "Unrecognised replay gain mode")
self._log.warning("replay gain is not implemented in bpd")
self.replay_gain_mode = mode
self._send_event("options")
def cmd_replay_gain_status(self, conn):
"""Get the replaygain mode."""
yield f"replay_gain_mode: {self.replay_gain_mode}"
def cmd_clear(self, conn):
"""Clear the playlist."""
self.playlist = []
self.playlist_version += 1
self.cmd_stop(conn)
self._send_event("playlist")
def cmd_delete(self, conn, index):
"""Remove the song at index from the playlist."""
index = cast_arg(int, index)
try:
del self.playlist[index]
except IndexError:
raise ArgumentIndexError()
self.playlist_version += 1
if self.current_index == index: # Deleted playing song.
self.cmd_stop(conn)
elif index < self.current_index: # Deleted before playing.
# Shift playing index down.
self.current_index -= 1
self._send_event("playlist")
def cmd_deleteid(self, conn, track_id):
self.cmd_delete(conn, self._id_to_index(track_id))
def cmd_move(self, conn, idx_from, idx_to):
"""Move a track in the playlist."""
idx_from = cast_arg(int, idx_from)
idx_to = cast_arg(int, idx_to)
try:
track = self.playlist.pop(idx_from)
self.playlist.insert(idx_to, track)
except IndexError:
raise ArgumentIndexError()
# Update currently-playing song.
if idx_from == self.current_index:
self.current_index = idx_to
elif idx_from < self.current_index <= idx_to:
self.current_index -= 1
elif idx_from > self.current_index >= idx_to:
self.current_index += 1
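# For example, moving index 0 to 2 while current_index == 1 shifts the
# current song down to 0; moving index 2 to 0 shifts it up to 2.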
self.playlist_version += 1
self._send_event("playlist")
def cmd_moveid(self, conn, idx_from, idx_to):
idx_from = self._id_to_index(idx_from)
return self.cmd_move(conn, idx_from, idx_to)
def cmd_swap(self, conn, i, j):
"""Swaps two tracks in the playlist."""
i = cast_arg(int, i)
j = cast_arg(int, j)
try:
track_i = self.playlist[i]
track_j = self.playlist[j]
except IndexError:
raise ArgumentIndexError()
self.playlist[j] = track_i
self.playlist[i] = track_j
# Update currently-playing song.
if self.current_index == i:
self.current_index = j
elif self.current_index == j:
self.current_index = i
self.playlist_version += 1
self._send_event("playlist")
def cmd_swapid(self, conn, i_id, j_id):
i = self._id_to_index(i_id)
j = self._id_to_index(j_id)
return self.cmd_swap(conn, i, j)
def cmd_urlhandlers(self, conn):
"""Indicates supported URL schemes. None by default."""
pass
def cmd_playlistinfo(self, conn, index=None):
"""Gives metadata information about the entire playlist or a
single track, given by its index.
"""
if index is None:
for track in self.playlist:
yield self._item_info(track)
else:
indices = self._parse_range(index, accept_single_number=True)
try:
tracks = [self.playlist[i] for i in indices]
except IndexError:
raise ArgumentIndexError()
for track in tracks:
yield self._item_info(track)
def cmd_playlistid(self, conn, track_id=None):
if track_id is not None:
track_id = cast_arg(int, track_id)
track_id = self._id_to_index(track_id)
return self.cmd_playlistinfo(conn, track_id)
def cmd_plchanges(self, conn, version):
"""Sends playlist changes since the given version.
This is a "fake" implementation that ignores the version and
just returns the entire playlist (rather like version=0). This
seems to satisfy many clients.
"""
return self.cmd_playlistinfo(conn)
def cmd_plchangesposid(self, conn, version):
"""Like plchanges, but only sends position and id.
Also a dummy implementation.
"""
for idx, track in enumerate(self.playlist):
yield f"cpos: {idx}"
yield f"Id: {track.id}"
def cmd_currentsong(self, conn):
"""Sends information about the currently-playing song."""
if self.current_index != -1: # -1 means stopped.
track = self.playlist[self.current_index]
yield self._item_info(track)
def cmd_next(self, conn):
"""Advance to the next song in the playlist."""
old_index = self.current_index
self.current_index = self._succ_idx()
if self.consume:
# TODO how does consume interact with single+repeat?
self.playlist.pop(old_index)
if self.current_index > old_index:
self.current_index -= 1
self.playlist_version += 1
self._send_event("playlist")
if self.current_index >= len(self.playlist):
# Fallen off the end. Move to stopped state or loop.
if self.repeat:
self.current_index = -1
return self.cmd_play(conn)
return self.cmd_stop(conn)
elif self.single and not self.repeat:
return self.cmd_stop(conn)
else:
return self.cmd_play(conn)
def cmd_previous(self, conn):
"""Step back to the last song."""
old_index = self.current_index
self.current_index = self._prev_idx()
if self.consume:
self.playlist.pop(old_index)
if self.current_index < 0:
if self.repeat:
self.current_index = len(self.playlist) - 1
else:
self.current_index = 0
return self.cmd_play(conn)
def cmd_pause(self, conn, state=None):
"""Set the pause state playback."""
if state is None:
self.paused = not self.paused # Toggle.
else:
self.paused = cast_arg("intbool", state)
self._send_event("player")
def cmd_play(self, conn, index=-1):
"""Begin playback, possibly at a specified playlist index."""
index = cast_arg(int, index)
if index < -1 or index >= len(self.playlist):
raise ArgumentIndexError()
if index == -1: # No index specified: start where we are.
if not self.playlist: # Empty playlist: stop immediately.
return self.cmd_stop(conn)
if self.current_index == -1: # No current song.
self.current_index = 0 # Start at the beginning.
# If we have a current song, just stay there.
else: # Start with the specified index.
self.current_index = index
self.paused = False
self._send_event("player")
def cmd_playid(self, conn, track_id=0):
track_id = cast_arg(int, track_id)
if track_id == -1:
index = -1
else:
index = self._id_to_index(track_id)
return self.cmd_play(conn, index)
def cmd_stop(self, conn):
"""Stop playback."""
self.current_index = -1
self.paused = False
self._send_event("player")
def cmd_seek(self, conn, index, pos):
"""Seek to a specified point in a specified song."""
index = cast_arg(int, index)
if index < 0 or index >= len(self.playlist):
raise ArgumentIndexError()
self.current_index = index
self._send_event("player")
def cmd_seekid(self, conn, track_id, pos):
index = self._id_to_index(track_id)
return self.cmd_seek(conn, index, pos)
# Additions to the MPD protocol.
def cmd_crash(self, conn):
"""Deliberately trigger a TypeError for testing purposes.
We want to test that the server properly responds with ERROR_SYSTEM
without crashing, and that this is not treated as ERROR_ARG (since it
is caused by a programming error, not a protocol error).
"""
raise TypeError
class Connection:
"""A connection between a client and the server."""
def __init__(self, server, sock):
"""Create a new connection for the accepted socket `client`."""
self.server = server
self.sock = sock
self.address = ":".join(map(str, sock.sock.getpeername()))
def debug(self, message, kind=" "):
"""Log a debug message about this connection."""
self.server._log.debug("{}[{.address}]: {}", kind, self, message)
def run(self):
pass
def send(self, lines):
"""Send lines, which which is either a single string or an
iterable consisting of strings, to the client. A newline is
added after every string. Returns a Bluelet event that sends
the data.
"""
if isinstance(lines, str):
lines = [lines]
out = NEWLINE.join(lines) + NEWLINE
for line in out.split(NEWLINE)[:-1]:
self.debug(line, kind=">")
if isinstance(out, str):
out = out.encode("utf-8")
return self.sock.sendall(out)
@classmethod
def handler(cls, server):
def _handle(sock):
"""Creates a new `Connection` and runs it."""
return cls(server, sock).run()
return _handle
class MPDConnection(Connection):
"""A connection that receives commands from an MPD-compatible client."""
def __init__(self, server, sock):
"""Create a new connection for the accepted socket `client`."""
super().__init__(server, sock)
self.authenticated = False
self.notifications = set()
self.idle_subscriptions = set()
def do_command(self, command):
"""A coroutine that runs the given command and sends an
appropriate response."""
try:
yield bluelet.call(command.run(self))
except BPDError as e:
# Send the error.
yield self.send(e.response())
else:
# Send success code.
yield self.send(RESP_OK)
def disconnect(self):
"""The connection has closed for any reason."""
self.server.disconnect(self)
self.debug("disconnected", kind="*")
def notify(self, event):
"""Queue up an event for sending to this client."""
self.notifications.add(event)
def send_notifications(self, force_close_idle=False):
"""Send the client any queued events now."""
pending = self.notifications.intersection(self.idle_subscriptions)
try:
for event in pending:
yield self.send(f"changed: {event}")
if pending or force_close_idle:
self.idle_subscriptions = set()
self.notifications = self.notifications.difference(pending)
yield self.send(RESP_OK)
except bluelet.SocketClosedError:
self.disconnect() # Client disappeared.
def run(self):
"""Send a greeting to the client and begin processing commands
as they arrive.
"""
self.debug("connected", kind="*")
self.server.connect(self)
yield self.send(HELLO)
clist = None # Initially, no command list is being constructed.
while True:
line = yield self.sock.readline()
if not line:
self.disconnect() # Client disappeared.
break
line = line.strip()
if not line:
err = BPDError(ERROR_UNKNOWN, "No command given")
yield self.send(err.response())
self.disconnect() # Client sent a blank line.
break
line = line.decode("utf8") # MPD protocol uses UTF-8.
for line in line.split(NEWLINE):
self.debug(line, kind="<")
if self.idle_subscriptions:
# The connection is in idle mode.
if line == "noidle":
yield bluelet.call(self.send_notifications(True))
else:
err = BPDError(
ERROR_UNKNOWN, f"Got command while idle: {line}"
)
yield self.send(err.response())
break
continue
if line == "noidle":
# When not in idle, this command sends no response.
continue
if clist is not None:
# Command list already opened.
if line == CLIST_END:
yield bluelet.call(self.do_command(clist))
clist = None # Clear the command list.
yield bluelet.call(self.server.dispatch_events())
else:
clist.append(Command(line))
elif line == CLIST_BEGIN or line == CLIST_VERBOSE_BEGIN:
# Begin a command list.
clist = CommandList([], line == CLIST_VERBOSE_BEGIN)
else:
# Ordinary command.
try:
yield bluelet.call(self.do_command(Command(line)))
except BPDCloseError:
# Command indicates that the conn should close.
self.sock.close()
self.disconnect() # Client explicitly closed.
return
except BPDIdleError as e:
self.idle_subscriptions = e.subsystems
self.debug(f"awaiting: {' '.join(e.subsystems)}", kind="z")
yield bluelet.call(self.server.dispatch_events())
class ControlConnection(Connection):
"""A connection used to control BPD for debugging and internal events."""
def __init__(self, server, sock):
"""Create a new connection for the accepted socket `client`."""
super().__init__(server, sock)
def debug(self, message, kind=" "):
self.server._log.debug("CTRL {}[{.address}]: {}", kind, self, message)
def run(self):
"""Listen for control commands and delegate to `ctrl_*` methods."""
self.debug("connected", kind="*")
while True:
line = yield self.sock.readline()
if not line:
break # Client disappeared.
line = line.strip()
if not line:
break # Client sent a blank line.
line = line.decode("utf8") # Protocol uses UTF-8.
for line in line.split(NEWLINE):
self.debug(line, kind="<")
command = Command(line)
try:
func = command.delegate("ctrl_", self)
yield bluelet.call(func(*command.args))
except (AttributeError, TypeError) as e:
yield self.send(f"ERROR: {e.args[0]}")
except Exception:
yield self.send(
["ERROR: server error", traceback.format_exc().rstrip()]
)
def ctrl_play_finished(self):
"""Callback from the player signalling a song finished playing."""
yield bluelet.call(self.server.dispatch_events())
def ctrl_profile(self):
"""Memory profiling for debugging."""
from guppy import hpy
heap = hpy().heap()
yield self.send(heap)
def ctrl_nickname(self, oldlabel, newlabel):
"""Rename a client in the log messages."""
for c in self.server.connections:
if c.address == oldlabel:
c.address = newlabel
break
else:
yield self.send(f"ERROR: no such client: {oldlabel}")
class Command:
"""A command issued by the client for processing by the server."""
command_re = re.compile(r"^([^ \t]+)[ \t]*")
arg_re = re.compile(r'"((?:\\"|[^"])+)"|([^ \t"]+)')
def __init__(self, s):
"""Creates a new `Command` from the given string, `s`, parsing
the string for command name and arguments.
"""
command_match = self.command_re.match(s)
self.name = command_match.group(1)
self.args = []
arg_matches = self.arg_re.findall(s[command_match.end() :])
for match in arg_matches:
if match[0]:
# Quoted argument.
arg = match[0]
arg = arg.replace('\\"', '"').replace("\\\\", "\\")
else:
# Unquoted argument.
arg = match[1]
self.args.append(arg)
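# e.g. Command('moveid "12" 3') parses to name "moveid" and args
# ["12", "3"]; escaped quotes (\") and backslashes (\\) inside quoted
# arguments are unescaped above.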
def delegate(self, prefix, target, extra_args=0):
"""Get the target method that corresponds to this command.
The `prefix` is prepended to the command name and then the resulting
name is used to search `target` for a method with a compatible number
of arguments.
"""
# Attempt to get correct command function.
func_name = f"{prefix}{self.name}"
if not hasattr(target, func_name):
raise AttributeError(f'unknown command "{self.name}"')
func = getattr(target, func_name)
argspec = inspect.getfullargspec(func)
# Check that `func` is able to handle the number of arguments sent
# by the client (so we can raise ERROR_ARG instead of ERROR_SYSTEM).
# Maximum accepted arguments: argspec includes "self".
max_args = len(argspec.args) - 1 - extra_args
# Minimum accepted arguments: some arguments might be optional.
min_args = max_args
if argspec.defaults:
min_args -= len(argspec.defaults)
wrong_num = (len(self.args) > max_args) or (len(self.args) < min_args)
# If the command accepts a variable number of arguments skip the check.
if wrong_num and not argspec.varargs:
raise TypeError(
f'wrong number of arguments for "{self.name}"',
self.name,
)
return func
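# For instance, Command("setvol 50").delegate("cmd_", server, extra_args=1)
# resolves to server.cmd_setvol and verifies that exactly one client
# argument is supplied besides the connection.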
def run(self, conn):
"""A coroutine that executes the command on the given
connection.
"""
try:
# `conn` is an extra argument to all cmd handlers.
func = self.delegate("cmd_", conn.server, extra_args=1)
except AttributeError as e:
raise BPDError(ERROR_UNKNOWN, e.args[0])
except TypeError as e:
raise BPDError(ERROR_ARG, e.args[0], self.name)
# Ensure we have permission for this command.
if (
conn.server.password
and not conn.authenticated
and self.name not in SAFE_COMMANDS
):
raise BPDError(ERROR_PERMISSION, "insufficient privileges")
try:
args = [conn] + self.args
results = func(*args)
if results:
for data in results:
yield conn.send(data)
except BPDError as e:
# An exposed error. Set the command name and then let
# the Connection handle it.
e.cmd_name = self.name
raise e
except BPDCloseError:
# An indication that the connection should close. Send
# it on the Connection.
raise
except BPDIdleError:
raise
except Exception:
# An "unintentional" error. Hide it from the client.
conn.server._log.error("{}", traceback.format_exc())
raise BPDError(ERROR_SYSTEM, "server error", self.name)
class CommandList(list[Command]):
"""A list of commands issued by the client for processing by the
server. May be verbose, in which case the response is delimited, or
not. Should be a list of `Command` objects.
"""
def __init__(self, sequence=None, verbose=False):
"""Create a new `CommandList` from the given sequence of
`Command`s. If `verbose`, this is a verbose command list.
"""
if sequence:
for item in sequence:
self.append(item)
self.verbose = verbose
def run(self, conn):
"""Coroutine executing all the commands in this list."""
for i, command in enumerate(self):
try:
yield bluelet.call(command.run(conn))
except BPDError as e:
# If the command failed, stop executing.
e.index = i # Give the error the correct index.
raise e
# Otherwise, possibly send the output delimiter if we're in a
# verbose ("OK") command list.
if self.verbose:
yield conn.send(RESP_CLIST_VERBOSE)
# A subclass of the basic, protocol-handling server that actually plays
# music.
class Server(BaseServer):
"""An MPD-compatible server using GStreamer to play audio and beets
to store its library.
"""
def __init__(self, library, host, port, password, ctrl_port, log):
log.info("Starting server...")
super().__init__(host, port, password, ctrl_port, log)
self.lib = library
self.player = gstplayer.GstPlayer(self.play_finished)
self.cmd_update(None)
log.info("Server ready and listening on {}:{}", host, port)
log.debug("Listening for control signals on {}:{}", host, ctrl_port)
def run(self):
self.player.run()
super().run()
def play_finished(self):
"""A callback invoked every time our player finishes a track."""
self.cmd_next(None)
self._ctrl_send("play_finished")
# Metadata helper functions.
def _item_info(self, item):
info_lines = [
f"file: {as_string(item.destination(relative_to_libdir=True))}",
f"Time: {int(item.length)}",
"duration: {item.length:.3f}",
f"Id: {item.id}",
]
try:
pos = self._id_to_index(item.id)
info_lines.append(f"Pos: {pos}")
except ArgumentNotFoundError:
# Don't include position if not in playlist.
pass
for tagtype, field in self.tagtype_map.items():
info_lines.append(f"{tagtype}: {getattr(item, field)}")
return info_lines
def _parse_range(self, items, accept_single_number=False):
"""Convert a range of positions to a list of item info.
MPD specifies ranges as START:STOP (endpoint excluded) for some
commands. Sometimes a single number can be provided instead.
"""
try:
start, stop = str(items).split(":", 1)
except ValueError:
if accept_single_number:
return [cast_arg(int, items)]
raise BPDError(ERROR_ARG, "bad range syntax")
start = cast_arg(int, start)
stop = cast_arg(int, stop)
return range(start, stop)
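# e.g. "2:5" yields range(2, 5) (positions 2, 3 and 4), while a bare "3"
# yields [3] when accept_single_number is set.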
def _item_id(self, item):
return item.id
# Database updating.
def cmd_update(self, conn, path="/"):
"""Updates the catalog to reflect the current database state."""
# Path is ignored. Also, the real MPD does this asynchronously;
# this is done inline.
self._log.debug("Building directory tree...")
self.tree = vfs.libtree(self.lib)
self._log.debug("Finished building directory tree.")
self.updated_time = time.time()
self._send_event("update")
self._send_event("database")
# Path (directory tree) browsing.
def _resolve_path(self, path):
"""Returns a VFS node or an item ID located at the path given.
If the path does not exist, raises an ArgumentNotFoundError.
"""
components = path.split("/")
node = self.tree
for component in components:
if not component:
continue
if isinstance(node, int):
# We're trying to descend into a file node.
raise ArgumentNotFoundError()
if component in node.files:
node = node.files[component]
elif component in node.dirs:
node = node.dirs[component]
else:
raise ArgumentNotFoundError()
return node
def _path_join(self, p1, p2):
"""Smashes together two BPD paths."""
out = f"{p1}/{p2}"
return out.replace("//", "/").replace("//", "/")
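# e.g. _path_join("a/", "/b") gives "a/b"; the repeated replace collapses
# up to three consecutive slashes.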
def cmd_lsinfo(self, conn, path="/"):
"""Sends info on all the items in the path."""
node = self._resolve_path(path)
if isinstance(node, int):
# Trying to list a track.
raise BPDError(ERROR_ARG, "this is not a directory")
else:
for name, itemid in iter(sorted(node.files.items())):
item = self.lib.get_item(itemid)
yield self._item_info(item)
for name, _ in iter(sorted(node.dirs.items())):
dirpath = self._path_join(path, name)
if dirpath.startswith("/"):
# Strip leading slash (libmpc rejects this).
dirpath = dirpath[1:]
yield f"directory: {dirpath}"
def _listall(self, basepath, node, info=False):
"""Helper function for recursive listing. If info, show
tracks' complete info; otherwise, just show items' paths.
"""
if isinstance(node, int):
# List a single file.
if info:
item = self.lib.get_item(node)
yield self._item_info(item)
else:
yield f"file: {basepath}"
else:
# List a directory. Recurse into both directories and files.
for name, itemid in sorted(node.files.items()):
newpath = self._path_join(basepath, name)
# "yield from"
yield from self._listall(newpath, itemid, info)
for name, subdir in sorted(node.dirs.items()):
newpath = self._path_join(basepath, name)
yield f"directory: {newpath}"
yield from self._listall(newpath, subdir, info)
def cmd_listall(self, conn, path="/"):
"""Send the paths all items in the directory, recursively."""
return self._listall(path, self._resolve_path(path), False)
def cmd_listallinfo(self, conn, path="/"):
"""Send info on all the items in the directory, recursively."""
return self._listall(path, self._resolve_path(path), True)
# Playlist manipulation.
def _all_items(self, node):
"""Generator yielding all items under a VFS node."""
if isinstance(node, int):
# Could be more efficient if we built up all the IDs and
# then issued a single SELECT.
yield self.lib.get_item(node)
else:
# Recurse into a directory.
for name, itemid in sorted(node.files.items()):
# "yield from"
yield from self._all_items(itemid)
for name, subdir in sorted(node.dirs.items()):
yield from self._all_items(subdir)
def _add(self, path, send_id=False):
"""Adds a track or directory to the playlist, specified by the
path. If `send_id`, write each item's id to the client.
"""
for item in self._all_items(self._resolve_path(path)):
self.playlist.append(item)
if send_id:
yield f"Id: {item.id}"
self.playlist_version += 1
self._send_event("playlist")
def cmd_add(self, conn, path):
"""Adds a track or directory to the playlist, specified by a
path.
"""
return self._add(path, False)
def cmd_addid(self, conn, path):
"""Same as `cmd_add` but sends an id back to the client."""
return self._add(path, True)
# Server info.
def cmd_status(self, conn):
yield from super().cmd_status(conn)
if self.current_index > -1:
item = self.playlist[self.current_index]
yield (
f"bitrate: {item.bitrate / 1000}",
f"audio: {item.samplerate}:{item.bitdepth}:{item.channels}",
)
(pos, total) = self.player.time()
yield (
f"time: {int(pos)}:{int(total)}",
"elapsed: " + f"{pos:.3f}",
"duration: " + f"{total:.3f}",
)
# Also missing 'updating_db'.
def cmd_stats(self, conn):
"""Sends some statistics about the library."""
with self.lib.transaction() as tx:
statement = (
"SELECT COUNT(DISTINCT artist), "
"COUNT(DISTINCT album), "
"COUNT(id), "
"SUM(length) "
"FROM items"
)
artists, albums, songs, totaltime = tx.query(statement)[0]
yield (
f"artists: {artists}",
f"albums: {albums}",
f"songs: {songs}",
f"uptime: {int(time.time() - self.startup_time)}",
"playtime: 0", # Missing.
f"db_playtime: {int(totaltime)}",
f"db_update: {int(self.updated_time)}",
)
def cmd_decoders(self, conn):
"""Send list of supported decoders and formats."""
decoders = self.player.get_decoders()
for name, (mimes, exts) in decoders.items():
yield f"plugin: {name}"
for ext in exts:
yield f"suffix: {ext}"
for mime in mimes:
yield f"mime_type: {mime}"
# Searching.
tagtype_map = {
"Artist": "artist",
"ArtistSort": "artist_sort",
"Album": "album",
"Title": "title",
"Track": "track",
"AlbumArtist": "albumartist",
"AlbumArtistSort": "albumartist_sort",
"Label": "label",
"Genre": "genre",
"Date": "year",
"OriginalDate": "original_year",
"Composer": "composer",
"Disc": "disc",
"Comment": "comments",
"MUSICBRAINZ_TRACKID": "mb_trackid",
"MUSICBRAINZ_ALBUMID": "mb_albumid",
"MUSICBRAINZ_ARTISTID": "mb_artistid",
"MUSICBRAINZ_ALBUMARTISTID": "mb_albumartistid",
"MUSICBRAINZ_RELEASETRACKID": "mb_releasetrackid",
}
def cmd_tagtypes(self, conn):
"""Returns a list of the metadata (tag) fields available for
searching.
"""
for tag in self.tagtype_map:
yield f"tagtype: {tag}"
def _tagtype_lookup(self, tag):
"""Uses `tagtype_map` to look up the beets column name for an
MPD tagtype (or throw an appropriate exception). Returns both
the canonical name of the MPD tagtype and the beets column
name.
"""
for test_tag, key in self.tagtype_map.items():
# Match case-insensitively.
if test_tag.lower() == tag.lower():
return test_tag, key
raise BPDError(ERROR_UNKNOWN, "no such tagtype")
def _metadata_query(self, query_type, kv, allow_any_query: bool = False):
"""Helper function returns a query object that will find items
according to the library query type provided and the key-value
pairs specified. The any_query_type is used for queries of
type "any"; if None, then an error is thrown.
"""
if kv: # At least one key-value pair.
queries: list[Query] = []
# Iterate pairwise over the arguments.
it = iter(kv)
for tag, value in zip(it, it):
if tag.lower() == "any":
if allow_any_query:
queries.append(
Item.any_writable_media_field_query(
query_type, value
)
)
else:
raise BPDError(ERROR_UNKNOWN, "no such tagtype")
else:
_, key = self._tagtype_lookup(tag)
queries.append(Item.field_query(key, value, query_type))
return dbcore.query.AndQuery(queries)
else: # No key-value pairs.
return dbcore.query.TrueQuery()
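# For example, the MPD command `find Artist "Beatles" Album "Help!"` arrives
# here as kv = ("Artist", "Beatles", "Album", "Help!") and produces an
# AndQuery over exact-match queries on the beets fields artist and album.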
def cmd_search(self, conn, *kv):
"""Perform a substring match for items."""
query = self._metadata_query(
dbcore.query.SubstringQuery, kv, allow_any_query=True
)
for item in self.lib.items(query):
yield self._item_info(item)
def cmd_find(self, conn, *kv):
"""Perform an exact match for items."""
query = self._metadata_query(dbcore.query.MatchQuery, kv)
for item in self.lib.items(query):
yield self._item_info(item)
def cmd_list(self, conn, show_tag, *kv):
"""List distinct metadata values for show_tag, possibly
filtered by matching the tag/value pairs given in kv.
"""
show_tag_canon, show_key = self._tagtype_lookup(show_tag)
if len(kv) == 1:
if show_tag_canon == "Album":
# If no tag was given, assume artist. This is because MPD
# supports a short version of this command for fetching the
# albums belonging to a particular artist, and some clients
# rely on this behaviour (e.g. MPDroid, M.A.L.P.).
kv = ("Artist", kv[0])
else:
raise BPDError(ERROR_ARG, 'should be "Album" for 3 arguments')
elif len(kv) % 2 != 0:
raise BPDError(ERROR_ARG, "Incorrect number of filter arguments")
query = self._metadata_query(dbcore.query.MatchQuery, kv)
clause, subvals = query.clause()
statement = (
f"SELECT DISTINCT {show_key}"
f" FROM items WHERE {clause}"
f" ORDER BY {show_key}"
)
self._log.debug(statement)
with self.lib.transaction() as tx:
rows = tx.query(statement, subvals)
for row in rows:
if not row[0]:
# Skip any empty values of the field.
continue
yield f"{show_tag_canon}: {row[0]}"
def cmd_count(self, conn, tag, value):
"""Returns the number and total time of songs matching the
tag/value query.
"""
_, key = self._tagtype_lookup(tag)
songs = 0
playtime = 0.0
for item in self.lib.items(
Item.field_query(key, value, dbcore.query.MatchQuery)
):
songs += 1
playtime += item.length
yield f"songs: {songs}"
yield f"playtime: {int(playtime)}"
# Persistent playlist manipulation. In MPD this is an optional feature so
# these dummy implementations match MPD's behaviour with the feature off.
def cmd_listplaylist(self, conn, playlist):
raise BPDError(ERROR_NO_EXIST, "No such playlist")
def cmd_listplaylistinfo(self, conn, playlist):
raise BPDError(ERROR_NO_EXIST, "No such playlist")
def cmd_listplaylists(self, conn):
raise BPDError(ERROR_UNKNOWN, "Stored playlists are disabled")
def cmd_load(self, conn, playlist):
raise BPDError(ERROR_NO_EXIST, "Stored playlists are disabled")
def cmd_playlistadd(self, conn, playlist, uri):
raise BPDError(ERROR_UNKNOWN, "Stored playlists are disabled")
def cmd_playlistclear(self, conn, playlist):
raise BPDError(ERROR_UNKNOWN, "Stored playlists are disabled")
def cmd_playlistdelete(self, conn, playlist, index):
raise BPDError(ERROR_UNKNOWN, "Stored playlists are disabled")
def cmd_playlistmove(self, conn, playlist, from_index, to_index):
raise BPDError(ERROR_UNKNOWN, "Stored playlists are disabled")
def cmd_rename(self, conn, playlist, new_name):
raise BPDError(ERROR_UNKNOWN, "Stored playlists are disabled")
def cmd_rm(self, conn, playlist):
raise BPDError(ERROR_UNKNOWN, "Stored playlists are disabled")
def cmd_save(self, conn, playlist):
raise BPDError(ERROR_UNKNOWN, "Stored playlists are disabled")
# "Outputs." Just a dummy implementation because we don't control
# any outputs.
def cmd_outputs(self, conn):
"""List the available outputs."""
yield (
"outputid: 0",
"outputname: gstreamer",
"outputenabled: 1",
)
def cmd_enableoutput(self, conn, output_id):
output_id = cast_arg(int, output_id)
if output_id != 0:
raise ArgumentIndexError()
def cmd_disableoutput(self, conn, output_id):
output_id = cast_arg(int, output_id)
if output_id == 0:
raise BPDError(ERROR_ARG, "cannot disable this output")
else:
raise ArgumentIndexError()
# Playback control. The functions below hook into the
# half-implementations provided by the base class. Together, they're
# enough to implement all normal playback functionality.
def cmd_play(self, conn, index=-1):
new_index = index != -1 and index != self.current_index
was_paused = self.paused
super().cmd_play(conn, index)
if self.current_index > -1: # Not stopped.
if was_paused and not new_index:
# Just unpause.
self.player.play()
else:
self.player.play_file(self.playlist[self.current_index].path)
def cmd_pause(self, conn, state=None):
super().cmd_pause(conn, state)
if self.paused:
self.player.pause()
elif self.player.playing:
self.player.play()
def cmd_stop(self, conn):
super().cmd_stop(conn)
self.player.stop()
def cmd_seek(self, conn, index, pos):
"""Seeks to the specified position in the specified song."""
index = cast_arg(int, index)
pos = cast_arg(float, pos)
super().cmd_seek(conn, index, pos)
self.player.seek(pos)
# Volume control.
def cmd_setvol(self, conn, vol):
vol = cast_arg(int, vol)
super().cmd_setvol(conn, vol)
self.player.volume = float(vol) / 100
# Beets plugin hooks.
class BPDPlugin(BeetsPlugin):
"""Provides the "beet bpd" command for running a music player
server.
"""
def __init__(self):
super().__init__()
self.config.add(
{
"host": "",
"port": 6600,
"control_port": 6601,
"password": "",
"volume": VOLUME_MAX,
}
)
self.config["password"].redact = True
def start_bpd(self, lib, host, port, password, volume, ctrl_port):
"""Starts a BPD server."""
server = Server(lib, host, port, password, ctrl_port, self._log)
server.cmd_setvol(None, volume)
server.run()
def commands(self):
cmd = beets.ui.Subcommand(
"bpd", help="run an MPD-compatible music player server"
)
def func(lib, opts, args):
host = self.config["host"].as_str()
host = args.pop(0) if args else host
port = args.pop(0) if args else self.config["port"].get(int)
if args:
ctrl_port = args.pop(0)
else:
ctrl_port = self.config["control_port"].get(int)
if args:
raise beets.ui.UserError("too many arguments")
password = self.config["password"].as_str()
volume = self.config["volume"].get(int)
self.start_bpd(
lib, host, int(port), password, volume, int(ctrl_port)
)
cmd.func = func
return [cmd]
beetbox-beets-c1877b7/beetsplug/bpd/gstplayer.py 0000664 0000000 0000000 00000023234 15073551743 0021747 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A wrapper for the GStreamer Python bindings that exposes a simple
music player.
"""
import _thread
import copy
import os
import sys
import time
import urllib.parse
import gi
from beets import ui
gi.require_version("Gst", "1.0")
from gi.repository import GLib, Gst # noqa: E402
Gst.init(None)
class QueryError(Exception):
pass
class GstPlayer:
"""A music player abstracting GStreamer's Playbin element.
Create a player object, then call run() to start a thread with a
runloop. Then call play_file to play music. Use player.playing
to check whether music is currently playing.
Sequencing multiple tracks is left to the caller: pass a
finished_callback and start the next file from there (see
play_complicated below for an example).
"""
def __init__(self, finished_callback=None):
"""Initialize a player.
If a finished_callback is provided, it is called every time a
track started with play_file finishes.
Once the player has been created, call run() to begin the main
runloop in a separate thread.
"""
# Set up the Gstreamer player. From the pygst tutorial:
# https://pygstdocs.berlios.de/pygst-tutorial/playbin.html (gone)
# https://brettviren.github.io/pygst-tutorial-org/pygst-tutorial.html
####
# Updated to GStreamer 1.0 with:
# https://wiki.ubuntu.com/Novacut/GStreamer1.0
self.player = Gst.ElementFactory.make("playbin", "player")
if self.player is None:
raise ui.UserError("Could not create playbin")
fakesink = Gst.ElementFactory.make("fakesink", "fakesink")
if fakesink is None:
raise ui.UserError("Could not create fakesink")
self.player.set_property("video-sink", fakesink)
bus = self.player.get_bus()
bus.add_signal_watch()
bus.connect("message", self._handle_message)
# Set up our own stuff.
self.playing = False
self.finished_callback = finished_callback
self.cached_time = None
self._volume = 1.0
def _get_state(self):
"""Returns the current state flag of the playbin."""
# gst's get_state function returns a 3-tuple; we just want the
# status flag in position 1.
return self.player.get_state(Gst.CLOCK_TIME_NONE)[1]
def _handle_message(self, bus, message):
"""Callback for status updates from GStreamer."""
if message.type == Gst.MessageType.EOS:
# file finished playing
self.player.set_state(Gst.State.NULL)
self.playing = False
self.cached_time = None
if self.finished_callback:
self.finished_callback()
elif message.type == Gst.MessageType.ERROR:
# error
self.player.set_state(Gst.State.NULL)
err, debug = message.parse_error()
print(f"Error: {err}")
self.playing = False
def _set_volume(self, volume):
"""Set the volume level to a value in the range [0, 1.5]."""
# And the volume for the playbin.
self._volume = volume
self.player.set_property("volume", volume)
def _get_volume(self):
"""Get the volume as a float in the range [0, 1.5]."""
return self._volume
volume = property(_get_volume, _set_volume)
def play_file(self, path):
"""Immediately begin playing the audio file at the given
path.
"""
self.player.set_state(Gst.State.NULL)
if isinstance(path, str):
path = path.encode("utf-8")
uri = f"file://{urllib.parse.quote(path)}"
self.player.set_property("uri", uri)
self.player.set_state(Gst.State.PLAYING)
self.playing = True
def play(self):
"""If paused, resume playback."""
if self._get_state() == Gst.State.PAUSED:
self.player.set_state(Gst.State.PLAYING)
self.playing = True
def pause(self):
"""Pause playback."""
self.player.set_state(Gst.State.PAUSED)
def stop(self):
"""Halt playback."""
self.player.set_state(Gst.State.NULL)
self.playing = False
self.cached_time = None
def run(self):
"""Start a new thread for the player.
Call this function before trying to play any music with
play_file() or play().
"""
# If we don't use the MainLoop, messages are never sent.
def start():
loop = GLib.MainLoop()
loop.run()
_thread.start_new_thread(start, ())
def time(self):
"""Returns a tuple containing (position, length) where both
values are integers in seconds. If no stream is available,
returns (0, 0).
"""
fmt = Gst.Format(Gst.Format.TIME)
try:
posq = self.player.query_position(fmt)
if not posq[0]:
raise QueryError("query_position failed")
pos = posq[1] / (10**9)
lengthq = self.player.query_duration(fmt)
if not lengthq[0]:
raise QueryError("query_duration failed")
length = lengthq[1] / (10**9)
self.cached_time = (pos, length)
return (pos, length)
except QueryError:
# Stream not ready. For small gaps of time, for instance
# after seeking, the time values are unavailable. For this
# reason, we cache recent values.
if self.playing and self.cached_time:
return self.cached_time
else:
return (0, 0)
def seek(self, position):
"""Seeks to position (in seconds)."""
cur_pos, cur_len = self.time()
if position > cur_len:
self.stop()
return
fmt = Gst.Format(Gst.Format.TIME)
ns = position * 10**9 # convert to nanoseconds
self.player.seek_simple(fmt, Gst.SeekFlags.FLUSH, ns)
# save new cached time
self.cached_time = (position, cur_len)
def block(self):
"""Block until playing finishes."""
while self.playing:
time.sleep(1)
def get_decoders(self):
return get_decoders()
def get_decoders():
"""Get supported audio decoders from GStreamer.
Returns a dict mapping decoder element names to the associated media types
and file extensions.
"""
# We only care about audio decoder elements.
filt = (
Gst.ELEMENT_FACTORY_TYPE_DEPAYLOADER
| Gst.ELEMENT_FACTORY_TYPE_DEMUXER
| Gst.ELEMENT_FACTORY_TYPE_PARSER
| Gst.ELEMENT_FACTORY_TYPE_DECODER
| Gst.ELEMENT_FACTORY_TYPE_MEDIA_AUDIO
)
decoders = {}
mime_types = set()
for f in Gst.ElementFactory.list_get_elements(filt, Gst.Rank.NONE):
for pad in f.get_static_pad_templates():
if pad.direction == Gst.PadDirection.SINK:
caps = pad.static_caps.get()
mimes = set()
for i in range(caps.get_size()):
struct = caps.get_structure(i)
mime = struct.get_name()
if mime == "unknown/unknown":
continue
mimes.add(mime)
mime_types.add(mime)
if mimes:
decoders[f.get_name()] = (mimes, set())
# Check all the TypeFindFactory plugin features from the registry. If they
# are associated with an audio media type that we found above, get the list
# of corresponding file extensions.
mime_extensions = {mime: set() for mime in mime_types}
for feat in Gst.Registry.get().get_feature_list(Gst.TypeFindFactory):
caps = feat.get_caps()
if caps:
for i in range(caps.get_size()):
struct = caps.get_structure(i)
mime = struct.get_name()
if mime in mime_types:
mime_extensions[mime].update(feat.get_extensions())
# Fill in the slot we left for file extensions.
for name, (mimes, exts) in decoders.items():
for mime in mimes:
exts.update(mime_extensions[mime])
return decoders
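# The returned mapping has the shape {element_name: (mime_types, extensions)},
# e.g. (illustrative) {"flacparse": ({"audio/x-flac"}, {"flac"})}.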
def play_simple(paths):
"""Play the files in paths in a straightforward way, without
using the player's callback function.
"""
p = GstPlayer()
p.run()
for path in paths:
p.play_file(path)
p.block()
def play_complicated(paths):
"""Play the files in the path one after the other by using the
callback function to advance to the next song.
"""
my_paths = copy.copy(paths)
def next_song():
my_paths.pop(0)
p.play_file(my_paths[0])
p = GstPlayer(next_song)
p.run()
p.play_file(my_paths[0])
while my_paths:
time.sleep(1)
if __name__ == "__main__":
# A very simple command-line player. Just give it names of audio
# files on the command line; these are all played in sequence.
paths = [os.path.abspath(os.path.expanduser(p)) for p in sys.argv[1:]]
# play_simple(paths)
play_complicated(paths)
beetbox-beets-c1877b7/beetsplug/bpm.py 0000664 0000000 0000000 00000005103 15073551743 0017741 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, aroquen
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Determine BPM by pressing a key to the rhythm."""
import time
from beets import ui
from beets.plugins import BeetsPlugin
def bpm(max_strokes):
"""Returns average BPM (possibly of a playing song)
listening to Enter keystrokes.
"""
t0 = None
dt = []
for i in range(max_strokes):
# Press enter to the rhythm...
s = input()
if s == "":
t1 = time.time()
# Only start measuring at the second stroke
if t0:
dt.append(t1 - t0)
t0 = t1
else:
break
# Return average BPM
# bpm = (max_strokes-1) / sum(dt) * 60
ave = sum([1.0 / dti * 60 for dti in dt]) / len(dt)
return ave
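# e.g. pressing Enter every 0.5 seconds gives dt == [0.5, 0.5, ...] and an
# average of 120 BPM.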
class BPMPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"max_strokes": 3,
"overwrite": True,
}
)
def commands(self):
cmd = ui.Subcommand(
"bpm",
help="determine bpm of a song by pressing a key to the rhythm",
)
cmd.func = self.command
return [cmd]
def command(self, lib, opts, args):
write = ui.should_write()
self.get_bpm(lib.items(args), write)
def get_bpm(self, items, write=False):
overwrite = self.config["overwrite"].get(bool)
if len(items) > 1:
raise ValueError("Can only get bpm of one song at time")
item = items[0]
if item["bpm"]:
self._log.info("Found bpm {}", item["bpm"])
if not overwrite:
return
self._log.info(
"Press Enter {} times to the rhythm or Ctrl-D to exit",
self.config["max_strokes"].get(int),
)
new_bpm = bpm(self.config["max_strokes"].get(int))
item["bpm"] = int(new_bpm)
if write:
item.try_write()
item.store()
self._log.info("Added new bpm {}", item["bpm"])
beetbox-beets-c1877b7/beetsplug/bpsync.py 0000664 0000000 0000000 00000014544 15073551743 0020472 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2019, Rahul Ahuja.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Update library's tags using Beatport."""
from beets import autotag, library, ui, util
from beets.plugins import BeetsPlugin, apply_item_changes
from .beatport import BeatportPlugin
class BPSyncPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.beatport_plugin = BeatportPlugin()
self.beatport_plugin.setup()
def commands(self):
cmd = ui.Subcommand("bpsync", help="update metadata from Beatport")
cmd.parser.add_option(
"-p",
"--pretend",
action="store_true",
help="show all changes but do nothing",
)
cmd.parser.add_option(
"-m",
"--move",
action="store_true",
dest="move",
help="move files in the library directory",
)
cmd.parser.add_option(
"-M",
"--nomove",
action="store_false",
dest="move",
help="don't move files in library",
)
cmd.parser.add_option(
"-W",
"--nowrite",
action="store_false",
default=None,
dest="write",
help="don't write updated metadata to files",
)
cmd.parser.add_format_option()
cmd.func = self.func
return [cmd]
def func(self, lib, opts, args):
"""Command handler for the bpsync function."""
move = ui.should_move(opts.move)
pretend = opts.pretend
write = ui.should_write(opts.write)
self.singletons(lib, args, move, pretend, write)
self.albums(lib, args, move, pretend, write)
def singletons(self, lib, query, move, pretend, write):
"""Retrieve and apply info from the autotagger for items matched by
query.
"""
for item in lib.items(query + ["singleton:true"]):
if not item.mb_trackid:
self._log.info(
"Skipping singleton with no mb_trackid: {}", item
)
continue
if not self.is_beatport_track(item):
self._log.info(
"Skipping non-{.beatport_plugin.data_source} singleton: {}",
self,
item,
)
continue
# Apply.
trackinfo = self.beatport_plugin.track_for_id(item.mb_trackid)
with lib.transaction():
autotag.apply_item_metadata(item, trackinfo)
apply_item_changes(lib, item, move, pretend, write)
@staticmethod
def is_beatport_track(item):
return (
item.get("data_source") == BeatportPlugin.data_source
and item.mb_trackid.isnumeric()
)
def get_album_tracks(self, album):
if not album.mb_albumid:
self._log.info("Skipping album with no mb_albumid: {}", album)
return False
if not album.mb_albumid.isnumeric():
self._log.info(
"Skipping album with invalid {.beatport_plugin.data_source} ID: {}",
self,
album,
)
return False
items = list(album.items())
if album.get("data_source") == self.beatport_plugin.data_source:
return items
if not all(self.is_beatport_track(item) for item in items):
self._log.info(
"Skipping non-{.beatport_plugin.data_source} release: {}",
self,
album,
)
return False
return items
def albums(self, lib, query, move, pretend, write):
"""Retrieve and apply info from the autotagger for albums matched by
query and their items.
"""
# Process matching albums.
for album in lib.albums(query):
# Do we have a valid Beatport album?
items = self.get_album_tracks(album)
if not items:
continue
# Get the Beatport album information.
albuminfo = self.beatport_plugin.album_for_id(album.mb_albumid)
if not albuminfo:
self._log.info(
"Release ID {0.mb_albumid} not found for album {0}", album
)
continue
beatport_trackid_to_trackinfo = {
track.track_id: track for track in albuminfo.tracks
}
library_trackid_to_item = {
int(item.mb_trackid): item for item in items
}
item_to_trackinfo = {
item: beatport_trackid_to_trackinfo[track_id]
for track_id, item in library_trackid_to_item.items()
}
self._log.info("applying changes to {}", album)
with lib.transaction():
autotag.apply_metadata(albuminfo, item_to_trackinfo)
changed = False
# Find any changed item to apply Beatport changes to album.
any_changed_item = items[0]
for item in items:
item_changed = ui.show_model_changes(item)
changed |= item_changed
if item_changed:
any_changed_item = item
apply_item_changes(lib, item, move, pretend, write)
if pretend or not changed:
continue
# Update album structure to reflect an item in it.
for key in library.Album.item_keys:
album[key] = any_changed_item[key]
album.store()
# Move album art (and any inconsistent items).
if move and lib.directory in util.ancestry(items[0].path):
self._log.debug("moving album {}", album)
album.move()
beetbox-beets-c1877b7/beetsplug/bucket.py 0000664 0000000 0000000 00000017265 15073551743 0020454 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides the %bucket{} function for path formatting."""
import re
import string
from datetime import datetime
from itertools import tee
from beets import plugins, ui
ASCII_DIGITS = string.digits + string.ascii_lowercase
class BucketError(Exception):
pass
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def span_from_str(span_str):
"""Build a span dict from the span string representation."""
def normalize_year(d, yearfrom):
"""Convert string to a 4 digits year"""
if yearfrom < 100:
raise BucketError(f"{yearfrom} must be expressed on 4 digits")
# if two digits only, pick closest year that ends by these two
# digits starting from yearfrom
if d < 100:
if (d % 100) < (yearfrom % 100):
d = (yearfrom - yearfrom % 100) + 100 + d
else:
d = (yearfrom - yearfrom % 100) + d
return d
years = [int(x) for x in re.findall(r"\d+", span_str)]
if not years:
raise ui.UserError(
f"invalid range defined for year bucket {span_str!r}: no year found"
)
try:
years = [normalize_year(x, years[0]) for x in years]
except BucketError as exc:
raise ui.UserError(
f"invalid range defined for year bucket {span_str!r}: {exc}"
)
res = {"from": years[0], "str": span_str}
if len(years) > 1:
res["to"] = years[-1]
return res
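# Editor's sketch (illustrative): a shortened end year is expanded relative
# to the start year.
#
#     >>> span_from_str("1990-99")
#     {'from': 1990, 'str': '1990-99', 'to': 1999}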
def complete_year_spans(spans):
"""Set the `to` value of spans if empty and sort them chronologically."""
spans.sort(key=lambda x: x["from"])
for x, y in pairwise(spans):
if "to" not in x:
x["to"] = y["from"] - 1
if spans and "to" not in spans[-1]:
spans[-1]["to"] = datetime.now().year
def extend_year_spans(spans, spanlen, start=1900, end=2014):
"""Add new spans to given spans list so that every year of [start,end]
belongs to a span.
"""
extended_spans = spans[:]
for x, y in pairwise(spans):
# if a gap between two spans, fill the gap with as much spans of
# spanlen length as necessary
for span_from in range(x["to"] + 1, y["from"], spanlen):
extended_spans.append({"from": span_from})
# Create spans prior to declared ones
for span_from in range(spans[0]["from"] - spanlen, start, -spanlen):
extended_spans.append({"from": span_from})
# Create spans after the declared ones
for span_from in range(spans[-1]["to"] + 1, end, spanlen):
extended_spans.append({"from": span_from})
complete_year_spans(extended_spans)
return extended_spans
def build_year_spans(year_spans_str):
"""Build a chronologically ordered list of spans dict from unordered spans
stringlist.
"""
spans = []
for elem in year_spans_str:
spans.append(span_from_str(elem))
complete_year_spans(spans)
return spans
def str2fmt(s):
"""Deduces formatting syntax from a span string."""
regex = re.compile(
r"(?P\D*)(?P\d+)(?P\D*)"
r"(?P\d*)(?P\D*)"
)
m = re.match(regex, s)
res = {
"fromnchars": len(m.group("fromyear")),
"tonchars": len(m.group("toyear")),
}
res["fmt"] = (
f"{m['bef']}{{}}{m['sep']}{'{}' if res['tonchars'] else ''}{m['after']}"
)
return res
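# Editor's sketch (illustrative): the deduced format mirrors the span
# string's separator and digit counts.
#
#     >>> str2fmt("1990-99")
#     {'fromnchars': 4, 'tonchars': 2, 'fmt': '{}-{}'}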
def format_span(fmt, yearfrom, yearto, fromnchars, tonchars):
"""Return a span string representation."""
args = [str(yearfrom)[-fromnchars:]]
if tonchars:
args.append(str(yearto)[-tonchars:])
return fmt.format(*args)
def extract_modes(spans):
"""Extract the most common spans lengths and representation formats"""
rangelen = sorted([x["to"] - x["from"] + 1 for x in spans])
deflen = sorted(rangelen, key=rangelen.count)[-1]
reprs = [str2fmt(x["str"]) for x in spans]
deffmt = sorted(reprs, key=reprs.count)[-1]
return deflen, deffmt
def build_alpha_spans(alpha_spans_str, alpha_regexs):
"""Extract alphanumerics from string and return sorted list of chars
[from...to]
"""
spans = []
for elem in alpha_spans_str:
if elem in alpha_regexs:
spans.append(re.compile(alpha_regexs[elem]))
else:
bucket = sorted([x for x in elem.lower() if x.isalnum()])
if bucket:
begin_index = ASCII_DIGITS.index(bucket[0])
end_index = ASCII_DIGITS.index(bucket[-1])
else:
raise ui.UserError(
"invalid range defined for alpha bucket "
f"'{elem}': no alphanumeric character found"
)
spans.append(
re.compile(
rf"^[{ASCII_DIGITS[begin_index : end_index + 1]}]",
re.IGNORECASE,
)
)
return spans
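# Editor's note (illustrative): build_alpha_spans(["A-F"], {}) compiles a
# single pattern equivalent to r"^[abcdef]" with re.IGNORECASE, so any
# string starting with a letter from A to F falls into that bucket.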
class BucketPlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
self.template_funcs["bucket"] = self._tmpl_bucket
self.config.add(
{
"bucket_year": [],
"bucket_alpha": [],
"bucket_alpha_regex": {},
"extrapolate": False,
}
)
self.setup()
def setup(self):
"""Setup plugin from config options"""
self.year_spans = build_year_spans(self.config["bucket_year"].get())
if self.year_spans and self.config["extrapolate"]:
[self.ys_len_mode, self.ys_repr_mode] = extract_modes(
self.year_spans
)
self.year_spans = extend_year_spans(
self.year_spans, self.ys_len_mode
)
self.alpha_spans = build_alpha_spans(
self.config["bucket_alpha"].get(),
self.config["bucket_alpha_regex"].get(),
)
def find_bucket_year(self, year):
"""Return bucket that matches given year or return the year
if no matching bucket.
"""
for ys in self.year_spans:
if ys["from"] <= int(year) <= ys["to"]:
if "str" in ys:
return ys["str"]
else:
return format_span(
self.ys_repr_mode["fmt"],
ys["from"],
ys["to"],
self.ys_repr_mode["fromnchars"],
self.ys_repr_mode["tonchars"],
)
return year
def find_bucket_alpha(self, s):
"""Return alpha-range bucket that matches given string or return the
string initial if no matching bucket.
"""
for i, span in enumerate(self.alpha_spans):
if span.match(s):
return self.config["bucket_alpha"].get()[i]
return s[0].upper()
def _tmpl_bucket(self, text, field=None):
if not field and len(text) == 4 and text.isdigit():
field = "year"
if field == "year":
func = self.find_bucket_year
else:
func = self.find_bucket_alpha
return func(text)
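# Editor's sketch of typical usage (illustrative config, not upstream docs):
#
#   bucket:
#     bucket_year: ['1980-89', '1990-99']
#   paths:
#     default: %bucket{$year}/$albumartist/$album/$track $title
#
# With this config, a 1984 release is filed under "1980-89/...".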
beetbox-beets-c1877b7/beetsplug/chroma.py 0000664 0000000 0000000 00000027702 15073551743 0020445 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Chromaprint/Acoustid acoustic fingerprinting support to the
autotagger. Requires the pyacoustid library.
"""
import re
from collections import defaultdict
from functools import cached_property, partial
from typing import Iterable
import acoustid
import confuse
from beets import config, ui, util
from beets.autotag.distance import Distance
from beets.autotag.hooks import TrackInfo
from beets.metadata_plugins import MetadataSourcePlugin
from beetsplug.musicbrainz import MusicBrainzPlugin
API_KEY = "1vOwZtEn"
SCORE_THRESH = 0.5
TRACK_ID_WEIGHT = 10.0
COMMON_REL_THRESH = 0.6 # How many tracks must have an album in common?
MAX_RECORDINGS = 5
MAX_RELEASES = 5
# Stores the Acoustid match information for each track. This is
# populated when an import task begins and then used when searching for
# candidates. It maps audio file paths to (recording_ids, release_ids)
# pairs. If a given path is not present in the mapping, then no match
# was found.
_matches = {}
# Stores the fingerprint and Acoustid ID for each track. This is stored
# as metadata for each track for later use but is not relevant for
# autotagging.
_fingerprints = {}
_acoustids = {}
def prefix(it, count):
"""Truncate an iterable to at most `count` items."""
for i, v in enumerate(it):
if i >= count:
break
yield v
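# Editor's sketch (illustrative):
#
#     >>> list(prefix("abcdef", 3))
#     ['a', 'b', 'c']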
def releases_key(release, countries, original_year):
"""Used as a key to sort releases by date then preferred country"""
date = release.get("date")
if date and original_year:
year = date.get("year", 9999)
month = date.get("month", 99)
day = date.get("day", 99)
else:
year = 9999
month = 99
day = 99
# Uses index of preferred countries to sort
country_key = 99
if release.get("country"):
for i, country in enumerate(countries):
if country.match(release["country"]):
country_key = i
break
return (year, month, day, country_key)
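# Editor's sketch (illustrative): with `original_year` enabled and preferred
# countries [re.compile("US", re.I)], a US release dated 1995 gets the sort
# key (1995, 99, 99, 0); a release without a date sorts after it with
# year 9999.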
def acoustid_match(log, path):
"""Gets metadata for a file from Acoustid and populates the
_matches, _fingerprints, and _acoustids dictionaries accordingly.
"""
try:
duration, fp = acoustid.fingerprint_file(util.syspath(path))
except acoustid.FingerprintGenerationError as exc:
log.error(
"fingerprinting of {} failed: {}",
util.displayable_path(repr(path)),
exc,
)
return None
fp = fp.decode()
_fingerprints[path] = fp
try:
res = acoustid.lookup(
API_KEY, fp, duration, meta="recordings releases", timeout=10
)
except acoustid.AcoustidError as exc:
log.debug(
"fingerprint matching {} failed: {}",
util.displayable_path(repr(path)),
exc,
)
return None
log.debug("chroma: fingerprinted {}", util.displayable_path(repr(path)))
# Ensure the response is usable and parse it.
if res["status"] != "ok" or not res.get("results"):
log.debug("no match found")
return None
result = res["results"][0] # Best match.
if result["score"] < SCORE_THRESH:
log.debug("no results above threshold")
return None
_acoustids[path] = result["id"]
# Get recording and releases from the result
if not result.get("recordings"):
log.debug("no recordings found")
return None
recording_ids = []
releases = []
for recording in result["recordings"]:
recording_ids.append(recording["id"])
if "releases" in recording:
releases.extend(recording["releases"])
# The releases list is essentially in random order from the Acoustid lookup
# so we optionally sort it using the match.preferred configuration options.
# 'original_year' to sort the earliest first and
# 'countries' to then sort preferred countries first.
country_patterns = config["match"]["preferred"]["countries"].as_str_seq()
countries = [re.compile(pat, re.I) for pat in country_patterns]
original_year = config["match"]["preferred"]["original_year"]
releases.sort(
key=partial(
releases_key, countries=countries, original_year=original_year
)
)
release_ids = [rel["id"] for rel in releases]
log.debug(
"matched recordings {} on releases {}", recording_ids, release_ids
)
_matches[path] = recording_ids, release_ids
# Plugin structure and autotagging logic.
def _all_releases(items):
"""Given an iterable of Items, determines (according to Acoustid)
which releases the items have in common. Generates release IDs.
"""
# Count the number of "hits" for each release.
relcounts = defaultdict(int)
for item in items:
if item.path not in _matches:
continue
_, release_ids = _matches[item.path]
for release_id in release_ids:
relcounts[release_id] += 1
for release_id, count in relcounts.items():
if float(count) / len(items) > COMMON_REL_THRESH:
yield release_id
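# Editor's note (illustrative): if a release ID shows up in the Acoustid
# matches for 3 out of 4 items, 3/4 = 0.75 exceeds COMMON_REL_THRESH (0.6),
# so that release is yielded as a candidate for the whole album.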
class AcoustidPlugin(MetadataSourcePlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"auto": True,
}
)
config["acoustid"]["apikey"].redact = True
if self.config["auto"]:
self.register_listener("import_task_start", self.fingerprint_task)
self.register_listener("import_task_apply", apply_acoustid_metadata)
@cached_property
def mb(self) -> MusicBrainzPlugin:
return MusicBrainzPlugin()
def fingerprint_task(self, task, session):
return fingerprint_task(self._log, task, session)
def track_distance(self, item, info):
dist = Distance()
if item.path not in _matches or not info.track_id:
# Match failed or no track ID.
return dist
recording_ids, _ = _matches[item.path]
dist.add_expr("track_id", info.track_id not in recording_ids)
return dist
def candidates(self, items, artist, album, va_likely):
albums = []
for relid in prefix(_all_releases(items), MAX_RELEASES):
album = self.mb.album_for_id(relid)
if album:
albums.append(album)
self._log.debug("acoustid album candidates: {}", len(albums))
return albums
def item_candidates(self, item, artist, title) -> Iterable[TrackInfo]:
if item.path not in _matches:
return []
recording_ids, _ = _matches[item.path]
tracks = []
for recording_id in prefix(recording_ids, MAX_RECORDINGS):
track = self.mb.track_for_id(recording_id)
if track:
tracks.append(track)
self._log.debug("acoustid item candidates: {}", len(tracks))
return tracks
def album_for_id(self, *args, **kwargs):
        # Lookup by fingerprint ID does not make much sense.
return None
def track_for_id(self, *args, **kwargs):
        # Lookup by fingerprint ID does not make much sense.
return None
def commands(self):
submit_cmd = ui.Subcommand(
"submit", help="submit Acoustid fingerprints"
)
def submit_cmd_func(lib, opts, args):
try:
apikey = config["acoustid"]["apikey"].as_str()
except confuse.NotFoundError:
raise ui.UserError("no Acoustid user API key provided")
submit_items(self._log, apikey, lib.items(args))
submit_cmd.func = submit_cmd_func
fingerprint_cmd = ui.Subcommand(
"fingerprint", help="generate fingerprints for items without them"
)
def fingerprint_cmd_func(lib, opts, args):
for item in lib.items(args):
fingerprint_item(self._log, item, write=ui.should_write())
fingerprint_cmd.func = fingerprint_cmd_func
return [submit_cmd, fingerprint_cmd]
# Hooks into import process.
def fingerprint_task(log, task, session):
"""Fingerprint each item in the task for later use during the
autotagging candidate search.
"""
items = task.items if task.is_album else [task.item]
for item in items:
acoustid_match(log, item.path)
def apply_acoustid_metadata(task, session):
"""Apply Acoustid metadata (fingerprint and ID) to the task's items."""
for item in task.imported_items():
if item.path in _fingerprints:
item.acoustid_fingerprint = _fingerprints[item.path]
if item.path in _acoustids:
item.acoustid_id = _acoustids[item.path]
# UI commands.
def submit_items(log, userkey, items, chunksize=64):
"""Submit fingerprints for the items to the Acoustid server."""
data = [] # The running list of dictionaries to submit.
def submit_chunk():
"""Submit the current accumulated fingerprint data."""
log.info("submitting {} fingerprints", len(data))
try:
acoustid.submit(API_KEY, userkey, data, timeout=10)
except acoustid.AcoustidError as exc:
log.warning("acoustid submission error: {}", exc)
del data[:]
for item in items:
fp = fingerprint_item(log, item, write=ui.should_write())
# Construct a submission dictionary for this item.
item_data = {
"duration": int(item.length),
"fingerprint": fp,
}
if item.mb_trackid:
item_data["mbid"] = item.mb_trackid
log.debug("submitting MBID")
else:
item_data.update(
{
"track": item.title,
"artist": item.artist,
"album": item.album,
"albumartist": item.albumartist,
"year": item.year,
"trackno": item.track,
"discno": item.disc,
}
)
log.debug("submitting textual metadata")
data.append(item_data)
# If we have enough data, submit a chunk.
if len(data) >= chunksize:
submit_chunk()
# Submit remaining data in a final chunk.
if data:
submit_chunk()
def fingerprint_item(log, item, write=False):
"""Get the fingerprint for an Item. If the item already has a
fingerprint, it is not regenerated. If fingerprint generation fails,
return None. If the items are associated with a library, they are
saved to the database. If `write` is set, then the new fingerprints
are also written to files' metadata.
"""
# Get a fingerprint and length for this track.
if not item.length:
log.info("{.filepath}: no duration available", item)
elif item.acoustid_fingerprint:
if write:
log.info("{.filepath}: fingerprint exists, skipping", item)
else:
log.info("{.filepath}: using existing fingerprint", item)
return item.acoustid_fingerprint
else:
log.info("{.filepath}: fingerprinting", item)
try:
_, fp = acoustid.fingerprint_file(util.syspath(item.path))
item.acoustid_fingerprint = fp.decode()
if write:
log.info("{.filepath}: writing fingerprint", item)
item.try_write()
if item._db:
item.store()
return item.acoustid_fingerprint
except acoustid.FingerprintGenerationError as exc:
log.info("fingerprint generation failed: {}", exc)
beetbox-beets-c1877b7/beetsplug/convert.py 0000664 0000000 0000000 00000062413 15073551743 0020652 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Jakob Schnitzer.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Converts tracks or albums to external directory"""
import logging
import os
import shlex
import subprocess
import tempfile
import threading
from string import Template
import mediafile
from confuse import ConfigTypeError, Optional
from beets import config, plugins, ui, util
from beets.library import Item, parse_query_string
from beets.plugins import BeetsPlugin
from beets.util import par_map
from beets.util.artresizer import ArtResizer
from beets.util.m3u import M3UFile
from beetsplug._utils import art
_fs_lock = threading.Lock()
_temp_files = [] # Keep track of temporary transcoded files for deletion.
# Some convenient alternate names for formats.
ALIASES = {
"windows media": "wma",
"vorbis": "ogg",
}
LOSSLESS_FORMATS = ["ape", "flac", "alac", "wave", "aiff"]
def replace_ext(path, ext):
"""Return the path with its extension replaced by `ext`.
The new extension must not contain a leading dot.
"""
ext_dot = b"." + ext
return os.path.splitext(path)[0] + ext_dot
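# Editor's sketch (illustrative):
#
#     >>> replace_ext(b"/music/song.flac", b"mp3")
#     b'/music/song.mp3'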
def get_format(fmt=None):
"""Return the command template and the extension from the config."""
if not fmt:
fmt = config["convert"]["format"].as_str().lower()
fmt = ALIASES.get(fmt, fmt)
try:
format_info = config["convert"]["formats"][fmt].get(dict)
command = format_info["command"]
extension = format_info.get("extension", fmt)
except KeyError:
raise ui.UserError(f'convert: format {fmt} needs the "command" field')
except ConfigTypeError:
command = config["convert"]["formats"][fmt].get(str)
extension = fmt
# Convenience and backwards-compatibility shortcuts.
keys = config["convert"].keys()
if "command" in keys:
command = config["convert"]["command"].as_str()
elif "opts" in keys:
# Undocumented option for backwards compatibility with < 1.3.1.
command = (
f"ffmpeg -i $source -y {config['convert']['opts'].as_str()} $dest"
)
if "extension" in keys:
extension = config["convert"]["extension"].as_str()
return (command.encode("utf-8"), extension.encode("utf-8"))
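# Editor's sketch of a matching configuration (illustrative; the keys follow
# the options registered in ConvertPlugin below):
#
#   convert:
#     format: opus
#     formats:
#       opus:
#         command: ffmpeg -i $source -y -vn -acodec libopus -ab 128k $dest
#         extension: opus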
def in_no_convert(item: Item) -> bool:
no_convert_query = config["convert"]["no_convert"].as_str()
if no_convert_query:
query, _ = parse_query_string(no_convert_query, Item)
return query.match(item)
else:
return False
def should_transcode(item, fmt):
"""Determine whether the item should be transcoded as part of
conversion (i.e., its bitrate is high or it has the wrong format).
"""
if in_no_convert(item) or (
config["convert"]["never_convert_lossy_files"]
and item.format.lower() not in LOSSLESS_FORMATS
):
return False
maxbr = config["convert"]["max_bitrate"].get(Optional(int))
if maxbr is not None and item.bitrate >= 1000 * maxbr:
return True
return fmt.lower() != item.format.lower()
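# Editor's note (illustrative): with `max_bitrate: 320` configured, a
# 500 kbps item is transcoded because 500000 >= 1000 * 320; below the
# cap, an item is transcoded only if its format differs from `fmt`.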
class ConvertPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"dest": None,
"pretend": False,
"link": False,
"hardlink": False,
"threads": os.cpu_count(),
"format": "mp3",
"id3v23": "inherit",
"write_metadata": True,
"formats": {
"aac": {
"command": (
"ffmpeg -i $source -y -vn -acodec aac -aq 1 $dest"
),
"extension": "m4a",
},
"alac": {
"command": (
"ffmpeg -i $source -y -vn -acodec alac $dest"
),
"extension": "m4a",
},
"flac": "ffmpeg -i $source -y -vn -acodec flac $dest",
"mp3": "ffmpeg -i $source -y -vn -aq 2 $dest",
"opus": (
"ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest"
),
"ogg": (
"ffmpeg -i $source -y -vn -acodec libvorbis -aq 3 $dest"
),
"wma": "ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest",
},
"max_bitrate": None,
"auto": False,
"auto_keep": False,
"tmpdir": None,
"quiet": False,
"embed": True,
"paths": {},
"no_convert": "",
"never_convert_lossy_files": False,
"copy_album_art": False,
"album_art_maxwidth": 0,
"delete_originals": False,
"playlist": None,
}
)
self.early_import_stages = [self.auto_convert, self.auto_convert_keep]
self.register_listener("import_task_files", self._cleanup)
def commands(self):
cmd = ui.Subcommand("convert", help="convert to external location")
cmd.parser.add_option(
"-p",
"--pretend",
action="store_true",
help="show actions but do nothing",
)
cmd.parser.add_option(
"-t",
"--threads",
action="store",
type="int",
help=(
"change the number of threads, defaults to maximum available"
" processors"
),
)
cmd.parser.add_option(
"-k",
"--keep-new",
action="store_true",
dest="keep_new",
help="keep only the converted and move the old files",
)
cmd.parser.add_option(
"-d", "--dest", action="store", help="set the destination directory"
)
cmd.parser.add_option(
"-f",
"--format",
action="store",
dest="format",
help="set the target format of the tracks",
)
cmd.parser.add_option(
"-y",
"--yes",
action="store_true",
dest="yes",
help="do not ask for confirmation",
)
cmd.parser.add_option(
"-l",
"--link",
action="store_true",
dest="link",
help="symlink files that do not need transcoding.",
)
cmd.parser.add_option(
"-H",
"--hardlink",
action="store_true",
dest="hardlink",
help=(
"hardlink files that do not need transcoding. Overrides --link."
),
)
cmd.parser.add_option(
"-m",
"--playlist",
action="store",
help="""create an m3u8 playlist file containing
the converted files. The playlist file will be
saved below the destination directory, thus
PLAYLIST could be a file name or a relative path.
To ensure a working playlist when transferred to
a different computer, or opened from an external
drive, relative paths pointing to media files
will be used.""",
)
cmd.parser.add_album_option()
cmd.func = self.convert_func
return [cmd]
def auto_convert(self, config, task):
if self.config["auto"]:
par_map(
lambda item: self.convert_on_import(config.lib, item),
task.imported_items(),
)
def auto_convert_keep(self, config, task):
if self.config["auto_keep"]:
empty_opts = self.commands()[0].parser.get_default_values()
(
dest,
threads,
path_formats,
fmt,
pretend,
hardlink,
link,
playlist,
) = self._get_opts_and_config(empty_opts)
items = task.imported_items()
self._parallel_convert(
dest,
False,
path_formats,
fmt,
pretend,
link,
hardlink,
threads,
items,
)
    # Utilities converted from functions to methods in the logging overhaul.
def encode(self, command, source, dest, pretend=False):
"""Encode `source` to `dest` using command template `command`.
Raises `subprocess.CalledProcessError` if the command exited with a
non-zero status code.
"""
# The paths and arguments must be bytes.
assert isinstance(command, bytes)
assert isinstance(source, bytes)
assert isinstance(dest, bytes)
quiet = self.config["quiet"].get(bool)
if not quiet and not pretend:
self._log.info("Encoding {}", util.displayable_path(source))
command = os.fsdecode(command)
source = os.fsdecode(source)
dest = os.fsdecode(dest)
# Substitute $source and $dest in the argument list.
args = shlex.split(command)
encode_cmd = []
for i, arg in enumerate(args):
args[i] = Template(arg).safe_substitute(
{
"source": source,
"dest": dest,
}
)
encode_cmd.append(os.fsdecode(args[i]))
if pretend:
self._log.info("{}", " ".join(args))
return
try:
util.command_output(encode_cmd)
except subprocess.CalledProcessError as exc:
# Something went wrong (probably Ctrl+C), remove temporary files
self._log.info(
"Encoding {} failed. Cleaning up...",
util.displayable_path(source),
)
self._log.debug(
"Command {0} exited with status {1.returncode}: {1.output}",
args,
exc,
)
util.remove(dest)
util.prune_dirs(os.path.dirname(dest))
raise
except OSError as exc:
raise ui.UserError(
f"convert: couldn't invoke {' '.join(args)!r}: {exc}"
)
if not quiet and not pretend:
self._log.info(
"Finished encoding {}", util.displayable_path(source)
)
def convert_item(
self,
dest_dir,
keep_new,
path_formats,
fmt,
pretend=False,
link=False,
hardlink=False,
):
"""A pipeline thread that converts `Item` objects from a
library.
"""
command, ext = get_format(fmt)
item, original, converted = None, None, None
while True:
item = yield (item, original, converted)
dest = item.destination(basedir=dest_dir, path_formats=path_formats)
# Ensure that desired item is readable before processing it. Needed
# to avoid any side-effect of the conversion (linking, keep_new,
# refresh) if we already know that it will fail.
try:
mediafile.MediaFile(util.syspath(item.path))
except mediafile.UnreadableFileError as exc:
self._log.error("Could not open file to convert: {}", exc)
continue
# When keeping the new file in the library, we first move the
# current (pristine) file to the destination. We'll then copy it
# back to its old path or transcode it to a new path.
if keep_new:
original = dest
converted = item.path
if should_transcode(item, fmt):
converted = replace_ext(converted, ext)
else:
original = item.path
if should_transcode(item, fmt):
dest = replace_ext(dest, ext)
converted = dest
# Ensure that only one thread tries to create directories at a
# time. (The existence check is not atomic with the directory
# creation inside this function.)
if not pretend:
with _fs_lock:
util.mkdirall(dest)
if os.path.exists(util.syspath(dest)):
self._log.info(
"Skipping {.filepath} (target file exists)", item
)
continue
if keep_new:
if pretend:
self._log.info(
"mv {.filepath} {}",
item,
util.displayable_path(original),
)
else:
self._log.info(
"Moving to {}", util.displayable_path(original)
)
util.move(item.path, original)
if should_transcode(item, fmt):
linked = False
try:
self.encode(command, original, converted, pretend)
except subprocess.CalledProcessError:
continue
else:
linked = link or hardlink
if pretend:
msg = "ln" if hardlink else ("ln -s" if link else "cp")
self._log.info(
"{} {} {}",
msg,
util.displayable_path(original),
util.displayable_path(converted),
)
else:
# No transcoding necessary.
msg = (
"Hardlinking"
if hardlink
else ("Linking" if link else "Copying")
)
self._log.info("{} {.filepath}", msg, item)
if hardlink:
util.hardlink(original, converted)
elif link:
util.link(original, converted)
else:
util.copy(original, converted)
if pretend:
continue
id3v23 = self.config["id3v23"].as_choice([True, False, "inherit"])
if id3v23 == "inherit":
id3v23 = None
# Write tags from the database to the file if requested
if self.config["write_metadata"].get(bool):
item.try_write(path=converted, id3v23=id3v23)
if keep_new:
# If we're keeping the transcoded file, read it again (after
# writing) to get new bitrate, duration, etc.
item.path = converted
item.read()
item.store() # Store new path and audio data.
if self.config["embed"] and not linked:
album = item._cached_album
if album and album.artpath:
maxwidth = self._get_art_resize(album.artpath)
self._log.debug(
"embedding album art from {.art_filepath}", album
)
art.embed_item(
self._log,
item,
album.artpath,
maxwidth,
itempath=converted,
id3v23=id3v23,
)
if keep_new:
plugins.send(
"after_convert", item=item, dest=dest, keepnew=True
)
else:
plugins.send(
"after_convert", item=item, dest=converted, keepnew=False
)
def copy_album_art(
self,
album,
dest_dir,
path_formats,
pretend=False,
link=False,
hardlink=False,
):
"""Copies or converts the associated cover art of the album. Album must
have at least one track.
"""
if not album or not album.artpath:
return
album_item = album.items().get()
# Album shouldn't be empty.
if not album_item:
return
# Get the destination of the first item (track) of the album, we use
# this function to format the path accordingly to path_formats.
dest = album_item.destination(
basedir=dest_dir, path_formats=path_formats
)
# Remove item from the path.
dest = os.path.join(*util.components(dest)[:-1])
dest = album.art_destination(album.artpath, item_dir=dest)
if album.artpath == dest:
return
if not pretend:
util.mkdirall(dest)
if os.path.exists(util.syspath(dest)):
self._log.info(
"Skipping {.art_filepath} (target file exists)", album
)
return
# Decide whether we need to resize the cover-art image.
maxwidth = self._get_art_resize(album.artpath)
# Either copy or resize (while copying) the image.
if maxwidth is not None:
self._log.info(
"Resizing cover art from {.art_filepath} to {}",
album,
util.displayable_path(dest),
)
if not pretend:
ArtResizer.shared.resize(maxwidth, album.artpath, dest)
else:
if pretend:
msg = "ln" if hardlink else ("ln -s" if link else "cp")
self._log.info(
"{} {.art_filepath} {}",
msg,
album,
util.displayable_path(dest),
)
else:
msg = (
"Hardlinking"
if hardlink
else ("Linking" if link else "Copying")
)
self._log.info(
"{} cover art from {.art_filepath} to {}",
msg,
album,
util.displayable_path(dest),
)
if hardlink:
util.hardlink(album.artpath, dest)
elif link:
util.link(album.artpath, dest)
else:
util.copy(album.artpath, dest)
def convert_func(self, lib, opts, args):
(
dest,
threads,
path_formats,
fmt,
pretend,
hardlink,
link,
playlist,
) = self._get_opts_and_config(opts)
if opts.album:
albums = lib.albums(args)
items = [i for a in albums for i in a.items()]
if not pretend:
for a in albums:
ui.print_(format(a, ""))
else:
items = list(lib.items(args))
if not pretend:
for i in items:
ui.print_(format(i, ""))
if not items:
self._log.error("Empty query result.")
return
if not (pretend or opts.yes or ui.input_yn("Convert? (Y/n)")):
return
if opts.album and self.config["copy_album_art"]:
for album in albums:
self.copy_album_art(
album, dest, path_formats, pretend, link, hardlink
)
self._parallel_convert(
dest,
opts.keep_new,
path_formats,
fmt,
pretend,
link,
hardlink,
threads,
items,
)
if playlist:
# Playlist paths are understood as relative to the dest directory.
pl_normpath = util.normpath(playlist)
pl_dir = os.path.dirname(pl_normpath)
self._log.info("Creating playlist file {}", pl_normpath)
# Generates a list of paths to media files, ensures the paths are
# relative to the playlist's location and translates the unicode
# strings we get from item.destination to bytes.
items_paths = [
os.path.relpath(
item.destination(basedir=dest, path_formats=path_formats),
pl_dir,
)
for item in items
]
if not pretend:
m3ufile = M3UFile(playlist)
m3ufile.set_contents(items_paths)
m3ufile.write()
def convert_on_import(self, lib, item):
"""Transcode a file automatically after it is imported into the
library.
"""
fmt = self.config["format"].as_str().lower()
if should_transcode(item, fmt):
command, ext = get_format()
# Create a temporary file for the conversion.
tmpdir = self.config["tmpdir"].get()
if tmpdir:
tmpdir = os.fsdecode(util.bytestring_path(tmpdir))
fd, dest = tempfile.mkstemp(f".{os.fsdecode(ext)}", dir=tmpdir)
os.close(fd)
dest = util.bytestring_path(dest)
_temp_files.append(dest) # Delete the transcode later.
# Convert.
try:
self.encode(command, item.path, dest)
except subprocess.CalledProcessError:
return
# Change the newly-imported database entry to point to the
# converted file.
source_path = item.path
item.path = dest
item.write()
item.read() # Load new audio information data.
item.store()
if self.config["delete_originals"]:
self._log.log(
logging.DEBUG if self.config["quiet"] else logging.INFO,
"Removing original file {}",
source_path,
)
util.remove(source_path, False)
def _get_art_resize(self, artpath):
"""For a given piece of album art, determine whether or not it needs
to be resized according to the user's settings. If so, returns the
new size. If not, returns None.
"""
newwidth = None
if self.config["album_art_maxwidth"]:
maxwidth = self.config["album_art_maxwidth"].get(int)
size = ArtResizer.shared.get_size(artpath)
self._log.debug("image size: {}", size)
if size:
if size[0] > maxwidth:
newwidth = maxwidth
else:
self._log.warning(
"Could not get size of image (please see "
"documentation for dependencies)."
)
return newwidth
def _cleanup(self, task, session):
for path in task.old_paths:
if path in _temp_files:
if os.path.isfile(util.syspath(path)):
util.remove(path)
_temp_files.remove(path)
def _get_opts_and_config(self, opts):
"""Returns parameters needed for convert function.
Get parameters from command line if available,
default to config if not available.
"""
dest = opts.dest or self.config["dest"].get()
if not dest:
raise ui.UserError("no convert destination set")
dest = util.bytestring_path(dest)
threads = opts.threads or self.config["threads"].get(int)
path_formats = ui.get_path_formats(self.config["paths"] or None)
fmt = opts.format or self.config["format"].as_str().lower()
playlist = opts.playlist or self.config["playlist"].get()
if playlist is not None:
playlist = os.path.join(dest, util.bytestring_path(playlist))
if opts.pretend is not None:
pretend = opts.pretend
else:
pretend = self.config["pretend"].get(bool)
if opts.hardlink is not None:
hardlink = opts.hardlink
link = False
elif opts.link is not None:
hardlink = False
link = opts.link
else:
hardlink = self.config["hardlink"].get(bool)
link = self.config["link"].get(bool)
return (
dest,
threads,
path_formats,
fmt,
pretend,
hardlink,
link,
playlist,
)
def _parallel_convert(
self,
dest,
keep_new,
path_formats,
fmt,
pretend,
link,
hardlink,
threads,
items,
):
"""Run the convert_item function for every items on as many thread as
defined in threads
"""
convert = [
self.convert_item(
dest, keep_new, path_formats, fmt, pretend, link, hardlink
)
for _ in range(threads)
]
pipe = util.pipeline.Pipeline([iter(items), convert])
pipe.run_parallel()
beetbox-beets-c1877b7/beetsplug/deezer.py 0000664 0000000 0000000 00000025277 15073551743 0020457 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2019, Rahul Ahuja.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Deezer release and track search support to the autotagger"""
from __future__ import annotations
import collections
import time
from typing import TYPE_CHECKING, Literal, Sequence
import requests
from beets import ui
from beets.autotag import AlbumInfo, TrackInfo
from beets.dbcore import types
from beets.metadata_plugins import (
IDResponse,
SearchApiMetadataSourcePlugin,
SearchFilter,
)
if TYPE_CHECKING:
from beets.library import Item, Library
from ._typing import JSONDict
class DeezerPlugin(SearchApiMetadataSourcePlugin[IDResponse]):
item_types = {
"deezer_track_rank": types.INTEGER,
"deezer_track_id": types.INTEGER,
"deezer_updated": types.DATE,
}
# Base URLs for the Deezer API
# Documentation: https://developers.deezer.com/api/
search_url = "https://api.deezer.com/search/"
album_url = "https://api.deezer.com/album/"
track_url = "https://api.deezer.com/track/"
def __init__(self) -> None:
super().__init__()
def commands(self):
"""Add beet UI commands to interact with Deezer."""
deezer_update_cmd = ui.Subcommand(
"deezerupdate", help=f"Update {self.data_source} rank"
)
def func(lib: Library, opts, args):
items = lib.items(args)
self.deezerupdate(list(items), ui.should_write())
deezer_update_cmd.func = func
return [deezer_update_cmd]
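    # Editor's note (illustrative usage): running `beet deezerupdate QUERY`
    # invokes `deezerupdate()` below, refreshing the `deezer_track_rank`
    # and `deezer_updated` fields for the matched items.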
def album_for_id(self, album_id: str) -> AlbumInfo | None:
"""Fetch an album by its Deezer ID or URL."""
if not (deezer_id := self._extract_id(album_id)):
return None
album_url = f"{self.album_url}{deezer_id}"
if not (album_data := self.fetch_data(album_url)):
return None
contributors = album_data.get("contributors")
if contributors is not None:
artist, artist_id = self.get_artist(contributors)
else:
artist, artist_id = None, None
release_date = album_data["release_date"]
date_parts = [int(part) for part in release_date.split("-")]
num_date_parts = len(date_parts)
if num_date_parts == 3:
year, month, day = date_parts
elif num_date_parts == 2:
year, month = date_parts
day = None
elif num_date_parts == 1:
year = date_parts[0]
month = None
day = None
else:
raise ui.UserError(
f"Invalid `release_date` returned by {self.data_source} API: "
f"{release_date!r}"
)
tracks_obj = self.fetch_data(f"{self.album_url}{deezer_id}/tracks")
if tracks_obj is None:
return None
try:
tracks_data = tracks_obj["data"]
except KeyError:
self._log.debug("Error fetching album tracks for {}", deezer_id)
tracks_data = None
if not tracks_data:
return None
while "next" in tracks_obj:
tracks_obj = requests.get(
tracks_obj["next"],
timeout=10,
).json()
tracks_data.extend(tracks_obj["data"])
tracks = []
medium_totals: dict[int | None, int] = collections.defaultdict(int)
for i, track_data in enumerate(tracks_data, start=1):
track = self._get_track(track_data)
track.index = i
medium_totals[track.medium] += 1
tracks.append(track)
for track in tracks:
track.medium_total = medium_totals[track.medium]
return AlbumInfo(
album=album_data["title"],
album_id=deezer_id,
deezer_album_id=deezer_id,
artist=artist,
artist_credit=self.get_artist([album_data["artist"]])[0],
artist_id=artist_id,
tracks=tracks,
albumtype=album_data["record_type"],
va=(
len(album_data["contributors"]) == 1
and (artist or "").lower() == "various artists"
),
year=year,
month=month,
day=day,
label=album_data["label"],
mediums=max(filter(None, medium_totals.keys())),
data_source=self.data_source,
data_url=album_data["link"],
cover_art_url=album_data.get("cover_xl"),
)
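    # Editor's note (illustrative): `album_for_id` tolerates partial release
    # dates, e.g. a `release_date` of "2019-05" parses to year=2019,
    # month=5, day=None, and "2019" yields the year alone.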
def track_for_id(self, track_id: str) -> None | TrackInfo:
"""Fetch a track by its Deezer ID or URL and return a
TrackInfo object or None if the track is not found.
        :param track_id: Deezer ID or URL for the track.
"""
if not (deezer_id := self._extract_id(track_id)):
self._log.debug("Invalid Deezer track_id: {}", track_id)
return None
if not (track_data := self.fetch_data(f"{self.track_url}{deezer_id}")):
self._log.debug("Track not found: {}", track_id)
return None
track = self._get_track(track_data)
# Get album's tracks to set `track.index` (position on the entire
# release) and `track.medium_total` (total number of tracks on
# the track's disc).
if not (
album_tracks_obj := self.fetch_data(
f"{self.album_url}{track_data['album']['id']}/tracks"
)
):
return None
try:
album_tracks_data = album_tracks_obj["data"]
except KeyError:
self._log.debug(
"Error fetching album tracks for {}", track_data["album"]["id"]
)
return None
medium_total = 0
for i, track_data in enumerate(album_tracks_data, start=1):
if track_data["disk_number"] == track.medium:
medium_total += 1
if track_data["id"] == track.track_id:
track.index = i
track.medium_total = medium_total
return track
def _get_track(self, track_data: JSONDict) -> TrackInfo:
"""Convert a Deezer track object dict to a TrackInfo object.
:param track_data: Deezer Track object dict
"""
artist, artist_id = self.get_artist(
track_data.get("contributors", [track_data["artist"]])
)
return TrackInfo(
title=track_data["title"],
track_id=track_data["id"],
deezer_track_id=track_data["id"],
isrc=track_data.get("isrc"),
artist=artist,
artist_id=artist_id,
length=track_data["duration"],
index=track_data.get("track_position"),
medium=track_data.get("disk_number"),
deezer_track_rank=track_data.get("rank"),
medium_index=track_data.get("track_position"),
data_source=self.data_source,
data_url=track_data["link"],
deezer_updated=time.time(),
)
def _search_api(
self,
query_type: Literal[
"album",
"track",
"artist",
"history",
"playlist",
"podcast",
"radio",
"user",
],
filters: SearchFilter,
query_string: str = "",
) -> Sequence[IDResponse]:
"""Query the Deezer Search API for the specified ``query_string``, applying
the provided ``filters``.
:param filters: Field filters to apply.
:param query_string: Additional query to include in the search.
        :return: JSON data for the :class:`Response <requests.Response>`
            object, or an empty sequence if no search results are returned.
"""
query = self._construct_search_query(
query_string=query_string, filters=filters
)
self._log.debug("Searching {.data_source} for '{}'", self, query)
try:
response = requests.get(
f"{self.search_url}{query_type}",
params={
"q": query,
"limit": self.config["search_limit"].get(),
},
timeout=10,
)
response.raise_for_status()
except requests.exceptions.RequestException as e:
self._log.error(
"Error fetching data from {.data_source} API\n Error: {}",
self,
e,
)
return ()
response_data: Sequence[IDResponse] = response.json().get("data", [])
self._log.debug(
"Found {} result(s) from {.data_source} for '{}'",
len(response_data),
self,
query,
)
return response_data
def deezerupdate(self, items: Sequence[Item], write: bool):
"""Obtain rank information from Deezer."""
for index, item in enumerate(items, start=1):
self._log.info(
"Processing {}/{} tracks - {} ", index, len(items), item
)
try:
deezer_track_id = item.deezer_track_id
except AttributeError:
self._log.debug("No deezer_track_id present for: {}", item)
continue
try:
rank = self.fetch_data(
f"{self.track_url}{deezer_track_id}"
).get("rank")
self._log.debug(
"Deezer track: {} has {} rank", deezer_track_id, rank
)
except Exception as e:
self._log.debug("Invalid Deezer track_id: {}", e)
continue
item.deezer_track_rank = int(rank)
item.store()
item.deezer_updated = time.time()
if write:
item.try_write()
def fetch_data(self, url: str):
try:
response = requests.get(url, timeout=10)
response.raise_for_status()
data = response.json()
except requests.exceptions.RequestException as e:
self._log.error("Error fetching data from {}\n Error: {}", url, e)
return None
if "error" in data:
self._log.debug("Deezer API error: {}", data["error"]["message"])
return None
return data
beetbox-beets-c1877b7/beetsplug/discogs.py 0000664 0000000 0000000 00000076300 15073551743 0020625 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Discogs album search support to the autotagger. Requires the
python3-discogs-client library.
"""
from __future__ import annotations
import http.client
import json
import os
import re
import socket
import time
import traceback
from functools import cache
from string import ascii_lowercase
from typing import TYPE_CHECKING, Sequence, cast
import confuse
from discogs_client import Client, Master, Release
from discogs_client.exceptions import DiscogsAPIError
from requests.exceptions import ConnectionError
from typing_extensions import NotRequired, TypedDict
import beets
import beets.ui
from beets import config
from beets.autotag.distance import string_dist
from beets.autotag.hooks import AlbumInfo, TrackInfo
from beets.metadata_plugins import MetadataSourcePlugin
if TYPE_CHECKING:
from collections.abc import Callable, Iterable
from beets.library import Item
USER_AGENT = f"beets/{beets.__version__} +https://beets.io/"
API_KEY = "rAzVUQYRaoFjeBjyWuWZ"
API_SECRET = "plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy"
# Exceptions that discogs_client should really handle but does not.
CONNECTION_ERRORS = (
ConnectionError,
socket.error,
http.client.HTTPException,
ValueError, # JSON decoding raises a ValueError.
DiscogsAPIError,
)
TRACK_INDEX_RE = re.compile(
r"""
(.*?) # medium: everything before medium_index.
(\d*?) # medium_index: a number at the end of
# `position`, except if followed by a subtrack index.
# subtrack_index: can only be matched if medium
# or medium_index have been matched, and can be
(
(?<=\w)\.[\w]+ # a dot followed by a string (A.1, 2.A)
| (?<=\d)[A-Z]+ # a string that follows a number (1A, B2a)
)?
""",
re.VERBOSE,
)
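# Editor's sketch (illustrative, assuming the pattern is applied with
# `re.fullmatch`): a position like "A1" splits into medium "A" and
# medium_index "1", while "2.A" yields medium "", medium_index "2" and
# subtrack index ".A".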
DISAMBIGUATION_RE = re.compile(r" \(\d+\)")
class ReleaseFormat(TypedDict):
name: str
qty: int
descriptions: list[str] | None
class Artist(TypedDict):
name: str
anv: str
join: str
role: str
tracks: str
id: str
resource_url: str
class Track(TypedDict):
position: str
type_: str
title: str
duration: str
artists: list[Artist]
extraartists: NotRequired[list[Artist]]
class TrackWithSubtracks(Track):
sub_tracks: list[TrackWithSubtracks]
class IntermediateTrackInfo(TrackInfo):
"""Allows work with string mediums from
get_track_info"""
def __init__(
self,
medium_str: str | None,
**kwargs,
) -> None:
self.medium_str = medium_str
super().__init__(**kwargs)
class DiscogsPlugin(MetadataSourcePlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"apikey": API_KEY,
"apisecret": API_SECRET,
"tokenfile": "discogs_token.json",
"user_token": "",
"separator": ", ",
"index_tracks": False,
"featured_string": "Feat.",
"append_style_genre": False,
"strip_disambiguation": True,
"anv": {
"artist_credit": True,
"artist": False,
"album_artist": False,
},
}
)
self.config["apikey"].redact = True
self.config["apisecret"].redact = True
self.config["user_token"].redact = True
self.setup()
def setup(self, session=None) -> None:
"""Create the `discogs_client` field. Authenticate if necessary."""
c_key = self.config["apikey"].as_str()
c_secret = self.config["apisecret"].as_str()
# Try using a configured user token (bypassing OAuth login).
user_token = self.config["user_token"].as_str()
if user_token:
# The rate limit for authenticated users goes up to 60
# requests per minute.
self.discogs_client = Client(USER_AGENT, user_token=user_token)
return
# Get the OAuth token from a file or log in.
try:
with open(self._tokenfile()) as f:
tokendata = json.load(f)
except OSError:
# No token yet. Generate one.
token, secret = self.authenticate(c_key, c_secret)
else:
token = tokendata["token"]
secret = tokendata["secret"]
self.discogs_client = Client(USER_AGENT, c_key, c_secret, token, secret)
def reset_auth(self) -> None:
"""Delete token file & redo the auth steps."""
os.remove(self._tokenfile())
self.setup()
def _tokenfile(self) -> str:
"""Get the path to the JSON file for storing the OAuth token."""
return self.config["tokenfile"].get(confuse.Filename(in_app_dir=True))
def authenticate(self, c_key: str, c_secret: str) -> tuple[str, str]:
# Get the link for the OAuth page.
auth_client = Client(USER_AGENT, c_key, c_secret)
try:
_, _, url = auth_client.get_authorize_url()
except CONNECTION_ERRORS as e:
self._log.debug("connection error: {}", e)
raise beets.ui.UserError("communication with Discogs failed")
beets.ui.print_("To authenticate with Discogs, visit:")
beets.ui.print_(url)
# Ask for the code and validate it.
code = beets.ui.input_("Enter the code:")
try:
token, secret = auth_client.get_access_token(code)
except DiscogsAPIError:
raise beets.ui.UserError("Discogs authorization failed")
except CONNECTION_ERRORS as e:
self._log.debug("connection error: {}", e)
raise beets.ui.UserError("Discogs token request failed")
# Save the token for later use.
self._log.debug("Discogs token {}, secret {}", token, secret)
with open(self._tokenfile(), "w") as f:
json.dump({"token": token, "secret": secret}, f)
return token, secret
def candidates(
self, items: Sequence[Item], artist: str, album: str, va_likely: bool
) -> Iterable[AlbumInfo]:
return self.get_albums(f"{artist} {album}" if va_likely else album)
def get_track_from_album(
self, album_info: AlbumInfo, compare: Callable[[TrackInfo], float]
) -> TrackInfo | None:
"""Return the best matching track of the release."""
scores_and_tracks = [(compare(t), t) for t in album_info.tracks]
score, track_info = min(scores_and_tracks, key=lambda x: x[0])
if score > 0.3:
return None
track_info["artist"] = album_info.artist
track_info["artist_id"] = album_info.artist_id
track_info["album"] = album_info.album
return track_info
def item_candidates(
self, item: Item, artist: str, title: str
) -> Iterable[TrackInfo]:
albums = self.candidates([item], artist, title, False)
def compare_func(track_info: TrackInfo) -> float:
return string_dist(track_info.title, title)
tracks = (self.get_track_from_album(a, compare_func) for a in albums)
return list(filter(None, tracks))
def album_for_id(self, album_id: str) -> AlbumInfo | None:
"""Fetches an album by its Discogs ID and returns an AlbumInfo object
or None if the album is not found.
"""
self._log.debug("Searching for release {}", album_id)
discogs_id = self._extract_id(album_id)
if not discogs_id:
return None
result = Release(self.discogs_client, {"id": discogs_id})
# Try to obtain title to verify that we indeed have a valid Release
try:
getattr(result, "title")
except DiscogsAPIError as e:
if e.status_code != 404:
self._log.debug(
"API Error: {} (query: {})",
e,
result.data["resource_url"],
)
if e.status_code == 401:
self.reset_auth()
return self.album_for_id(album_id)
return None
except CONNECTION_ERRORS:
self._log.debug("Connection error in album lookup", exc_info=True)
return None
return self.get_album_info(result)
def track_for_id(self, track_id: str) -> TrackInfo | None:
if album := self.album_for_id(track_id):
for track in album.tracks:
if track.track_id == track_id:
return track
return None
def get_albums(self, query: str) -> Iterable[AlbumInfo]:
"""Returns a list of AlbumInfo objects for a discogs search query."""
# Strip non-word characters from query. Things like "!" and "-" can
# cause a query to return no results, even if they match the artist or
# album title. Use `re.UNICODE` flag to avoid stripping non-english
# word characters.
query = re.sub(r"(?u)\W+", " ", query)
# Strip medium information from query, Things like "CD1" and "disk 1"
# can also negate an otherwise positive result.
query = re.sub(r"(?i)\b(CD|disc|vinyl)\s*\d+", "", query)
try:
results = self.discogs_client.search(query, type="release")
results.per_page = self.config["search_limit"].get()
releases = results.page(1)
except CONNECTION_ERRORS:
self._log.debug(
"Communication error while searching for {0!r}",
query,
exc_info=True,
)
return []
return filter(None, map(self.get_album_info, releases))
@cache
def get_master_year(self, master_id: str) -> int | None:
"""Fetches a master release given its Discogs ID and returns its year
or None if the master release is not found.
"""
self._log.debug("Getting master release {}", master_id)
result = Master(self.discogs_client, {"id": master_id})
try:
return result.fetch("year")
except DiscogsAPIError as e:
if e.status_code != 404:
self._log.debug(
"API Error: {} (query: {})",
e,
result.data["resource_url"],
)
if e.status_code == 401:
self.reset_auth()
return self.get_master_year(master_id)
return None
except CONNECTION_ERRORS:
self._log.debug(
"Connection error in master release lookup", exc_info=True
)
return None
@staticmethod
def get_media_and_albumtype(
formats: list[ReleaseFormat] | None,
) -> tuple[str | None, str | None]:
media = albumtype = None
if formats and (first_format := formats[0]):
if descriptions := first_format["descriptions"]:
albumtype = ", ".join(descriptions)
media = first_format["name"]
return media, albumtype
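    # Editor's sketch (illustrative): a `formats` value such as
    # [{"name": "Vinyl", "qty": 2, "descriptions": ["LP", "Album"]}]
    # produces ("Vinyl", "LP, Album").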
def get_artist_with_anv(
self, artists: list[Artist], use_anv: bool = False
) -> tuple[str, str | None]:
"""Iterates through a discogs result, fetching data
if the artist anv is to be used, maps that to the name.
Calls the parent class get_artist method."""
artist_list: list[dict[str | int, str]] = []
for artist_data in artists:
a: dict[str | int, str] = {
"name": artist_data["name"],
"id": artist_data["id"],
"join": artist_data.get("join", ""),
}
if use_anv and (anv := artist_data.get("anv", "")):
a["name"] = anv
artist_list.append(a)
artist, artist_id = self.get_artist(artist_list, join_key="join")
return self.strip_disambiguation(artist), artist_id
def get_album_info(self, result: Release) -> AlbumInfo | None:
"""Returns an AlbumInfo object for a discogs Release object."""
# Explicitly reload the `Release` fields, as they might not be yet
# present if the result is from a `discogs_client.search()`.
if not result.data.get("artists"):
try:
result.refresh()
except CONNECTION_ERRORS:
self._log.debug(
"Connection error in release lookup: {0}",
result,
)
return None
# Sanity check for required fields. The list of required fields is
# defined at Guideline 1.3.1.a, but in practice some releases might be
# lacking some of these fields. This function expects at least:
# `artists` (>0), `title`, `id`, `tracklist` (>0)
# https://www.discogs.com/help/doc/submission-guidelines-general-rules
if not all(
[
result.data.get(k)
for k in ["artists", "title", "id", "tracklist"]
]
):
self._log.warning("Release does not contain the required fields")
return None
artist_data = [a.data for a in result.artists]
album_artist, album_artist_id = self.get_artist_with_anv(artist_data)
album_artist_anv, _ = self.get_artist_with_anv(
artist_data, use_anv=True
)
artist_credit = album_artist_anv
album = re.sub(r" +", " ", result.title)
album_id = result.data["id"]
# Use `.data` to access the tracklist directly instead of the
# convenient `.tracklist` property, which will strip out useful artist
# information and leave us with skeleton `Artist` objects that will
# each make an API call just to get the same data back.
tracks = self.get_tracks(
result.data["tracklist"],
(album_artist, album_artist_anv, album_artist_id),
)
# Assign ANV to the proper fields for tagging
if not self.config["anv"]["artist_credit"]:
artist_credit = album_artist
if self.config["anv"]["album_artist"]:
album_artist = album_artist_anv
# Extract information for the optional AlbumInfo fields, if possible.
va = result.data["artists"][0].get("name", "").lower() == "various"
year = result.data.get("year")
mediums = [t.medium for t in tracks]
country = result.data.get("country")
data_url = result.data.get("uri")
style = self.format(result.data.get("styles"))
base_genre = self.format(result.data.get("genres"))
if self.config["append_style_genre"] and style:
genre = self.config["separator"].as_str().join([base_genre, style])
else:
genre = base_genre
discogs_albumid = self._extract_id(result.data.get("uri"))
# Extract information for the optional AlbumInfo fields that are
# contained on nested discogs fields.
media, albumtype = self.get_media_and_albumtype(
result.data.get("formats")
)
label = catalogno = labelid = None
if result.data.get("labels"):
label = self.strip_disambiguation(
result.data["labels"][0].get("name")
)
catalogno = result.data["labels"][0].get("catno")
labelid = result.data["labels"][0].get("id")
cover_art_url = self.select_cover_art(result)
# Additional cleanups
# (various artists name, catalog number, media, disambiguation).
if va:
va_name = config["va_name"].as_str()
album_artist = va_name
artist_credit = va_name
if catalogno == "none":
catalogno = None
# Explicitly set the `media` for the tracks, since it is expected by
# `autotag.apply_metadata`, and set `medium_total`.
for track in tracks:
track.media = media
track.medium_total = mediums.count(track.medium)
# Discogs does not have track IDs. Invent our own IDs as proposed
# in #2336.
track.track_id = f"{album_id}-{track.track_alt}"
track.data_url = data_url
track.data_source = "Discogs"
# Retrieve master release id (returns None if there isn't one).
master_id = result.data.get("master_id")
# Assume `original_year` is equal to `year` for releases without
# a master release, otherwise fetch the master release.
original_year = self.get_master_year(master_id) if master_id else year
return AlbumInfo(
album=album,
album_id=album_id,
artist=album_artist,
artist_credit=artist_credit,
artist_id=album_artist_id,
tracks=tracks,
albumtype=albumtype,
va=va,
year=year,
label=label,
mediums=len(set(mediums)),
releasegroup_id=master_id,
catalognum=catalogno,
country=country,
style=style,
genre=genre,
media=media,
original_year=original_year,
data_source=self.data_source,
data_url=data_url,
discogs_albumid=discogs_albumid,
discogs_labelid=labelid,
discogs_artistid=album_artist_id,
cover_art_url=cover_art_url,
)
def select_cover_art(self, result: Release) -> str | None:
"""Returns the best candidate image, if any, from a Discogs `Release` object."""
if result.data.get("images") and len(result.data.get("images")) > 0:
# The first image in this list appears to be the one displayed first
# on the release page - even if it is not flagged as `type: "primary"` - and
# so it is the best candidate for the cover art.
return result.data.get("images")[0].get("uri")
return None
def format(self, classification: Iterable[str]) -> str | None:
if classification:
return (
self.config["separator"].as_str().join(sorted(classification))
)
else:
return None
def _process_clean_tracklist(
self,
clean_tracklist: list[Track],
album_artist_data: tuple[str, str, str | None],
) -> tuple[list[TrackInfo], dict[int, str], int, list[str], list[str]]:
# Distinct works and intra-work divisions, as defined by index tracks.
tracks: list[TrackInfo] = []
index_tracks = {}
index = 0
divisions: list[str] = []
next_divisions: list[str] = []
for track in clean_tracklist:
# Only real tracks have `position`. Otherwise, it's an index track.
if track["position"]:
index += 1
if next_divisions:
# End of a block of index tracks: update the current
# divisions.
divisions += next_divisions
del next_divisions[:]
track_info = self.get_track_info(
track, index, divisions, album_artist_data
)
track_info.track_alt = track["position"]
tracks.append(track_info)
else:
next_divisions.append(track["title"])
# We expect new levels of division at the beginning of the
# tracklist (and possibly elsewhere).
try:
divisions.pop()
except IndexError:
pass
index_tracks[index + 1] = track["title"]
return tracks, index_tracks, index, divisions, next_divisions
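    # A hedged sketch of the division bookkeeping above (titles are
    # hypothetical): given a tracklist of
    #
    #   "Act I" (index track), "1", "2", "Act II" (index track), "3"
    #
    # tracks 1 and 2 see divisions == ["Act I"], track 3 sees
    # divisions == ["Act II"], and index_tracks ends up as
    # {1: "Act I", 3: "Act II"} for the later disctitle lookup.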
def get_tracks(
self,
tracklist: list[Track],
album_artist_data: tuple[str, str, str | None],
) -> list[TrackInfo]:
"""Returns a list of TrackInfo objects for a discogs tracklist."""
try:
clean_tracklist: list[Track] = self.coalesce_tracks(
cast(list[TrackWithSubtracks], tracklist)
)
except Exception as exc:
# FIXME: this is an extra precaution for making sure there are no
# side effects after #2222. It should be removed after further
# testing.
self._log.debug("{}", traceback.format_exc())
self._log.error("uncaught exception in coalesce_tracks: {}", exc)
clean_tracklist = tracklist
processed = self._process_clean_tracklist(
clean_tracklist, album_artist_data
)
tracks, index_tracks, index, divisions, next_divisions = processed
# Fix up medium and medium_index for each track. Discogs position is
# unreliable, but tracks are in order.
medium = None
medium_count, index_count, side_count = 0, 0, 0
sides_per_medium = 1
        # If a medium has two sides (i.e. vinyl or cassette), each pair of
        # consecutive sides should belong to the same medium.
        if all(track.medium_str is not None for track in tracks):
m = sorted({track.medium_str.lower() for track in tracks})
# If all track.medium are single consecutive letters, assume it is
# a 2-sided medium.
if "".join(m) in ascii_lowercase:
sides_per_medium = 2
for track in tracks:
# Handle special case where a different medium does not indicate a
# new disc, when there is no medium_index and the ordinal of medium
# is not sequential. For example, I, II, III, IV, V. Assume these
# are the track index, not the medium.
# side_count is the number of mediums or medium sides (in the case
# of two-sided mediums) that were seen before.
medium_is_index = (
track.medium_str
and not track.medium_index
and (
len(track.medium_str) != 1
or
# Not within standard incremental medium values (A, B, C, ...).
ord(track.medium_str) - 64 != side_count + 1
)
)
if not medium_is_index and medium != track.medium_str:
side_count += 1
if sides_per_medium == 2:
if side_count % sides_per_medium:
# Two-sided medium changed. Reset index_count.
index_count = 0
medium_count += 1
else:
# Medium changed. Reset index_count.
medium_count += 1
index_count = 0
medium = track.medium_str
index_count += 1
medium_count = 1 if medium_count == 0 else medium_count
track.medium, track.medium_index = medium_count, index_count
# Get `disctitle` from Discogs index tracks. Assume that an index track
# before the first track of each medium is a disc title.
        for track in tracks:
            if track.medium_index == 1:
                track.disctitle = index_tracks.get(track.index)
return cast(list[TrackInfo], tracks)
def coalesce_tracks(
self, raw_tracklist: list[TrackWithSubtracks]
) -> list[Track]:
"""Pre-process a tracklist, merging subtracks into a single track. The
title for the merged track is the one from the previous index track,
        if present; otherwise it is a combination of the subtracks' titles.
"""
def add_merged_subtracks(
tracklist: list[TrackWithSubtracks],
subtracks: list[TrackWithSubtracks],
        ) -> None:
            """Modify `tracklist` in place, merging a list of `subtracks`
            into a single track that is appended to `tracklist`."""
# Calculate position based on first subtrack, without subindex.
idx, medium_idx, sub_idx = self.get_track_index(
subtracks[0]["position"]
)
position = f"{idx or ''}{medium_idx or ''}"
if tracklist and not tracklist[-1]["position"]:
# Assume the previous index track contains the track title.
if sub_idx:
# "Convert" the track title to a real track, discarding the
# subtracks assuming they are logical divisions of a
# physical track (12.2.9 Subtracks).
tracklist[-1]["position"] = position
else:
# Promote the subtracks to real tracks, discarding the
# index track, assuming the subtracks are physical tracks.
index_track = tracklist.pop()
# Fix artists when they are specified on the index track.
if index_track.get("artists"):
for subtrack in subtracks:
if not subtrack.get("artists"):
subtrack["artists"] = index_track["artists"]
# Concatenate index with track title when index_tracks
# option is set
if self.config["index_tracks"]:
for subtrack in subtracks:
subtrack["title"] = (
f"{index_track['title']}: {subtrack['title']}"
)
tracklist.extend(subtracks)
else:
# Merge the subtracks, pick a title, and append the new track.
track = subtracks[0].copy()
track["title"] = " / ".join([t["title"] for t in subtracks])
tracklist.append(track)
# Pre-process the tracklist, trying to identify subtracks.
subtracks: list[TrackWithSubtracks] = []
tracklist: list[TrackWithSubtracks] = []
prev_subindex = ""
for track in raw_tracklist:
# Regular subtrack (track with subindex).
if track["position"]:
_, _, subindex = self.get_track_index(track["position"])
if subindex:
if subindex.rjust(len(raw_tracklist)) > prev_subindex:
# Subtrack still part of the current main track.
subtracks.append(track)
else:
# Subtrack part of a new group (..., 1.3, *2.1*, ...).
add_merged_subtracks(tracklist, subtracks)
subtracks = [track]
prev_subindex = subindex.rjust(len(raw_tracklist))
continue
# Index track with nested sub_tracks.
if not track["position"] and "sub_tracks" in track:
# Append the index track, assuming it contains the track title.
tracklist.append(track)
add_merged_subtracks(tracklist, track["sub_tracks"])
continue
# Regular track or index track without nested sub_tracks.
if subtracks:
add_merged_subtracks(tracklist, subtracks)
subtracks = []
prev_subindex = ""
tracklist.append(track)
# Merge and add the remaining subtracks, if any.
if subtracks:
add_merged_subtracks(tracklist, subtracks)
return cast(list[Track], tracklist)
    def strip_disambiguation(self, text: str) -> str:
        """Remove Discogs-specific disambiguation numbers from a string.
        Turns 'Label Name (5)' into 'Label Name' and 'Artist (1) & Another
        Artist (2)' into 'Artist & Another Artist'. Does nothing if the
        `strip_disambiguation` option is False."""
if not self.config["strip_disambiguation"]:
return text
return DISAMBIGUATION_RE.sub("", text)
def get_track_info(
self,
track: Track,
index: int,
divisions: list[str],
album_artist_data: tuple[str, str, str | None],
) -> IntermediateTrackInfo:
"""Returns a TrackInfo object for a discogs track."""
artist, artist_anv, artist_id = album_artist_data
artist_credit = artist_anv
if not self.config["anv"]["artist_credit"]:
artist_credit = artist
if self.config["anv"]["artist"]:
artist = artist_anv
title = track["title"]
if self.config["index_tracks"]:
prefix = ", ".join(divisions)
if prefix:
title = f"{prefix}: {title}"
track_id = None
medium, medium_index, _ = self.get_track_index(track["position"])
# If artists are found on the track, we will use those instead
if artists := track.get("artists", []):
artist, artist_id = self.get_artist_with_anv(
artists, self.config["anv"]["artist"]
)
artist_credit, _ = self.get_artist_with_anv(
artists, self.config["anv"]["artist_credit"]
)
length = self.get_track_length(track["duration"])
# Add featured artists
if extraartists := track.get("extraartists", []):
featured_list = [
artist
for artist in extraartists
if "Featuring" in artist["role"]
]
featured, _ = self.get_artist_with_anv(
featured_list, self.config["anv"]["artist"]
)
featured_credit, _ = self.get_artist_with_anv(
featured_list, self.config["anv"]["artist_credit"]
)
if featured:
artist += f" {self.config['featured_string']} {featured}"
artist_credit += (
f" {self.config['featured_string']} {featured_credit}"
)
return IntermediateTrackInfo(
title=title,
track_id=track_id,
artist_credit=artist_credit,
artist=artist,
artist_id=artist_id,
length=length,
index=index,
medium_str=medium,
medium_index=medium_index,
)
@staticmethod
def get_track_index(
position: str,
) -> tuple[str | None, str | None, str | None]:
"""Returns the medium, medium index and subtrack index for a discogs
track position."""
# Match the standard Discogs positions (12.2.9), which can have several
# forms (1, 1-1, A1, A1.1, A1a, ...).
medium = index = subindex = None
if match := TRACK_INDEX_RE.fullmatch(position.upper()):
medium, index, subindex = match.groups()
if subindex and subindex.startswith("."):
subindex = subindex[1:]
return medium or None, index or None, subindex or None
def get_track_length(self, duration: str) -> int | None:
"""Returns the track length in seconds for a discogs duration."""
try:
length = time.strptime(duration, "%M:%S")
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
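# Illustrative sketch (not executed) of the position parsing above, assuming
# the standard Discogs forms handled by get_track_index():
#
#   "A1"   -> medium "A", medium index "1", no subindex
#   "A1.2" -> medium "A", medium index "1", subindex "2" (leading dot stripped)
#   "3"    -> no medium, medium index "3", no subindex
#
# get_track_length() only understands "MM:SS" durations, e.g. "3:45" -> 225
# seconds; anything else yields None.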
beetbox-beets-c1877b7/beetsplug/duplicates.py 0000664 0000000 0000000 00000032654 15073551743 0021333 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Pedro Silva.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""List duplicate tracks or albums."""
import os
import shlex
from beets.library import Album, Item
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, UserError, print_
from beets.util import (
MoveOperation,
bytestring_path,
command_output,
displayable_path,
subprocess,
)
PLUGIN = "duplicates"
class DuplicatesPlugin(BeetsPlugin):
"""List duplicate tracks or albums"""
def __init__(self):
super().__init__()
self.config.add(
{
"album": False,
"checksum": "",
"copy": "",
"count": False,
"delete": False,
"format": "",
"full": False,
"keys": [],
"merge": False,
"move": "",
"path": False,
"tiebreak": {},
"strict": False,
"tag": "",
"remove": False,
}
)
self._command = Subcommand("duplicates", help=__doc__, aliases=["dup"])
self._command.parser.add_option(
"-c",
"--count",
dest="count",
action="store_true",
help="show duplicate counts",
)
self._command.parser.add_option(
"-C",
"--checksum",
dest="checksum",
action="store",
metavar="PROG",
help="report duplicates based on arbitrary command",
)
self._command.parser.add_option(
"-d",
"--delete",
dest="delete",
action="store_true",
help="delete items from library and disk",
)
self._command.parser.add_option(
"-F",
"--full",
dest="full",
action="store_true",
help="show all versions of duplicate tracks or albums",
)
self._command.parser.add_option(
"-s",
"--strict",
dest="strict",
action="store_true",
help="report duplicates only if all attributes are set",
)
self._command.parser.add_option(
"-k",
"--key",
dest="keys",
action="append",
metavar="KEY",
help="report duplicates based on keys (use multiple times)",
)
self._command.parser.add_option(
"-M",
"--merge",
dest="merge",
action="store_true",
help="merge duplicate items",
)
self._command.parser.add_option(
"-m",
"--move",
dest="move",
action="store",
metavar="DEST",
help="move items to dest",
)
self._command.parser.add_option(
"-o",
"--copy",
dest="copy",
action="store",
metavar="DEST",
help="copy items to dest",
)
self._command.parser.add_option(
"-t",
"--tag",
dest="tag",
action="store",
help="tag matched items with 'k=v' attribute",
)
self._command.parser.add_option(
"-r",
"--remove",
dest="remove",
action="store_true",
help="remove items from library",
)
self._command.parser.add_all_common_options()
def commands(self):
def _dup(lib, opts, args):
self.config.set_args(opts)
album = self.config["album"].get(bool)
checksum = self.config["checksum"].get(str)
copy = bytestring_path(self.config["copy"].as_str())
count = self.config["count"].get(bool)
delete = self.config["delete"].get(bool)
remove = self.config["remove"].get(bool)
fmt_tmpl = self.config["format"].get(str)
full = self.config["full"].get(bool)
keys = self.config["keys"].as_str_seq()
merge = self.config["merge"].get(bool)
move = bytestring_path(self.config["move"].as_str())
path = self.config["path"].get(bool)
tiebreak = self.config["tiebreak"].get(dict)
strict = self.config["strict"].get(bool)
tag = self.config["tag"].get(str)
if album:
if not keys:
keys = ["mb_albumid"]
items = lib.albums(args)
else:
if not keys:
keys = ["mb_trackid", "mb_albumid"]
items = lib.items(args)
# If there's nothing to do, return early. The code below assumes
# `items` to be non-empty.
if not items:
return
if path:
fmt_tmpl = "$path"
# Default format string for count mode.
if count and not fmt_tmpl:
if album:
fmt_tmpl = "$albumartist - $album"
else:
fmt_tmpl = "$albumartist - $album - $title"
if checksum:
for i in items:
k, _ = self._checksum(i, checksum)
keys = [k]
for obj_id, obj_count, objs in self._duplicates(
items,
keys=keys,
full=full,
strict=strict,
tiebreak=tiebreak,
merge=merge,
):
if obj_id: # Skip empty IDs.
for o in objs:
self._process_item(
o,
copy=copy,
move=move,
delete=delete,
remove=remove,
tag=tag,
fmt=f"{fmt_tmpl}: {obj_count}",
)
self._command.func = _dup
return [self._command]
def _process_item(
self,
item,
copy=False,
move=False,
delete=False,
tag=False,
fmt="",
remove=False,
):
"""Process Item `item`."""
print_(format(item, fmt))
if copy:
item.move(basedir=copy, operation=MoveOperation.COPY)
item.store()
if move:
item.move(basedir=move)
item.store()
if delete:
item.remove(delete=True)
elif remove:
item.remove(delete=False)
if tag:
try:
k, v = tag.split("=")
except Exception:
raise UserError(f"{PLUGIN}: can't parse k=v tag: {tag}")
setattr(item, k, v)
item.store()
def _checksum(self, item, prog):
"""Run external `prog` on file path associated with `item`, cache
output as flexattr on a key that is the name of the program, and
return the key, checksum tuple.
"""
args = [
p.format(file=os.fsdecode(item.path)) for p in shlex.split(prog)
]
key = args[0]
checksum = getattr(item, key, False)
if not checksum:
            self._log.debug(
                "key {} on item {.filepath} not cached: computing checksum",
key,
item,
)
try:
checksum = command_output(args).stdout
setattr(item, key, checksum)
item.store()
self._log.debug(
"computed checksum for {.title} using {}", item, key
)
except subprocess.CalledProcessError as e:
self._log.debug("failed to checksum {.filepath}: {}", item, e)
else:
            self._log.debug(
                "key {} on item {.filepath} cached: not computing checksum",
key,
item,
)
return key, checksum
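    # As a hedged usage sketch: the checksum program is a shell-style command
    # template in which "{file}" is substituted with the item's path, so an
    # invocation along the lines of (assuming md5sum is available)
    #
    #   beet duplicates --checksum 'md5sum {file}'
    #
    # computes the checksum once per item and caches it as an "md5sum"
    # flexible attribute.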
def _group_by(self, objs, keys, strict):
"""Return a dictionary with keys arbitrary concatenations of attributes
and values lists of objects (Albums or Items) with those keys.
If strict, all attributes must be defined for a duplicate match.
"""
import collections
counts = collections.defaultdict(list)
for obj in objs:
values = [getattr(obj, k, None) for k in keys]
values = [v for v in values if v not in (None, "")]
if strict and len(values) < len(keys):
self._log.debug(
"some keys {} on item {.filepath} are null or empty: skipping",
keys,
obj,
)
elif not strict and not len(values):
self._log.debug(
"all keys {} on item {.filepath} are null or empty: skipping",
keys,
obj,
)
else:
key = tuple(values)
counts[key].append(obj)
return counts
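    # For instance (hypothetical objects), two items sharing both default
    # keys collapse into a single group:
    #
    #   _group_by([item1, item2], ["mb_trackid", "mb_albumid"], strict=False)
    #   -> {("track-id", "album-id"): [item1, item2]}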
def _order(self, objs, tiebreak=None):
"""Return the objects (Items or Albums) sorted by descending
order of priority.
If provided, the `tiebreak` dict indicates the field to use to
prioritize the objects. Otherwise, Items are placed in order of
"completeness" (objects with more non-null fields come first)
and Albums are ordered by their track count.
"""
kind = "items" if all(isinstance(o, Item) for o in objs) else "albums"
if tiebreak and kind in tiebreak.keys():
def key(x):
return tuple(getattr(x, k) for k in tiebreak[kind])
else:
if kind == "items":
def truthy(v):
# Avoid a Unicode warning by avoiding comparison
# between a bytes object and the empty Unicode
# string ''.
return v is not None and (
v != "" if isinstance(v, str) else True
)
fields = Item.all_keys()
def key(x):
return sum(1 for f in fields if truthy(getattr(x, f)))
else:
def key(x):
return len(x.items())
return sorted(objs, key=key, reverse=True)
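    # A minimal sketch of how `tiebreak` is consumed: it maps an object kind
    # ("items" or "albums") to a list of fields, so a configuration such as
    #
    #   duplicates:
    #       tiebreak:
    #           items: [bitrate]
    #
    # sorts duplicate items by bitrate, keeping the highest-bitrate copy
    # first.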
def _merge_items(self, objs):
"""Merge Item objs by copying missing fields from items in the tail to
the head item.
Return same number of items, with the head item modified.
"""
fields = Item.all_keys()
for f in fields:
for o in objs[1:]:
if getattr(objs[0], f, None) in (None, ""):
value = getattr(o, f, None)
if value:
self._log.debug(
"key {} on item {} is null "
"or empty: setting from item {.filepath}",
f,
displayable_path(objs[0].path),
o,
)
setattr(objs[0], f, value)
objs[0].store()
break
return objs
def _merge_albums(self, objs):
"""Merge Album objs by copying missing items from albums in the tail
to the head album.
Return same number of albums, with the head album modified."""
ids = [i.mb_trackid for i in objs[0].items()]
for o in objs[1:]:
for i in o.items():
if i.mb_trackid not in ids:
missing = Item.from_path(i.path)
missing.album_id = objs[0].id
missing.add(i._db)
self._log.debug(
"item {} missing from album {}:"
" merging from {.filepath} into {}",
missing,
objs[0],
o,
displayable_path(missing.destination()),
)
missing.move(operation=MoveOperation.COPY)
return objs
def _merge(self, objs):
"""Merge duplicate items. See ``_merge_items`` and ``_merge_albums``
for the relevant strategies.
"""
kind = Item if all(isinstance(o, Item) for o in objs) else Album
if kind is Item:
objs = self._merge_items(objs)
else:
objs = self._merge_albums(objs)
return objs
def _duplicates(self, objs, keys, full, strict, tiebreak, merge):
"""Generate triples of keys, duplicate counts, and constituent objects."""
offset = 0 if full else 1
for k, objs in self._group_by(objs, keys, strict).items():
if len(objs) > 1:
objs = self._order(objs, tiebreak)
if merge:
objs = self._merge(objs)
yield (k, len(objs) - offset, objs[offset:])
beetbox-beets-c1877b7/beetsplug/edit.py 0000664 0000000 0000000 00000032720 15073551743 0020115 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Open metadata information in a text editor to let the user edit it."""
import codecs
import os
import shlex
import subprocess
from tempfile import NamedTemporaryFile
import yaml
from beets import plugins, ui, util
from beets.dbcore import types
from beets.importer import Action
from beets.ui.commands import PromptChoice, _do_query
# These "safe" types can avoid the format/parse cycle that most fields go
# through: they are safe to edit with native YAML types.
SAFE_TYPES = (types.BaseFloat, types.BaseInteger, types.Boolean)
class ParseError(Exception):
"""The modified file is unreadable. The user should be offered a chance to
fix the error.
"""
def edit(filename, log):
"""Open `filename` in a text editor."""
cmd = shlex.split(util.editor_command())
cmd.append(filename)
log.debug("invoking editor command: {!r}", cmd)
try:
subprocess.call(cmd)
except OSError as exc:
raise ui.UserError(f"could not run editor command {cmd[0]!r}: {exc}")
def dump(arg):
"""Dump a sequence of dictionaries as YAML for editing."""
return yaml.safe_dump_all(
arg,
allow_unicode=True,
default_flow_style=False,
)
def load(s):
"""Read a sequence of YAML documents back to a list of dictionaries
with string keys.
Can raise a `ParseError`.
"""
try:
out = []
for d in yaml.safe_load_all(s):
if not isinstance(d, dict):
raise ParseError(
f"each entry must be a dictionary; found {type(d).__name__}"
)
# Convert all keys to strings. They started out as strings,
# but the user may have inadvertently messed this up.
out.append({str(k): v for k, v in d.items()})
except yaml.YAMLError as e:
raise ParseError(f"invalid YAML: {e}")
return out
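# As a rough round-trip illustration (values are hypothetical), dump()
# produces a multi-document YAML stream that load() turns back into a list
# of dictionaries:
#
#   >>> dump([{"id": 1, "title": "Foo"}, {"id": 2, "title": "Bar"}])
#   'id: 1\ntitle: Foo\n---\nid: 2\ntitle: Bar\n'
#   >>> load("id: 1\ntitle: Foo\n---\nid: 2\ntitle: Bar\n")
#   [{'id': 1, 'title': 'Foo'}, {'id': 2, 'title': 'Bar'}]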
def _safe_value(obj, key, value):
"""Check whether the `value` is safe to represent in YAML and trust as
returned from parsed YAML.
This ensures that values do not change their type when the user edits their
YAML representation.
"""
typ = obj._type(key)
return isinstance(typ, SAFE_TYPES) and isinstance(value, typ.model_type)
def flatten(obj, fields):
"""Represent `obj`, a `dbcore.Model` object, as a dictionary for
serialization. Only include the given `fields` if provided;
otherwise, include everything.
The resulting dictionary's keys are strings and the values are
safely YAML-serializable types.
"""
# Format each value.
d = {}
for key in obj.keys():
value = obj[key]
if _safe_value(obj, key, value):
# A safe value that is faithfully representable in YAML.
d[key] = value
else:
# A value that should be edited as a string.
d[key] = obj.formatted()[key]
# Possibly filter field names.
if fields:
return {k: v for k, v in d.items() if k in fields}
else:
return d
def apply_(obj, data):
"""Set the fields of a `dbcore.Model` object according to a
dictionary.
This is the opposite of `flatten`. The `data` dictionary should have
strings as values.
"""
for key, value in data.items():
if _safe_value(obj, key, value):
# A safe value *stayed* represented as a safe type. Assign it
# directly.
obj[key] = value
else:
# Either the field was stringified originally or the user changed
# it from a safe type to an unsafe one. Parse it as a string.
obj.set_parse(key, str(value))
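# Sketch of the flatten()/apply_() contract with hypothetical fields: a safe
# value such as an integer track number survives the YAML round-trip as an
# int and is assigned back directly, while a field like the track length is
# shown to the user as its formatted string (e.g. "4:14") and re-parsed via
# set_parse() when applied.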
class EditPlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
# The default fields to edit.
"albumfields": "album albumartist",
"itemfields": "track title artist album",
# Silently ignore any changes to these fields.
"ignore_fields": "id path",
}
)
self.register_listener(
"before_choose_candidate", self.before_choose_candidate_listener
)
def commands(self):
edit_command = ui.Subcommand("edit", help="interactively edit metadata")
edit_command.parser.add_option(
"-f",
"--field",
metavar="FIELD",
action="append",
help="edit this field also",
)
edit_command.parser.add_option(
"--all",
action="store_true",
dest="all",
help="edit all fields",
)
edit_command.parser.add_album_option()
edit_command.func = self._edit_command
return [edit_command]
def _edit_command(self, lib, opts, args):
"""The CLI command function for the `beet edit` command."""
# Get the objects to edit.
items, albums = _do_query(lib, args, opts.album, False)
objs = albums if opts.album else items
if not objs:
ui.print_("Nothing to edit.")
return
# Get the fields to edit.
if opts.all:
fields = None
else:
fields = self._get_fields(opts.album, opts.field)
self.edit(opts.album, objs, fields)
def _get_fields(self, album, extra):
"""Get the set of fields to edit."""
# Start with the configured base fields.
if album:
fields = self.config["albumfields"].as_str_seq()
else:
fields = self.config["itemfields"].as_str_seq()
# Add the requested extra fields.
if extra:
fields += extra
# Ensure we always have the `id` field for identification.
fields.append("id")
return set(fields)
def edit(self, album, objs, fields):
"""The core editor function.
- `album`: A flag indicating whether we're editing Items or Albums.
- `objs`: The `Item`s or `Album`s to edit.
- `fields`: The set of field names to edit (or None to edit
everything).
"""
# Present the YAML to the user and let them change it.
success = self.edit_objects(objs, fields)
# Save the new data.
if success:
self.save_changes(objs)
def edit_objects(self, objs, fields):
"""Dump a set of Model objects to a file as text, ask the user
to edit it, and apply any changes to the objects.
Return a boolean indicating whether the edit succeeded.
"""
# Get the content to edit as raw data structures.
old_data = [flatten(o, fields) for o in objs]
# Set up a temporary file with the initial data for editing.
new = NamedTemporaryFile(
mode="w", suffix=".yaml", delete=False, encoding="utf-8"
)
old_str = dump(old_data)
new.write(old_str)
new.close()
# Loop until we have parseable data and the user confirms.
try:
while True:
# Ask the user to edit the data.
edit(new.name, self._log)
# Read the data back after editing and check whether anything
# changed.
with codecs.open(new.name, encoding="utf-8") as f:
new_str = f.read()
if new_str == old_str:
ui.print_("No changes; aborting.")
return False
# Parse the updated data.
try:
new_data = load(new_str)
except ParseError as e:
ui.print_(f"Could not read data: {e}")
if ui.input_yn("Edit again to fix? (Y/n)", True):
continue
else:
return False
# Show the changes.
# If the objects are not on the DB yet, we need a copy of their
# original state for show_model_changes.
objs_old = [obj.copy() if obj.id < 0 else None for obj in objs]
self.apply_data(objs, old_data, new_data)
changed = False
for obj, obj_old in zip(objs, objs_old):
changed |= ui.show_model_changes(obj, obj_old)
if not changed:
ui.print_("No changes to apply.")
return False
# Confirm the changes.
choice = ui.input_options(
("continue Editing", "apply", "cancel")
)
if choice == "a": # Apply.
return True
elif choice == "c": # Cancel.
return False
elif choice == "e": # Keep editing.
                    # Reset the temporary changes to the objects. If we have
                    # a copy from above, use it; otherwise reload from the
                    # database.
objs = [
(old_obj or obj) for old_obj, obj in zip(objs_old, objs)
]
for obj in objs:
if not obj.id < 0:
obj.load()
continue
# Remove the temporary file before returning.
finally:
os.remove(new.name)
def apply_data(self, objs, old_data, new_data):
"""Take potentially-updated data and apply it to a set of Model
objects.
The objects are not written back to the database, so the changes
are temporary.
"""
if len(old_data) != len(new_data):
self._log.warning(
"number of objects changed from {} to {}",
len(old_data),
len(new_data),
)
obj_by_id = {o.id: o for o in objs}
ignore_fields = self.config["ignore_fields"].as_str_seq()
for old_dict, new_dict in zip(old_data, new_data):
# Prohibit any changes to forbidden fields to avoid
# clobbering `id` and such by mistake.
forbidden = False
for key in ignore_fields:
if old_dict.get(key) != new_dict.get(key):
self._log.warning("ignoring object whose {} changed", key)
forbidden = True
break
if forbidden:
continue
id_ = int(old_dict["id"])
apply_(obj_by_id[id_], new_dict)
def save_changes(self, objs):
"""Save a list of updated Model objects to the database."""
# Save to the database and possibly write tags.
for ob in objs:
if ob._dirty:
self._log.debug("saving changes to {}", ob)
ob.try_sync(ui.should_write(), ui.should_move())
# Methods for interactive importer execution.
def before_choose_candidate_listener(self, session, task):
"""Append an "Edit" choice and an "edit Candidates" choice (if
there are candidates) to the interactive importer prompt.
"""
choices = [PromptChoice("d", "eDit", self.importer_edit)]
if task.candidates:
choices.append(
PromptChoice(
"c", "edit Candidates", self.importer_edit_candidate
)
)
return choices
def importer_edit(self, session, task):
"""Callback for invoking the functionality during an interactive
import session on the *original* item tags.
"""
# Assign negative temporary ids to Items that are not in the database
# yet. By using negative values, no clash with items in the database
# can occur.
for i, obj in enumerate(task.items, start=1):
# The importer may set the id to None when re-importing albums.
if not obj._db or obj.id is None:
obj.id = -i
# Present the YAML to the user and let them change it.
fields = self._get_fields(album=False, extra=[])
success = self.edit_objects(task.items, fields)
# Remove temporary ids.
for obj in task.items:
if obj.id < 0:
obj.id = None
# Save the new data.
if success:
# Return Action.RETAG, which makes the importer write the tags
# to the files if needed without re-applying metadata.
return Action.RETAG
else:
# Edit cancelled / no edits made. Revert changes.
for obj in task.items:
obj.read()
def importer_edit_candidate(self, session, task):
"""Callback for invoking the functionality during an interactive
import session on a *candidate*. The candidate's metadata is
applied to the original items.
"""
# Prompt the user for a candidate.
sel = ui.input_options([], numrange=(1, len(task.candidates)))
# Force applying the candidate on the items.
task.match = task.candidates[sel - 1]
task.apply_metadata()
return self.importer_edit(session, task)
beetbox-beets-c1877b7/beetsplug/embedart.py 0000664 0000000 0000000 00000022644 15073551743 0020757 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Allows beets to embed album art into file metadata."""
import os.path
import tempfile
from mimetypes import guess_extension
import requests
from beets import config, ui
from beets.plugins import BeetsPlugin
from beets.ui import print_
from beets.util import bytestring_path, displayable_path, normpath, syspath
from beets.util.artresizer import ArtResizer
from beetsplug._utils import art
def _confirm(objs, album):
"""Show the list of affected objects (items or albums) and confirm
that the user wants to modify their artwork.
`album` is a Boolean indicating whether these are albums (as opposed
to items).
"""
noun = "album" if album else "file"
prompt = (
"Modify artwork for"
f" {len(objs)} {noun}{'s' if len(objs) > 1 else ''} (Y/n)?"
)
# Show all the items or albums.
for obj in objs:
print_(format(obj))
# Confirm with user.
return ui.input_yn(prompt)
class EmbedCoverArtPlugin(BeetsPlugin):
    """Allows album art to be embedded into the actual files."""
def __init__(self):
super().__init__()
self.config.add(
{
"maxwidth": 0,
"auto": True,
"compare_threshold": 0,
"ifempty": False,
"remove_art_file": False,
"quality": 0,
}
)
if self.config["maxwidth"].get(int) and not ArtResizer.shared.local:
self.config["maxwidth"] = 0
self._log.warning(
"ImageMagick or PIL not found; 'maxwidth' option ignored"
)
if (
self.config["compare_threshold"].get(int)
and not ArtResizer.shared.can_compare
):
self.config["compare_threshold"] = 0
self._log.warning(
"ImageMagick 6.8.7 or higher not installed; "
"'compare_threshold' option ignored"
)
self.register_listener("art_set", self.process_album)
def commands(self):
# Embed command.
embed_cmd = ui.Subcommand(
"embedart", help="embed image files into file metadata"
)
embed_cmd.parser.add_option(
"-f", "--file", metavar="PATH", help="the image file to embed"
)
embed_cmd.parser.add_option(
"-y", "--yes", action="store_true", help="skip confirmation"
)
embed_cmd.parser.add_option(
"-u",
"--url",
metavar="URL",
help="the URL of the image file to embed",
)
maxwidth = self.config["maxwidth"].get(int)
quality = self.config["quality"].get(int)
compare_threshold = self.config["compare_threshold"].get(int)
ifempty = self.config["ifempty"].get(bool)
def embed_func(lib, opts, args):
if opts.file:
imagepath = normpath(opts.file)
if not os.path.isfile(syspath(imagepath)):
raise ui.UserError(
f"image file {displayable_path(imagepath)} not found"
)
items = lib.items(args)
# Confirm with user.
if not opts.yes and not _confirm(items, not opts.file):
return
for item in items:
art.embed_item(
self._log,
item,
imagepath,
maxwidth,
None,
compare_threshold,
ifempty,
quality=quality,
)
elif opts.url:
try:
response = requests.get(opts.url, timeout=5)
response.raise_for_status()
except requests.exceptions.RequestException as e:
self._log.error("{}", e)
return
extension = guess_extension(response.headers["Content-Type"])
if extension is None:
self._log.error("Invalid image file")
return
file = f"image{extension}"
tempimg = os.path.join(tempfile.gettempdir(), file)
try:
with open(tempimg, "wb") as f:
f.write(response.content)
except Exception as e:
self._log.error("Unable to save image: {}", e)
return
items = lib.items(args)
# Confirm with user.
if not opts.yes and not _confirm(items, not opts.url):
os.remove(tempimg)
return
for item in items:
art.embed_item(
self._log,
item,
tempimg,
maxwidth,
None,
compare_threshold,
ifempty,
quality=quality,
)
os.remove(tempimg)
else:
albums = lib.albums(args)
# Confirm with user.
if not opts.yes and not _confirm(albums, not opts.file):
return
for album in albums:
art.embed_album(
self._log,
album,
maxwidth,
False,
compare_threshold,
ifempty,
quality=quality,
)
self.remove_artfile(album)
embed_cmd.func = embed_func
# Extract command.
extract_cmd = ui.Subcommand(
"extractart",
help="extract an image from file metadata",
)
extract_cmd.parser.add_option(
"-o",
dest="outpath",
help="image output file",
)
extract_cmd.parser.add_option(
"-n",
dest="filename",
help="image filename to create for all matched albums",
)
extract_cmd.parser.add_option(
"-a",
dest="associate",
action="store_true",
help="associate the extracted images with the album",
)
def extract_func(lib, opts, args):
if opts.outpath:
art.extract_first(
self._log, normpath(opts.outpath), lib.items(args)
)
else:
filename = bytestring_path(
opts.filename or config["art_filename"].get()
)
if os.path.dirname(filename) != b"":
self._log.error(
"Only specify a name rather than a path for -n"
)
return
for album in lib.albums(args):
artpath = normpath(os.path.join(album.path, filename))
artpath = art.extract_first(
self._log, artpath, album.items()
)
if artpath and opts.associate:
album.set_art(artpath)
album.store()
extract_cmd.func = extract_func
# Clear command.
clear_cmd = ui.Subcommand(
"clearart",
help="remove images from file metadata",
)
clear_cmd.parser.add_option(
"-y", "--yes", action="store_true", help="skip confirmation"
)
def clear_func(lib, opts, args):
items = lib.items(args)
# Confirm with user.
if not opts.yes and not _confirm(items, False):
return
art.clear(self._log, lib, args)
clear_cmd.func = clear_func
return [embed_cmd, extract_cmd, clear_cmd]
def process_album(self, album):
"""Automatically embed art after art has been set"""
if self.config["auto"] and ui.should_write():
max_width = self.config["maxwidth"].get(int)
art.embed_album(
self._log,
album,
max_width,
True,
self.config["compare_threshold"].get(int),
self.config["ifempty"].get(bool),
)
self.remove_artfile(album)
def remove_artfile(self, album):
"""Possibly delete the album art file for an album (if the
appropriate configuration option is enabled).
"""
if self.config["remove_art_file"] and album.artpath:
if os.path.isfile(syspath(album.artpath)):
self._log.debug("Removing album art file for {}", album)
os.remove(syspath(album.artpath))
album.artpath = None
album.store()
beetbox-beets-c1877b7/beetsplug/embyupdate.py 0000664 0000000 0000000 00000013744 15073551743 0021334 0 ustar 00root root 0000000 0000000 """Updates the Emby Library whenever the beets library is changed.
emby:
host: localhost
port: 8096
username: user
apikey: apikey
password: password
"""
import hashlib
from urllib.parse import parse_qs, urlencode, urljoin, urlsplit, urlunsplit
import requests
from beets.plugins import BeetsPlugin
def api_url(host, port, endpoint):
"""Returns a joined url.
    Takes host, port and endpoint and generates a valid Emby API URL.
:param host: Hostname of the emby server
:param port: Portnumber of the emby server
:param endpoint: API endpoint
:type host: str
:type port: int
:type endpoint: str
:returns: Full API url
:rtype: str
"""
    # Check whether a scheme is already part of the host and build the
    # hostname accordingly.
    if host.startswith(("http://", "https://")):
        hostname = host
    else:
        hostname = f"http://{host}"
joined = urljoin(f"{hostname}:{port}", endpoint)
scheme, netloc, path, query_string, fragment = urlsplit(joined)
query_params = parse_qs(query_string)
query_params["format"] = ["json"]
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, new_query_string, fragment))
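# For instance (illustrative, not executed):
#
#   >>> api_url("localhost", 8096, "/Library/Refresh")
#   'http://localhost:8096/Library/Refresh?format=json'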
def password_data(username, password):
"""Returns a dict with username and its encoded password.
:param username: Emby username
:param password: Emby password
:type username: str
:type password: str
:returns: Dictionary with username and encoded password
:rtype: dict
"""
return {
"username": username,
"password": hashlib.sha1(password.encode("utf-8")).hexdigest(),
"passwordMd5": hashlib.md5(password.encode("utf-8")).hexdigest(),
}
def create_headers(user_id, token=None):
"""Return header dict that is needed to talk to the Emby API.
:param user_id: Emby user ID
:param token: Authentication token for Emby
:type user_id: str
:type token: str
:returns: Headers for requests
:rtype: dict
"""
headers = {}
authorization = (
f'MediaBrowser UserId="{user_id}", '
'Client="other", '
'Device="beets", '
'DeviceId="beets", '
'Version="0.0.0"'
)
headers["x-emby-authorization"] = authorization
if token:
headers["x-mediabrowser-token"] = token
return headers
def get_token(host, port, headers, auth_data):
"""Return token for a user.
:param host: Emby host
:param port: Emby port
:param headers: Headers for requests
:param auth_data: Username and encoded password for authentication
:type host: str
:type port: int
:type headers: dict
:type auth_data: dict
:returns: Access Token
:rtype: str
"""
url = api_url(host, port, "/Users/AuthenticateByName")
r = requests.post(
url,
headers=headers,
data=auth_data,
timeout=10,
)
return r.json().get("AccessToken")
def get_user(host, port, username):
    """Return a list of users matching `username`, or an empty list if
    there is no such user.
:param host: Emby host
:param port: Emby port
    :param username: Username
:type host: str
:type port: int
:type username: str
:returns: Matched Users
:rtype: list
"""
url = api_url(host, port, "/Users/Public")
r = requests.get(url, timeout=10)
user = [i for i in r.json() if i["Name"] == username]
return user
class EmbyUpdate(BeetsPlugin):
def __init__(self):
super().__init__("emby")
# Adding defaults.
self.config.add(
{
"host": "http://localhost",
"port": 8096,
"username": None,
"password": None,
"userid": None,
"apikey": None,
}
)
self.config["username"].redact = True
self.config["password"].redact = True
self.config["userid"].redact = True
self.config["apikey"].redact = True
self.register_listener("database_change", self.listen_for_db_change)
def listen_for_db_change(self, lib, model):
"""Listens for beets db change and register the update for the end."""
self.register_listener("cli_exit", self.update)
    def update(self, lib):
        """When the client exits, try to send a refresh request to Emby."""
self._log.info("Updating Emby library...")
host = self.config["host"].get()
port = self.config["port"].get()
username = self.config["username"].get()
password = self.config["password"].get()
userid = self.config["userid"].get()
token = self.config["apikey"].get()
        # Check if at least an apikey or a password is given.
if not any([password, token]):
self._log.warning("Provide at least Emby password or apikey.")
return
if not userid:
# Get user information from the Emby API.
user = get_user(host, port, username)
if not user:
self._log.warning("User {} could not be found.", username)
return
userid = user[0]["Id"]
if not token:
# Create Authentication data and headers.
auth_data = password_data(username, password)
headers = create_headers(userid)
# Get authentication token.
token = get_token(host, port, headers, auth_data)
if not token:
self._log.warning("Could not get token for user {}", username)
return
# Recreate headers with a token.
headers = create_headers(userid, token=token)
# Trigger the Update.
url = api_url(host, port, "/Library/Refresh")
r = requests.post(
url,
headers=headers,
timeout=10,
)
if r.status_code != 204:
self._log.warning("Update could not be triggered")
else:
self._log.info("Update triggered.")
beetbox-beets-c1877b7/beetsplug/export.py 0000664 0000000 0000000 00000017512 15073551743 0020513 0 ustar 00root root 0000000 0000000 # This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Exports data from beets"""
import codecs
import csv
import json
import sys
from datetime import date, datetime
from xml.etree import ElementTree
import mediafile
from beets import ui, util
from beets.plugins import BeetsPlugin
from beetsplug.info import library_data, tag_data
class ExportEncoder(json.JSONEncoder):
"""Deals with dates because JSON doesn't have a standard"""
def default(self, o):
if isinstance(o, (datetime, date)):
return o.isoformat()
return json.JSONEncoder.default(self, o)
class ExportPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"default_format": "json",
"json": {
# JSON module formatting options.
"formatting": {
"ensure_ascii": False,
"indent": 4,
"separators": (",", ": "),
"sort_keys": True,
}
},
"jsonlines": {
# JSON Lines formatting options.
"formatting": {
"ensure_ascii": False,
"separators": (",", ": "),
"sort_keys": True,
}
},
"csv": {
# CSV module formatting options.
"formatting": {
# The delimiter used to separate columns.
"delimiter": ",",
# The dialect to use when formatting the file output.
"dialect": "excel",
}
},
"xml": {
# XML module formatting options.
"formatting": {}
},
# TODO: Use something like the edit plugin
# 'item_fields': []
}
)
def commands(self):
cmd = ui.Subcommand("export", help="export data from beets")
cmd.func = self.run
cmd.parser.add_option(
"-l",
"--library",
action="store_true",
help="show library fields instead of tags",
)
cmd.parser.add_option(
"-a",
"--album",
action="store_true",
help='show album fields instead of tracks (implies "--library")',
)
cmd.parser.add_option(
"--append",
action="store_true",
default=False,
            help="append data to the output file instead of overwriting it",
)
cmd.parser.add_option(
"-i",
"--include-keys",
default=[],
action="append",
dest="included_keys",
help="comma separated list of keys to show",
)
cmd.parser.add_option(
"-o",
"--output",
help="path for the output file. If not given, will print the data",
)
cmd.parser.add_option(
"-f",
"--format",
default="json",
help="the output format: json (default), jsonlines, csv, or xml",
)
return [cmd]
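    # Hedged usage sketch: the command prints JSON to stdout by default;
    # the options above select other formats, fields, or an output file,
    # e.g.
    #
    #   beet export beatles
    #   beet export -f csv -i album,title -o tracks.csv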
def run(self, lib, opts, args):
file_path = opts.output
file_mode = "a" if opts.append else "w"
file_format = opts.format or self.config["default_format"].get(str)
file_format_is_line_based = file_format == "jsonlines"
format_options = self.config[file_format]["formatting"].get(dict)
export_format = ExportFormat.factory(
file_type=file_format,
**{"file_path": file_path, "file_mode": file_mode},
)
if opts.library or opts.album:
data_collector = library_data
else:
data_collector = tag_data
included_keys = []
for keys in opts.included_keys:
included_keys.extend(keys.split(","))
items = []
for data_emitter in data_collector(
lib,
args,
album=opts.album,
):
try:
data, item = data_emitter(included_keys or "*")
except (mediafile.UnreadableFileError, OSError) as ex:
self._log.error("cannot read file: {}", ex)
continue
for key, value in data.items():
if isinstance(value, bytes):
data[key] = util.displayable_path(value)
if file_format_is_line_based:
export_format.export(data, **format_options)
else:
items += [data]
if not file_format_is_line_based:
export_format.export(items, **format_options)
class ExportFormat:
"""The output format type"""
def __init__(self, file_path, file_mode="w", encoding="utf-8"):
self.path = file_path
self.mode = file_mode
self.encoding = encoding
# creates a file object to write/append or sets to stdout
self.out_stream = (
codecs.open(self.path, self.mode, self.encoding)
if self.path
else sys.stdout
)
@classmethod
def factory(cls, file_type, **kwargs):
if file_type in ["json", "jsonlines"]:
return JsonFormat(**kwargs)
elif file_type == "csv":
return CSVFormat(**kwargs)
elif file_type == "xml":
return XMLFormat(**kwargs)
else:
raise NotImplementedError()
def export(self, data, **kwargs):
raise NotImplementedError()
class JsonFormat(ExportFormat):
"""Saves in a json file"""
def __init__(self, file_path, file_mode="w", encoding="utf-8"):
super().__init__(file_path, file_mode, encoding)
def export(self, data, **kwargs):
json.dump(data, self.out_stream, cls=ExportEncoder, **kwargs)
self.out_stream.write("\n")
class CSVFormat(ExportFormat):
"""Saves in a csv file"""
def __init__(self, file_path, file_mode="w", encoding="utf-8"):
super().__init__(file_path, file_mode, encoding)
def export(self, data, **kwargs):
header = list(data[0].keys()) if data else []
writer = csv.DictWriter(self.out_stream, fieldnames=header, **kwargs)
writer.writeheader()
writer.writerows(data)
class XMLFormat(ExportFormat):
"""Saves in a xml file"""
def __init__(self, file_path, file_mode="w", encoding="utf-8"):
super().__init__(file_path, file_mode, encoding)
def export(self, data, **kwargs):
# Creates the XML file structure.
library = ElementTree.Element("library")
tracks = ElementTree.SubElement(library, "tracks")
        if data and isinstance(data[0], dict):
            for item in data:
                track = ElementTree.SubElement(tracks, "track")
                for key, value in item.items():
                    track_details = ElementTree.SubElement(track, key)
                    track_details.text = value
        # Depending on the version of Python, the encoding needs to change.
try:
data = ElementTree.tostring(library, encoding="unicode", **kwargs)
except LookupError:
data = ElementTree.tostring(library, encoding="utf-8", **kwargs)
self.out_stream.write(data)
beetbox-beets-c1877b7/beetsplug/fetchart.py 0000664 0000000 0000000 00000154320 15073551743 0020771 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches album art."""
from __future__ import annotations
import os
import re
from abc import ABC, abstractmethod
from collections import OrderedDict
from contextlib import closing
from enum import Enum
from functools import cached_property
from typing import TYPE_CHECKING, AnyStr, ClassVar, Literal, Tuple, Type
import confuse
import requests
from mediafile import image_mime_type
from beets import config, importer, plugins, ui, util
from beets.util import bytestring_path, get_temp_filename, sorted_walk, syspath
from beets.util.artresizer import ArtResizer
from beets.util.config import sanitize_pairs
if TYPE_CHECKING:
from collections.abc import Iterable, Iterator, Sequence
from beets.importer import ImportSession, ImportTask
from beets.library import Album, Library
from beets.logging import BeetsLogger as Logger
try:
from bs4 import BeautifulSoup, Tag
HAS_BEAUTIFUL_SOUP = True
except ImportError:
HAS_BEAUTIFUL_SOUP = False
CONTENT_TYPES = {"image/jpeg": [b"jpg", b"jpeg"], "image/png": [b"png"]}
IMAGE_EXTENSIONS = [ext for exts in CONTENT_TYPES.values() for ext in exts]
class ImageAction(Enum):
    """Indicates whether an image is usable or requires post-processing."""
BAD = 0
EXACT = 1
DOWNSCALE = 2
DOWNSIZE = 3
DEINTERLACE = 4
REFORMAT = 5
class MetadataMatch(Enum):
"""Indicates whether a `Candidate` matches the search criteria exactly."""
EXACT = 0
FALLBACK = 1
SourceLocation = Literal["local", "remote"]
class Candidate:
"""Holds information about a matching artwork, deals with validation of
dimension restrictions and resizing.
"""
def __init__(
self,
log: Logger,
source_name: str,
path: None | bytes = None,
url: None | str = None,
match: None | MetadataMatch = None,
size: None | Tuple[int, int] = None,
):
self._log = log
self.path = path
self.url = url
self.source_name = source_name
self._check: None | ImageAction = None
self.match = match
self.size = size
def _validate(
self,
plugin: FetchArtPlugin,
skip_check_for: None | list[ImageAction] = None,
) -> ImageAction:
"""Determine whether the candidate artwork is valid based on
its dimensions (width and ratio).
        `skip_check_for` is a list of checks to skip. This is used to
avoid redundant checks when the candidate has already been
validated for a particular operation without changing
plugin configuration.
Return `ImageAction.BAD` if the file is unusable.
Return `ImageAction.EXACT` if the file is usable as-is.
Return `ImageAction.DOWNSCALE` if the file must be rescaled.
Return `ImageAction.DOWNSIZE` if the file must be resized, and possibly
also rescaled.
Return `ImageAction.DEINTERLACE` if the file must be deinterlaced.
Return `ImageAction.REFORMAT` if the file has to be converted.
"""
if not self.path:
return ImageAction.BAD
if not (
plugin.enforce_ratio
or plugin.minwidth
or plugin.maxwidth
or plugin.max_filesize
or plugin.deinterlace
or plugin.cover_format
):
return ImageAction.EXACT
# get_size returns None if no local imaging backend is available
if not self.size:
self.size = ArtResizer.shared.get_size(self.path)
self._log.debug("image size: {.size}", self)
if not self.size:
self._log.warning(
"Could not get size of image (please see "
"documentation for dependencies). "
"The configuration options `minwidth`, "
"`enforce_ratio` and `max_filesize` "
"may be violated."
)
return ImageAction.EXACT
short_edge = min(self.size)
long_edge = max(self.size)
# Check minimum dimension.
if plugin.minwidth and self.size[0] < plugin.minwidth:
self._log.debug(
"image too small ({} < {.minwidth})", self.size[0], plugin
)
return ImageAction.BAD
# Check aspect ratio.
edge_diff = long_edge - short_edge
if plugin.enforce_ratio:
if plugin.margin_px:
if edge_diff > plugin.margin_px:
self._log.debug(
"image is not close enough to being "
"square, ({} - {} > {.margin_px})",
long_edge,
short_edge,
plugin,
)
return ImageAction.BAD
elif plugin.margin_percent:
margin_px = plugin.margin_percent * long_edge
if edge_diff > margin_px:
self._log.debug(
"image is not close enough to being "
"square, ({} - {} > {})",
long_edge,
short_edge,
margin_px,
)
return ImageAction.BAD
elif edge_diff:
# also reached for margin_px == 0 and margin_percent == 0.0
self._log.debug(
"image is not square ({} != {})", self.size[0], self.size[1]
)
return ImageAction.BAD
# Check maximum dimension.
downscale = False
if plugin.maxwidth and self.size[0] > plugin.maxwidth:
self._log.debug(
"image needs rescaling ({} > {.maxwidth})", self.size[0], plugin
)
downscale = True
# Check filesize.
downsize = False
if plugin.max_filesize:
filesize = os.stat(syspath(self.path)).st_size
if filesize > plugin.max_filesize:
self._log.debug(
"image needs resizing ({}B > {.max_filesize}B)",
filesize,
plugin,
)
downsize = True
# Check image format
reformat = False
if plugin.cover_format:
fmt = ArtResizer.shared.get_format(self.path)
reformat = fmt != plugin.cover_format
if reformat:
self._log.debug(
"image needs reformatting: {} -> {.cover_format}",
fmt,
plugin,
)
skip_check_for = skip_check_for or []
if downscale and (ImageAction.DOWNSCALE not in skip_check_for):
return ImageAction.DOWNSCALE
if reformat and (ImageAction.REFORMAT not in skip_check_for):
return ImageAction.REFORMAT
if plugin.deinterlace and (
ImageAction.DEINTERLACE not in skip_check_for
):
return ImageAction.DEINTERLACE
if downsize and (ImageAction.DOWNSIZE not in skip_check_for):
return ImageAction.DOWNSIZE
return ImageAction.EXACT
def validate(
self,
plugin: FetchArtPlugin,
skip_check_for: None | list[ImageAction] = None,
) -> ImageAction:
self._check = self._validate(plugin, skip_check_for)
return self._check
def resize(self, plugin: FetchArtPlugin) -> None:
"""Resize the candidate artwork according to the plugin's
configuration until it is valid or no further resizing is
possible.
"""
# validate the candidate in case it hasn't been done yet
current_check = self.validate(plugin)
checks_performed = []
# we don't want to resize the image if it's valid or bad
while current_check not in [ImageAction.BAD, ImageAction.EXACT]:
self._resize(plugin, current_check)
checks_performed.append(current_check)
current_check = self.validate(
plugin, skip_check_for=checks_performed
)
def _resize(
self, plugin: FetchArtPlugin, check: None | ImageAction = None
) -> None:
"""Resize the candidate artwork according to the plugin's
configuration and the specified check.
"""
        # This must only be called when _validate returned something other
        # than ImageAction.BAD or ImageAction.EXACT; then path and size are
        # known.
assert self.path is not None
assert self.size is not None
if check == ImageAction.DOWNSCALE:
self.path = ArtResizer.shared.resize(
plugin.maxwidth,
self.path,
quality=plugin.quality,
max_filesize=plugin.max_filesize,
)
elif check == ImageAction.DOWNSIZE:
# dimensions are correct, so maxwidth is set to maximum dimension
self.path = ArtResizer.shared.resize(
max(self.size),
self.path,
quality=plugin.quality,
max_filesize=plugin.max_filesize,
)
elif check == ImageAction.DEINTERLACE:
self.path = ArtResizer.shared.deinterlace(self.path)
elif check == ImageAction.REFORMAT:
self.path = ArtResizer.shared.reformat(
self.path,
plugin.cover_format,
deinterlaced=plugin.deinterlace,
)
def _logged_get(log: Logger, *args, **kwargs) -> requests.Response:
"""Like `requests.get`, but logs the effective URL to the specified
`log` at the `DEBUG` level.
Use the optional `message` parameter to specify what to log before
the URL. By default, the string is "getting URL".
Also sets the User-Agent header to indicate beets.
"""
# Use some arguments with the `send` call but most with the
# `Request` construction. This is a cheap, magic-filled way to
# emulate `requests.get` or, more pertinently,
# `requests.Session.request`.
req_kwargs = kwargs
send_kwargs = {}
for arg in ("stream", "verify", "proxies", "cert", "timeout"):
if arg in kwargs:
send_kwargs[arg] = req_kwargs.pop(arg)
if "timeout" not in send_kwargs:
send_kwargs["timeout"] = 10
# Our special logging message parameter.
if "message" in kwargs:
message = kwargs.pop("message")
else:
message = "getting URL"
req = requests.Request("GET", *args, **req_kwargs)
with requests.Session() as s:
s.headers = {"User-Agent": "beets"}
prepped = s.prepare_request(req)
settings = s.merge_environment_settings(
prepped.url, {}, None, None, None
)
send_kwargs.update(settings)
log.debug("{}: {.url}", message, prepped)
return s.send(prepped, **send_kwargs)
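# Illustrative call (hypothetical URL): the wrapper behaves like
# requests.get but routes a debug line through the given beets logger
# first:
#
#   _logged_get(log, "https://example.com/cover.jpg", stream=True,
#               message="downloading image")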
class RequestMixin:
"""Adds a Requests wrapper to the class that uses the logger, which
must be named `self._log`.
"""
_log: Logger
def request(self, *args, **kwargs) -> requests.Response:
"""Like `requests.get`, but uses the logger `self._log`.
See also `_logged_get`.
"""
return _logged_get(self._log, *args, **kwargs)
# ART SOURCES ################################################################
class ArtSource(RequestMixin, ABC):
# Specify whether this source fetches local or remote images
LOC: ClassVar[SourceLocation]
# A list of methods to match metadata, sorted by descending accuracy
VALID_MATCHING_CRITERIA: list[str] = ["default"]
# A human-readable name for the art source
NAME: ClassVar[str]
# The key to select the art source in the config. This value will also be
# stored in the database.
ID: ClassVar[str]
def __init__(
self,
log: Logger,
config: confuse.ConfigView,
match_by: None | list[str] = None,
) -> None:
self._log = log
self._config = config
self.match_by = match_by or self.VALID_MATCHING_CRITERIA
@cached_property
def description(self) -> str:
return f"{self.ID}[{', '.join(self.match_by)}]"
@staticmethod
def add_default_config(config: confuse.ConfigView) -> None:
pass
@classmethod
def available(cls, log: Logger, config: confuse.ConfigView) -> bool:
"""Return whether or not all dependencies are met and the art source is
in fact usable.
"""
return True
@abstractmethod
def get(
self,
album: Album,
plugin: FetchArtPlugin,
paths: None | Sequence[bytes],
) -> Iterator[Candidate]:
pass
def _candidate(self, **kwargs) -> Candidate:
return Candidate(source_name=self.ID, log=self._log, **kwargs)
@abstractmethod
def fetch_image(self, candidate: Candidate, plugin: FetchArtPlugin) -> None:
"""Fetch the image to a temporary file if it is not already available
as a local file.
After calling this, `Candidate.path` is set to the image path if
successful, or to `None` otherwise.
"""
pass
def cleanup(self, candidate: Candidate) -> None:
pass
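# A minimal sketch of a custom source built on this interface, assuming a
# hypothetical endpoint that serves art by MusicBrainz release ID (not part
# of beets itself; RemoteArtSource is defined below):
#
#     class ExampleArt(RemoteArtSource):
#         NAME = "Example Art"
#         ID = "example"
#
#         def get(self, album, plugin, paths):
#             if not album.mb_albumid:
#                 return
#             yield self._candidate(
#                 url=f"https://example.com/art/{album.mb_albumid}.jpg",
#                 match=MetadataMatch.EXACT,
#             )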
class LocalArtSource(ArtSource):
LOC = "local"
def fetch_image(self, candidate: Candidate, plugin: FetchArtPlugin) -> None:
pass
class RemoteArtSource(ArtSource):
LOC = "remote"
def fetch_image(self, candidate: Candidate, plugin: FetchArtPlugin) -> None:
"""Downloads an image from a URL and checks whether it seems to
actually be an image.
"""
# This must only be called for candidates that were returned by
# self.get, which are expected to have a url and no path (because they
# haven't been downloaded yet).
assert candidate.path is None
assert candidate.url is not None
if plugin.maxwidth:
candidate.url = ArtResizer.shared.proxy_url(
plugin.maxwidth, candidate.url
)
try:
with closing(
self.request(
candidate.url, stream=True, message="downloading image"
)
) as resp:
ct = resp.headers.get("Content-Type", None)
# Download the image to a temporary file. As some servers
# (notably fanart.tv) have proven to return wrong Content-Types
# when images were uploaded with a bad file extension, do not
# rely on it. Instead validate the type using the file magic
# and only then determine the extension.
data = resp.iter_content(chunk_size=1024)
header = b""
for chunk in data:
header += chunk
if len(header) >= 32:
# The imghdr module will only read 32 bytes, and our
# own additions in mediafile even less.
break
else:
# server didn't return enough data, i.e. corrupt image
return
real_ct = image_mime_type(header)
if real_ct is None:
# detection by file magic failed, fall back to the
# server-supplied Content-Type
# Is our type detection failsafe enough to drop this?
real_ct = ct
if real_ct not in CONTENT_TYPES:
self._log.debug(
"not a supported image: {}",
real_ct or "unknown content type",
)
return
ext = b"." + CONTENT_TYPES[real_ct][0]
if real_ct != ct:
self._log.warning(
"Server specified {}, but returned a "
"{} image. Correcting the extension "
"to {}",
ct,
real_ct,
ext,
)
filename = get_temp_filename(__name__, suffix=ext.decode())
with open(filename, "wb") as fh:
# write the first already loaded part of the image
fh.write(header)
# download the remaining part of the image
for chunk in data:
fh.write(chunk)
self._log.debug(
"downloaded art to: {}", util.displayable_path(filename)
)
candidate.path = util.bytestring_path(filename)
return
except (OSError, requests.RequestException, TypeError) as exc:
# Handling TypeError works around a urllib3 bug:
# https://github.com/shazow/urllib3/issues/556
self._log.debug("error fetching art: {}", exc)
return
def cleanup(self, candidate: Candidate) -> None:
if candidate.path:
try:
util.remove(path=candidate.path)
except util.FilesystemError as exc:
self._log.debug("error cleaning up tmp art: {}", exc)
class CoverArtArchive(RemoteArtSource):
NAME = "Cover Art Archive"
ID = "coverart"
VALID_MATCHING_CRITERIA = ["release", "releasegroup"]
VALID_THUMBNAIL_SIZES = [250, 500, 1200]
URL = "https://coverartarchive.org/release/{mbid}"
GROUP_URL = "https://coverartarchive.org/release-group/{mbid}"
def get(
self,
album: Album,
plugin: FetchArtPlugin,
paths: None | Sequence[bytes],
) -> Iterator[Candidate]:
"""Return the Cover Art Archive and Cover Art Archive release
group URLs using album MusicBrainz release ID and release group
ID.
"""
def get_image_urls(
url: str,
preferred_width: None | str = None,
) -> Iterator[str]:
try:
response = self.request(url)
except requests.RequestException:
self._log.debug("{.NAME}: error receiving response", self)
return
try:
data = response.json()
except ValueError:
self._log.debug(
"{.NAME}: error loading response: {.text}", self, response
)
return
for item in data.get("images", []):
try:
if "Front" not in item["types"]:
continue
# If there is a pre-sized thumbnail of the desired size
# we select it. Otherwise, we return the raw image.
image_url: str = item["image"]
if preferred_width is not None:
if isinstance(item.get("thumbnails"), dict):
image_url = item["thumbnails"].get(
preferred_width, image_url
)
yield image_url
except KeyError:
pass
release_url = self.URL.format(mbid=album.mb_albumid)
release_group_url = self.GROUP_URL.format(mbid=album.mb_releasegroupid)
# Cover Art Archive API offers pre-resized thumbnails at several sizes.
# If the maxwidth config matches one of the already available sizes
# fetch it directly instead of fetching the full sized image and
# resizing it.
preferred_width = None
if plugin.maxwidth in self.VALID_THUMBNAIL_SIZES:
preferred_width = str(plugin.maxwidth)
if "release" in self.match_by and album.mb_albumid:
for url in get_image_urls(release_url, preferred_width):
yield self._candidate(url=url, match=MetadataMatch.EXACT)
if "releasegroup" in self.match_by and album.mb_releasegroupid:
for url in get_image_urls(release_group_url, preferred_width):
yield self._candidate(url=url, match=MetadataMatch.FALLBACK)
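# Worked examples of the thumbnail selection above:
#     maxwidth: 500  ->  preferred_width == "500"  ->  pre-sized thumbnail URL
#     maxwidth: 600  ->  preferred_width is None   ->  full-size image URL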
class Amazon(RemoteArtSource):
NAME = "Amazon"
ID = "amazon"
URL = "https://images.amazon.com/images/P/{}.{:02d}.LZZZZZZZ.jpg"
INDICES = (1, 2)
def get(
self,
album: Album,
plugin: FetchArtPlugin,
paths: None | Sequence[bytes],
) -> Iterator[Candidate]:
"""Generate URLs using Amazon ID (ASIN) string."""
if album.asin:
for index in self.INDICES:
yield self._candidate(
url=self.URL.format(album.asin, index),
match=MetadataMatch.EXACT,
)
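# For a hypothetical ASIN "B000002UAL", the format string above yields one
# candidate URL per index:
#     https://images.amazon.com/images/P/B000002UAL.01.LZZZZZZZ.jpg
#     https://images.amazon.com/images/P/B000002UAL.02.LZZZZZZZ.jpg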
class AlbumArtOrg(RemoteArtSource):
NAME = "AlbumArt.org scraper"
ID = "albumart"
URL = "https://www.albumart.org/index_detail.php"
PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"'
def get(
self,
album: Album,
plugin: FetchArtPlugin,
paths: None | Sequence[bytes],
):
"""Return art URL from AlbumArt.org using album ASIN."""
if not album.asin:
return
# Get the page from albumart.org.
try:
resp = self.request(self.URL, params={"asin": album.asin})
self._log.debug("scraped art URL: {.url}", resp)
except requests.RequestException:
self._log.debug("error scraping art page")
return
# Search the page for the image URL.
m = re.search(self.PAT, resp.text)
if m:
image_url = m.group(1)
yield self._candidate(url=image_url, match=MetadataMatch.EXACT)
else:
self._log.debug("no image found on page")
class GoogleImages(RemoteArtSource):
NAME = "Google Images"
ID = "google"
URL = "https://www.googleapis.com/customsearch/v1"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.key = self._config["google_key"].get()
self.cx = self._config["google_engine"].get()
@staticmethod
def add_default_config(config: confuse.ConfigView):
config.add(
{
"google_key": None,
"google_engine": "001442825323518660753:hrh5ch1gjzm",
}
)
config["google_key"].redact = True
config["google_engine"].redact = True
@classmethod
def available(cls, log: Logger, config: confuse.ConfigView) -> bool:
has_key = bool(config["google_key"].get())
if not has_key:
log.debug("google: Disabling art source due to missing key")
return has_key
def get(
self,
album: Album,
plugin: FetchArtPlugin,
paths: None | Sequence[bytes],
) -> Iterator[Candidate]:
"""Return art URL from google custom search engine
given an album title and interpreter.
"""
if not (album.albumartist and album.album):
return
search_string = f"{album.albumartist},{album.album}".encode("utf-8")
try:
response = self.request(
self.URL,
params={
"key": self.key,
"cx": self.cx,
"q": search_string,
"searchType": "image",
},
)
except requests.RequestException:
self._log.debug("google: error receiving response")
return
# Get results using JSON.
try:
data = response.json()
except ValueError:
self._log.debug("google: error loading response: {.text}", response)
return
if "error" in data:
reason = data["error"]["errors"][0]["reason"]
self._log.debug("google fetchart error: {}", reason)
return
if "items" in data.keys():
for item in data["items"]:
yield self._candidate(
url=item["link"], match=MetadataMatch.EXACT
)
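# A configuration sketch for this source (the key is a placeholder, not a
# real credential; `google_engine` falls back to the default added above):
#
#     fetchart:
#         sources: [google]
#         google_key: REPLACE_WITH_YOUR_API_KEY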
class FanartTV(RemoteArtSource):
"""Art from fanart.tv requested using their API"""
NAME = "fanart.tv"
ID = "fanarttv"
API_URL = "https://webservice.fanart.tv/v3/"
API_ALBUMS = f"{API_URL}music/albums/"
PROJECT_KEY = "61a7d0ab4e67162b7a0c7c35915cd48e"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client_key = self._config["fanarttv_key"].get()
@staticmethod
def add_default_config(config: confuse.ConfigView):
config.add(
{
"fanarttv_key": None,
}
)
config["fanarttv_key"].redact = True
def get(
self,
album: Album,
plugin: FetchArtPlugin,
paths: None | Sequence[bytes],
) -> Iterator[Candidate]:
if not album.mb_releasegroupid:
return
try:
response = self.request(
f"{self.API_ALBUMS}{album.mb_releasegroupid}",
headers={
"api-key": self.PROJECT_KEY,
"client-key": self.client_key,
},
)
except requests.RequestException:
self._log.debug("fanart.tv: error receiving response")
return
try:
data = response.json()
except ValueError:
self._log.debug(
"fanart.tv: error loading response: {.text}", response
)
return
if "status" in data and data["status"] == "error":
if "not found" in data["error message"].lower():
self._log.debug("fanart.tv: no image found")
elif "api key" in data["error message"].lower():
self._log.warning(
"fanart.tv: Invalid API key given, please "
"enter a valid one in your config file."
)
else:
self._log.debug(
"fanart.tv: error on request: {}", data["error message"]
)
return
matches = []
# can there be more than one releasegroupid per response?
for mbid, art in data.get("albums", {}).items():
# there might be more art referenced, e.g. cdart, and an albumcover
# might not be present, even if the request was successful
if album.mb_releasegroupid == mbid and "albumcover" in art:
matches.extend(art["albumcover"])
# can this actually occur?
else:
self._log.debug(
"fanart.tv: unexpected mb_releasegroupid in response!"
)
matches.sort(key=lambda x: int(x["likes"]), reverse=True)
for item in matches:
# fanart.tv has a strict size requirement for album art to be
# uploaded
yield self._candidate(
url=item["url"], match=MetadataMatch.EXACT, size=(1000, 1000)
)
class ITunesStore(RemoteArtSource):
NAME = "iTunes Store"
ID = "itunes"
API_URL = "https://itunes.apple.com/search"
def get(
self,
album: Album,
plugin: FetchArtPlugin,
paths: None | Sequence[bytes],
) -> Iterator[Candidate]:
"""Return art URL from iTunes Store given an album title."""
if not (album.albumartist and album.album):
return
payload = {
"term": f"{album.albumartist} {album.album}",
"entity": "album",
"media": "music",
"limit": 200,
}
try:
r = self.request(self.API_URL, params=payload)
r.raise_for_status()
except requests.RequestException as e:
self._log.debug("iTunes search failed: {}", e)
return
try:
candidates = r.json()["results"]
except ValueError as e:
self._log.debug("Could not decode json response: {}", e)
return
except KeyError as e:
self._log.debug(
"{} not found in json. Fields are {} ", e, list(r.json().keys())
)
return
if not candidates:
self._log.debug(
"iTunes search for {!r} got no results", payload["term"]
)
return
if self._config["high_resolution"]:
image_suffix = "100000x100000-999"
else:
image_suffix = "1200x1200bb"
for c in candidates:
try:
if (
c["artistName"] == album.albumartist
and c["collectionName"] == album.album
):
art_url = c["artworkUrl100"]
art_url = art_url.replace("100x100bb", image_suffix)
yield self._candidate(
url=art_url, match=MetadataMatch.EXACT
)
except KeyError as e:
self._log.debug(
"Malformed itunes candidate: {} not found in {}", # NOQA E501
e,
list(c.keys()),
)
try:
fallback_art_url = candidates[0]["artworkUrl100"]
fallback_art_url = fallback_art_url.replace(
"100x100bb", image_suffix
)
yield self._candidate(
url=fallback_art_url, match=MetadataMatch.FALLBACK
)
except KeyError as e:
self._log.debug(
"Malformed itunes candidate: {} not found in {}",
e,
list(candidates[0].keys()),
)
class Wikipedia(RemoteArtSource):
NAME = "Wikipedia (queried through DBpedia)"
ID = "wikipedia"
DBPEDIA_URL = "https://dbpedia.org/sparql"
WIKIPEDIA_URL = "https://en.wikipedia.org/w/api.php"
SPARQL_QUERY = """PREFIX rdf:
PREFIX dbpprop:
PREFIX owl:
PREFIX rdfs:
PREFIX foaf:
SELECT DISTINCT ?pageId ?coverFilename WHERE {{
?subject owl:wikiPageID ?pageId .
?subject dbpprop:name ?name .
?subject rdfs:label ?label .
{{ ?subject dbpprop:artist ?artist }}
UNION
{{ ?subject owl:artist ?artist }}
{{ ?artist foaf:name "{artist}"@en }}
UNION
{{ ?artist dbpprop:name "{artist}"@en }}
?subject rdf:type .
?subject dbpprop:cover ?coverFilename .
FILTER ( regex(?name, "{album}", "i") )
}}
Limit 1"""
def get(
self,
album: Album,
plugin: FetchArtPlugin,
paths: None | Sequence[bytes],
) -> Iterator[Candidate]:
if not (album.albumartist and album.album):
return
# Find the name of the cover art filename on DBpedia
cover_filename, page_id = None, None
try:
dbpedia_response = self.request(
self.DBPEDIA_URL,
params={
"format": "application/sparql-results+json",
"timeout": 2500,
"query": self.SPARQL_QUERY.format(
artist=album.albumartist.title(), album=album.album
),
},
headers={"content-type": "application/json"},
)
except requests.RequestException:
self._log.debug("dbpedia: error receiving response")
return
try:
data = dbpedia_response.json()
results = data["results"]["bindings"]
if results:
cover_filename = f"File:{results[0]['coverFilename']['value']}"
page_id = results[0]["pageId"]["value"]
else:
self._log.debug("wikipedia: album not found on dbpedia")
except (ValueError, KeyError, IndexError):
self._log.debug(
"wikipedia: error scraping dbpedia response: {.text}",
dbpedia_response,
)
# Ensure we have a filename before attempting to query wikipedia
if not (cover_filename and page_id):
return
# DBPedia sometimes provides an incomplete cover_filename, indicated
# by the filename having a space before the extension, e.g., 'foo .bar'
# An additional Wikipedia call can help to find the real filename.
# This may be removed once the DBPedia issue is resolved, see:
# https://github.com/dbpedia/extraction-framework/issues/396
if " ." in cover_filename and "." not in cover_filename.split(" .")[-1]:
self._log.debug(
"wikipedia: dbpedia provided incomplete cover_filename"
)
lpart, rpart = cover_filename.rsplit(" .", 1)
# Query all the images in the page
try:
wikipedia_response = self.request(
self.WIKIPEDIA_URL,
params={
"format": "json",
"action": "query",
"continue": "",
"prop": "images",
"pageids": page_id,
},
headers={"content-type": "application/json"},
)
except requests.RequestException:
self._log.debug("wikipedia: error receiving response")
return
# Try to see if one of the images on the pages matches our
# incomplete cover_filename
try:
data = wikipedia_response.json()
results = data["query"]["pages"][page_id]["images"]
for result in results:
if re.match(
rf"{re.escape(lpart)}.*?\.{re.escape(rpart)}",
result["title"],
):
cover_filename = result["title"]
break
except (ValueError, KeyError):
self._log.debug(
"wikipedia: failed to retrieve a cover_filename"
)
return
# Find the absolute url of the cover art on Wikipedia
try:
wikipedia_response = self.request(
self.WIKIPEDIA_URL,
params={
"format": "json",
"action": "query",
"continue": "",
"prop": "imageinfo",
"iiprop": "url",
"titles": cover_filename.encode("utf-8"),
},
headers={"content-type": "application/json"},
)
except requests.RequestException:
self._log.debug("wikipedia: error receiving response")
return
try:
data = wikipedia_response.json()
results = data["query"]["pages"]
for _, result in results.items():
image_url = result["imageinfo"][0]["url"]
yield self._candidate(url=image_url, match=MetadataMatch.EXACT)
except (ValueError, KeyError, IndexError):
self._log.debug("wikipedia: error scraping imageinfo")
return
class FileSystem(LocalArtSource):
NAME = "Filesystem"
ID = "filesystem"
@staticmethod
def filename_priority(
filename: AnyStr, cover_names: Sequence[AnyStr]
) -> list[int]:
"""Sort order for image names.
Return indexes of cover names found in the image filename. This
means that images with lower-numbered and more keywords will have
higher priority.
"""
return [idx for (idx, x) in enumerate(cover_names) if x in filename]
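# A worked example of the sort key above, assuming the plugin's default
# cover names:
#
#     cover_names = [b"cover", b"front", b"art", b"album", b"folder"]
#     filename_priority(b"front.jpg", cover_names)      ->  [1]
#     filename_priority(b"cover art.png", cover_names)  ->  [0, 2]
#
# Sorting by these keys places b"cover art.png" ([0, 2]) before
# b"front.jpg" ([1]).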
def get(
self,
album: Album,
plugin: FetchArtPlugin,
paths: None | Sequence[bytes],
) -> Iterator[Candidate]:
"""Look for album art files in the specified directories."""
if not paths:
return
cover_names = list(map(util.bytestring_path, plugin.cover_names))
cover_names_str = b"|".join(cover_names)
cover_pat = rb"".join([rb"(\b|_)(", cover_names_str, rb")(\b|_)"])
for path in paths:
if not os.path.isdir(syspath(path)):
continue
# Find all files that look like images in the directory.
images = []
ignore = config["ignore"].as_str_seq()
ignore_hidden = config["ignore_hidden"].get(bool)
for _, _, files in sorted_walk(
path, ignore=ignore, ignore_hidden=ignore_hidden
):
for fn in files:
fn = bytestring_path(fn)
for ext in IMAGE_EXTENSIONS:
if fn.lower().endswith(b"." + ext) and os.path.isfile(
syspath(os.path.join(path, fn))
):
images.append(fn)
# Look for "preferred" filenames.
images = sorted(
images, key=lambda x: self.filename_priority(x, cover_names)
)
remaining = []
for fn in images:
if re.search(cover_pat, os.path.splitext(fn)[0], re.I):
self._log.debug(
"using well-named art file {}",
util.displayable_path(fn),
)
yield self._candidate(
path=os.path.join(path, fn), match=MetadataMatch.EXACT
)
else:
remaining.append(fn)
# Fall back to any image in the folder.
if remaining and not plugin.cautious:
self._log.debug(
"using fallback art file {}",
util.displayable_path(remaining[0]),
)
yield self._candidate(
path=os.path.join(path, remaining[0]),
match=MetadataMatch.FALLBACK,
)
class LastFM(RemoteArtSource):
NAME = "Last.fm"
ID = "lastfm"
# Sizes in priority order.
SIZES = OrderedDict(
[
("mega", (300, 300)),
("extralarge", (300, 300)),
("large", (174, 174)),
("medium", (64, 64)),
("small", (34, 34)),
]
)
API_URL = "https://ws.audioscrobbler.com/2.0"
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.key = self._config["lastfm_key"].get()
@staticmethod
def add_default_config(config: confuse.ConfigView) -> None:
config.add(
{
"lastfm_key": None,
}
)
config["lastfm_key"].redact = True
@classmethod
def available(cls, log: Logger, config: confuse.ConfigView) -> bool:
has_key = bool(config["lastfm_key"].get())
if not has_key:
log.debug("lastfm: Disabling art source due to missing key")
return has_key
def get(
self,
album: Album,
plugin: FetchArtPlugin,
paths: None | Sequence[bytes],
) -> Iterator[Candidate]:
if not album.mb_albumid:
return
try:
response = self.request(
self.API_URL,
params={
"method": "album.getinfo",
"api_key": self.key,
"mbid": album.mb_albumid,
"format": "json",
},
)
except requests.RequestException:
self._log.debug("lastfm: error receiving response")
return
try:
data = response.json()
if "error" in data:
if data["error"] == 6:
self._log.debug(
"lastfm: no results for {.mb_albumid}", album
)
else:
self._log.error(
"lastfm: failed to get album info: {} ({})",
data["message"],
data["error"],
)
else:
images = {
image["size"]: image["#text"]
for image in data["album"]["image"]
}
# Provide candidates in order of size.
for size in self.SIZES.keys():
if size in images:
yield self._candidate(
url=images[size], size=self.SIZES[size]
)
except ValueError:
self._log.debug("lastfm: error loading response: {.text}", response)
return
class Spotify(RemoteArtSource):
NAME = "Spotify"
ID = "spotify"
SPOTIFY_ALBUM_URL = "https://open.spotify.com/album/"
@classmethod
def available(cls, log: Logger, config: confuse.ConfigView) -> bool:
if not HAS_BEAUTIFUL_SOUP:
log.debug(
"To use Spotify as an album art source, "
"you must install the beautifulsoup4 module. See "
"the documentation for further details."
)
return HAS_BEAUTIFUL_SOUP
def get(
self,
album: Album,
plugin: FetchArtPlugin,
paths: None | Sequence[bytes],
) -> Iterator[Candidate]:
try:
url = f"{self.SPOTIFY_ALBUM_URL}{album.items().get().spotify_album_id}"
except AttributeError:
self._log.debug("Fetchart: no Spotify album ID found")
return
try:
response = requests.get(url, timeout=10)
response.raise_for_status()
except requests.RequestException as e:
self._log.debug("Error: {!s}", e)
return
try:
html = response.text
soup = BeautifulSoup(html, "html.parser")
except ValueError:
self._log.debug(
"Spotify: error loading response: {.text}", response
)
return
tag = soup.find("meta", attrs={"property": "og:image"})
if tag is None or not isinstance(tag, Tag):
self._log.debug(
"Spotify: Unexpected response, og:image tag missing"
)
return
image_url = tag["content"]
yield self._candidate(url=image_url, match=MetadataMatch.EXACT)
class CoverArtUrl(RemoteArtSource):
# This source is intended to be used with a plugin that sets the
# cover_art_url field on albums or tracks. Users can also manually update
# the cover_art_url field using the "set" command. This source will then
# use that URL to fetch the image.
NAME = "Cover Art URL"
ID = "cover_art_url"
def get(
self,
album: Album,
plugin: FetchArtPlugin,
paths: None | Sequence[bytes],
) -> Iterator[Candidate]:
image_url = None
try:
# look for cover_art_url on album or first track
if album.get("cover_art_url"):
image_url = album.cover_art_url
else:
image_url = album.items().get().cover_art_url
self._log.debug("Cover art URL {} found for {}", image_url, album)
except (AttributeError, TypeError):
self._log.debug("Cover art URL not found for {}", album)
return
if image_url:
yield self._candidate(url=image_url, match=MetadataMatch.EXACT)
else:
self._log.debug("Cover art URL not found for {}", album)
return
# All art sources. The order they will be tried in is specified by the config.
ART_SOURCES: set[Type[ArtSource]] = {
FileSystem,
CoverArtArchive,
ITunesStore,
AlbumArtOrg,
Amazon,
Wikipedia,
GoogleImages,
FanartTV,
LastFM,
Spotify,
CoverArtUrl,
}
# PLUGIN LOGIC ###############################################################
class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
PAT_PX = r"(0|[1-9][0-9]*)px"
PAT_PERCENT = r"(100(\.00?)?|[1-9]?[0-9](\.[0-9]{1,2})?)%"
def __init__(self) -> None:
super().__init__()
# Holds candidates corresponding to downloaded images between
# fetching them and placing them in the filesystem.
self.art_candidates: dict[ImportTask, Candidate] = {}
self.config.add(
{
"auto": True,
"minwidth": 0,
"maxwidth": 0,
"quality": 0,
"max_filesize": 0,
"enforce_ratio": False,
"cautious": False,
"cover_names": ["cover", "front", "art", "album", "folder"],
"sources": [
"filesystem",
"coverart",
"itunes",
"amazon",
"albumart",
"cover_art_url",
],
"store_source": False,
"high_resolution": False,
"deinterlace": False,
"cover_format": None,
}
)
for source in ART_SOURCES:
source.add_default_config(self.config)
self.minwidth = self.config["minwidth"].get(int)
self.maxwidth = self.config["maxwidth"].get(int)
self.max_filesize = self.config["max_filesize"].get(int)
self.quality = self.config["quality"].get(int)
# allow both pixel and percentage-based margin specifications
self.enforce_ratio = self.config["enforce_ratio"].get(
confuse.OneOf(
[
bool,
confuse.String(pattern=self.PAT_PX),
confuse.String(pattern=self.PAT_PERCENT),
]
)
)
self.margin_px = None
self.margin_percent = None
self.deinterlace = self.config["deinterlace"].get(bool)
if isinstance(self.enforce_ratio, str):
if self.enforce_ratio[-1] == "%":
self.margin_percent = float(self.enforce_ratio[:-1]) / 100
elif self.enforce_ratio[-2:] == "px":
self.margin_px = int(self.enforce_ratio[:-2])
else:
# shouldn't happen
raise confuse.ConfigValueError()
self.enforce_ratio = True
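# Worked examples of the margin parsing above:
#     enforce_ratio: yes      ->  margins stay None (strict ratio check)
#     enforce_ratio: "10px"   ->  margin_px = 10
#     enforce_ratio: "0.5%"   ->  margin_percent = 0.005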
cover_names = self.config["cover_names"].as_str_seq()
self.cover_names = list(map(util.bytestring_path, cover_names))
self.cautious = self.config["cautious"].get(bool)
self.store_source = self.config["store_source"].get(bool)
self.cover_format = self.config["cover_format"].get(
confuse.Optional(str)
)
if self.config["auto"]:
# Enable two import hooks when fetching is enabled.
self.import_stages = [self.fetch_art]
self.register_listener("import_task_files", self.assign_art)
available_sources = [
(s_cls.ID, c)
for s_cls in ART_SOURCES
if s_cls.available(self._log, self.config)
for c in s_cls.VALID_MATCHING_CRITERIA
]
sources = sanitize_pairs(
self.config["sources"].as_pairs(default_value="*"),
available_sources,
)
if "remote_priority" in self.config:
self._log.warning(
"The `fetch_art.remote_priority` configuration option has "
"been deprecated. Instead, place `filesystem` at the end of "
"your `sources` list."
)
if self.config["remote_priority"].get(bool):
fs = []
others = []
for s, c in sources:
if s == "filesystem":
fs.append((s, c))
else:
others.append((s, c))
sources = others + fs
sources_by_name = {s_cls.ID: s_cls for s_cls in ART_SOURCES}
self.sources = [
sources_by_name[s](self._log, self.config, match_by=[c])
for s, c in sources
]
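# A configuration sketch for source ordering (values are illustrative); each
# entry may optionally pin one of the source's matching criteria:
#
#     fetchart:
#         sources:
#             - filesystem
#             - coverart: release
#             - coverart: releasegroup
#             - itunes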
@staticmethod
def _is_source_file_removal_enabled() -> bool:
return config["import"]["delete"].get(bool) or config["import"][
"move"
].get(bool)
# Asynchronous; after music is added to the library.
def fetch_art(self, session: ImportSession, task: ImportTask) -> None:
"""Find art for the album being imported."""
if task.is_album: # Only fetch art for full albums.
if task.album.artpath and os.path.isfile(
syspath(task.album.artpath)
):
# Album already has art (probably a re-import); skip it.
return
if task.choice_flag == importer.Action.ASIS:
# For as-is imports, don't search Web sources for art.
local = True
elif task.choice_flag in (
importer.Action.APPLY,
importer.Action.RETAG,
):
# Search everywhere for art.
local = False
else:
# For any other choices (e.g., TRACKS), do nothing.
return
candidate = self.art_for_album(task.album, task.paths, local)
if candidate:
self.art_candidates[task] = candidate
def _set_art(
self, album: Album, candidate: Candidate, delete: bool = False
) -> None:
album.set_art(candidate.path, delete)
if self.store_source:
# store the source of the chosen artwork in a flexible field
self._log.debug(
"Storing art_source for {0.albumartist} - {0.album}", album
)
album.art_source = candidate.source_name
album.store()
# Synchronous; after music files are put in place.
def assign_art(self, session: ImportSession, task: ImportTask):
"""Place the discovered art in the filesystem."""
if task in self.art_candidates:
candidate = self.art_candidates.pop(task)
removal_enabled = self._is_source_file_removal_enabled()
self._set_art(task.album, candidate, not removal_enabled)
if removal_enabled:
task.prune(candidate.path)
# Manual album art fetching.
def commands(self) -> list[ui.Subcommand]:
cmd = ui.Subcommand("fetchart", help="download album art")
cmd.parser.add_option(
"-f",
"--force",
dest="force",
action="store_true",
default=False,
help="re-download art when already present",
)
cmd.parser.add_option(
"-q",
"--quiet",
dest="quiet",
action="store_true",
default=False,
help="quiet mode: do not output albums that already have artwork",
)
def func(lib: Library, opts, args) -> None:
self.batch_fetch_art(lib, lib.albums(args), opts.force, opts.quiet)
cmd.func = func
return [cmd]
# Utilities converted from module-level functions to methods during the
# logging overhaul.
def art_for_album(
self,
album: Album,
paths: None | Sequence[bytes],
local_only: bool = False,
) -> None | Candidate:
"""Given an Album object, returns a path to downloaded art for the
album (or None if no art is found). If `maxwidth`, then images are
resized to this maximum pixel size. If `quality` then resized images
are saved at the specified quality level. If `local_only`, then only
local image files from the filesystem are returned; no network
requests are made.
"""
out = None
for source in self.sources:
if source.LOC == "local" or not local_only:
self._log.debug(
"trying source {0.description}"
" for album {1.albumartist} - {1.album}",
source,
album,
)
# URLs might be invalid at this point, or the image may not
# fulfill the requirements
for candidate in source.get(album, self, paths):
source.fetch_image(candidate, self)
if candidate.validate(self) != ImageAction.BAD:
out = candidate
assert out.path is not None # help mypy
self._log.debug(
"using {.LOC} image {.path}", source, out
)
break
# Remove temporary files for invalid candidates.
source.cleanup(candidate)
if out:
break
if out:
out.resize(self)
return out
def batch_fetch_art(
self,
lib: Library,
albums: Iterable[Album],
force: bool,
quiet: bool,
) -> None:
"""Fetch album art for each of the albums. This implements the manual
fetchart CLI command.
"""
for album in albums:
if (
album.artpath
and not force
and os.path.isfile(syspath(album.artpath))
):
if not quiet:
message = ui.colorize(
"text_highlight_minor", "has album art"
)
self._log.info("{}: {}", album, message)
else:
# In ordinary invocations, look for images on the
# filesystem. When forcing, however, always go to the Web
# sources.
local_paths = None if force else [album.path]
candidate = self.art_for_album(album, local_paths)
if candidate:
self._set_art(album, candidate)
message = ui.colorize("text_success", "found album art")
else:
message = ui.colorize("text_error", "no art found")
self._log.info("{}: {}", album, message)
beetbox-beets-c1877b7/beetsplug/filefilter.py 0000664 0000000 0000000 00000005512 15073551743 0021314 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Malte Ried.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Filter imported files using a regular expression."""
import re
from beets import config
from beets.importer import SingletonImportTask
from beets.plugins import BeetsPlugin
from beets.util import bytestring_path
class FileFilterPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.register_listener(
"import_task_created", self.import_task_created_event
)
self.config.add({"path": ".*"})
self.path_album_regex = self.path_singleton_regex = re.compile(
bytestring_path(self.config["path"].get())
)
if "album_path" in self.config:
self.path_album_regex = re.compile(
bytestring_path(self.config["album_path"].get())
)
if "singleton_path" in self.config:
self.path_singleton_regex = re.compile(
bytestring_path(self.config["singleton_path"].get())
)
def import_task_created_event(self, session, task):
if task.items:
items_to_import = [
item for item in task.items if self.file_filter(item["path"])
]
if items_to_import:
task.items = items_to_import
else:
# Returning an empty list of tasks from the handler
# drops the task from the rest of the importer pipeline.
return []
elif isinstance(task, SingletonImportTask):
if not self.file_filter(task.item["path"]):
return []
# If not filtered, return the original task unchanged.
return [task]
def file_filter(self, full_path):
"""Checks if the configured regular expressions allow the import
of the file given in full_path.
"""
import_config = dict(config["import"])
full_path = bytestring_path(full_path)
if "singletons" not in import_config or not import_config["singletons"]:
# Album
return self.path_album_regex.match(full_path) is not None
else:
# Singleton
return self.path_singleton_regex.match(full_path) is not None
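# A configuration sketch for this plugin (patterns are illustrative); `path`
# applies to both albums and singletons unless the more specific keys are
# set:
#
#     filefilter:
#         path: .*\.flac$
#         album_path: .*
#         singleton_path: .*(single|live).*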
beetbox-beets-c1877b7/beetsplug/fish.py 0000664 0000000 0000000 00000023665 15073551743 0020131 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2015, winters jean-marie.
# Copyright 2020, Justin Mayer
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This plugin generates tab completions for Beets commands for the Fish shell
, including completions for Beets commands, plugin
commands, and option flags. Also generated are completions for all the album
and track fields, suggesting for example `genre:` or `album:` when querying the
Beets database. Completions for the *values* of those fields are not generated
by default but can be added via the `-e` / `--extravalues` flag. For example:
`beet fish -e genre -e albumartist`
"""
import os
from operator import attrgetter
from beets import library, ui
from beets.plugins import BeetsPlugin
from beets.ui import commands
BL_NEED2 = """complete -c beet -n '__fish_beet_needs_command' {} {}\n"""
BL_USE3 = """complete -c beet -n '__fish_beet_using_command {}' {} {}\n"""
BL_SUBS = """complete -c beet -n '__fish_at_level {} ""' {} {}\n"""
BL_EXTRA3 = """complete -c beet -n '__fish_beet_use_extra {}' {} {}\n"""
HEAD = """
function __fish_beet_needs_command
set cmd (commandline -opc)
if test (count $cmd) -eq 1
return 0
end
return 1
end
function __fish_beet_using_command
set cmd (commandline -opc)
set needle (count $cmd)
if test $needle -gt 1
if begin test $argv[1] = $cmd[2];
and not contains -- $cmd[$needle] $FIELDS; end
return 0
end
end
return 1
end
function __fish_beet_use_extra
set cmd (commandline -opc)
set needle (count $cmd)
if test $argv[2] = $cmd[$needle]
return 0
end
return 1
end
"""
class FishPlugin(BeetsPlugin):
def commands(self):
cmd = ui.Subcommand("fish", help="generate Fish shell tab completions")
cmd.func = self.run
cmd.parser.add_option(
"-f",
"--noFields",
action="store_true",
default=False,
help="omit album/track field completions",
)
cmd.parser.add_option(
"-e",
"--extravalues",
action="append",
type="choice",
choices=library.Item.all_keys() + library.Album.all_keys(),
help="include specified field *values* in completions",
)
cmd.parser.add_option(
"-o",
"--output",
default="~/.config/fish/completions/beet.fish",
help=(
"where to save the script. default: ~/.config/fish/completions"
),
)
return [cmd]
def run(self, lib, opts, args):
# Gather the commands from Beets core and its plugins.
# Collect the album and track fields.
# If specified, also collect the values for these fields.
# Make a giant string of all the above, formatted in a way that
# allows Fish to do tab completion for the `beet` command.
completion_file_path = os.path.expanduser(opts.output)
completion_dir = os.path.dirname(completion_file_path)
if completion_dir != "":
os.makedirs(completion_dir, exist_ok=True)
nobasicfields = opts.noFields # Do not complete for album/track fields
extravalues = opts.extravalues # e.g., Also complete artists names
beetcmds = sorted(
(commands.default_commands + commands.plugins.commands()),
key=attrgetter("name"),
)
fields = sorted(set(library.Album.all_keys() + library.Item.all_keys()))
# Collect commands, their aliases, and their help text
cmd_names_help = []
for cmd in beetcmds:
names = list(cmd.aliases)
names.append(cmd.name)
for name in names:
cmd_names_help.append((name, cmd.help))
# Concatenate the string
totstring = f"{HEAD}\n"
totstring += get_cmds_list([name[0] for name in cmd_names_help])
totstring += "" if nobasicfields else get_standard_fields(fields)
totstring += get_extravalues(lib, extravalues) if extravalues else ""
totstring += "\n# ====== setup basic beet completion =====\n\n"
totstring += get_basic_beet_options()
totstring += "\n# ====== setup field completion for subcommands =====\n"
totstring += get_subcommands(cmd_names_help, nobasicfields, extravalues)
# Set up completion for all the command options
totstring += get_all_commands(beetcmds)
with open(completion_file_path, "w") as fish_file:
fish_file.write(totstring)
def _escape(name):
# Escape ? in fish
if name == "?":
name = f"\\{name}"
return name
def get_cmds_list(cmds_names):
# Make a list of all Beets core & plugin commands
return f"set CMDS {' '.join(cmds_names)}\n\n"
def get_standard_fields(fields):
# Make a list of album/track fields and append with ':'
fields = (f"{field}:" for field in fields)
return f"set FIELDS {' '.join(fields)}\n\n"
def get_extravalues(lib, extravalues):
# Make a list of all values from an album/track field.
# 'beet ls albumartist: ' yields completions for ABBA, Beatles, etc.
word = ""
values_set = get_set_of_values_for_field(lib, extravalues)
for fld in extravalues:
extraname = f"{fld.upper()}S"
word += f"set {extraname} {' '.join(sorted(values_set[fld]))}\n\n"
return word
def get_set_of_values_for_field(lib, fields):
# Get unique values from a specified album/track field
fields_dict = {}
for each in fields:
fields_dict[each] = set()
for item in lib.items():
for field in fields:
fields_dict[field].add(wrap(item[field]))
return fields_dict
def get_basic_beet_options():
word = (
BL_NEED2.format("-l format-item", "-f -d 'print with custom format'")
+ BL_NEED2.format("-l format-album", "-f -d 'print with custom format'")
+ BL_NEED2.format(
"-s l -l library", "-f -r -d 'library database file to use'"
)
+ BL_NEED2.format(
"-s d -l directory", "-f -r -d 'destination music directory'"
)
+ BL_NEED2.format(
"-s v -l verbose", "-f -d 'print debugging information'"
)
+ BL_NEED2.format(
"-s c -l config", "-f -r -d 'path to configuration file'"
)
+ BL_NEED2.format(
"-s h -l help", "-f -d 'print this help message and exit'"
)
)
return word
def get_subcommands(cmd_name_and_help, nobasicfields, extravalues):
# Formatting for Fish to complete our fields/values
word = ""
for cmdname, cmdhelp in cmd_name_and_help:
cmdname = _escape(cmdname)
word += f"\n# ------ fieldsetups for {cmdname} -------\n"
word += BL_NEED2.format(
f"-a {cmdname}", f"-f -d {wrap(clean_whitespace(cmdhelp))}"
)
if nobasicfields is False:
word += BL_USE3.format(
cmdname,
f"-a {wrap('$FIELDS')}",
f"-f -d {wrap('fieldname')}",
)
if extravalues:
for f in extravalues:
setvar = wrap(f"${f.upper()}S")
word += " ".join(
BL_EXTRA3.format(
f"{cmdname} {f}:",
f"-f -A -a {setvar}",
f"-d {wrap(f)}",
).split()
)
word += "\n"
return word
def get_all_commands(beetcmds):
# Formatting for Fish to complete command options
word = ""
for cmd in beetcmds:
names = list(cmd.aliases)
names.append(cmd.name)
for name in names:
name = _escape(name)
word += f"\n\n\n# ====== completions for {name} =====\n"
for option in cmd.parser._get_all_options()[1:]:
cmd_l = (
f" -l {option._long_opts[0].replace('--', '')}"
if option._long_opts
else ""
)
cmd_s = (
f" -s {option._short_opts[0].replace('-', '')}"
if option._short_opts
else ""
)
cmd_need_arg = " -r " if option.nargs in [1] else ""
cmd_helpstr = (
f" -d {wrap(' '.join(option.help.split()))}"
if option.help
else ""
)
cmd_arglist = (
f" -a {wrap(' '.join(option.choices))}"
if option.choices
else ""
)
word += " ".join(
BL_USE3.format(
name,
f"{cmd_need_arg}{cmd_s}{cmd_l} -f {cmd_arglist}",
cmd_helpstr,
).split()
)
word += "\n"
word = word + BL_USE3.format(
name,
"-s h -l help -f",
f"-d {wrap('print help')}",
)
return word
def clean_whitespace(word):
# Remove excess whitespace and tabs in a string
return " ".join(word.split())
def wrap(word):
# Need " or ' around strings, but watch out if they're in the string.
# If both quote characters appear, replace embedded double quotes with
# a sentinel and wrap in double quotes.
sptoken = "\uffff"
if '"' in word and "'" in word:
word = word.replace('"', sptoken)
return f'"{word}"'
tok = '"' if "'" in word else "'"
return f"{tok}{word}{tok}"
beetbox-beets-c1877b7/beetsplug/freedesktop.py 0000664 0000000 0000000 00000002606 15073551743 0021503 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Matt Lichtenberg.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Creates freedesktop.org-compliant .directory files on an album level."""
from beets import ui
from beets.plugins import BeetsPlugin
class FreedesktopPlugin(BeetsPlugin):
def commands(self):
deprecated = ui.Subcommand(
"freedesktop",
help="Print a message to redirect to thumbnails --dolphin",
)
deprecated.func = self.deprecation_message
return [deprecated]
def deprecation_message(self, lib, opts, args):
ui.print_(
"This plugin is deprecated. Its functionality is "
"superseded by the 'thumbnails' plugin"
)
ui.print_(
"'thumbnails --dolphin' replaces freedesktop. See doc & "
"changelog for more information"
)
beetbox-beets-c1877b7/beetsplug/fromfilename.py 0000664 0000000 0000000 00000013132 15073551743 0021630 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Jan-Erik Dahlin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""If the title is empty, try to extract it from the filename
(possibly also extract track and artist)
"""
import os
import re
from beets import plugins
from beets.util import displayable_path
# Filename field extraction patterns.
PATTERNS = [
# Useful patterns.
(
r"^(?P\d+)\.?\s*-\s*(?P.+?)\s*-\s*(?P.+?)"
r"(\s*-\s*(?P.*))?$"
),
r"^(?P.+?)\s*-\s*(?P.+?)(\s*-\s*(?P.*))?$",
r"^(?P\d+)\.?[\s_-]+(?P.+)$",
r"^(?P.+) by (?P.+)$",
r"^(?P\d+).*$",
r"^(?P.+)$",
]
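# A worked example of the first pattern above, assuming the reconstructed
# group names: the filename "01 - Artist - Title" yields
#     {"track": "01", "artist": "Artist", "title": "Title", "tag": None}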
# Titles considered "empty" and in need of replacement.
BAD_TITLE_PATTERNS = [
r"^$",
]
def equal(seq):
"""Determine whether a sequence holds identical elements."""
return len(set(seq)) <= 1
def equal_fields(matchdict, field):
"""Do all items in `matchdict`, whose values are dictionaries, have
the same value for `field`? (If they do, the field is probably not
the title.)
"""
return equal(m[field] for m in matchdict.values())
def all_matches(names, pattern):
"""If all the filenames in the item/filename mapping match the
pattern, return a dictionary mapping the items to dictionaries
giving the value for each named subpattern in the match. Otherwise,
return None.
"""
matches = {}
for item, name in names.items():
m = re.match(pattern, name, re.IGNORECASE)
if m and m.groupdict():
# Only yield a match when the regex applies *and* has
# capture groups. Otherwise, no information can be extracted
# from the filename.
matches[item] = m.groupdict()
else:
return None
return matches
def bad_title(title):
"""Determine whether a given title is "bad" (empty or otherwise
meaningless) and in need of replacement.
"""
for pat in BAD_TITLE_PATTERNS:
if re.match(pat, title, re.IGNORECASE):
return True
return False
def apply_matches(d, log):
"""Given a mapping from items to field dicts, apply the fields to
the objects.
"""
some_map = list(d.values())[0]
keys = some_map.keys()
# Only proceed if the "tag" field is equal across all filenames.
if "tag" in keys and not equal_fields(d, "tag"):
return
# Given both an "artist" and "title" field, assume that one is
# *actually* the artist, which must be uniform, and use the other
# for the title. This, of course, won't work for VA albums.
# Only check for "artist": patterns containing it, also contain "title"
if "artist" in keys:
if equal_fields(d, "artist"):
artist = some_map["artist"]
title_field = "title"
elif equal_fields(d, "title"):
artist = some_map["title"]
title_field = "artist"
else:
# Both vary. Abort.
return
for item in d:
if not item.artist:
item.artist = artist
log.info("Artist replaced with: {.artist}", item)
# otherwise, if the pattern contains "title", use that for title_field
elif "title" in keys:
title_field = "title"
else:
title_field = None
# Apply the title and track, if any.
for item in d:
if title_field and bad_title(item.title):
item.title = str(d[item][title_field])
log.info("Title replaced with: {.title}", item)
if "track" in d[item] and item.track == 0:
item.track = int(d[item]["track"])
log.info("Track replaced with: {.track}", item)
# Plugin structure and hook into import process.
class FromFilenamePlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
self.register_listener("import_task_start", self.filename_task)
def filename_task(self, task, session):
"""Examine each item in the task to see if we can extract a title
from the filename. Try to match all filenames to a number of
regexps, starting with the most complex patterns and successively
trying less complex patterns. As soon as all filenames match the
same regex we can make an educated guess of which part of the
regex that contains the title.
"""
items = task.items if task.is_album else [task.item]
# Look for suspicious (empty or meaningless) titles.
missing_titles = sum(bad_title(i.title) for i in items)
if missing_titles:
# Get the base filenames (no path or extension).
names = {}
for item in items:
path = displayable_path(item.path)
name, _ = os.path.splitext(os.path.basename(path))
names[item] = name
# Look for useful information in the filenames.
for pattern in PATTERNS:
self._log.debug(f"Trying pattern: {pattern}")
d = all_matches(names, pattern)
if d:
apply_matches(d, self._log)
beetbox-beets-c1877b7/beetsplug/ftintitle.py 0000664 0000000 0000000 00000017003 15073551743 0021167 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Verrus, <github.com/Verrus/beets-plugin-featInTitle>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Moves "featured" artists to the title from the artist field."""
from __future__ import annotations
import re
from typing import TYPE_CHECKING
from beets import plugins, ui
if TYPE_CHECKING:
from beets.importer import ImportSession, ImportTask
from beets.library import Item
def split_on_feat(
artist: str, for_artist: bool = True
) -> tuple[str, str | None]:
"""Given an artist string, split the "main" artist from any artist
on the right-hand side of a string like "feat". Return the main
artist, which is always a string, and the featuring artist, which
may be a string or None if none is present.
"""
# split on the first "feat".
regex = re.compile(plugins.feat_tokens(for_artist), re.IGNORECASE)
parts = tuple(s.strip() for s in regex.split(artist, 1))
if len(parts) == 1:
return parts[0], None
else:
assert len(parts) == 2 # help mypy out
return parts
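# A behaviour sketch for split_on_feat() (artist names are illustrative):
#     split_on_feat("Alice feat. Bob")  ->  ("Alice", "Bob")
#     split_on_feat("Alice")            ->  ("Alice", None)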
def contains_feat(title: str) -> bool:
"""Determine whether the title contains a "featured" marker."""
return bool(
re.search(
plugins.feat_tokens(for_artist=False),
title,
flags=re.IGNORECASE,
)
)
def find_feat_part(artist: str, albumartist: str | None) -> str | None:
"""Attempt to find featured artists in the item's artist fields and
return the results. Returns None if no featured artist found.
"""
# Handle a wider variety of extraction cases if the album artist is
# contained within the track artist.
if albumartist and albumartist in artist:
albumartist_split = artist.split(albumartist, 1)
# If the last element of the split (the right-hand side of the
# album artist) is nonempty, then it probably contains the
# featured artist.
if albumartist_split[1] != "":
# Extract the featured artist from the right-hand side.
_, feat_part = split_on_feat(albumartist_split[1])
return feat_part
# Otherwise, if there's nothing on the right-hand side,
# look for a featuring artist on the left-hand side.
else:
lhs, _ = split_on_feat(albumartist_split[0])
if lhs:
return lhs
# Fall back to conservative handling of the track artist without relying
# on albumartist, which covers compilations using a 'Various Artists'
# albumartist and album tracks by a guest artist featuring a third artist.
_, feat_part = split_on_feat(artist, False)
return feat_part
class FtInTitlePlugin(plugins.BeetsPlugin):
def __init__(self) -> None:
super().__init__()
self.config.add(
{
"auto": True,
"drop": False,
"format": "feat. {}",
"keep_in_artist": False,
}
)
self._command = ui.Subcommand(
"ftintitle", help="move featured artists to the title field"
)
self._command.parser.add_option(
"-d",
"--drop",
dest="drop",
action="store_true",
default=None,
help="drop featuring from artists and ignore title update",
)
if self.config["auto"]:
self.import_stages = [self.imported]
def commands(self) -> list[ui.Subcommand]:
def func(lib, opts, args):
self.config.set_args(opts)
drop_feat = self.config["drop"].get(bool)
keep_in_artist_field = self.config["keep_in_artist"].get(bool)
write = ui.should_write()
for item in lib.items(args):
if self.ft_in_title(item, drop_feat, keep_in_artist_field):
item.store()
if write:
item.try_write()
self._command.func = func
return [self._command]
def imported(self, session: ImportSession, task: ImportTask) -> None:
"""Import hook for moving featuring artist automatically."""
drop_feat = self.config["drop"].get(bool)
keep_in_artist_field = self.config["keep_in_artist"].get(bool)
for item in task.imported_items():
if self.ft_in_title(item, drop_feat, keep_in_artist_field):
item.store()
def update_metadata(
self,
item: Item,
feat_part: str,
drop_feat: bool,
keep_in_artist_field: bool,
) -> None:
"""Choose how to add new artists to the title and set the new
metadata. Also, print out messages about any changes that are made.
If `drop_feat` is set, then do not add the artist to the title; just
remove it from the artist field.
"""
# In case the artist is kept, do not update the artist fields.
if keep_in_artist_field:
self._log.info(
"artist: {.artist} (Not changing due to keep_in_artist)", item
)
else:
track_artist, _ = split_on_feat(item.artist)
self._log.info("artist: {0.artist} -> {1}", item, track_artist)
item.artist = track_artist
if item.artist_sort:
# Just strip the featured artist from the sort name.
item.artist_sort, _ = split_on_feat(item.artist_sort)
# Only update the title if it does not already contain a featured
# artist and if we do not drop featuring information.
if not drop_feat and not contains_feat(item.title):
feat_format = self.config["format"].as_str()
new_format = feat_format.format(feat_part)
new_title = f"{item.title} {new_format}"
self._log.info("title: {.title} -> {}", item, new_title)
item.title = new_title
def ft_in_title(
self,
item: Item,
drop_feat: bool,
keep_in_artist_field: bool,
) -> bool:
"""Look for featured artists in the item's artist fields and move
them to the title.
Returns:
True if the item has been modified. False otherwise.
"""
artist = item.artist.strip()
albumartist = item.albumartist.strip()
# Check whether there is a featured artist on this track and the
# artist field does not exactly match the album artist field. In
# that case, we attempt to move the featured artist to the title.
if albumartist and artist == albumartist:
return False
_, featured = split_on_feat(artist)
if not featured:
return False
self._log.info("{.filepath}", item)
# Attempt to find the featured artist.
feat_part = find_feat_part(artist, albumartist)
if not feat_part:
self._log.info("no featuring artists found")
return False
# If we have a featuring artist, move it to the title.
self.update_metadata(item, feat_part, drop_feat, keep_in_artist_field)
return True
beetbox-beets-c1877b7/beetsplug/fuzzy.py 0000664 0000000 0000000 00000002751 15073551743 0020360 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Philippe Mongeau.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Provides a fuzzy matching query."""
import difflib
from beets import config
from beets.dbcore.query import StringFieldQuery
from beets.plugins import BeetsPlugin
class FuzzyQuery(StringFieldQuery[str]):
@classmethod
def string_match(cls, pattern: str, val: str):
# smartcase
if pattern.islower():
val = val.lower()
query_matcher = difflib.SequenceMatcher(None, pattern, val)
threshold = config["fuzzy"]["threshold"].as_number()
return query_matcher.quick_ratio() >= threshold
class FuzzyPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"prefix": "~",
"threshold": 0.7,
}
)
def queries(self):
prefix = self.config["prefix"].as_str()
return {prefix: FuzzyQuery}
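# With the defaults above, a query such as
#
#     beet ls '~nirvna'
#
# matches items whose field values reach a similarity ratio of at least 0.7
# against "nirvna" (e.g. "Nirvana"); all-lowercase patterns are matched
# case-insensitively ("smartcase").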
beetbox-beets-c1877b7/beetsplug/gmusic.py 0000664 0000000 0000000 00000001774 15073551743 0020464 0 ustar 00root root 0000000 0000000 # This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Deprecation warning for the removed gmusic plugin."""
from beets.plugins import BeetsPlugin
class Gmusic(BeetsPlugin):
def __init__(self):
super().__init__()
self._log.warning(
"The 'gmusic' plugin has been removed following the"
" shutdown of Google Play Music. Remove the plugin"
" from your configuration to silence this warning."
)
beetbox-beets-c1877b7/beetsplug/hook.py 0000664 0000000 0000000 00000005772 15073551743 0020137 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Allows custom commands to be run when an event is emitted by beets"""
from __future__ import annotations
import os
import shlex
import string
import subprocess
from typing import Any
from beets.plugins import BeetsPlugin
class BytesToStrFormatter(string.Formatter):
"""A variant of `string.Formatter` that converts `bytes` to `str`."""
def convert_field(self, value: Any, conversion: str | None) -> Any:
"""Converts the provided value given a conversion type.
This method decodes the converted value using the formatter's coding.
"""
converted = super().convert_field(value, conversion)
if isinstance(converted, bytes):
return os.fsdecode(converted)
return converted
class HookPlugin(BeetsPlugin):
"""Allows custom commands to be run when an event is emitted by beets"""
def __init__(self):
super().__init__()
self.config.add({"hooks": []})
hooks = self.config["hooks"].get(list)
for hook_index in range(len(hooks)):
hook = self.config["hooks"][hook_index]
hook_event = hook["event"].as_str()
hook_command = hook["command"].as_str()
self.create_and_register_hook(hook_event, hook_command)
def create_and_register_hook(self, event, command):
def hook_function(**kwargs):
if command is None or len(command) == 0:
self._log.error('invalid command "{}"', command)
return
# For backwards compatibility, use a string formatter that decodes
# bytes (in particular, paths) to strings.
formatter = BytesToStrFormatter()
command_pieces = [
formatter.format(piece, event=event, **kwargs)
for piece in shlex.split(command)
]
self._log.debug(
'running command "{}" for event {}',
" ".join(command_pieces),
event,
)
try:
subprocess.check_call(command_pieces)
except subprocess.CalledProcessError as exc:
self._log.error(
"hook for {} exited with status {.returncode}", event, exc
)
except OSError as exc:
self._log.error("hook for {} failed: {}", event, exc)
self.register_listener(event, hook_function)
beetbox-beets-c1877b7/beetsplug/ihate.py 0000664 0000000 0000000 00000005441 15073551743 0020262 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Blemjhoo Tezoulbr <baobab@heresiarch.info>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Warns you about things you hate (or even blocks import)."""
from beets.importer import Action
from beets.library import Album, Item, parse_query_string
from beets.plugins import BeetsPlugin
__author__ = "baobab@heresiarch.info"
__version__ = "2.0"
def summary(task):
"""Given an ImportTask, produce a short string identifying the
object.
"""
if task.is_album:
return f"{task.cur_artist} - {task.cur_album}"
else:
return f"{task.item.artist} - {task.item.title}"
class IHatePlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.register_listener(
"import_task_choice", self.import_task_choice_event
)
self.config.add(
{
"warn": [],
"skip": [],
}
)
@classmethod
def do_i_hate_this(cls, task, action_patterns):
"""Process group of patterns (warn or skip) and returns True if
task is hated and not whitelisted.
"""
if action_patterns:
for query_string in action_patterns:
query, _ = parse_query_string(
query_string,
Album if task.is_album else Item,
)
if any(query.match(item) for item in task.imported_items()):
return True
return False
def import_task_choice_event(self, session, task):
skip_queries = self.config["skip"].as_str_seq()
warn_queries = self.config["warn"].as_str_seq()
if task.choice_flag == Action.APPLY:
if skip_queries or warn_queries:
self._log.debug("processing your hate")
if self.do_i_hate_this(task, skip_queries):
task.choice_flag = Action.SKIP
self._log.info("skipped: {}", summary(task))
return
if self.do_i_hate_this(task, warn_queries):
self._log.info("you may hate this: {}", summary(task))
else:
self._log.debug("nothing to do")
else:
self._log.debug("user made a decision, nothing to do")
beetbox-beets-c1877b7/beetsplug/importadded.py 0000664 0000000 0000000 00000013133 15073551743 0021461 0 ustar 00root root 0000000 0000000 """Populate an item's `added` and `mtime` fields by using the file
modification time (mtime) of the item's source file before import.
Reimported albums and items are skipped.
"""
import os
from beets import importer, util
from beets.plugins import BeetsPlugin
class ImportAddedPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"preserve_mtimes": False,
"preserve_write_mtimes": False,
}
)
# item.id for new items that were reimported
self.reimported_item_ids = None
# album.path for old albums that were replaced by a reimported album
self.replaced_album_paths = None
# item path in the library to the mtime of the source file
self.item_mtime = {}
register = self.register_listener
register("import_task_created", self.check_config)
register("import_task_created", self.record_if_inplace)
register("import_task_files", self.record_reimported)
register("before_item_moved", self.record_import_mtime)
register("item_copied", self.record_import_mtime)
register("item_linked", self.record_import_mtime)
register("item_hardlinked", self.record_import_mtime)
register("item_reflinked", self.record_import_mtime)
register("album_imported", self.update_album_times)
register("item_imported", self.update_item_times)
register("after_write", self.update_after_write_time)
def check_config(self, task, session):
self.config["preserve_mtimes"].get(bool)
def reimported_item(self, item):
return item.id in self.reimported_item_ids
def reimported_album(self, album):
return album.path in self.replaced_album_paths
def record_if_inplace(self, task, session):
if not (
session.config["copy"]
or session.config["move"]
or session.config["link"]
or session.config["hardlink"]
or session.config["reflink"]
):
self._log.debug(
"In place import detected, recording mtimes from source paths"
)
items = (
[task.item]
if isinstance(task, importer.SingletonImportTask)
else task.items
)
for item in items:
self.record_import_mtime(item, item.path, item.path)
def record_reimported(self, task, session):
self.reimported_item_ids = {
item.id
for item, replaced_items in task.replaced_items.items()
if replaced_items
}
self.replaced_album_paths = set(task.replaced_albums.keys())
def write_file_mtime(self, path, mtime):
"""Write the given mtime to the destination path."""
stat = os.stat(util.syspath(path))
os.utime(util.syspath(path), (stat.st_atime, mtime))
def write_item_mtime(self, item, mtime):
"""Write the given mtime to an item's `mtime` field and to the mtime
of the item's file.
"""
# The file's mtime on disk must be in sync with the item's mtime
self.write_file_mtime(util.syspath(item.path), mtime)
item.mtime = mtime
def record_import_mtime(self, item, source, destination):
"""Record the file mtime of an item's path before its import."""
mtime = os.stat(util.syspath(source)).st_mtime
self.item_mtime[destination] = mtime
self._log.debug(
"Recorded mtime {} for item '{}' imported from '{}'",
mtime,
util.displayable_path(destination),
util.displayable_path(source),
)
def update_album_times(self, lib, album):
if self.reimported_album(album):
self._log.debug(
"Album '{.filepath}' is reimported, skipping import of "
"added dates for the album and its items.",
album,
)
return
album_mtimes = []
for item in album.items():
mtime = self.item_mtime.pop(item.path, None)
if mtime:
album_mtimes.append(mtime)
if self.config["preserve_mtimes"].get(bool):
self.write_item_mtime(item, mtime)
item.store()
album.added = min(album_mtimes)
self._log.debug(
"Import of album '{0.album}', selected album.added={0.added} "
"from item file mtimes.",
album,
)
album.store()
def update_item_times(self, lib, item):
if self.reimported_item(item):
self._log.debug(
"Item '{.filepath}' is reimported, skipping import of added date.",
item,
)
return
mtime = self.item_mtime.pop(item.path, None)
if mtime:
item.added = mtime
if self.config["preserve_mtimes"].get(bool):
self.write_item_mtime(item, mtime)
self._log.debug(
"Import of item '{0.filepath}', selected item.added={0.added}",
item,
)
item.store()
def update_after_write_time(self, item, path):
"""Update the mtime of the item's file with the item.added value
after each write of the item if `preserve_write_mtimes` is enabled.
"""
if item.added:
if self.config["preserve_write_mtimes"].get(bool):
self.write_item_mtime(item, item.added)
self._log.debug(
"Write of item '{0.filepath}', selected item.added={0.added}",
item,
)
beetbox-beets-c1877b7/beetsplug/importfeeds.py 0000664 0000000 0000000 00000012255 15073551743 0021512 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Write paths of imported files in various formats to ease later import in a
music player. Also allow printing the new file locations to stdout in case
one wants to manually add music to a player by its path.
"""
import datetime
import os
import re
from beets import config
from beets.plugins import BeetsPlugin
from beets.util import bytestring_path, link, mkdirall, normpath, syspath
M3U_DEFAULT_NAME = "imported.m3u"
def _build_m3u_session_filename(basename):
"""Builds unique m3u filename by putting current date between given
basename and file ending."""
date = datetime.datetime.now().strftime("%Y%m%d_%Hh%M")
basename = re.sub(r"(\.m3u|\.M3U)", "", basename)
path = normpath(
os.path.join(
config["importfeeds"]["dir"].as_filename(), f"{basename}_{date}.m3u"
)
)
return path
def _build_m3u_filename(basename):
"""Builds unique m3u filename by appending given basename to current
date."""
basename = re.sub(r"[\s,/\\'\"]", "_", basename)
date = datetime.datetime.now().strftime("%Y%m%d_%Hh%M")
path = normpath(
os.path.join(
config["importfeeds"]["dir"].as_filename(),
f"{date}_{basename}.m3u",
)
)
return path
def _write_m3u(m3u_path, items_paths):
"""Append relative paths to items into m3u file."""
mkdirall(m3u_path)
with open(syspath(m3u_path), "ab") as f:
for path in items_paths:
f.write(path + b"\n")
class ImportFeedsPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"formats": [],
"m3u_name": "imported.m3u",
"dir": None,
"relative_to": None,
"absolute_path": False,
}
)
relative_to = self.config["relative_to"].get()
if relative_to:
self.config["relative_to"] = normpath(relative_to)
else:
self.config["relative_to"] = self.get_feeds_dir()
self.register_listener("album_imported", self.album_imported)
self.register_listener("item_imported", self.item_imported)
self.register_listener("import_begin", self.import_begin)
def get_feeds_dir(self):
feeds_dir = self.config["dir"].get()
if feeds_dir:
return os.path.expanduser(bytestring_path(feeds_dir))
return config["directory"].as_filename()
def _record_items(self, lib, basename, items):
"""Records relative paths to the given items for each feed format"""
feedsdir = bytestring_path(self.get_feeds_dir())
formats = self.config["formats"].as_str_seq()
relative_to = self.config["relative_to"].get() or self.get_feeds_dir()
relative_to = bytestring_path(relative_to)
paths = []
for item in items:
if self.config["absolute_path"]:
paths.append(item.path)
else:
try:
relpath = os.path.relpath(item.path, relative_to)
except ValueError:
# On Windows, it is sometimes not possible to construct a
# relative path (if the files are on different disks).
relpath = item.path
paths.append(relpath)
if "m3u" in formats:
m3u_basename = bytestring_path(self.config["m3u_name"].as_str())
m3u_path = os.path.join(feedsdir, m3u_basename)
_write_m3u(m3u_path, paths)
if "m3u_session" in formats:
m3u_path = os.path.join(feedsdir, self.m3u_session)
_write_m3u(m3u_path, paths)
if "m3u_multi" in formats:
m3u_path = _build_m3u_filename(basename)
_write_m3u(m3u_path, paths)
if "link" in formats:
for path in paths:
dest = os.path.join(feedsdir, os.path.basename(path))
if not os.path.exists(syspath(dest)):
link(path, dest)
if "echo" in formats:
self._log.info("Location of imported music:")
for path in paths:
self._log.info(" {}", path)
def album_imported(self, lib, album):
self._record_items(lib, album.album, album.items())
def item_imported(self, lib, item):
self._record_items(lib, item.title, [item])
def import_begin(self, session):
formats = self.config["formats"].as_str_seq()
if "m3u_session" in formats:
self.m3u_session = _build_m3u_session_filename(
self.config["m3u_name"].as_str()
)
beetbox-beets-c1877b7/beetsplug/info.py 0000664 0000000 0000000 00000015660 15073551743 0020127 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Shows file metadata."""
import os
import mediafile
from beets import ui
from beets.library import Item
from beets.plugins import BeetsPlugin
from beets.util import displayable_path, normpath, syspath
def tag_data(lib, args, album=False):
query = []
for arg in args:
path = normpath(arg)
if os.path.isfile(syspath(path)):
yield tag_data_emitter(path)
else:
query.append(arg)
if query:
for item in lib.items(query):
yield tag_data_emitter(item.path)
def tag_fields():
fields = set(mediafile.MediaFile.readable_fields())
fields.add("art")
return fields
def tag_data_emitter(path):
def emitter(included_keys):
if included_keys == "*":
fields = tag_fields()
else:
fields = included_keys
if "images" in fields:
# We can't serialize the image data.
fields.remove("images")
mf = mediafile.MediaFile(syspath(path))
tags = {}
for field in fields:
if field == "art":
tags[field] = mf.art is not None
else:
tags[field] = getattr(mf, field, None)
# create a temporary Item to take advantage of __format__
item = Item.from_path(syspath(path))
return tags, item
return emitter
def library_data(lib, args, album=False):
for item in lib.albums(args) if album else lib.items(args):
yield library_data_emitter(item)
def library_data_emitter(item):
def emitter(included_keys):
data = dict(item.formatted(included_keys=included_keys))
return data, item
return emitter
def update_summary(summary, tags):
for key, value in tags.items():
if key not in summary:
summary[key] = value
elif summary[key] != value:
summary[key] = "[various]"
return summary
def print_data(data, item=None, fmt=None):
"""Print, with optional formatting, the fields of a single element.
If no format string `fmt` is passed, the entries on `data` are printed one
in each line, with the format 'field: value'. If `fmt` is not `None`, the
`item` is printed according to `fmt`, using the `Item.__format__`
machinery.
"""
if fmt:
# use fmt specified by the user
ui.print_(format(item, fmt))
return
path = displayable_path(item.path) if item else None
formatted = {}
for key, value in data.items():
if isinstance(value, list):
formatted[key] = "; ".join(value)
elif value is not None:
formatted[key] = value
if len(formatted) == 0:
return
maxwidth = max(len(key) for key in formatted)
if path:
ui.print_(displayable_path(path))
for field in sorted(formatted):
value = formatted[field]
if isinstance(value, list):
value = "; ".join(value)
ui.print_(f"{field:>{maxwidth}}: {value}")
def print_data_keys(data, item=None):
"""Print only the keys (field names) for an item."""
path = displayable_path(item.path) if item else None
formatted = []
for key, value in data.items():
formatted.append(key)
if len(formatted) == 0:
return
if path:
ui.print_(displayable_path(path))
for field in sorted(formatted):
ui.print_(f" {field}")
class InfoPlugin(BeetsPlugin):
def commands(self):
cmd = ui.Subcommand("info", help="show file metadata")
cmd.func = self.run
cmd.parser.add_option(
"-l",
"--library",
action="store_true",
help="show library fields instead of tags",
)
cmd.parser.add_option(
"-a",
"--album",
action="store_true",
help='show album fields instead of tracks (implies "--library")',
)
cmd.parser.add_option(
"-s",
"--summarize",
action="store_true",
help="summarize the tags of all files",
)
cmd.parser.add_option(
"-i",
"--include-keys",
default=[],
action="append",
dest="included_keys",
help="comma separated list of keys to show",
)
cmd.parser.add_option(
"-k",
"--keys-only",
action="store_true",
help="show only the keys",
)
cmd.parser.add_format_option(target="item")
return [cmd]
def run(self, lib, opts, args):
"""Print tag info or library data for each file referenced by args.
Main entry point for the `beet info ARGS...` command.
If an argument is a path pointing to an existing file, then the tags
of that file are printed. All other arguments are considered
queries, and for each item matching all those queries the tags from
the file are printed.
If `opts.summarize` is true, the function merges all tags into one
dictionary and only prints that. If two files have different values
for the same tag, the value is set to '[various]'
"""
if opts.library or opts.album:
data_collector = library_data
else:
data_collector = tag_data
included_keys = []
for keys in opts.included_keys:
included_keys.extend(keys.split(","))
# Drop path even if user provides it multiple times
included_keys = [k for k in included_keys if k != "path"]
first = True
summary = {}
for data_emitter in data_collector(
lib,
args,
album=opts.album,
):
try:
data, item = data_emitter(included_keys or "*")
except (mediafile.UnreadableFileError, OSError) as ex:
self._log.error("cannot read file: {}", ex)
continue
if opts.summarize:
update_summary(summary, data)
else:
if not first:
ui.print_()
if opts.keys_only:
print_data_keys(data, item)
else:
fmt = opts.format or None
print_data(data, item, fmt)
first = False
if opts.summarize:
print_data(summary)
beetbox-beets-c1877b7/beetsplug/inline.py 0000664 0000000 0000000 00000010375 15073551743 0020450 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Allows inline path template customization code in the config file."""
import itertools
import traceback
from beets import config
from beets.plugins import BeetsPlugin
FUNC_NAME = "__INLINE_FUNC__"
class InlineError(Exception):
"""Raised when a runtime error occurs in an inline expression."""
def __init__(self, code, exc):
super().__init__(
f"error in inline path field code:\n{code}\n{type(exc).__name__}: {exc}"
)
def _compile_func(body):
"""Given Python code for a function body, return a compiled
callable that invokes that code.
"""
body = body.replace("\n", "\n ")
body = f"def {FUNC_NAME}():\n {body}"
code = compile(body, "inline", "exec")
env = {}
eval(code, env)
return env[FUNC_NAME]
class InlinePlugin(BeetsPlugin):
def __init__(self):
super().__init__()
config.add(
{
"pathfields": {}, # Legacy name.
"item_fields": {},
"album_fields": {},
}
)
# Item fields.
for key, view in itertools.chain(
config["item_fields"].items(), config["pathfields"].items()
):
self._log.debug("adding item field {}", key)
func = self.compile_inline(view.as_str(), False)
if func is not None:
self.template_fields[key] = func
# Album fields.
for key, view in config["album_fields"].items():
self._log.debug("adding album field {}", key)
func = self.compile_inline(view.as_str(), True)
if func is not None:
self.album_template_fields[key] = func
def compile_inline(self, python_code, album):
"""Given a Python expression or function body, compile it as a path
field function. The returned function takes a single argument, an
Item, and returns a Unicode string. If the expression cannot be
compiled, then an error is logged and this function returns None.
"""
# First, try compiling as a single function.
try:
code = compile(f"({python_code})", "inline", "eval")
except SyntaxError:
# Fall back to a function body.
try:
func = _compile_func(python_code)
except SyntaxError:
self._log.error(
"syntax error in inline field definition:\n{}",
traceback.format_exc(),
)
return
else:
is_expr = False
else:
is_expr = True
def _dict_for(obj):
out = dict(obj)
if album:
out["items"] = list(obj.items())
return out
if is_expr:
# For expressions, just evaluate and return the result.
def _expr_func(obj):
values = _dict_for(obj)
try:
return eval(code, values)
except Exception as exc:
raise InlineError(python_code, exc)
return _expr_func
else:
# For function bodies, invoke the function with values as global
# variables.
def _func_func(obj):
old_globals = dict(func.__globals__)
func.__globals__.update(_dict_for(obj))
try:
return func()
except Exception as exc:
raise InlineError(python_code, exc)
finally:
func.__globals__.clear()
func.__globals__.update(old_globals)
return _func_func
beetbox-beets-c1877b7/beetsplug/ipfs.py 0000664 0000000 0000000 00000024173 15073551743 0020134 0 ustar 00root root 0000000 0000000 # This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds support for ipfs. Requires go-ipfs and a running ipfs daemon"""
import os
import shutil
import subprocess
import tempfile
from beets import config, library, ui, util
from beets.plugins import BeetsPlugin
class IPFSPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"auto": True,
"nocopy": False,
}
)
if self.config["auto"]:
self.import_stages = [self.auto_add]
def commands(self):
cmd = ui.Subcommand("ipfs", help="interact with ipfs")
cmd.parser.add_option(
"-a", "--add", dest="add", action="store_true", help="Add to ipfs"
)
cmd.parser.add_option(
"-g", "--get", dest="get", action="store_true", help="Get from ipfs"
)
cmd.parser.add_option(
"-p",
"--publish",
dest="publish",
action="store_true",
help="Publish local library to ipfs",
)
cmd.parser.add_option(
"-i",
"--import",
dest="_import",
action="store_true",
help="Import remote library from ipfs",
)
cmd.parser.add_option(
"-l",
"--list",
dest="_list",
action="store_true",
help="Query imported libraries",
)
cmd.parser.add_option(
"-m",
"--play",
dest="play",
action="store_true",
help="Play music from remote libraries",
)
def func(lib, opts, args):
if opts.add:
for album in lib.albums(args):
if len(album.items()) == 0:
self._log.info(
"{} does not contain items, aborting", album
)
continue
self.ipfs_add(album)
album.store()
if opts.get:
self.ipfs_get(lib, args)
if opts.publish:
self.ipfs_publish(lib)
if opts._import:
self.ipfs_import(lib, args)
if opts._list:
self.ipfs_list(lib, args)
if opts.play:
self.ipfs_play(lib, opts, args)
cmd.func = func
return [cmd]
def auto_add(self, session, task):
if task.is_album:
if self.ipfs_add(task.album):
task.album.store()
def ipfs_play(self, lib, opts, args):
from beetsplug.play import PlayPlugin
jlib = self.get_remote_lib(lib)
player = PlayPlugin()
config["play"]["relative_to"] = None
player.album = True
player.play_music(jlib, player, args)
def ipfs_add(self, album):
try:
album_dir = album.item_dir()
except AttributeError:
return False
try:
if album.ipfs:
self._log.debug("{} already added", album_dir)
# Already added to ipfs
return False
except AttributeError:
pass
self._log.info("Adding {} to ipfs", album_dir)
if self.config["nocopy"]:
cmd = "ipfs add --nocopy -q -r".split()
else:
cmd = "ipfs add -q -r".split()
cmd.append(album_dir)
try:
output = util.command_output(cmd).stdout.split()
except (OSError, subprocess.CalledProcessError) as exc:
self._log.error("Failed to add {}, error: {}", album_dir, exc)
return False
length = len(output)
for linenr, line in enumerate(output):
line = line.strip()
if linenr == length - 1:
# last printed line is the album hash
self._log.info("album: {}", line)
album.ipfs = line
else:
try:
item = album.items()[linenr]
self._log.info("item: {}", line)
item.ipfs = line
item.store()
except IndexError:
# if there's non music files in the to-add folder they'll
# get ignored here
pass
return True
def ipfs_get(self, lib, query):
query = query[0]
# Check if query is a hash
# TODO: generalize to other hashes; probably use a multihash
# implementation
if query.startswith("Qm") and len(query) == 46:
self.ipfs_get_from_hash(lib, query)
else:
albums = self.query(lib, query)
for album in albums:
self.ipfs_get_from_hash(lib, album.ipfs)
def ipfs_get_from_hash(self, lib, _hash):
try:
cmd = "ipfs get".split()
cmd.append(_hash)
util.command_output(cmd)
except (OSError, subprocess.CalledProcessError) as err:
self._log.error(
"Failed to get {} from ipfs.\n{.output}", _hash, err
)
return False
self._log.info("Getting {} from ipfs", _hash)
imp = ui.commands.TerminalImportSession(
lib, loghandler=None, query=None, paths=[_hash]
)
imp.run()
# This uses a relative path, hence we cannot use util.syspath(_hash,
# prefix=True). However, that should be fine since the hash will not
# exceed MAX_PATH.
shutil.rmtree(util.syspath(_hash, prefix=False))
def ipfs_publish(self, lib):
with tempfile.NamedTemporaryFile() as tmp:
self.ipfs_added_albums(lib, tmp.name)
try:
if self.config["nocopy"]:
cmd = "ipfs add --nocopy -q ".split()
else:
cmd = "ipfs add -q ".split()
cmd.append(tmp.name)
output = util.command_output(cmd).stdout
except (OSError, subprocess.CalledProcessError) as err:
msg = f"Failed to publish library. Error: {err}"
self._log.error(msg)
return False
self._log.info("hash of library: {}", output)
def ipfs_import(self, lib, args):
_hash = args[0]
if len(args) > 1:
lib_name = args[1]
else:
lib_name = _hash
lib_root = os.path.dirname(lib.path)
remote_libs = os.path.join(lib_root, b"remotes")
if not os.path.exists(remote_libs):
try:
os.makedirs(remote_libs)
except OSError as e:
msg = f"Could not create {remote_libs}. Error: {e}"
self._log.error(msg)
return False
path = os.path.join(remote_libs, lib_name.encode() + b".db")
if not os.path.exists(path):
cmd = f"ipfs get {_hash} -o".split()
cmd.append(path)
try:
util.command_output(cmd)
except (OSError, subprocess.CalledProcessError):
self._log.error("Could not import {}", _hash)
return False
# add all albums from remotes into a combined library
jpath = os.path.join(remote_libs, b"joined.db")
jlib = library.Library(jpath)
nlib = library.Library(path)
for album in nlib.albums():
if not self.already_added(album, jlib):
new_album = []
for item in album.items():
item.id = None
new_album.append(item)
added_album = jlib.add_album(new_album)
added_album.ipfs = album.ipfs
added_album.store()
def already_added(self, check, jlib):
for jalbum in jlib.albums():
if jalbum.mb_albumid == check.mb_albumid:
return True
return False
def ipfs_list(self, lib, args):
fmt = config["format_album"].get()
try:
albums = self.query(lib, args)
except OSError:
ui.print_("No imported libraries yet.")
return
for album in albums:
ui.print_(format(album, fmt), " : ", album.ipfs.decode())
def query(self, lib, args):
rlib = self.get_remote_lib(lib)
albums = rlib.albums(args)
return albums
def get_remote_lib(self, lib):
lib_root = os.path.dirname(lib.path)
remote_libs = os.path.join(lib_root, b"remotes")
path = os.path.join(remote_libs, b"joined.db")
if not os.path.isfile(path):
raise OSError
return library.Library(path)
def ipfs_added_albums(self, rlib, tmpname):
"""Returns a new library with only albums/items added to ipfs"""
tmplib = library.Library(tmpname)
for album in rlib.albums():
try:
if album.ipfs:
self.create_new_album(album, tmplib)
except AttributeError:
pass
return tmplib
def create_new_album(self, album, tmplib):
items = []
for item in album.items():
try:
if not item.ipfs:
break
except AttributeError:
pass
item_path = os.fsdecode(os.path.basename(item.path))
# Clear current path from item
item.path = f"/ipfs/{album.ipfs}/{item_path}"
item.id = None
items.append(item)
if len(items) < 1:
return False
self._log.info("Adding '{}' to temporary library", album)
new_album = tmplib.add_album(items)
new_album.ipfs = album.ipfs
new_album.store(inherit=False)
beetbox-beets-c1877b7/beetsplug/keyfinder.py 0000664 0000000 0000000 00000006057 15073551743 0021154 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Uses the `KeyFinder` program to add the `initial_key` field."""
import os.path
import subprocess
from beets import ui, util
from beets.plugins import BeetsPlugin
class KeyFinderPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"bin": "KeyFinder",
"auto": True,
"overwrite": False,
}
)
if self.config["auto"].get(bool):
self.import_stages = [self.imported]
def commands(self):
cmd = ui.Subcommand(
"keyfinder", help="detect and add initial key from audio"
)
cmd.func = self.command
return [cmd]
def command(self, lib, opts, args):
self.find_key(lib.items(args), write=ui.should_write())
def imported(self, session, task):
self.find_key(task.imported_items())
def find_key(self, items, write=False):
overwrite = self.config["overwrite"].get(bool)
command = [self.config["bin"].as_str()]
# The KeyFinder GUI program needs the -f flag before the path.
# keyfinder-cli is similar, but just wants the path with no flag.
if "keyfinder-cli" not in os.path.basename(command[0]).lower():
command.append("-f")
for item in items:
if item["initial_key"] and not overwrite:
continue
try:
output = util.command_output(
command + [util.syspath(item.path)]
).stdout
except (subprocess.CalledProcessError, OSError) as exc:
self._log.error("execution failed: {}", exc)
continue
try:
key_raw = output.rsplit(None, 1)[-1]
except IndexError:
# Sometimes keyfinder-cli returns 0 but with no key, usually
# when the file is silent or corrupt, so we log and skip.
self._log.error("no key returned for path: {.path}", item)
continue
try:
key = key_raw.decode("utf-8")
except UnicodeDecodeError:
self._log.error("output is invalid UTF-8")
continue
item["initial_key"] = key
self._log.info(
"added computed initial key {} for {.filepath}", key, item
)
if write:
item.try_write()
item.store()
beetbox-beets-c1877b7/beetsplug/kodiupdate.py 0000664 0000000 0000000 00000006467 15073551743 0021332 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2017, Pauli Kettunen.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates a Kodi library whenever the beets library is changed.
This is based on the Plex Update plugin.
Put something like the following in your config.yaml to configure:
kodi:
host: localhost
port: 8080
user: user
pwd: secret
"""
import requests
from beets.plugins import BeetsPlugin
def update_kodi(host, port, user, password):
"""Sends request to the Kodi api to start a library refresh."""
url = f"http://{host}:{port}/jsonrpc"
"""Content-Type: application/json is mandatory
according to the kodi jsonrpc documentation"""
headers = {"Content-Type": "application/json"}
# Create the payload. Id seems to be mandatory.
payload = {"jsonrpc": "2.0", "method": "AudioLibrary.Scan", "id": 1}
r = requests.post(
url,
auth=(user, password),
json=payload,
headers=headers,
timeout=10,
)
return r
class KodiUpdate(BeetsPlugin):
def __init__(self):
super().__init__("kodi")
# Adding defaults.
self.config.add(
[{"host": "localhost", "port": 8080, "user": "kodi", "pwd": "kodi"}]
)
self.config["user"].redact = True
self.config["pwd"].redact = True
self.register_listener("database_change", self.listen_for_db_change)
def listen_for_db_change(self, lib, model):
"""Listens for beets db change and register the update"""
self.register_listener("cli_exit", self.update)
def update(self, lib):
"""When the client exists try to send refresh request to Kodi server."""
self._log.info("Requesting a Kodi library update...")
kodi = self.config.get()
# Backwards compatibility in case not configured as an array
if not isinstance(kodi, list):
kodi = [kodi]
for instance in kodi:
# Try to send update request.
try:
r = update_kodi(
instance["host"],
instance["port"],
instance["user"],
instance["pwd"],
)
r.raise_for_status()
json = r.json()
if json.get("result") != "OK":
self._log.warning(
"Kodi update failed: JSON response was {0!r}", json
)
continue
self._log.info(
"Kodi update triggered for {}:{}",
instance["host"],
instance["port"],
)
except requests.exceptions.RequestException as e:
self._log.warning("Kodi update failed: {}", str(e))
continue
beetbox-beets-c1877b7/beetsplug/lastgenre/ 0000775 0000000 0000000 00000000000 15073551743 0020576 5 ustar 00root root 0000000 0000000 beetbox-beets-c1877b7/beetsplug/lastgenre/__init__.py 0000664 0000000 0000000 00000057740 15073551743 0022724 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Gets genres for imported music based on Last.fm tags.
Uses a provided whitelist file to determine which tags are valid genres.
The included (default) genre list was originally produced by scraping Wikipedia
and has been edited to remove some questionable entries.
The scraper script used is available here:
https://gist.github.com/1241307
"""
import os
import traceback
from pathlib import Path
from typing import Union
import pylast
import yaml
from beets import config, library, plugins, ui
from beets.library import Album, Item
from beets.util import plurality, unique_list
LASTFM = pylast.LastFMNetwork(api_key=plugins.LASTFM_KEY)
PYLAST_EXCEPTIONS = (
pylast.WSError,
pylast.MalformedResponseError,
pylast.NetworkError,
)
# Canonicalization tree processing.
def flatten_tree(elem, path, branches):
"""Flatten nested lists/dictionaries into lists of strings
(branches).
"""
if not path:
path = []
if isinstance(elem, dict):
for k, v in elem.items():
flatten_tree(v, path + [k], branches)
elif isinstance(elem, list):
for sub in elem:
flatten_tree(sub, path, branches)
else:
branches.append(path + [str(elem)])
def find_parents(candidate, branches):
"""Find parents genre of a given genre, ordered from the closest to
the further parent.
"""
for branch in branches:
try:
idx = branch.index(candidate.lower())
return list(reversed(branch[: idx + 1]))
except ValueError:
continue
return [candidate]
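# Worked example (illustrative): given a flattened branch
# ["electronic", "house", "progressive house"], calling
# find_parents("progressive house", branches) returns
# ["progressive house", "house", "electronic"]: the genre itself first,
# then its ancestors up to the root.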
# Main plugin logic.
WHITELIST = os.path.join(os.path.dirname(__file__), "genres.txt")
C14N_TREE = os.path.join(os.path.dirname(__file__), "genres-tree.yaml")
class LastGenrePlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"whitelist": True,
"min_weight": 10,
"count": 1,
"fallback": None,
"canonical": False,
"source": "album",
"force": False,
"keep_existing": False,
"auto": True,
"separator": ", ",
"prefer_specific": False,
"title_case": True,
"extended_debug": False,
}
)
self.setup()
def setup(self):
"""Setup plugin from config options"""
if self.config["auto"]:
self.import_stages = [self.imported]
self._genre_cache = {}
self.whitelist = self._load_whitelist()
self.c14n_branches, self.canonicalize = self._load_c14n_tree()
def _load_whitelist(self) -> set[str]:
"""Load the whitelist from a text file.
The default whitelist is used if the config value is True, an empty
string, or not set.
"""
whitelist = set()
wl_filename = self.config["whitelist"].get()
if wl_filename in (True, "", None): # Indicates the default whitelist.
wl_filename = WHITELIST
if wl_filename:
self._log.debug("Loading whitelist {}", wl_filename)
text = Path(wl_filename).expanduser().read_text(encoding="utf-8")
for line in text.splitlines():
if (line := line.strip().lower()) and not line.startswith("#"):
whitelist.add(line)
return whitelist
def _load_c14n_tree(self) -> tuple[list[list[str]], bool]:
"""Load the canonicalization tree from a YAML file.
The default tree is used if the config value is True, an empty string,
or not set, or if prefer_specific is enabled.
"""
c14n_branches: list[list[str]] = []
c14n_filename = self.config["canonical"].get()
canonicalize = c14n_filename is not False
# Default tree
if c14n_filename in (True, "", None) or (
# prefer_specific requires a tree, load default tree
not canonicalize and self.config["prefer_specific"].get()
):
c14n_filename = C14N_TREE
# Read the tree
if c14n_filename:
self._log.debug("Loading canonicalization tree {}", c14n_filename)
with Path(c14n_filename).expanduser().open(encoding="utf-8") as f:
genres_tree = yaml.safe_load(f)
flatten_tree(genres_tree, [], c14n_branches)
return c14n_branches, canonicalize
@property
def sources(self) -> tuple[str, ...]:
"""A tuple of allowed genre sources. May contain 'track',
'album', or 'artist'.
"""
source = self.config["source"].as_choice(("track", "album", "artist"))
if source == "track":
return "track", "album", "artist"
if source == "album":
return "album", "artist"
if source == "artist":
return ("artist",)
return tuple()
# More canonicalization and general helpers.
def _get_depth(self, tag):
"""Find the depth of a tag in the genres tree."""
depth = None
for branch in self.c14n_branches:
if tag in branch:
depth = branch.index(tag)
break
return depth
def _sort_by_depth(self, tags):
"""Given a list of tags, sort the tags by their depths in the
genre tree.
"""
depth_tag_pairs = [(self._get_depth(t), t) for t in tags]
depth_tag_pairs = [e for e in depth_tag_pairs if e[0] is not None]
depth_tag_pairs.sort(reverse=True)
return [p[1] for p in depth_tag_pairs]
def _resolve_genres(self, tags: list[str]) -> list[str]:
"""Canonicalize, sort and filter a list of genres.
- Returns an empty list if the input tags list is empty.
- If canonicalization is enabled, it extends the list by incorporating
parent genres from the canonicalization tree. When a whitelist is set,
only parent tags that pass a validity check (_is_valid) are included;
otherwise, it adds the oldest ancestor. Adding parent tags is stopped
when the count of tags reaches the configured limit (count).
- The tags list is then deduplicated to ensure only unique genres are
retained.
- If the 'prefer_specific' configuration is enabled, the list is sorted
by the specificity (depth in the canonicalization tree) of the genres.
- Finally applies whitelist filtering to ensure that only valid
genres are kept. (This may result in no genres at all being retained).
- Returns the filtered list of genres, limited to the configured count.
"""
if not tags:
return []
count = self.config["count"].get(int)
# Canonicalization (if enabled)
if self.canonicalize:
# Extend the list to consider tags parents in the c14n tree
tags_all = []
for tag in tags:
# Add parents that are in the whitelist, or add the oldest
# ancestor if no whitelist
if self.whitelist:
parents = [
x
for x in find_parents(tag, self.c14n_branches)
if self._is_valid(x)
]
else:
parents = [find_parents(tag, self.c14n_branches)[-1]]
tags_all += parents
# Stop if we have enough tags already, unless we need to find
# the most specific tag (instead of the most popular).
if (
not self.config["prefer_specific"]
and len(tags_all) >= count
):
break
tags = tags_all
tags = unique_list(tags)
# Sort the tags by specificity.
if self.config["prefer_specific"]:
tags = self._sort_by_depth(tags)
# c14n only adds allowed genres but we may have had forbidden genres in
# the original tags list
valid_tags = [t for t in tags if self._is_valid(t)]
return valid_tags[:count]
def fetch_genre(self, lastfm_obj):
"""Return the genre for a pylast entity or None if no suitable genre
can be found. Ex. 'Electronic, House, Dance'
"""
min_weight = self.config["min_weight"].get(int)
return self._tags_for(lastfm_obj, min_weight)
def _is_valid(self, genre: str) -> bool:
"""Check if the genre is valid.
Depending on the whitelist property, valid means a genre is in the
whitelist or any genre is allowed.
"""
if genre and (not self.whitelist or genre.lower() in self.whitelist):
return True
return False
# Cached last.fm entity lookups.
def _last_lookup(self, entity, method, *args):
"""Get a genre based on the named entity using the callable `method`
whose arguments are given in the sequence `args`. The genre lookup
is cached based on the entity name and the arguments.
Before the lookup, any Unicode hyphen (U+2010) in the arguments is
replaced with the ASCII "-" in order to return better results from
the Last.fm database.
"""
# Shortcut if we're missing metadata.
if any(not s for s in args):
return []
key = f"{entity}.{'-'.join(str(a) for a in args)}"
if key not in self._genre_cache:
args = [a.replace("\u2010", "-") for a in args]
self._genre_cache[key] = self.fetch_genre(method(*args))
genre = self._genre_cache[key]
if self.config["extended_debug"]:
self._log.debug("last.fm (unfiltered) {} tags: {}", entity, genre)
return genre
def fetch_album_genre(self, obj):
"""Return raw album genres from Last.fm for this Item or Album."""
return self._last_lookup(
"album", LASTFM.get_album, obj.albumartist, obj.album
)
def fetch_album_artist_genre(self, obj):
"""Return raw album artist genres from Last.fm for this Item or Album."""
return self._last_lookup("artist", LASTFM.get_artist, obj.albumartist)
def fetch_artist_genre(self, item):
"""Returns raw track artist genres from Last.fm for this Item."""
return self._last_lookup("artist", LASTFM.get_artist, item.artist)
def fetch_track_genre(self, obj):
"""Returns raw track genres from Last.fm for this Item."""
return self._last_lookup(
"track", LASTFM.get_track, obj.artist, obj.title
)
# Main processing: _get_genre() and helpers.
def _format_and_stringify(self, tags: list[str]) -> str:
"""Format to title_case if configured and return as delimited string."""
if self.config["title_case"]:
formatted = [tag.title() for tag in tags]
else:
formatted = tags
return self.config["separator"].as_str().join(formatted)
def _get_existing_genres(self, obj: Union[Album, Item]) -> list[str]:
"""Return a list of genres for this Item or Album. Empty string genres
are removed."""
separator = self.config["separator"].get()
if isinstance(obj, library.Item):
item_genre = obj.get("genre", with_album=False).split(separator)
else:
item_genre = obj.get("genre").split(separator)
# Filter out empty strings
return [g for g in item_genre if g]
def _combine_resolve_and_log(
self, old: list[str], new: list[str]
) -> list[str]:
"""Combine old and new genres and process via _resolve_genres."""
self._log.debug("raw last.fm tags: {}", new)
self._log.debug("existing genres taken into account: {}", old)
combined = old + new
return self._resolve_genres(combined)
def _get_genre(
self, obj: Union[Album, Item]
) -> tuple[Union[str, None], ...]:
"""Get the final genre string for an Album or Item object.
`self.sources` specifies allowed genre sources. Starting with the first
source in this tuple, the following stages run through until a genre is
found or no options are left:
- track (for Items only)
- album
- artist, albumartist or "most popular track genre" (for VA-albums)
- original fallback
- configured fallback
- None
A `(genre, label)` pair is returned, where `label` is a string used for
logging. For example, "keep + artist, whitelist" indicates that existing
genres were combined with new last.fm genres and whitelist filtering was
applied, while "artist, any" means only new last.fm genres are included
and the whitelist feature was disabled.
"""
def _try_resolve_stage(stage_label: str, keep_genres, new_genres):
"""Try to resolve genres for a given stage and log the result."""
resolved_genres = self._combine_resolve_and_log(
keep_genres, new_genres
)
if resolved_genres:
suffix = "whitelist" if self.whitelist else "any"
label = f"{stage_label}, {suffix}"
if keep_genres:
label = f"keep + {label}"
return self._format_and_stringify(resolved_genres), label
return None
keep_genres = []
new_genres = []
genres = self._get_existing_genres(obj)
if genres and not self.config["force"]:
# Without force pre-populated tags are returned as-is.
label = "keep any, no-force"
if isinstance(obj, library.Item):
return obj.get("genre", with_album=False), label
return obj.get("genre"), label
if self.config["force"]:
# Force doesn't keep any unless keep_existing is set.
# Whitelist validation is handled in _resolve_genres.
if self.config["keep_existing"]:
keep_genres = [g.lower() for g in genres]
# Run through stages: track, album, artist,
# album artist, or most popular track genre.
if isinstance(obj, library.Item) and "track" in self.sources:
if new_genres := self.fetch_track_genre(obj):
if result := _try_resolve_stage(
"track", keep_genres, new_genres
):
return result
if "album" in self.sources:
if new_genres := self.fetch_album_genre(obj):
if result := _try_resolve_stage(
"album", keep_genres, new_genres
):
return result
if "artist" in self.sources:
new_genres = []
if isinstance(obj, library.Item):
new_genres = self.fetch_artist_genre(obj)
stage_label = "artist"
elif obj.albumartist != config["va_name"].as_str():
new_genres = self.fetch_album_artist_genre(obj)
stage_label = "album artist"
else:
# For "Various Artists", pick the most popular track genre.
item_genres = []
for item in obj.items():
item_genre = None
if "track" in self.sources:
item_genre = self.fetch_track_genre(item)
if not item_genre:
item_genre = self.fetch_artist_genre(item)
if item_genre:
item_genres += item_genre
if item_genres:
most_popular, rank = plurality(item_genres)
new_genres = [most_popular]
stage_label = "most popular track"
self._log.debug(
'Most popular track genre "{}" ({}) for VA album.',
most_popular,
rank,
)
if new_genres:
if result := _try_resolve_stage(
stage_label, keep_genres, new_genres
):
return result
# Nothing found, leave original if configured and valid.
if obj.genre and self.config["keep_existing"]:
if not self.whitelist or self._is_valid(obj.genre.lower()):
return obj.genre, "original fallback"
# Return fallback string.
if fallback := self.config["fallback"].get():
return fallback, "fallback"
# No fallback configured.
return None, "fallback unconfigured"
# Beets plugin hooks and CLI.
def commands(self):
lastgenre_cmd = ui.Subcommand("lastgenre", help="fetch genres")
lastgenre_cmd.parser.add_option(
"-p",
"--pretend",
action="store_true",
help="show actions but do nothing",
)
lastgenre_cmd.parser.add_option(
"-f",
"--force",
dest="force",
action="store_true",
help="modify existing genres",
)
lastgenre_cmd.parser.add_option(
"-F",
"--no-force",
dest="force",
action="store_false",
help="don't modify existing genres",
)
lastgenre_cmd.parser.add_option(
"-k",
"--keep-existing",
dest="keep_existing",
action="store_true",
help="combine with existing genres when modifying",
)
lastgenre_cmd.parser.add_option(
"-K",
"--no-keep-existing",
dest="keep_existing",
action="store_false",
help="don't combine with existing genres when modifying",
)
lastgenre_cmd.parser.add_option(
"-s",
"--source",
dest="source",
type="string",
help="genre source: artist, album, or track",
)
lastgenre_cmd.parser.add_option(
"-A",
"--items",
action="store_false",
dest="album",
help="match items instead of albums",
)
lastgenre_cmd.parser.add_option(
"-a",
"--albums",
action="store_true",
dest="album",
help="match albums instead of items (default)",
)
lastgenre_cmd.parser.add_option(
"-d",
"--debug",
action="store_true",
dest="extended_debug",
help="extended last.fm debug logging",
)
lastgenre_cmd.parser.set_defaults(album=True)
def lastgenre_func(lib, opts, args):
write = ui.should_write()
pretend = getattr(opts, "pretend", False)
self.config.set_args(opts)
if opts.album:
# Fetch genres for whole albums
for album in lib.albums(args):
album_genre, src = self._get_genre(album)
prefix = "Pretend: " if pretend else ""
self._log.info(
'{}genre for album "{.album}" ({}): {}',
prefix,
album,
src,
album_genre,
)
if not pretend:
album.genre = album_genre
if "track" in self.sources:
album.store(inherit=False)
else:
album.store()
for item in album.items():
# If we're using track-level sources, also look up each
# track on the album.
if "track" in self.sources:
item_genre, src = self._get_genre(item)
self._log.info(
'{}genre for track "{.title}" ({}): {}',
prefix,
item,
src,
item_genre,
)
if not pretend:
item.genre = item_genre
item.store()
if write and not pretend:
item.try_write()
else:
# Just query singletons, i.e. items that are not part of
# an album
for item in lib.items(args):
item_genre, src = self._get_genre(item)
prefix = "Pretend: " if pretend else ""
self._log.info(
'{}genre for track "{0.title}" ({1}): {}',
prefix,
item,
src,
item_genre,
)
if not pretend:
item.genre = item_genre
item.store()
if write and not pretend:
item.try_write()
lastgenre_cmd.func = lastgenre_func
return [lastgenre_cmd]
def imported(self, session, task):
"""Event hook called when an import task finishes."""
if task.is_album:
album = task.album
album.genre, src = self._get_genre(album)
self._log.debug(
'genre for album "{0.album}" ({1}): {0.genre}', album, src
)
# If we're using track-level sources, store the album genre only,
# then also look up individual track genres.
if "track" in self.sources:
album.store(inherit=False)
for item in album.items():
item.genre, src = self._get_genre(item)
self._log.debug(
'genre for track "{0.title}" ({1}): {0.genre}',
item,
src,
)
item.store()
# Store the album genre and inherit to tracks.
else:
album.store()
else:
item = task.item
item.genre, src = self._get_genre(item)
self._log.debug(
'genre for track "{0.title}" ({1}): {0.genre}', item, src
)
item.store()
def _tags_for(self, obj, min_weight=None):
"""Core genre identification routine.
Given a pylast entity (album or track), return a list of
tag names for that entity. Return an empty list if the entity is
not found or another error occurs.
If `min_weight` is specified, tags are filtered by weight.
"""
# Work around an inconsistency in pylast where
# Album.get_top_tags() does not return TopItem instances.
# https://github.com/pylast/pylast/issues/86
if isinstance(obj, pylast.Album):
obj = super(pylast.Album, obj)
try:
res = obj.get_top_tags()
except PYLAST_EXCEPTIONS as exc:
self._log.debug("last.fm error: {}", exc)
return []
except Exception as exc:
# Isolate bugs in pylast.
self._log.debug("{}", traceback.format_exc())
self._log.error("error in pylast library: {}", exc)
return []
# Filter by weight (optionally).
if min_weight:
res = [el for el in res if (int(el.weight or 0)) >= min_weight]
# Get strings from tags.
res = [el.item.get_name().lower() for el in res]
return res
beetbox-beets-c1877b7/beetsplug/lastgenre/genres-tree.yaml 0000664 0000000 0000000 00000037460 15073551743 0023714 0 ustar 00root root 0000000 0000000 - african:
- african heavy metal
- african hip hop
- afrobeat
- apala
- benga
- bikutsi
- bongo flava
- cape jazz
- chimurenga
- coupé-décalé
- egyptian
- fuji music
- genge
- highlife
- hiplife
- isicathamiya
- jit
- jĂąjĂş
- kapuka
- kizomba
- kuduro
- kwaito
- kwela
- makossa
- maloya
- marrabenta
- mbalax
- mbaqanga
- mbube
- morna
- museve
- palm-wine
- raĂŻ
- sakara
- sega
- seggae
- semba
- shangaan electro
- soukous
- taarab
- zouglou
- asian:
- east asian:
- anison
- c-pop
- cantopop
- enka
- hong kong english pop
- j-pop
- k-pop
- kayĹŤkyoku
- korean pop
- mandopop
- onkyokei
- taiwanese pop
- fann at-tanbura
- fijiri
- khaliji
- liwa
- sawt
- south and southeast asian:
- baila
- bhangra
- bhojpuri
- dangdut
- filmi
- indian pop
- lavani
- luk thung:
- luk krung
- manila sound
- morlam
- pinoy pop
- pop sunda
- ragini
- thai pop
- avant-garde:
- experimental music
- lo-fi
- musique concrète
- blues:
- african blues
- blues rock
- blues shouter
- british blues
- canadian blues
- chicago blues
- classic female blues
- contemporary r&b
- country blues
- delta blues
- detroit blues
- electric blues
- gospel blues
- hill country blues
- hokum blues
- jazz blues
- jump blues
- kansas city blues
- louisiana blues
- memphis blues
- piano blues
- piedmont blues
- punk blues
- soul blues
- st. louis blues
- swamp blues
- texas blues
- west coast blues
- caribbean and latin american:
- bachata
- baithak gana
- bolero
- brazilian:
- axé
- bossa nova
- brazilian rock
- brega
- choro
- forrĂł
- frevo
- funk carioca
- lambada
- maracatu
- mĂşsica popular brasileira
- mĂşsica sertaneja
- pagode
- samba
- samba rock
- tecnobrega
- tropicalia
- zouk-lambada
- calypso
- chutney
- chutney soca
- compas
- folklore argentino
- mambo
- merengue
- méringue
- other latin:
- chicha
- criolla
- cumbia
- huayno
- mariachi
- ranchera
- tejano
- punta
- punta rock
- rasin
- reggaeton
- salsa
- soca
- son
- timba
- twoubadou
- zouk
- classical:
- ballet
- baroque:
- baroque music
- cantata
- chamber music:
- string quartet
- classical music
- concerto:
- concerto grosso
- contemporary classical
- modern classical
- opera
- oratorio
- orchestra:
- orchestral
- symphonic
- symphony
- organum
- mass:
- requiem
- sacred music:
- cantique
- gregorian chant
- sonata
- comedy:
- comedy music
- comedy rock
- humor
- parody music
- stand-up
- kabarett
- country:
- alternative country:
- cowpunk
- americana
- australian country music
- bakersfield sound
- bluegrass:
- progressive bluegrass
- reactionary bluegrass
- blues country
- cajun:
- cajun fiddle tunes
- christian country music
- classic country
- close harmony
- country pop
- country rap
- country rock
- country soul
- cowboy/western music
- dansband music
- franco-country
- gulf and western
- hellbilly music
- hokum
- honky tonk
- instrumental country
- lubbock sound
- nashville sound
- neotraditional country
- outlaw country
- progressive country
- psychobilly/punkabilly
- red dirt
- rockabilly
- sertanejo
- texas country
- traditional country music
- truck-driving country
- western swing
- zydeco
- easy listening:
- background music
- beautiful music
- elevator music
- furniture music
- lounge music
- middle of the road
- new-age music
- electronic:
- ambient:
- ambient dub
- ambient house
- ambient techno
- dark ambient
- drone music
- illbient
- isolationism
- lowercase
- asian underground
- breakbeat:
- 4-beat
- acid breaks
- baltimore club
- big beat
- broken beat
- florida breaks
- nu skool breaks
- chiptune:
- bitpop
- game boy music
- nintendocore
- video game music
- yorkshire bleeps and bass
- disco:
- cosmic disco
- disco polo
- euro disco
- italo disco
- nu-disco
- space disco
- downtempo:
- acid jazz
- balearic beat
- chill out
- dub music
- dubtronica
- ethnic electronica
- moombahton
- nu jazz
- trip hop
- drum and bass:
- darkcore
- darkstep
- drumfunk
- drumstep
- hardstep
- intelligent drum and bass
- jump-up
- liquid funk
- neurofunk
- jungle:
- darkside jungle
- ragga jungle
- oldschool jungle
- raggacore
- sambass
- techstep
- leftfield
- halftime
- electro:
- crunk
- electro backbeat
- electro-grime
- electropop
- electroacoustic:
- acousmatic music
- computer music
- electroacoustic improvisation
- field recording
- live coding
- live electronics
- soundscape composition
- tape music
- electronic rock:
- alternative dance:
- baggy
- madchester
- dance-punk
- dance-rock
- dark wave
- electroclash
- electronicore
- electropunk
- ethereal wave
- indietronica
- new rave
- space rock
- synthpop
- synthpunk
- electronica:
- berlin school
- chillwave
- electronic art music
- electronic dance music
- folktronica
- freestyle music
- glitch
- idm
- laptronica
- skweee
- sound art
- synthcore
- eurodance:
- bubblegum dance
- italo dance
- turbofolk
- hardcore:
- bouncy house
- bouncy techno
- breakbeat hardcore
- breakcore
- digital hardcore
- doomcore
- dubstyle
- gabber
- happy hardcore
- hardstyle
- jumpstyle
- makina
- speedcore
- terrorcore
- uk hardcore
- hi-nrg:
- eurobeat
- hard nrg
- new beat
- house:
- acid house
- chicago house
- deep house
- diva house
- dutch house
- electro house
- freestyle house
- french house
- funky house
- ghetto house
- hardbag
- hip house
- italo house
- latin house
- minimal house
- progressive house
- rave music
- swing house
- tech house
- tribal house
- uk hard house
- us garage
- vocal house
- industrial:
- aggrotech
- coldwave
- cybergrind
- dark electro
- death industrial
- electro-industrial
- electronic body music:
- futurepop
- industrial metal:
- neue deutsche härte
- industrial rock
- noise:
- japanoise
- power electronics
- power noise
- witch house
- juke:
- footwork
- post-disco:
- boogie
- dance-pop
- progressive:
- progressive house/trance:
- disco house
- dream house
- space house
- progressive breaks
- progressive drum & bass
- progressive techno
- techno:
- acid techno
- detroit techno
- dub techno
- free tekno
- ghettotech
- minimal
- nortec
- schranz
- techno-dnb
- technopop
- tecno brega
- toytown techno
- trance:
- acid trance
- classic trance
- dream trance
- goa trance:
- dark psytrance
- full on
- psybreaks
- psyprog
- suomisaundi
- hard trance
- tech trance
- uplifting trance:
- orchestral uplifting
- vocal trance
- uk garage:
- 2-step
- 4x4
- bassline
- breakstep
- dubstep
- funky
- grime
- speed garage
- trap
- folk:
- american folk revival
- anti-folk
- british folk revival
- celtic music
- contemporary folk
- filk music
- freak folk
- indie folk
- industrial folk
- neofolk
- progressive folk
- psychedelic folk
- sung poetry
- techno-folk
- hip hop:
- alternative hip hop
- avant-garde hip hop
- chap hop
- christian hip hop
- conscious hip hop
- crunkcore
- cumbia rap
- east coast hip hop:
- brick city club
- hardcore hip hop
- mafioso rap
- new jersey hip hop
- electro music
- freestyle rap
- g-funk
- gangsta rap
- glitch hop
- golden age hip hop
- hip hop soul
- hip pop
- hyphy
- industrial hip hop
- instrumental hip hop
- jazz rap
- low bap
- lyrical hip hop
- merenrap
- midwest hip hop:
- chicago hip hop
- detroit hip hop
- horrorcore
- st. louis hip hop
- twin cities hip hop
- motswako
- nerdcore
- new jack swing
- new school hip hop
- old school hip hop
- political hip hop
- rap opera
- rap rock:
- rap metal
- rapcore
- songo-salsa
- southern hip hop:
- atlanta hip hop:
- snap music
- bounce music
- houston hip hop:
- chopped and screwed
- miami bass
- turntablism
- underground hip hop
- urban pasifika
- west coast hip hop:
- chicano rap
- jerkin'
- austrian hip hop
- german hip hop
- jazz:
- asian american jazz
- avant-garde jazz
- bebop
- boogie-woogie
- brass band
- british dance band
- chamber jazz
- continental jazz
- cool jazz
- crossover jazz
- cubop
- dixieland
- ethno jazz
- european free jazz
- free funk
- free improvisation
- free jazz
- gypsy jazz
- hard bop
- jazz fusion
- jazz rock
- jazz-funk
- kansas city jazz
- latin jazz
- livetronica
- m-base
- mainstream jazz
- modal jazz
- neo-bop jazz
- neo-swing
- novelty ragtime
- orchestral jazz
- post-bop
- punk jazz
- ragtime
- shibuya-kei
- ska jazz
- smooth jazz
- soul jazz
- straight-ahead jazz
- stride jazz
- swing
- third stream
- trad jazz
- vocal jazz
- west coast gypsy jazz
- west coast jazz
- kids music:
- kinderlieder
- pop:
- adult contemporary
- arab pop
- baroque pop
- bubblegum pop
- christian pop
- classical crossover
- europop:
- austropop
- balkan pop
- french pop
- latin pop
        - laïkó
- nederpop
- russian pop
- iranian pop
- jangle pop
- latin ballad
- levenslied
- louisiana swamp pop
- mexican pop
- motorpop
- new romanticism
- pop rap
- popera
- psychedelic pop
- schlager
- soft rock
- sophisti-pop
- space age pop
- sunshine pop
- surf pop
- teen pop
- traditional pop music
- turkish pop
- vispop
- wonky pop
- rhythm and blues:
- funk:
- deep funk
- go-go
- p-funk
- soul:
- blue-eyed soul
- neo soul
- northern soul
- rock:
- alternative rock:
- britpop:
- post-britpop
- dream pop
- grunge:
- post-grunge
- indie pop:
- dunedin sound
- twee pop
- indie rock
- noise pop
- nu metal
- post-punk revival
- post-rock:
- post-metal
- sadcore
- shoegaze
- slowcore
- art rock
- beat music
- chinese rock
- christian rock
- classic rock
- dark cabaret
- desert rock
- experimental rock
- folk rock
- garage rock
- glam rock
- hard rock
- heavy metal:
- alternative metal:
- funk metal
- black metal:
- viking metal
- christian metal
- death metal:
- death/doom
- goregrind
- melodic death metal
- technical death metal
- doom metal:
- epic doom metal
- funeral doom
- drone metal
- epic metal
- folk metal:
- celtic metal
- medieval metal
- pagan metal
- funk metal
- glam metal
- gothic metal
- industrial metal:
- industrial death metal
- metalcore:
- deathcore
- mathcore:
- djent
- synthcore
- neoclassical metal
- post-metal
- power metal:
- progressive power metal
- progressive metal
- sludge metal
- speed metal
- stoner rock:
- stoner metal
- symphonic metal
- thrash metal:
- crossover thrash
- groove metal
- progressive thrash metal
- teutonic thrash metal
- traditional heavy metal
- math rock
- new wave:
- world fusion
- paisley underground
- pop rock
- post-punk:
- gothic rock
- no wave
- noise rock
- power pop
- progressive rock:
- canterbury scene
- krautrock
- new prog
- rock in opposition
- psychedelic rock:
- acid rock
- freakbeat
- neo-psychedelia
- raga rock
- punk rock:
- anarcho punk:
- crust punk:
- d-beat
- art punk
- christian punk
- deathrock
- deutschpunk
- folk punk:
- celtic punk
- gypsy punk
- garage punk
- grindcore:
- crustgrind
- noisegrind
- hardcore punk:
- post-hardcore:
- emo:
- screamo
- powerviolence
- street punk
- thrashcore
- horror punk
- oi!
- pop punk
- psychobilly
- riot grrrl
- ska punk:
- ska-core
- skate punk
- rock and roll
- southern rock
- sufi rock
- surf rock
- visual kei:
- nagoya kei
- reggae:
- roots reggae
- reggae fusion
- reggae en español:
- spanish reggae
- reggae 110
        - reggae bultrón
- romantic flow
- lovers rock
- raggamuffin:
- ragga
- dancehall
- ska:
- 2 tone
- rocksteady
- dub
- soundtrack:
- singer-songwriter:
- cantautorato
- cantautor
- cantautora
- chanson
    - canción de autor
    - nueva canción
- world:
- world dub
- world fusion
- worldbeat
beetbox-beets-c1877b7/beetsplug/lastgenre/genres.txt 0000664 0000000 0000000 00000041736 15073551743 0022635 0 ustar 00root root 0000000 0000000 2 tone
2-step garage
4-beat
4x4 garage
8-bit
acapella
acid
acid breaks
acid house
acid jazz
acid rock
acoustic music
acousticana
adult contemporary music
african popular music
african rumba
afrobeat
aleatoric music
alternative country
alternative dance
alternative hip hop
alternative metal
alternative rock
ambient
ambient house
ambient music
americana
anarcho punk
anti-folk
apala
ape haters
arab pop
arabesque
arabic pop
argentine rock
ars antiqua
ars nova
art punk
art rock
ashiq
asian american jazz
australian country music
australian hip hop
australian pub rock
austropop
avant-garde
avant-garde jazz
avant-garde metal
avant-garde music
axé
bac-bal
bachata
baggy
baila
baile funk
baisha xiyue
baithak gana
baião
bajourou
bakersfield sound
bakou
bakshy
bal-musette
balakadri
balinese gamelan
balkan pop
ballad
ballata
ballet
bamboo band
bambuco
banda
bangsawan
bantowbol
barbershop music
barndance
baroque
baroque music
baroque pop
bass music
batcave
batucada
batuco
batá-rumba
beach music
beat
beatboxing
beautiful music
bebop
beiguan
bel canto
bend-skin
benga
berlin school of electronic music
bhajan
bhangra
bhangra-wine
bhangragga
bhangramuffin
big band
big band music
big beat
biguine
bihu
bikutsi
biomusic
bitcore
bitpop
black metal
blackened death metal
blue-eyed soul
bluegrass
blues
blues ballad
blues-rock
boogie
boogie woogie
boogie-woogie
bossa nova
brass band
brazilian funk
brazilian jazz
breakbeat
breakbeat hardcore
breakcore
breton music
brill building pop
britfunk
british blues
british invasion
britpop
broken beat
brown-eyed soul
brukdown
brutal death metal
bubblegum dance
bubblegum pop
bulerias
bumba-meu-boi
bunraku
burger-highlife
burgundian school
byzantine chant
ca din tulnic
ca pe lunca
ca trù
cabaret
cadence
cadence rampa
cadence-lypso
café-aman
cai luong
cajun music
cakewalk
calenda
calentanos
calgia
calypso
calypso jazz
calypso-style baila
campursari
canatronic
canción de autor
candombe
canon
canrock
cantata
cantautorato
cantautor
cantautora
cante chico
cante jondo
canterbury scene
cantiga
cantique
cantiñas
canto livre
canto nuevo
canto popular
cantopop
canzone napoletana
cape jazz
capoeira music
caracoles
carceleras
cardas
cardiowave
carimbó
cariso
carnatic music
carol
cartageneras
cassette culture
casséy-co
cavacha
caveman
caña
celempungan
cello rock
celtic
celtic fusion
celtic metal
celtic punk
celtic reggae
celtic rock
cha-cha-cha
chakacha
chalga
chamamé
chamber jazz
chamber music
chamber pop
champeta
changuí
chanson
chant
charanga
charanga-vallenata
charikawi
chastushki
chau van
chemical breaks
chicago blues
chicago house
chicago soul
chicano rap
chicha
chicken scratch
children's music
chillout
chillwave
chimurenga
chinese music
chinese pop
chinese rock
chip music
cho-kantrum
chongak
chopera
chorinho
choro
chouval bwa
chowtal
christian alternative
christian black metal
christian electronic music
christian hardcore
christian hip hop
christian industrial
christian metal
christian music
christian punk
christian r&b
christian rock
christian ska
christmas carol
christmas music
chumba
chut-kai-pang
chutney
chutney soca
chutney-bhangra
chutney-hip hop
chutney-soca
chylandyk
chzalni
chèo
cigányzene
classic
classic country
classic female blues
classic rock
classical
classical music
classical music era
clicks n cuts
close harmony
club music
cocobale
coimbra fado
coladeira
colombianas
combined rhythm
comedy
comedy rap
comedy rock
comic opera
comparsa
compas direct
compas meringue
concert overture
concerto
concerto grosso
congo
conjunto
contemporary christian
contemporary christian music
contemporary classical
contemporary r&b
contonbley
contradanza
cool jazz
corrido
corsican polyphonic song
cothoza mfana
country
country blues
country gospel
country music
country pop
country r&b
country rock
country-rap
countrypolitan
couple de sonneurs
coupé-décalé
cowpunk
cretan music
crossover jazz
crossover music
crossover thrash
crossover thrash metal
crunk
crunk&b
crunkcore
crust punk
csárdás
cuarteto
cuban rumba
cuddlecore
cueca
cumbia
cumbia villera
cybergrind
dabka
dadra
daina
dalauna
dance
dance music
dance-pop
dance-punk
dance-rock
dancehall
dangdut
danger music
dansband
danza
danzón
dark ambient
dark cabaret
dark pop
darkcore
darkstep
darkwave
de ascultat la servici
de codru
de dragoste
de jale
de pahar
death industrial
death metal
death rock
death/doom
deathcore
deathgrind
deathrock
deep funk
deep house
deep soul
degung
delta blues
dementia
desert rock
desi
detroit blues
detroit techno
dub techno
dhamar
dhimotiká
dhrupad
dhun
digital hardcore
dirge
dirty dutch
dirty rap
dirty rap/pornocore
dirty south
disco
disco house
disco polo
disney
disney hardcore
disney pop
diva house
divine rock
dixieland
dixieland jazz
djambadon
djent
dodompa
doina
dombola
dondang sayang
donegal fiddle tradition
dongjing
doo wop
doom metal
doomcore
downtempo
drag
dream pop
drone doom
drone metal
drone music
dronology
drum and bass
dub
dub house
dubanguthu
dubstep
dubtronica
dunedin sound
dunun
dutch jazz
décima
early music
east coast blues
east coast hip hop
easy listening
electric blues
electric folk
electro
electro backbeat
electro hop
electro house
electro punk
electro-industrial
electro-swing
electroclash
electrofunk
electronic
electronic art music
electronic body music
electronic dance
electronic luk thung
electronic music
electronic rock
electronica
electropop
elevator music
emo
emo pop
emo rap
emocore
emotronic
enka
epic doom metal
epic metal
eremwu eu
ethereal pop
ethereal wave
euro
euro disco
eurobeat
eurodance
europop
eurotrance
eurourban
exotica
experimental music
experimental noise
experimental pop
experimental rock
extreme metal
ezengileer
fado
falak
fandango
farruca
fife and drum blues
filk
film score
filmi
filmi-ghazal
finger-style
fjatpangarri
flamenco
flamenco rumba
flower power
foaie verde
fofa
folk hop
folk metal
folk music
folk pop
folk punk
folk rock
folktronica
forró
franco-country
freak-folk
freakbeat
free improvisation
free jazz
free music
freestyle
freestyle house
freetekno
french pop
frenchcore
frevo
fricote
fuji
fuji music
fulia
full on
funaná
funeral doom
funk
funk metal
funk rock
funkcore
funky house
furniture music
fusion jazz
g-funk
gaana
gabba
gabber
gagaku
gaikyoku
gaita
galant
gamad
gambang kromong
gamelan
gamelan angklung
gamelan bang
gamelan bebonangan
gamelan buh
gamelan degung
gamelan gede
gamelan kebyar
gamelan salendro
gamelan selunding
gamelan semar pegulingan
gamewave
gammeldans
gandrung
gangsta rap
gar
garage rock
garrotin
gavotte
gelugpa chanting
gender wayang
gending
german folk music
gharbi
gharnati
ghazal
ghazal-song
ghetto house
ghettotech
girl group
glam metal
glam punk
glam rock
glitch
gnawa
go-go
goa
goa trance
gong-chime music
goombay
goregrind
goshu ondo
gospel music
gothic metal
gothic rock
granadinas
grebo
gregorian chant
grime
grindcore
groove metal
group sounds
grunge
grupera
guaguanbo
guajira
guasca
guitarra baiana
guitarradas
gumbe
gunchei
gunka
guoyue
gwo ka
gwo ka moderne
gypsy jazz
gypsy punk
gypsybilly
gyu ke
habanera
hajnali
hakka
halling
hambo
hands up
hapa haole
happy hardcore
haqibah
hard
hard bop
hard house
hard rock
hard trance
hardcore hip hop
hardcore metal
hardcore punk
hardcore techno
hardstyle
harepa
harmonica blues
hasaposérviko
heart attack
heartland rock
heavy beat
heavy metal
hesher
hi-nrg
highlands
highlife
highlife fusion
hillybilly music
hindustani classical music
hip hop
hip hop & rap
hip hop soul
hip house
hiplife
hiragasy
hiva usu
hong kong and cantonese pop
hong kong english pop
honky tonk
honkyoku
hora lunga
hornpipe
horror punk
horrorcore
horrorcore rap
house
house music
hua'er
huasteco
huayno
hula
humor
humppa
hunguhungu
hyangak
hymn
hyphy
hát chau van
hát chèo
hát cãi luong
hát tuồng
ibiza music
icaro
idm
igbo music
ijexá
ilahije
illbient
impressionist music
improvisational
incidental music
indian pop
indie folk
indie music
indie pop
indie rock
indietronica
indo jazz
indo rock
indonesian pop
indoyíftika
industrial death metal
industrial hip hop
industrial metal
industrial music
industrial musical
industrial rock
instrumental rock
intelligent dance music
international latin
inuit music
iranian pop
irish folk
irish rebel music
iscathamiya
isicathamiya
isikhwela jo
island
isolationist
italo dance
italo disco
italo house
itsmeños
izvorna bosanska muzika
j'ouvert
j-fusion
j-pop
j-rock
jaipongan
jaliscienses
jam band
jam rock
jamana kura
jamrieng samai
jangle pop
japanese pop
jarana
jariang
jarochos
jawaiian
jazz
jazz blues
jazz fusion
jazz metal
jazz rap
jazz-funk
jazz-rock
jegog
jenkka
jesus music
jibaro
jig
jig punk
jing ping
jingle
jit
jitterbug
jive
joged
joged bumbung
joik
jonnycore
joropo
jota
jtek
jug band
jujitsu
juju
juke joint blues
jump blues
jumpstyle
jungle
junkanoo
juré
jùjú
k-pop
kaba
kabuki
kachāshī
kadans
kagok
kagyupa chanting
kaiso
kalamatianó
kalattuut
kalinda
kamba pop
kan ha diskan
kansas city blues
kantrum
kantádhes
kargyraa
karma
kaseko
katajjaq
kawachi ondo
kayōkyoku
ke-kwe
kebyar
kecak
kecapi suling
kertok
khaleeji
khap
khelimaski djili
khene
khoomei
khorovodi
khplam wai
khrung sai
khyal
kilapanda
kinko
kirtan
kiwi rock
kizomba
klape
klasik
klezmer
kliningan
kléftiko
kochare
kolomyjka
komagaku
kompa
konpa
korean pop
koumpaneia
kpanlogo
krakowiak
krautrock
kriti
kroncong
krump
krzesany
kuduro
kulintang
kulning
kumina
kun-borrk
kundere
kundiman
kussundé
kutumba wake
kveding
kvæði
kwaito
kwassa kwassa
kwela
käng
kélé
kĩkũyũ pop
la la
latin american
latin jazz
latin pop
latin rap
lavway
laïko
laïkó
le leagan
legényes
lelio
letkajenkka
levenslied
lhamo
lieder
light music
light rock
likanos
liquid drum&bass
liquid funk
liquindi
llanera
llanto
lo-fi
lo-fi music
loki djili
long-song
louisiana blues
louisiana swamp pop
lounge music
lovers rock
lowercase
lubbock sound
lucknavi thumri
luhya omutibo
luk grung
lullaby
lundu
lundum
m-base
madchester
madrigal
mafioso rap
maglaal
magnificat
mahori
mainstream jazz
makossa
makossa-soukous
malagueñas
malawian jazz
malhun
maloya
maluf
maluka
mambo
manaschi
mandarin pop
manding swing
mango
mangue bit
mangulina
manikay
manila sound
manouche
manzuma
mapouka
mapouka-serré
marabi
maracatu
marga
mariachi
marimba
marinera
marrabenta
martial industrial
martinetes
maskanda
mass
matamuerte
math rock
mathcore
matt bello
maxixe
mazurka
mbalax
mbaqanga
mbube
mbumba
medh
medieval folk rock
medieval metal
medieval music
meditation
mejorana
melhoun
melhûn
melodic black metal
melodic death metal
melodic hardcore
melodic metalcore
melodic music
melodic trance
memphis blues
memphis rap
memphis soul
mento
merengue
merengue típico moderno
merengue-bomba
meringue
merseybeat
metal
metalcore
metallic hardcore
mexican pop
mexican rock
mexican son
meykhana
mezwed
miami bass
microhouse
middle of the road
midwest hip hop
milonga
min'yo
mineras
mini compas
mini-jazz
minimal techno
minimalist music
minimalist trance
minneapolis sound
minstrel show
minuet
mirolóyia
modal jazz
modern classical
modern classical music
modern laika
modern rock
modinha
mohabelo
montuno
monumental dance
mor lam
mor lam sing
morna
motorpop
motown
mozambique
mpb
mugam
multicultural
murga
musette
museve
mushroom jazz
music drama
music hall
musiqi-e assil
musique concrète
mutuashi
muwashshah
muzak
méringue
música campesina
música criolla
música de la interior
música llanera
música nordestina
música popular brasileira
música tropical
nagauta
nakasi
nangma
nanguan
narcocorrido
nardcore
narodna muzika
nasheed
nashville sound
nashville sound/countrypolitan
national socialist black metal
naturalismo
nederpop
neo soul
neo-classical metal
neo-medieval
neo-prog
neo-psychedelia
neoclassical
neoclassical metal
neoclassical music
neofolk
neotraditional country
nerdcore
neue deutsche härte
neue deutsche welle
new age music
new beat
new instrumental
new jack swing
new orleans blues
new orleans jazz
new pop
new prog
new rave
new romantic
new school hip hop
new taiwanese song
new wave
new wave of british heavy metal
new wave of new wave
new weird america
new york blues
new york house
newgrass
nganja
nightcore
nintendocore
nisiótika
no wave
noh
noise music
noise pop
noise rock
nongak
norae undong
nordic folk dance music
nordic folk music
nortec
norteño
northern soul
nota
nu jazz
nu metal
nu soul
nu skool breaks
nueva canción
nyatiti
néo kýma
obscuro
oi!
old school hip hop
old-time
oldies
olonkho
oltului
ondo
opera
operatic pop
oratorio
orchestra
orchestral
organ trio
organic ambient
organum
orgel
oriental metal
ottava rima
outlaw country
outsider music
p-funk
pagan metal
pagan rock
pagode
paisley underground
palm wine
palm-wine
pambiche
panambih
panchai baja
panchavadyam
pansori
paranda
parang
parody
parranda
partido alto
pasillo
patriotic
peace punk
pelimanni music
petenera
peyote song
philadelphia soul
piano blues
piano rock
piedmont blues
pimba
pinoy pop
pinoy rock
pinpeat orchestra
piphat
piyyutim
plainchant
plena
pleng phua cheewit
pleng thai sakorn
political hip hop
polka
polo
polonaise
pols
polska
pong lang
pop
pop folk
pop music
pop punk
pop rap
pop rock
pop sunda
pornocore
porro
post disco
post-britpop
post-disco
post-grunge
post-hardcore
post-industrial
post-metal
post-minimalism
post-punk
post-rock
post-romanticism
pow-wow
power electronics
power metal
power noise
power pop
powerviolence
ppongtchak
praise song
program symphony
progressive bluegrass
progressive country
progressive death metal
progressive electronic
progressive electronic music
progressive folk
progressive folk music
progressive house
progressive metal
progressive power metal
progressive rock
progressive trance
progressive thrash metal
protopunk
psych folk
psychedelic music
psychedelic pop
psychedelic rock
psychedelic trance
psychobilly
punk blues
punk cabaret
punk jazz
punk rock
punta
punta rock
qasidah
qasidah modern
qawwali
quadrille
quan ho
queercore
quiet storm
rada
raga
raga rock
ragga
ragga jungle
raggamuffin
ragtime
rai
rake-and-scrape
ramkbach
ramvong
ranchera
rap
rap metal
rap rock
rapcore
rara
rare groove
rasiya
rave
raw rock
raï
rebetiko
red dirt
reel
reggae
reggae 110
reggae bultrón
reggae en español
reggae fusion
reggae highlife
reggaefusion
reggaeton
rekilaulu
relax music
religious
rembetiko
renaissance music
requiem
rhapsody
rhyming spiritual
rhythm & blues
rhythm and blues
ricercar
riot grrrl
rock
rock and roll
rock en español
rock opera
rockabilly
rocksteady
rococo
romantic flow
romantic period in music
rondeaux
ronggeng
roots reggae
roots rock
roots rock reggae
rumba
russian pop
rímur
sabar
sacred harp
sacred music
sadcore
saibara
sakara
salegy
salsa
salsa erotica
salsa romantica
saltarello
samba
samba-canção
samba-reggae
samba-rock
sambai
sanjo
sato kagura
sawt
saya
scat
schlager
schottisch
schranz
scottish baroque music
screamo
scrumpy and western
sea shanty
sean nós
second viennese school
sega music
seggae
seis
semba
sephardic music
serialism
set dance
sevdalinka
sevillana
shabab
shabad
shalako
shan'ge
shango
shape note
shibuya-kei
shidaiqu
shima uta
shock rock
shoegaze
shoegazer
shoka
shomyo
show tune
sica
siguiriyas
silat
sinawi
situational
ska
ska punk
skacore
skald
skate punk
skiffle
slack-key guitar
slide
slowcore
sludge metal
slängpolska
smooth jazz
soca
soft rock
son
son montuno
son-batá
sonata
songo
songo-salsa
sophisti-pop
soukous
soul
soul blues
soul jazz
soul music
southern gospel
southern harmony
southern hip hop
southern metal
southern rock
southern soul
space age pop
space music
space rock
spectralism
speed garage
speed metal
speedcore
spirituals
spouge
sprechgesang
square dance
squee
st. louis blues
stand-up
steelband
stoner metal
stoner rock
straight edge
strathspeys
stride
string
string quartet
sufi music
suite
sunshine pop
suomirock
super eurobeat
surf ballad
surf instrumental
surf music
surf pop
surf rock
swamp blues
swamp pop
swamp rock
swing
swing music
swingbeat
sygyt
symphonic
symphonic black metal
symphonic metal
symphonic poem
symphonic rock
symphony
synthcore
synthpop
synthpunk
t'ong guitar
taarab
tai tu
taiwanese pop
tala
talempong
tambu
tamburitza
tamil christian keerthanai
tango
tanguk
tappa
tarana
tarantella
taranto
tech
tech house
tech trance
technical death metal
technical metal
techno
technoid
technopop
techstep
techtonik
teen pop
tejano
tejano music
tekno
tembang sunda
teutonic thrash metal
texas blues
thai pop
thillana
thrash metal
thrashcore
thumri
tibetan pop
tiento
timbila
tin pan alley
tinga
tinku
toeshey
togaku
trad jazz
traditional bluegrass
traditional heavy metal
traditional pop music
trallalero
trance
tribal house
trikitixa
trip hop
trip rock
tropicalia
tropicalismo
tropipop
truck-driving country
tumba
turbo-folk
turkish music
turkish pop
turntablism
tuvan throat-singing
twee pop
twist
two tone
táncház
uk garage
uk pub rock
unblack metal
underground music
uplifting
uplifting trance
urban cowboy
urban folk
urban jazz
vallenato
vaudeville
venezuela
verbunkos
verismo
viking metal
villanella
virelai
vispop
visual kei
visual music
vocal
vocal house
vocal jazz
vocal music
volksmusik
waila
waltz
wangga
warabe uta
wassoulou
weld
were music
west coast hip hop
west coast jazz
western
western blues
western swing
witch house
wizard rock
women's music
wong shadow
wonky pop
wood
work song
world fusion
world fusion music
world music
worldbeat
xhosa music
xoomii
yo-pop
yodeling
yukar
yé-yé
zajal
zapin
zarzuela
zeibekiko
zeuhl
ziglibithy
zouglou
zouk
zouk chouv
zouklove
zulu music
zydeco
beetbox-beets-c1877b7/beetsplug/lastimport.py 0000664 0000000 0000000 00000022270 15073551743 0021365 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Rafael Bodill https://github.com/rafi
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import pylast
from pylast import TopItem, _extract, _number
from beets import config, dbcore, plugins, ui
from beets.dbcore import types
API_URL = "https://ws.audioscrobbler.com/2.0/"
class LastImportPlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
config["lastfm"].add(
{
"user": "",
"api_key": plugins.LASTFM_KEY,
}
)
config["lastfm"]["user"].redact = True
config["lastfm"]["api_key"].redact = True
self.config.add(
{
"per_page": 500,
"retry_limit": 3,
}
)
self.item_types = {
"play_count": types.INTEGER,
}
def commands(self):
cmd = ui.Subcommand("lastimport", help="import last.fm play-count")
def func(lib, opts, args):
import_lastfm(lib, self._log)
cmd.func = func
return [cmd]
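    # Typical invocation (illustrative): set `lastfm.user` in the beets
    # configuration, then run `beet lastimport` to copy Last.fm play counts
    # into the `play_count` flexible attribute declared above.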
class CustomUser(pylast.User):
"""Custom user class derived from pylast.User, and overriding the
_get_things method to return MBID and album. Also introduces new
get_top_tracks_by_page method to allow access to more than one page of top
tracks.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _get_things(
self, method, thing, thing_type, params=None, cacheable=True
):
"""Returns a list of the most played thing_types by this thing, in a
tuple with the total number of pages of results. Includes an MBID, if
found.
"""
doc = self._request(f"{self.ws_prefix}.{method}", cacheable, params)
toptracks_node = doc.getElementsByTagName("toptracks")[0]
total_pages = int(toptracks_node.getAttribute("totalPages"))
seq = []
for node in doc.getElementsByTagName(thing):
title = _extract(node, "name")
artist = _extract(node, "name", 1)
mbid = _extract(node, "mbid")
playcount = _number(_extract(node, "playcount"))
thing = thing_type(artist, title, self.network)
thing.mbid = mbid
seq.append(TopItem(thing, playcount))
return seq, total_pages
def get_top_tracks_by_page(
self, period=pylast.PERIOD_OVERALL, limit=None, page=1, cacheable=True
):
"""Returns the top tracks played by a user, in a tuple with the total
number of pages of results.
* period: The period of time. Possible values:
o PERIOD_OVERALL
o PERIOD_7DAYS
o PERIOD_1MONTH
o PERIOD_3MONTHS
o PERIOD_6MONTHS
o PERIOD_12MONTHS
"""
params = self._get_params()
params["period"] = period
params["page"] = page
if limit:
params["limit"] = limit
return self._get_things(
"getTopTracks", "track", pylast.Track, params, cacheable
)
def import_lastfm(lib, log):
user = config["lastfm"]["user"].as_str()
per_page = config["lastimport"]["per_page"].get(int)
if not user:
raise ui.UserError("You must specify a user name for lastimport")
log.info("Fetching last.fm library for @{}", user)
page_total = 1
page_current = 0
found_total = 0
unknown_total = 0
retry_limit = config["lastimport"]["retry_limit"].get(int)
# Iterate through a yet to be known page total count
while page_current < page_total:
log.info(
"Querying page #{}{}...",
page_current + 1,
f"/{page_total}" if page_total > 1 else "",
)
for retry in range(0, retry_limit):
tracks, page_total = fetch_tracks(user, page_current + 1, per_page)
if page_total < 1:
# It means nothing to us!
raise ui.UserError("Last.fm reported no data.")
if tracks:
found, unknown = process_tracks(lib, tracks, log)
found_total += found
unknown_total += unknown
break
else:
log.error("ERROR: unable to read page #{}", page_current + 1)
                if retry < retry_limit - 1:
                    log.info(
                        "Retrying page #{}... ({}/{} retry)",
                        page_current + 1,
                        retry + 1,
                        retry_limit,
                    )
                else:
                    log.error(
                        "FAIL: unable to fetch page #{}, tried {} times",
                        page_current + 1,
                        retry + 1,
                    )
page_current += 1
log.info("... done!")
log.info("finished processing {} song pages", page_total)
log.info("{} unknown play-counts", unknown_total)
log.info("{} play-counts imported", found_total)
def fetch_tracks(user, page, limit):
"""JSON format:
[
{
"mbid": "...",
"artist": "...",
"title": "...",
"playcount": "..."
}
]
"""
network = pylast.LastFMNetwork(api_key=config["lastfm"]["api_key"])
user_obj = CustomUser(user, network)
results, total_pages = user_obj.get_top_tracks_by_page(
limit=limit, page=page
)
return [
{
"mbid": track.item.mbid if track.item.mbid else "",
"artist": {"name": track.item.artist.name},
"name": track.item.title,
"playcount": track.weight,
}
for track in results
], total_pages
def process_tracks(lib, tracks, log):
total = len(tracks)
total_found = 0
total_fails = 0
log.info("Received {} tracks in this page, processing...", total)
for num in range(0, total):
song = None
trackid = tracks[num]["mbid"].strip() if tracks[num]["mbid"] else None
artist = (
tracks[num]["artist"].get("name", "").strip()
if tracks[num]["artist"].get("name", "")
else None
)
title = tracks[num]["name"].strip() if tracks[num]["name"] else None
album = ""
if "album" in tracks[num]:
album = (
tracks[num]["album"].get("name", "").strip()
if tracks[num]["album"]
else None
)
log.debug("query: {} - {} ({})", artist, title, album)
# First try to query by musicbrainz's trackid
if trackid:
song = lib.items(
dbcore.query.MatchQuery("mb_trackid", trackid)
).get()
# If not, try just album/title
if song is None:
log.debug(
"no album match, trying by album/title: {} - {}", album, title
)
query = dbcore.AndQuery(
[
dbcore.query.SubstringQuery("album", album),
dbcore.query.SubstringQuery("title", title),
]
)
song = lib.items(query).get()
# If not, try just artist/title
if song is None:
log.debug("no album match, trying by artist/title")
query = dbcore.AndQuery(
[
dbcore.query.SubstringQuery("artist", artist),
dbcore.query.SubstringQuery("title", title),
]
)
song = lib.items(query).get()
        # Last resort: retry with the title's straight apostrophe replaced
        # by the typographic one (U+2019)
if song is None:
title = title.replace("'", "\u2019")
log.debug("no title match, trying utf-8 single quote")
query = dbcore.AndQuery(
[
dbcore.query.SubstringQuery("artist", artist),
dbcore.query.SubstringQuery("title", title),
]
)
song = lib.items(query).get()
if song is not None:
count = int(song.get("play_count", 0))
new_count = int(tracks[num].get("playcount", 1))
log.debug(
"match: {0.artist} - {0.title} ({0.album}) updating:"
" play_count {1} => {2}",
song,
count,
new_count,
)
song["play_count"] = new_count
song.store()
total_found += 1
else:
total_fails += 1
log.info(" - No match: {} - {} ({})", artist, title, album)
if total_fails > 0:
log.info(
"Acquired {}/{} play-counts ({} unknown)",
total_found,
total,
total_fails,
)
return total_found, total_fails
beetbox-beets-c1877b7/beetsplug/limit.py 0000664 0000000 0000000 00000005670 15073551743 0020312 0 ustar 00root root 0000000 0000000 # This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds head/tail functionality to list/ls.
1. Implemented as `lslimit` command with `--head` and `--tail` options. This is
the idiomatic way to use this plugin.
2. Implemented as query prefix `<` for head functionality only. This is the
composable way to use the plugin (plays nicely with anything that uses the
query language).
"""
from collections import deque
from itertools import islice
from beets.dbcore import FieldQuery
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, print_
def lslimit(lib, opts, args):
"""Query command with head/tail."""
if (opts.head is not None) and (opts.tail is not None):
raise ValueError("Only use one of --head and --tail")
if (opts.head or opts.tail or 0) < 0:
raise ValueError("Limit value must be non-negative")
if opts.album:
objs = lib.albums(args)
else:
objs = lib.items(args)
if opts.head is not None:
objs = islice(objs, opts.head)
elif opts.tail is not None:
objs = deque(objs, opts.tail)
for obj in objs:
print_(format(obj))
lslimit_cmd = Subcommand("lslimit", help="query with optional head or tail")
lslimit_cmd.parser.add_option(
"--head", action="store", type="int", default=None
)
lslimit_cmd.parser.add_option(
"--tail", action="store", type="int", default=None
)
lslimit_cmd.parser.add_all_common_options()
lslimit_cmd.func = lslimit
class LimitPlugin(BeetsPlugin):
"""Query limit functionality via command and query prefix."""
def commands(self):
"""Expose `lslimit` subcommand."""
return [lslimit_cmd]
def queries(self):
class HeadQuery(FieldQuery):
"""This inner class pattern allows the query to track state."""
n = 0
N = None
def __init__(self, *args, **kwargs) -> None:
"""Force the query to be slow so that 'value_match' is called."""
super().__init__(*args, **kwargs)
self.fast = False
@classmethod
def value_match(cls, pattern, value):
if cls.N is None:
cls.N = int(pattern)
if cls.N < 0:
raise ValueError("Limit value must be non-negative")
cls.n += 1
return cls.n <= cls.N
return {"<": HeadQuery}
beetbox-beets-c1877b7/beetsplug/listenbrainz.py 0000664 0000000 0000000 00000023550 15073551743 0021675 0 ustar 00root root 0000000 0000000 """Adds Listenbrainz support to Beets."""
import datetime
import musicbrainzngs
import requests
from beets import config, ui
from beets.plugins import BeetsPlugin
from beetsplug.lastimport import process_tracks
class ListenBrainzPlugin(BeetsPlugin):
"""A Beets plugin for interacting with ListenBrainz."""
ROOT = "http://api.listenbrainz.org/1/"
def __init__(self):
"""Initialize the plugin."""
super().__init__()
self.token = self.config["token"].get()
self.username = self.config["username"].get()
self.AUTH_HEADER = {"Authorization": f"Token {self.token}"}
config["listenbrainz"]["token"].redact = True
def commands(self):
"""Add beet UI commands to interact with ListenBrainz."""
lbupdate_cmd = ui.Subcommand(
"lbimport", help="Import ListenBrainz history"
)
def func(lib, opts, args):
self._lbupdate(lib, self._log)
lbupdate_cmd.func = func
return [lbupdate_cmd]
def _lbupdate(self, lib, log):
"""Obtain view count from Listenbrainz."""
found_total = 0
unknown_total = 0
ls = self.get_listens()
tracks = self.get_tracks_from_listens(ls)
log.info("Found {} listens", len(ls))
if tracks:
found, unknown = process_tracks(lib, tracks, log)
found_total += found
unknown_total += unknown
log.info("... done!")
log.info("{} unknown play-counts", unknown_total)
log.info("{} play-counts imported", found_total)
def _make_request(self, url, params=None):
"""Makes a request to the ListenBrainz API."""
try:
response = requests.get(
url=url,
headers=self.AUTH_HEADER,
timeout=10,
params=params,
)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
self._log.debug("Invalid Search Error: {}", e)
return None
def get_listens(self, min_ts=None, max_ts=None, count=None):
"""Gets the listen history of a given user.
Args:
username: User to get listen history of.
min_ts: History before this timestamp will not be returned.
DO NOT USE WITH max_ts.
max_ts: History after this timestamp will not be returned.
DO NOT USE WITH min_ts.
count: How many listens to return. If not specified,
uses a default from the server.
Returns:
A list of listen info dictionaries if there's an OK status.
Raises:
An HTTPError if there's a failure.
A ValueError if the JSON in the response is invalid.
An IndexError if the JSON is not structured as expected.
"""
url = f"{self.ROOT}/user/{self.username}/listens"
params = {
k: v
for k, v in {
"min_ts": min_ts,
"max_ts": max_ts,
"count": count,
}.items()
if v is not None
}
response = self._make_request(url, params)
if response is not None:
return response["payload"]["listens"]
else:
return None
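    # Sketch of the response shape consumed by get_tracks_from_listens
    # below (abridged; see the ListenBrainz API docs for the full schema):
    #
    #     {"payload": {"listens": [
    #         {"listened_at": 1700000000,
    #          "track_metadata": {"artist_name": "...",
    #                             "track_name": "...",
    #                             "release_name": "...",
    #                             "mbid_mapping": {"recording_mbid": "...",
    #                                              "release_mbid": "..."}}}]}}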
def get_tracks_from_listens(self, listens):
"""Returns a list of tracks from a list of listens."""
tracks = []
for track in listens:
if track["track_metadata"].get("release_name") is None:
continue
mbid_mapping = track["track_metadata"].get("mbid_mapping", {})
mbid = None
if mbid_mapping.get("recording_mbid") is None:
# search for the track using title and release
mbid = self.get_mb_recording_id(track)
tracks.append(
{
"album": {
"name": track["track_metadata"].get("release_name")
},
"name": track["track_metadata"].get("track_name"),
"artist": {
"name": track["track_metadata"].get("artist_name")
},
"mbid": mbid,
"release_mbid": mbid_mapping.get("release_mbid"),
"listened_at": track.get("listened_at"),
}
)
return tracks
def get_mb_recording_id(self, track):
"""Returns the MusicBrainz recording ID for a track."""
resp = musicbrainzngs.search_recordings(
query=track["track_metadata"].get("track_name"),
release=track["track_metadata"].get("release_name"),
strict=True,
)
if resp.get("recording-count") == "1":
return resp.get("recording-list")[0].get("id")
else:
return None
def get_playlists_createdfor(self, username):
"""Returns a list of playlists created by a user."""
url = f"{self.ROOT}/user/{username}/playlists/createdfor"
return self._make_request(url)
def get_listenbrainz_playlists(self):
resp = self.get_playlists_createdfor(self.username)
playlists = resp.get("playlists")
listenbrainz_playlists = []
for playlist in playlists:
playlist_info = playlist.get("playlist")
if playlist_info.get("creator") == "listenbrainz":
title = playlist_info.get("title")
self._log.debug("Playlist title: {}", title)
playlist_type = (
"Exploration" if "Exploration" in title else "Jams"
)
if "week of" in title:
date_str = title.split("week of ")[1].split(" ")[0]
date = datetime.datetime.strptime(
date_str, "%Y-%m-%d"
).date()
else:
continue
identifier = playlist_info.get("identifier")
id = identifier.split("/")[-1]
listenbrainz_playlists.append(
{"type": playlist_type, "date": date, "identifier": id}
)
listenbrainz_playlists = sorted(
listenbrainz_playlists, key=lambda x: x["type"]
)
listenbrainz_playlists = sorted(
listenbrainz_playlists, key=lambda x: x["date"], reverse=True
)
for playlist in listenbrainz_playlists:
self._log.debug("Playlist: {0[type]} - {0[date]}", playlist)
return listenbrainz_playlists
def get_playlist(self, identifier):
"""Returns a playlist."""
url = f"{self.ROOT}/playlist/{identifier}"
return self._make_request(url)
def get_tracks_from_playlist(self, playlist):
"""This function returns a list of tracks in the playlist."""
tracks = []
for track in playlist.get("playlist").get("track"):
identifier = track.get("identifier")
if isinstance(identifier, list):
identifier = identifier[0]
tracks.append(
{
"artist": track.get("creator", "Unknown artist"),
"identifier": identifier.split("/")[-1],
"title": track.get("title"),
}
)
return self.get_track_info(tracks)
def get_track_info(self, tracks):
track_info = []
for track in tracks:
identifier = track.get("identifier")
resp = musicbrainzngs.get_recording_by_id(
identifier, includes=["releases", "artist-credits"]
)
recording = resp.get("recording")
title = recording.get("title")
artist_credit = recording.get("artist-credit", [])
if artist_credit:
artist = artist_credit[0].get("artist", {}).get("name")
else:
artist = None
releases = recording.get("release-list", [])
if releases:
album = releases[0].get("title")
date = releases[0].get("date")
year = date.split("-")[0] if date else None
else:
album = None
year = None
track_info.append(
{
"identifier": identifier,
"title": title,
"artist": artist,
"album": album,
"year": year,
}
)
return track_info
def get_weekly_playlist(self, playlist_type, most_recent=True):
# Fetch all playlists
playlists = self.get_listenbrainz_playlists()
# Filter playlists by type
filtered_playlists = [
p for p in playlists if p["type"] == playlist_type
]
# Sort playlists by date in descending order
sorted_playlists = sorted(
filtered_playlists, key=lambda x: x["date"], reverse=True
)
# Select the most recent or older playlist based on the most_recent flag
selected_playlist = (
sorted_playlists[0] if most_recent else sorted_playlists[1]
)
self._log.debug(
f"Selected playlist: {selected_playlist['type']} "
f"- {selected_playlist['date']}"
)
# Fetch and return tracks from the selected playlist
playlist = self.get_playlist(selected_playlist.get("identifier"))
return self.get_tracks_from_playlist(playlist)
def get_weekly_exploration(self):
return self.get_weekly_playlist("Exploration", most_recent=True)
def get_weekly_jams(self):
return self.get_weekly_playlist("Jams", most_recent=True)
def get_last_weekly_exploration(self):
return self.get_weekly_playlist("Exploration", most_recent=False)
def get_last_weekly_jams(self):
return self.get_weekly_playlist("Jams", most_recent=False)
beetbox-beets-c1877b7/beetsplug/loadext.py 0000664 0000000 0000000 00000002755 15073551743 0020635 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2019, Jack Wilsdon
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Load SQLite extensions."""
import sqlite3
from beets.dbcore import Database
from beets.plugins import BeetsPlugin
class LoadExtPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
if not Database.supports_extensions:
self._log.warning(
"loadext is enabled but the current SQLite "
"installation does not support extensions"
)
return
self.register_listener("library_opened", self.library_opened)
def library_opened(self, lib):
for v in self.config:
ext = v.as_filename()
self._log.debug("loading extension {}", ext)
try:
lib.load_extension(ext)
except sqlite3.OperationalError as e:
self._log.error("failed to load extension {}: {}", ext, e)
beetbox-beets-c1877b7/beetsplug/lyrics.py 0000664 0000000 0000000 00000112745 15073551743 0020503 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches, embeds, and displays lyrics."""
from __future__ import annotations
import atexit
import itertools
import math
import re
import textwrap
from contextlib import contextmanager, suppress
from dataclasses import dataclass
from functools import cached_property, partial, total_ordering
from html import unescape
from http import HTTPStatus
from itertools import groupby
from pathlib import Path
from typing import TYPE_CHECKING, Iterable, Iterator, NamedTuple
from urllib.parse import quote, quote_plus, urlencode, urlparse
import langdetect
import requests
from bs4 import BeautifulSoup
from unidecode import unidecode
import beets
from beets import plugins, ui
from beets.autotag.distance import string_dist
from beets.util.config import sanitize_choices
if TYPE_CHECKING:
from beets.importer import ImportTask
from beets.library import Item, Library
from beets.logging import BeetsLogger as Logger
from ._typing import (
GeniusAPI,
GoogleCustomSearchAPI,
JSONDict,
LRCLibAPI,
TranslatorAPI,
)
USER_AGENT = f"beets/{beets.__version__}"
INSTRUMENTAL_LYRICS = "[Instrumental]"
class NotFoundError(requests.exceptions.HTTPError):
pass
class CaptchaError(requests.exceptions.HTTPError):
pass
class TimeoutSession(requests.Session):
def request(self, *args, **kwargs):
"""Wrap the request method to raise an exception on HTTP errors."""
kwargs.setdefault("timeout", 10)
r = super().request(*args, **kwargs)
if r.status_code == HTTPStatus.NOT_FOUND:
raise NotFoundError("HTTP Error: Not Found", response=r)
if 300 <= r.status_code < 400:
raise CaptchaError("Captcha is required", response=r)
r.raise_for_status()
return r
r_session = TimeoutSession()
r_session.headers.update({"User-Agent": USER_AGENT})
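# This module-level session is shared by every backend below: it sends the
# beets user agent and inherits the 10-second default timeout applied in
# TimeoutSession.request.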
@atexit.register
def close_session():
"""Close the requests session on shut down."""
r_session.close()
# Utilities.
def search_pairs(item):
"""Yield a pairs of artists and titles to search for.
The first item in the pair is the name of the artist, the second
item is a list of song names.
In addition to the artist and title obtained from the `item` the
    method tries to strip extra information like parenthesized suffixes
and featured artists from the strings and add them as candidates.
The artist sort name is added as a fallback candidate to help in
cases where artist name includes special characters or is in a
non-latin script.
The method also tries to split multiple titles separated with `/`.
"""
def generate_alternatives(string, patterns):
"""Generate string alternatives by extracting first matching group for
each given pattern.
"""
alternatives = [string]
for pattern in patterns:
match = re.search(pattern, string, re.IGNORECASE)
if match:
alternatives.append(match.group(1))
return alternatives
title, artist, artist_sort = (
item.title.strip(),
item.artist.strip(),
item.artist_sort.strip(),
)
if not title or not artist:
return ()
patterns = [
# Remove any featuring artists from the artists name
rf"(.*?) {plugins.feat_tokens()}"
]
# Skip various artists
artists = []
lower_artist = artist.lower()
if "various" not in lower_artist:
artists.extend(generate_alternatives(artist, patterns))
# Use the artist_sort as fallback only if it differs from artist to avoid
# repeated remote requests with the same search terms
artist_sort_lower = artist_sort.lower()
if (
artist_sort
and lower_artist != artist_sort_lower
and "various" not in artist_sort_lower
):
artists.append(artist_sort)
patterns = [
# Remove a parenthesized suffix from a title string. Common
# examples include (live), (remix), and (acoustic).
r"(.+?)\s+[(].*[)]$",
# Remove any featuring artists from the title
rf"(.*?) {plugins.feat_tokens(for_artist=False)}",
# Remove part of title after colon ':' for songs with subtitles
r"(.+?)\s*:.*",
]
titles = generate_alternatives(title, patterns)
    # Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe)
    # and, if found, add both the full title and each part as candidates.
multi_titles = []
for title in titles:
multi_titles.append([title])
if " / " in title:
multi_titles.append([x.strip() for x in title.split(" / ")])
return itertools.product(artists, multi_titles)
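# Illustration of the pairs produced above: for an item with artist
# "Pink Floyd" and title "Speak to Me / Breathe (live)", the returned
# product contains
#
#     ("Pink Floyd", ["Speak to Me / Breathe (live)"])
#     ("Pink Floyd", ["Speak to Me", "Breathe (live)"])
#     ("Pink Floyd", ["Speak to Me / Breathe"])
#     ("Pink Floyd", ["Speak to Me", "Breathe"])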
def slug(text: str) -> str:
"""Make a URL-safe, human-readable version of the given text
This will do the following:
1. decode unicode characters into ASCII
2. shift everything to lowercase
3. strip whitespace
4. replace other non-word characters with dashes
5. strip extra dashes
"""
return re.sub(r"\W+", "-", unidecode(text).lower().strip()).strip("-")
class RequestHandler:
_log: Logger
def debug(self, message: str, *args) -> None:
"""Log a debug message with the class name."""
self._log.debug(f"{self.__class__.__name__}: {message}", *args)
def info(self, message: str, *args) -> None:
"""Log an info message with the class name."""
self._log.info(f"{self.__class__.__name__}: {message}", *args)
def warn(self, message: str, *args) -> None:
"""Log warning with the class name."""
self._log.warning(f"{self.__class__.__name__}: {message}", *args)
@staticmethod
def format_url(url: str, params: JSONDict | None) -> str:
if not params:
return url
return f"{url}?{urlencode(params)}"
def fetch_text(
self, url: str, params: JSONDict | None = None, **kwargs
) -> str:
"""Return text / HTML data from the given URL.
Set the encoding to None to let requests handle it because some sites
set it incorrectly.
"""
url = self.format_url(url, params)
self.debug("Fetching HTML from {}", url)
r = r_session.get(url, **kwargs)
r.encoding = None
return r.text
def fetch_json(self, url: str, params: JSONDict | None = None, **kwargs):
"""Return JSON data from the given URL."""
url = self.format_url(url, params)
self.debug("Fetching JSON from {}", url)
return r_session.get(url, **kwargs).json()
def post_json(self, url: str, params: JSONDict | None = None, **kwargs):
"""Send POST request and return JSON response."""
url = self.format_url(url, params)
self.debug("Posting JSON to {}", url)
return r_session.post(url, **kwargs).json()
@contextmanager
def handle_request(self) -> Iterator[None]:
try:
yield
except requests.JSONDecodeError:
self.warn("Could not decode response JSON data")
except requests.RequestException as exc:
self.warn("Request error: {}", exc)
class BackendClass(type):
@property
def name(cls) -> str:
"""Return lowercase name of the backend class."""
return cls.__name__.lower()
class Backend(RequestHandler, metaclass=BackendClass):
def __init__(self, config, log):
self._log = log
self.config = config
def fetch(
self, artist: str, title: str, album: str, length: int
) -> tuple[str, str] | None:
raise NotImplementedError
@dataclass
@total_ordering
class LRCLyrics:
#: Percentage tolerance for max duration difference between lyrics and item.
DURATION_DIFF_TOLERANCE = 0.05
target_duration: float
id: int
duration: float
instrumental: bool
plain: str
synced: str | None
def __le__(self, other: LRCLyrics) -> bool:
"""Compare two lyrics items by their score."""
return self.dist < other.dist
@classmethod
def make(
cls, candidate: LRCLibAPI.Item, target_duration: float
) -> LRCLyrics:
return cls(
target_duration,
candidate["id"],
candidate["duration"] or 0.0,
candidate["instrumental"],
candidate["plainLyrics"],
candidate["syncedLyrics"],
)
@cached_property
def duration_dist(self) -> float:
"""Return the absolute difference between lyrics and target duration."""
return abs(self.duration - self.target_duration)
@cached_property
def is_valid(self) -> bool:
"""Return whether the lyrics item is valid.
Lyrics duration must be within the tolerance defined by
:attr:`DURATION_DIFF_TOLERANCE`.
"""
return (
self.duration_dist
<= self.target_duration * self.DURATION_DIFF_TOLERANCE
)
@cached_property
def dist(self) -> tuple[bool, float]:
"""Distance/score of the given lyrics item.
Return a tuple with the following values:
1. Absolute difference between lyrics and target duration
2. Boolean telling whether synced lyrics are available.
Best lyrics match is the one that has the closest duration to
``target_duration`` and has synced lyrics available.
"""
return not self.synced, self.duration_dist
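    # Ordering sketch (hypothetical numbers): with a 200s target, a synced
    # candidate at 198s scores (False, 2.0) while an unsynced candidate at
    # 199s scores (True, 1.0); tuple comparison prefers the synced one.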
def get_text(self, want_synced: bool) -> str:
if self.instrumental:
return INSTRUMENTAL_LYRICS
if want_synced and self.synced:
return "\n".join(map(str.strip, self.synced.splitlines()))
return self.plain
class LRCLib(Backend):
"""Fetch lyrics from the LRCLib API."""
BASE_URL = "https://lrclib.net/api"
GET_URL = f"{BASE_URL}/get"
SEARCH_URL = f"{BASE_URL}/search"
def fetch_candidates(
self, artist: str, title: str, album: str, length: int
) -> Iterator[list[LRCLibAPI.Item]]:
"""Yield lyrics candidates for the given song data.
I found that the ``/get`` endpoint sometimes returns inaccurate or
unsynced lyrics, while ``search`` yields more suitable candidates.
Therefore, we prioritize the latter and rank the results using our own
algorithm. If the search does not give suitable lyrics, we fall back to
the ``/get`` endpoint.
Return an iterator over lists of candidates.
"""
base_params = {"artist_name": artist, "track_name": title}
get_params = {**base_params, "duration": length}
if album:
get_params["album_name"] = album
yield self.fetch_json(self.SEARCH_URL, params=base_params)
with suppress(NotFoundError):
yield [self.fetch_json(self.GET_URL, params=get_params)]
@classmethod
def pick_best_match(cls, lyrics: Iterable[LRCLyrics]) -> LRCLyrics | None:
"""Return best matching lyrics item from the given list."""
return min((li for li in lyrics if li.is_valid), default=None)
def fetch(
self, artist: str, title: str, album: str, length: int
) -> tuple[str, str] | None:
"""Fetch lyrics text for the given song data."""
evaluate_item = partial(LRCLyrics.make, target_duration=length)
for group in self.fetch_candidates(artist, title, album, length):
candidates = [evaluate_item(item) for item in group]
if item := self.pick_best_match(candidates):
lyrics = item.get_text(self.config["synced"])
return lyrics, f"{self.GET_URL}/{item.id}"
return None
class MusiXmatch(Backend):
URL_TEMPLATE = "https://www.musixmatch.com/lyrics/{}/{}"
REPLACEMENTS = {
r"\s+": "-",
"<": "Less_Than",
">": "Greater_Than",
"#": "Number_",
r"[\[\{]": "(",
r"[\]\}]": ")",
}
@classmethod
def encode(cls, text: str) -> str:
for old, new in cls.REPLACEMENTS.items():
text = re.sub(old, new, text)
return quote(unidecode(text))
@classmethod
def build_url(cls, *args: str) -> str:
return cls.URL_TEMPLATE.format(*map(cls.encode, args))
def fetch(self, artist: str, title: str, *_) -> tuple[str, str] | None:
url = self.build_url(artist, title)
html = self.fetch_text(url)
if "We detected that your IP is blocked" in html:
self.warn("Failed: Blocked IP address")
return None
        html_parts = html.split('<p class="mxm-lyrics__content')
        # Sometimes lyrics come in 2 or more parts
        lyrics_parts = []
        for html_part in html_parts:
            lyrics_parts.append(re.sub(r"^[^>]+>|</p>.*", "", html_part))
lyrics = "\n".join(lyrics_parts)
lyrics = lyrics.strip(',"').replace("\\n", "\n")
# another odd case: sometimes only that string remains, for
# missing songs. this seems to happen after being blocked
# above, when filling in the CAPTCHA.
if "Instant lyrics for all your music." in lyrics:
return None
# sometimes there are non-existent lyrics with some content
if "Lyrics | Musixmatch" in lyrics:
return None
return lyrics, url
class Html:
collapse_space = partial(re.compile(r"(^| ) +", re.M).sub, r"\1")
    expand_br = partial(re.compile(r"\s*<br[^>]*>\s*", re.I).sub, "\n")
#: two newlines between paragraphs on the same line (musica, letras.mus.br)
    merge_blocks = partial(re.compile(r"(?<!>)<p[^>]*>").sub, "\n\n")
#: a single new line between paragraphs on separate lines
#: (paroles.net, sweetslyrics.com, lacoccinelle.net)
    merge_lines = partial(re.compile(r"</p>\s+<p[^>]*>(?!___)").sub, "\n")
#: remove empty divs (lacoccinelle.net)
remove_empty_tags = partial(
re.compile(r"(<(div|span)[^>]*>\s*\2>)").sub, ""
)
#: remove Google Ads tags (musica.com)
    remove_aside = partial(re.compile("<aside .+?</aside>").sub, "")
#: remove adslot-Content_1 div from the lyrics text (paroles.net)
remove_adslot = partial(
re.compile(r"\n[^\n]+-- Content_\d+ --.*?\n", re.S).sub,
"\n",
)
#: remove text formatting (azlyrics.com, lacocinelle.net)
remove_formatting = partial(
re.compile(r" *?(i|em|pre|strong)[^>]*>").sub, ""
)
@classmethod
def normalize_space(cls, text: str) -> str:
text = unescape(text).replace("\r", "").replace("\xa0", " ")
return cls.collapse_space(cls.expand_br(text))
@classmethod
def remove_ads(cls, text: str) -> str:
return cls.remove_adslot(cls.remove_aside(text))
@classmethod
def merge_paragraphs(cls, text: str) -> str:
return cls.merge_blocks(cls.merge_lines(cls.remove_empty_tags(text)))
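# A quick illustration of the helpers above (assumed input):
#
#     Html.normalize_space("one  <br/>  two")  ->  "one\ntwo"
#
# remove_ads() strips <aside> blocks and ad-slot markup, while
# merge_paragraphs() rewrites paragraph boundaries as plain newlines.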
class SoupMixin:
@classmethod
def pre_process_html(cls, html: str) -> str:
"""Pre-process the HTML content before scraping."""
return Html.normalize_space(html)
@classmethod
def get_soup(cls, html: str) -> BeautifulSoup:
return BeautifulSoup(cls.pre_process_html(html), "html.parser")
class SearchResult(NamedTuple):
artist: str
title: str
url: str
@property
def source(self) -> str:
return urlparse(self.url).netloc
class SearchBackend(SoupMixin, Backend):
@cached_property
def dist_thresh(self) -> float:
return self.config["dist_thresh"].get(float)
def check_match(
self, target_artist: str, target_title: str, result: SearchResult
) -> bool:
"""Check if the given search result is a 'good enough' match."""
max_dist = max(
string_dist(target_artist, result.artist),
string_dist(target_title, result.title),
)
if (max_dist := round(max_dist, 2)) <= self.dist_thresh:
return True
if math.isclose(max_dist, self.dist_thresh, abs_tol=0.4):
# log out the candidate that did not make it but was close.
# This may show a matching candidate with some noise in the name
self.debug(
"({0.artist}, {0.title}) does not match ({1}, {2}) but dist"
" was close: {3:.2f}",
result,
target_artist,
target_title,
max_dist,
)
return False
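    # With the default dist_thresh of 0.11, results that differ from the
    # target only in case or punctuation (string_dist near 0) are accepted,
    # while heavily decorated titles typically push max_dist above the
    # threshold and are rejected; near misses within 0.4 of the threshold
    # are logged above to help debugging.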
def search(self, artist: str, title: str) -> Iterable[SearchResult]:
"""Search for the given query and yield search results."""
raise NotImplementedError
def get_results(self, artist: str, title: str) -> Iterable[SearchResult]:
check_match = partial(self.check_match, artist, title)
for candidate in self.search(artist, title):
if check_match(candidate):
yield candidate
def fetch(self, artist: str, title: str, *_) -> tuple[str, str] | None:
"""Fetch lyrics for the given artist and title."""
for result in self.get_results(artist, title):
if (html := self.fetch_text(result.url)) and (
lyrics := self.scrape(html)
):
return lyrics, result.url
return None
@classmethod
def scrape(cls, html: str) -> str | None:
"""Scrape the lyrics from the given HTML."""
raise NotImplementedError
class Genius(SearchBackend):
"""Fetch lyrics from Genius via genius-api.
Because genius doesn't allow accessing lyrics via the api, we first query
the api for a url matching our artist & title, then scrape the HTML text
for the JSON data containing the lyrics.
"""
SEARCH_URL = "https://api.genius.com/search"
    LYRICS_IN_JSON_RE = re.compile(r'(?<=.\\"html\\":\\").*?(?=(?<!\\)\\")')
    remove_backslash = partial(re.sub, r"\\(?=[^\\])", "")
    @cached_property
    def headers(self) -> dict[str, str]:
return {"Authorization": f"Bearer {self.config['genius_api_key']}"}
def search(self, artist: str, title: str) -> Iterable[SearchResult]:
search_data: GeniusAPI.Search = self.fetch_json(
self.SEARCH_URL,
params={"q": f"{artist} {title}"},
headers=self.headers,
)
for r in (hit["result"] for hit in search_data["response"]["hits"]):
yield SearchResult(r["artist_names"], r["title"], r["url"])
@classmethod
def scrape(cls, html: str) -> str | None:
if m := cls.LYRICS_IN_JSON_RE.search(html):
html_text = cls.remove_backslash(m[0]).replace(r"\n", "\n")
return cls.get_soup(html_text).get_text().strip()
return None
class Tekstowo(SearchBackend):
"""Fetch lyrics from Tekstowo.pl."""
BASE_URL = "https://www.tekstowo.pl"
SEARCH_URL = f"{BASE_URL}/szukaj,{{}}.html"
def build_url(self, artist, title):
artistitle = f"{artist.title()} {title.title()}"
return self.SEARCH_URL.format(quote_plus(unidecode(artistitle)))
def search(self, artist: str, title: str) -> Iterable[SearchResult]:
        if html := self.fetch_text(self.build_url(artist, title)):
soup = self.get_soup(html)
for tag in soup.select("div[class=flex-group] > a[title*=' - ']"):
artist, title = str(tag["title"]).split(" - ", 1)
yield SearchResult(
artist, title, f"{self.BASE_URL}{tag['href']}"
)
return None
@classmethod
def scrape(cls, html: str) -> str | None:
soup = cls.get_soup(html)
if lyrics_div := soup.select_one("div.song-text > div.inner-text"):
return lyrics_div.get_text()
return None
class Google(SearchBackend):
"""Fetch lyrics from Google search results."""
SEARCH_URL = "https://www.googleapis.com/customsearch/v1"
#: Exclude some letras.mus.br pages which do not contain lyrics.
EXCLUDE_PAGES = [
"significado.html",
"traduccion.html",
"traducao.html",
"significados.html",
]
#: Regular expression to match noise in the URL title.
URL_TITLE_NOISE_RE = re.compile(
r"""
\b
(
paroles(\ et\ traduction|\ de\ chanson)?
| letras?(\ de)?
| liedtexte
| dainĹł\ ĹľodĹľiai
| original\ song\ full\ text\.
| official
| 20[12]\d\ version
| (absolute\ |az)?lyrics(\ complete)?
| www\S+
| \S+\.(com|net|mus\.br)
)
([^\w.]|$)
""",
re.IGNORECASE | re.VERBOSE,
)
#: Split cleaned up URL title into artist and title parts.
URL_TITLE_PARTS_RE = re.compile(r" +(?:[ :|-]+|par|by) +|, ")
SOURCE_DIST_FACTOR = {"www.azlyrics.com": 0.5, "www.songlyrics.com": 0.6}
ignored_domains: set[str] = set()
@classmethod
def pre_process_html(cls, html: str) -> str:
"""Pre-process the HTML content before scraping."""
html = Html.remove_ads(super().pre_process_html(html))
return Html.remove_formatting(Html.merge_paragraphs(html))
def fetch_text(self, *args, **kwargs) -> str:
"""Handle an error so that we can continue with the next URL."""
kwargs.setdefault("allow_redirects", False)
with self.handle_request():
try:
return super().fetch_text(*args, **kwargs)
except CaptchaError:
self.ignored_domains.add(urlparse(args[0]).netloc)
raise
@staticmethod
def get_part_dist(artist: str, title: str, part: str) -> float:
"""Return the distance between the given part and the artist and title.
A number between -1 and 1 is returned, where -1 means the part is
closer to the artist and 1 means it is closer to the title.
"""
return string_dist(artist, part) - string_dist(title, part)
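    # E.g. (illustrative distances) for artist "Adele", title "Hello" and
    # part "Adele": string_dist(artist, part) is ~0 while
    # string_dist(title, part) is large, so the result is negative,
    # meaning the part belongs on the artist side.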
@classmethod
def make_search_result(
cls, artist: str, title: str, item: GoogleCustomSearchAPI.Item
) -> SearchResult:
"""Parse artist and title from the URL title and return a search result."""
url_title = (
# get full title from metatags if available
item.get("pagemap", {}).get("metatags", [{}])[0].get("og:title")
            # default to the display title
or item["title"]
)
clean_title = cls.URL_TITLE_NOISE_RE.sub("", url_title).strip(" .-|")
# split it into parts which may be part of the artist or the title
# `dict.fromkeys` removes duplicates keeping the order
parts = list(dict.fromkeys(cls.URL_TITLE_PARTS_RE.split(clean_title)))
if len(parts) == 1:
part = parts[0]
if m := re.search(rf"(?i)\W*({re.escape(title)})\W*", part):
# artist and title may not have a separator
result_title = m[1]
result_artist = part.replace(m[0], "")
else:
# assume that this is the title
result_artist, result_title = "", parts[0]
else:
# sort parts by their similarity to the artist
result_artist = min(parts, key=lambda p: string_dist(artist, p))
result_title = min(parts, key=lambda p: string_dist(title, p))
return SearchResult(result_artist, result_title, item["link"])
def search(self, artist: str, title: str) -> Iterable[SearchResult]:
params = {
"key": self.config["google_API_key"].as_str(),
"cx": self.config["google_engine_ID"].as_str(),
"q": f"{artist} {title}",
"siteSearch": "www.musixmatch.com",
"siteSearchFilter": "e",
"excludeTerms": ", ".join(self.EXCLUDE_PAGES),
}
data: GoogleCustomSearchAPI.Response = self.fetch_json(
self.SEARCH_URL, params=params
)
for item in data.get("items", []):
yield self.make_search_result(artist, title, item)
def get_results(self, *args) -> Iterable[SearchResult]:
"""Try results from preferred sources first."""
for result in sorted(
super().get_results(*args),
key=lambda r: self.SOURCE_DIST_FACTOR.get(r.source, 1),
):
if result.source not in self.ignored_domains:
yield result
@classmethod
def scrape(cls, html: str) -> str | None:
# Get the longest text element (if any).
if strings := sorted(cls.get_soup(html).stripped_strings, key=len):
return strings[-1]
return None
@dataclass
class Translator(RequestHandler):
TRANSLATE_URL = "https://api.cognitive.microsofttranslator.com/translate"
LINE_PARTS_RE = re.compile(r"^(\[\d\d:\d\d.\d\d\]|) *(.*)$")
SEPARATOR = " | "
remove_translations = partial(re.compile(r" / [^\n]+").sub, "")
_log: Logger
api_key: str
to_language: str
from_languages: list[str]
@classmethod
def from_config(
cls,
log: Logger,
api_key: str,
to_language: str,
from_languages: list[str] | None = None,
) -> Translator:
return cls(
log,
api_key,
to_language.upper(),
[x.upper() for x in from_languages or []],
)
def get_translations(self, texts: Iterable[str]) -> list[tuple[str, str]]:
"""Return translations for the given texts.
To reduce the translation 'cost', we translate unique texts, and then
map the translations back to the original texts.
"""
unique_texts = list(dict.fromkeys(texts))
text = self.SEPARATOR.join(unique_texts)
data: list[TranslatorAPI.Response] = self.post_json(
self.TRANSLATE_URL,
headers={"Ocp-Apim-Subscription-Key": self.api_key},
json=[{"text": text}],
params={"api-version": "3.0", "to": self.to_language},
)
translated_text = data[0]["translations"][0]["text"]
translations = translated_text.split(self.SEPARATOR)
trans_by_text = dict(zip(unique_texts, translations))
return list(zip(texts, (trans_by_text.get(t, "") for t in texts)))
@classmethod
def split_line(cls, line: str) -> tuple[str, str]:
"""Split line to (timestamp, text)."""
if m := cls.LINE_PARTS_RE.match(line):
return m[1], m[2]
return "", ""
def append_translations(self, lines: Iterable[str]) -> list[str]:
"""Append translations to the given lyrics texts.
Lines may contain timestamps from LRCLib which need to be temporarily
removed for the translation. They can take any of these forms:
        - empty line
        - "Text": text only
        - "[00:00.00]": timestamp only
        - "[00:00.00] Text": timestamp with text
"""
# split into [(timestamp, text), ...]]
ts_and_text = list(map(self.split_line, lines))
timestamps = [ts for ts, _ in ts_and_text]
text_pairs = self.get_translations([ln for _, ln in ts_and_text])
# only add the separator for non-empty translations
texts = [" / ".join(filter(None, p)) for p in text_pairs]
# only add the space between non-empty timestamps and texts
return [" ".join(filter(None, p)) for p in zip(timestamps, texts)]
def translate(self, new_lyrics: str, old_lyrics: str) -> str:
"""Translate the given lyrics to the target language.
Check old lyrics for existing translations and return them if their
original text matches the new lyrics. This is to avoid translating
the same lyrics multiple times.
        If the lyrics are already in the target language, or not in any of
        the source languages (if configured), they are returned as is.
The footer with the source URL is preserved, if present.
"""
if (
" / " in old_lyrics
and self.remove_translations(old_lyrics) == new_lyrics
):
self.info("🔵 Translations already exist")
return old_lyrics
lyrics_language = langdetect.detect(new_lyrics).upper()
if lyrics_language == self.to_language:
self.info(
"🔵 Lyrics are already in the target language {.to_language}",
self,
)
return new_lyrics
if self.from_languages and lyrics_language not in self.from_languages:
self.info(
"🔵 Configuration {.from_languages} does not permit translating"
" from {}",
self,
lyrics_language,
)
return new_lyrics
lyrics, *url = new_lyrics.split("\n\nSource: ")
with self.handle_request():
translated_lines = self.append_translations(lyrics.splitlines())
self.info("🟢 Translated lyrics to {.to_language}", self)
return "\n\nSource: ".join(["\n".join(translated_lines), *url])
@dataclass
class RestFiles:
# The content for the base index.rst generated in ReST mode.
REST_INDEX_TEMPLATE = textwrap.dedent("""
Lyrics
======
        * :ref:`Song index <genindex>`
* :ref:`search`
Artist index:
.. toctree::
:maxdepth: 1
:glob:
artists/*
""").strip()
# The content for the base conf.py generated.
REST_CONF_TEMPLATE = textwrap.dedent("""
master_doc = "index"
project = "Lyrics"
copyright = "none"
author = "Various Authors"
latex_documents = [
(master_doc, "Lyrics.tex", project, author, "manual"),
]
epub_exclude_files = ["search.html"]
epub_tocdepth = 1
epub_tocdup = False
""").strip()
directory: Path
@cached_property
def artists_dir(self) -> Path:
dir = self.directory / "artists"
dir.mkdir(parents=True, exist_ok=True)
return dir
def write_indexes(self) -> None:
"""Write conf.py and index.rst files necessary for Sphinx
We write minimal configurations that are necessary for Sphinx
to operate. We do not overwrite existing files so that
customizations are respected."""
index_file = self.directory / "index.rst"
if not index_file.exists():
index_file.write_text(self.REST_INDEX_TEMPLATE)
conf_file = self.directory / "conf.py"
if not conf_file.exists():
conf_file.write_text(self.REST_CONF_TEMPLATE)
def write_artist(self, artist: str, items: Iterable[Item]) -> None:
parts = [
f"{artist}\n{'=' * len(artist)}",
".. contents::\n :local:",
]
for album, items in groupby(items, key=lambda i: i.album):
parts.append(f"{album}\n{'-' * len(album)}")
parts.extend(
part
for i in items
if (title := f":index:`{i.title.strip()}`")
for part in (
f"{title}\n{'~' * len(title)}",
textwrap.indent(i.lyrics, "| "),
)
)
file = self.artists_dir / f"{slug(artist)}.rst"
file.write_text("\n\n".join(parts).strip())
def write(self, items: list[Item]) -> None:
self.directory.mkdir(exist_ok=True, parents=True)
self.write_indexes()
items.sort(key=lambda i: i.albumartist)
for artist, artist_items in groupby(items, key=lambda i: i.albumartist):
self.write_artist(artist.strip(), artist_items)
d = self.directory
text = f"""
        ReST files generated. To build, use one of:
sphinx-build -b html {d} {d / "html"}
sphinx-build -b epub {d} {d / "epub"}
sphinx-build -b latex {d} {d / "latex"} && make -C {d / "latex"} all-pdf
"""
ui.print_(textwrap.dedent(text))
class LyricsPlugin(RequestHandler, plugins.BeetsPlugin):
BACKEND_BY_NAME = {
b.name: b for b in [LRCLib, Google, Genius, Tekstowo, MusiXmatch]
}
@cached_property
def backends(self) -> list[Backend]:
user_sources = self.config["sources"].get()
chosen = sanitize_choices(user_sources, self.BACKEND_BY_NAME)
if "google" in chosen and not self.config["google_API_key"].get():
self.warn("Disabling Google source: no API key configured.")
chosen.remove("google")
return [self.BACKEND_BY_NAME[c](self.config, self._log) for c in chosen]
@cached_property
def translator(self) -> Translator | None:
config = self.config["translate"]
if config["api_key"].get() and config["to_language"].get():
return Translator.from_config(self._log, **config.flatten())
return None
def __init__(self):
super().__init__()
self.config.add(
{
"auto": True,
"translate": {
"api_key": None,
"from_languages": [],
"to_language": None,
},
"dist_thresh": 0.11,
"google_API_key": None,
"google_engine_ID": "009217259823014548361:lndtuqkycfu",
"genius_api_key": (
"Ryq93pUGm8bM6eUWwD_M3NOFFDAtp2yEE7W"
"76V-uFL5jks5dNvcGCdarqFjDhP9c"
),
"fallback": None,
"force": False,
"local": False,
"print": False,
"synced": False,
# Musixmatch is disabled by default as they are currently blocking
# requests with the beets user agent.
"sources": [
n for n in self.BACKEND_BY_NAME if n != "musixmatch"
],
}
)
self.config["translate"]["api_key"].redact = True
self.config["google_API_key"].redact = True
self.config["google_engine_ID"].redact = True
self.config["genius_api_key"].redact = True
if self.config["auto"]:
self.import_stages = [self.imported]
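    # A minimal user configuration exercising the defaults above might look
    # like this in config.yaml (keys mirror the dictionary registered here;
    # the API key is a placeholder):
    #
    #   lyrics:
    #     synced: true
    #     translate:
    #       api_key: YOUR_AZURE_TRANSLATOR_KEY
    #       to_language: EN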
def commands(self):
cmd = ui.Subcommand("lyrics", help="fetch song lyrics")
cmd.parser.add_option(
"-p",
"--print",
action="store_true",
default=self.config["print"].get(),
help="print lyrics to console",
)
cmd.parser.add_option(
"-r",
"--write-rest",
dest="rest_directory",
action="store",
default=None,
metavar="dir",
help="write lyrics to given directory as ReST files",
)
cmd.parser.add_option(
"-f",
"--force",
action="store_true",
default=self.config["force"].get(),
help="always re-download lyrics",
)
cmd.parser.add_option(
"-l",
"--local",
action="store_true",
default=self.config["local"].get(),
help="do not fetch missing lyrics",
)
def func(lib: Library, opts, args) -> None:
# The "write to files" option corresponds to the
# import_write config value.
self.config.set(vars(opts))
items = list(lib.items(args))
for item in items:
self.add_item_lyrics(item, ui.should_write())
if item.lyrics and opts.print:
ui.print_(item.lyrics)
if opts.rest_directory and (
items := [i for i in items if i.lyrics]
):
RestFiles(Path(opts.rest_directory)).write(items)
cmd.func = func
return [cmd]
def imported(self, _, task: ImportTask) -> None:
"""Import hook for fetching lyrics automatically."""
for item in task.imported_items():
self.add_item_lyrics(item, False)
def find_lyrics(self, item: Item) -> str:
album, length = item.album, round(item.length)
matches = (
[
lyrics
for t in titles
if (lyrics := self.get_lyrics(a, t, album, length))
]
for a, titles in search_pairs(item)
)
return "\n\n---\n\n".join(next(filter(None, matches), []))
def add_item_lyrics(self, item: Item, write: bool) -> None:
"""Fetch and store lyrics for a single item. If ``write``, then the
lyrics will also be written to the file itself.
"""
if self.config["local"]:
return
if not self.config["force"] and item.lyrics:
self.info("🔵 Lyrics already present: {}", item)
return
if lyrics := self.find_lyrics(item):
self.info("🟢 Found lyrics: {}", item)
if translator := self.translator:
lyrics = translator.translate(lyrics, item.lyrics)
else:
self.info("đź”´ Lyrics not found: {}", item)
lyrics = self.config["fallback"].get()
if lyrics not in {None, item.lyrics}:
item.lyrics = lyrics
if write:
item.try_write()
item.store()
def get_lyrics(self, artist: str, title: str, *args) -> str | None:
"""Fetch lyrics, trying each source in turn. Return a string or
None if no lyrics were found.
"""
self.info("Fetching lyrics for {} - {}", artist, title)
for backend in self.backends:
with backend.handle_request():
if lyrics_info := backend.fetch(artist, title, *args):
lyrics, url = lyrics_info
return f"{lyrics}\n\nSource: {url}"
return None
beetbox-beets-c1877b7/beetsplug/mbcollection.py 0000664 0000000 0000000 00000013703 15073551743 0021642 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright (c) 2011, Jeffrey Aylesworth
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import re
import musicbrainzngs
from beets import config, ui
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
SUBMISSION_CHUNK_SIZE = 200
FETCH_CHUNK_SIZE = 100
UUID_REGEX = r"^[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}$"
def mb_call(func, *args, **kwargs):
"""Call a MusicBrainz API function and catch exceptions."""
try:
return func(*args, **kwargs)
except musicbrainzngs.AuthenticationError:
raise ui.UserError("authentication with MusicBrainz failed")
except (musicbrainzngs.ResponseError, musicbrainzngs.NetworkError) as exc:
raise ui.UserError(f"MusicBrainz API error: {exc}")
except musicbrainzngs.UsageError:
raise ui.UserError("MusicBrainz credentials missing")
def submit_albums(collection_id, release_ids):
"""Add all of the release IDs to the indicated collection. Multiple
requests are made if there are many release IDs to submit.
"""
for i in range(0, len(release_ids), SUBMISSION_CHUNK_SIZE):
chunk = release_ids[i : i + SUBMISSION_CHUNK_SIZE]
mb_call(musicbrainzngs.add_releases_to_collection, collection_id, chunk)
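# Worked example of the chunking above: 450 release IDs are submitted in
# three calls, covering release_ids[0:200], release_ids[200:400] and
# release_ids[400:450].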
class MusicBrainzCollectionPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
config["musicbrainz"]["pass"].redact = True
musicbrainzngs.auth(
config["musicbrainz"]["user"].as_str(),
config["musicbrainz"]["pass"].as_str(),
)
self.config.add(
{
"auto": False,
"collection": "",
"remove": False,
}
)
if self.config["auto"]:
self.import_stages = [self.imported]
def _get_collection(self):
collections = mb_call(musicbrainzngs.get_collections)
if not collections["collection-list"]:
raise ui.UserError("no collections exist for user")
# Get all release collection IDs, avoiding event collections
collection_ids = [
x["id"]
for x in collections["collection-list"]
if x["entity-type"] == "release"
]
if not collection_ids:
raise ui.UserError("No release collection found.")
# Check that the collection exists so we can present a nice error
collection = self.config["collection"].as_str()
if collection:
if collection not in collection_ids:
raise ui.UserError(f"invalid collection ID: {collection}")
return collection
# No specified collection. Just return the first collection ID
return collection_ids[0]
def _get_albums_in_collection(self, id):
def _fetch(offset):
res = mb_call(
musicbrainzngs.get_releases_in_collection,
id,
limit=FETCH_CHUNK_SIZE,
offset=offset,
)["collection"]
return [x["id"] for x in res["release-list"]], res["release-count"]
        albums_in_collection, release_count = _fetch(0)
        # The first chunk is already fetched; page through the rest.
        for offset in range(FETCH_CHUNK_SIZE, release_count, FETCH_CHUNK_SIZE):
            albums_in_collection += _fetch(offset)[0]
return albums_in_collection
def commands(self):
mbupdate = Subcommand("mbupdate", help="Update MusicBrainz collection")
mbupdate.parser.add_option(
"-r",
"--remove",
action="store_true",
default=None,
dest="remove",
help="Remove albums not in beets library",
)
mbupdate.func = self.update_collection
return [mbupdate]
def remove_missing(self, collection_id, lib_albums):
lib_ids = {x.mb_albumid for x in lib_albums}
albums_in_collection = self._get_albums_in_collection(collection_id)
remove_me = list(set(albums_in_collection) - lib_ids)
for i in range(0, len(remove_me), FETCH_CHUNK_SIZE):
chunk = remove_me[i : i + FETCH_CHUNK_SIZE]
mb_call(
musicbrainzngs.remove_releases_from_collection,
collection_id,
chunk,
)
def update_collection(self, lib, opts, args):
self.config.set_args(opts)
remove_missing = self.config["remove"].get(bool)
self.update_album_list(lib, lib.albums(), remove_missing)
def imported(self, session, task):
"""Add each imported album to the collection."""
if task.is_album:
self.update_album_list(session.lib, [task.album])
def update_album_list(self, lib, album_list, remove_missing=False):
"""Update the MusicBrainz collection from a list of Beets albums"""
collection_id = self._get_collection()
# Get a list of all the album IDs.
album_ids = []
for album in album_list:
aid = album.mb_albumid
if aid:
if re.match(UUID_REGEX, aid):
album_ids.append(aid)
else:
self._log.info("skipping invalid MBID: {}", aid)
# Submit to MusicBrainz.
self._log.info("Updating MusicBrainz collection {}...", collection_id)
submit_albums(collection_id, album_ids)
if remove_missing:
self.remove_missing(collection_id, lib.albums())
self._log.info("...MusicBrainz collection updated.")
beetbox-beets-c1877b7/beetsplug/mbsubmit.py 0000664 0000000 0000000 00000006517 15073551743 0021017 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson and Diego Moreda.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Aid in submitting information to MusicBrainz.
This plugin allows the user to print track information in a format that is
parseable by the MusicBrainz track parser [1]. Programmatic submitting is not
implemented by MusicBrainz yet.
[1] https://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings
"""
import subprocess
from beets import ui
from beets.autotag import Recommendation
from beets.plugins import BeetsPlugin
from beets.ui.commands import PromptChoice
from beets.util import displayable_path
from beetsplug.info import print_data
class MBSubmitPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"format": "$track. $title - $artist ($length)",
"threshold": "medium",
"picard_path": "picard",
}
)
# Validate and store threshold.
self.threshold = self.config["threshold"].as_choice(
{
"none": Recommendation.none,
"low": Recommendation.low,
"medium": Recommendation.medium,
"strong": Recommendation.strong,
}
)
self.register_listener(
"before_choose_candidate", self.before_choose_candidate_event
)
def before_choose_candidate_event(self, session, task):
if task.rec <= self.threshold:
return [
PromptChoice("p", "Print tracks", self.print_tracks),
PromptChoice("o", "Open files with Picard", self.picard),
]
def picard(self, session, task):
        paths = [displayable_path(p) for p in task.paths]
try:
picard_path = self.config["picard_path"].as_str()
subprocess.Popen([picard_path] + paths)
self._log.info("launched picard from\n{}", picard_path)
except OSError as exc:
self._log.error("Could not open picard, got error:\n{}", exc)
def print_tracks(self, session, task):
for i in sorted(task.items, key=lambda i: i.track):
print_data(None, i, self.config["format"].as_str())
def commands(self):
"""Add beet UI commands for mbsubmit."""
mbsubmit_cmd = ui.Subcommand(
"mbsubmit", help="Submit Tracks to MusicBrainz"
)
def func(lib, opts, args):
items = lib.items(args)
self._mbsubmit(items)
mbsubmit_cmd.func = func
return [mbsubmit_cmd]
def _mbsubmit(self, items):
"""Print track information to be submitted to MusicBrainz."""
for i in sorted(items, key=lambda i: i.track):
print_data(None, i, self.config["format"].as_str())
beetbox-beets-c1877b7/beetsplug/mbsync.py 0000664 0000000 0000000 00000015103 15073551743 0020457 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Jakob Schnitzer.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Synchronise library metadata with metadata source backends."""
from collections import defaultdict
from beets import autotag, library, metadata_plugins, ui, util
from beets.plugins import BeetsPlugin, apply_item_changes
class MBSyncPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
def commands(self):
cmd = ui.Subcommand("mbsync", help="update metadata from musicbrainz")
cmd.parser.add_option(
"-p",
"--pretend",
action="store_true",
help="show all changes but do nothing",
)
cmd.parser.add_option(
"-m",
"--move",
action="store_true",
dest="move",
help="move files in the library directory",
)
cmd.parser.add_option(
"-M",
"--nomove",
action="store_false",
dest="move",
help="don't move files in library",
)
cmd.parser.add_option(
"-W",
"--nowrite",
action="store_false",
default=None,
dest="write",
help="don't write updated metadata to files",
)
cmd.parser.add_format_option()
cmd.func = self.func
return [cmd]
def func(self, lib, opts, args):
"""Command handler for the mbsync function."""
move = ui.should_move(opts.move)
pretend = opts.pretend
write = ui.should_write(opts.write)
self.singletons(lib, args, move, pretend, write)
self.albums(lib, args, move, pretend, write)
def singletons(self, lib, query, move, pretend, write):
"""Retrieve and apply info from the autotagger for items matched by
query.
"""
for item in lib.items(query + ["singleton:true"]):
if not item.mb_trackid:
self._log.info(
"Skipping singleton with no mb_trackid: {}", item
)
continue
if not (
track_info := metadata_plugins.track_for_id(item.mb_trackid)
):
self._log.info(
"Recording ID not found: {0.mb_trackid} for track {0}", item
)
continue
# Apply.
with lib.transaction():
autotag.apply_item_metadata(item, track_info)
apply_item_changes(lib, item, move, pretend, write)
def albums(self, lib, query, move, pretend, write):
"""Retrieve and apply info from the autotagger for albums matched by
query and their items.
"""
# Process matching albums.
for album in lib.albums(query):
if not album.mb_albumid:
self._log.info("Skipping album with no mb_albumid: {}", album)
continue
if not (
album_info := metadata_plugins.album_for_id(album.mb_albumid)
):
self._log.info(
"Release ID {0.mb_albumid} not found for album {0}", album
)
continue
# Map release track and recording MBIDs to their information.
# Recordings can appear multiple times on a release, so each MBID
# maps to a list of TrackInfo objects.
releasetrack_index = {}
track_index = defaultdict(list)
for track_info in album_info.tracks:
releasetrack_index[track_info.release_track_id] = track_info
track_index[track_info.track_id].append(track_info)
# Construct a track mapping according to MBIDs (release track MBIDs
# first, if available, and recording MBIDs otherwise). This should
# work for albums that have missing or extra tracks.
mapping = {}
items = list(album.items())
for item in items:
if (
item.mb_releasetrackid
and item.mb_releasetrackid in releasetrack_index
):
mapping[item] = releasetrack_index[item.mb_releasetrackid]
else:
candidates = track_index[item.mb_trackid]
if len(candidates) == 1:
mapping[item] = candidates[0]
else:
# If there are multiple copies of a recording, they are
# disambiguated using their disc and track number.
for c in candidates:
if (
c.medium_index == item.track
and c.medium == item.disc
):
mapping[item] = c
break
# Apply.
self._log.debug("applying changes to {}", album)
with lib.transaction():
autotag.apply_metadata(album_info, mapping)
changed = False
# Find any changed item to apply changes to album.
any_changed_item = items[0]
for item in items:
item_changed = ui.show_model_changes(item)
changed |= item_changed
if item_changed:
any_changed_item = item
apply_item_changes(lib, item, move, pretend, write)
if not changed:
# No change to any item.
continue
if not pretend:
# Update album structure to reflect an item in it.
for key in library.Album.item_keys:
album[key] = any_changed_item[key]
album.store()
# Move album art (and any inconsistent items).
if move and lib.directory in util.ancestry(items[0].path):
self._log.debug("moving album {}", album)
album.move()
beetbox-beets-c1877b7/beetsplug/metasync/ 0000775 0000000 0000000 00000000000 15073551743 0020435 5 ustar 00root root 0000000 0000000 beetbox-beets-c1877b7/beetsplug/metasync/__init__.py 0000664 0000000 0000000 00000010215 15073551743 0022545 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Heinz Wiesinger.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Synchronize information from music player libraries"""
from abc import ABCMeta, abstractmethod
from importlib import import_module
from confuse import ConfigValueError
from beets import ui
from beets.plugins import BeetsPlugin
METASYNC_MODULE = "beetsplug.metasync"
# Dictionary to map the MODULE and the CLASS NAME of meta sources
SOURCES = {
"amarok": "Amarok",
"itunes": "Itunes",
}
class MetaSource(metaclass=ABCMeta):
def __init__(self, config, log):
self.item_types = {}
self.config = config
self._log = log
@abstractmethod
def sync_from_source(self, item):
pass
def load_meta_sources():
"""Returns a dictionary of all the MetaSources
E.g., {'itunes': Itunes} with isinstance(Itunes, MetaSource) true
"""
meta_sources = {}
for module_path, class_name in SOURCES.items():
module = import_module(f"{METASYNC_MODULE}.{module_path}")
meta_sources[class_name.lower()] = getattr(module, class_name)
return meta_sources
META_SOURCES = load_meta_sources()
def load_item_types():
"""Returns a dictionary containing the item_types of all the MetaSources"""
item_types = {}
for meta_source in META_SOURCES.values():
item_types.update(meta_source.item_types)
return item_types
class MetaSyncPlugin(BeetsPlugin):
item_types = load_item_types()
def __init__(self):
super().__init__()
def commands(self):
cmd = ui.Subcommand(
"metasync", help="update metadata from music player libraries"
)
cmd.parser.add_option(
"-p",
"--pretend",
action="store_true",
help="show all changes but do nothing",
)
cmd.parser.add_option(
"-s",
"--source",
default=[],
action="append",
dest="sources",
help="comma-separated list of sources to sync",
)
cmd.parser.add_format_option()
cmd.func = self.func
return [cmd]
def func(self, lib, opts, args):
"""Command handler for the metasync function."""
pretend = opts.pretend
sources = []
for source in opts.sources:
sources.extend(source.split(","))
sources = sources or self.config["source"].as_str_seq()
meta_source_instances = {}
items = lib.items(args)
# Avoid needlessly instantiating meta sources (can be expensive)
if not items:
self._log.info("No items found matching query")
return
# Instantiate the meta sources
for player in sources:
try:
cls = META_SOURCES[player]
except KeyError:
self._log.error("Unknown metadata source '{}'", player)
try:
meta_source_instances[player] = cls(self.config, self._log)
except (ImportError, ConfigValueError) as e:
self._log.error(
"Failed to instantiate metadata source {!r}: {}", player, e
)
# Avoid needlessly iterating over items
if not meta_source_instances:
self._log.error("No valid metadata sources found")
return
# Sync the items with all of the meta sources
for item in items:
for meta_source in meta_source_instances.values():
meta_source.sync_from_source(item)
changed = ui.show_model_changes(item)
if changed and not pretend:
item.store()
beetbox-beets-c1877b7/beetsplug/metasync/amarok.py 0000664 0000000 0000000 00000007567 15073551743 0022300 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Heinz Wiesinger.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Synchronize information from amarok's library via dbus"""
from datetime import datetime
from os.path import basename
from time import mktime
from xml.sax.saxutils import quoteattr
from beets.dbcore import types
from beets.util import displayable_path
from beetsplug.metasync import MetaSource
def import_dbus():
try:
return __import__("dbus")
except ImportError:
return None
dbus = import_dbus()
class Amarok(MetaSource):
item_types = {
"amarok_rating": types.INTEGER,
"amarok_score": types.FLOAT,
"amarok_uid": types.STRING,
"amarok_playcount": types.INTEGER,
"amarok_firstplayed": types.DATE,
"amarok_lastplayed": types.DATE,
}
query_xml = """
"""
def __init__(self, config, log):
super().__init__(config, log)
if not dbus:
raise ImportError("failed to import dbus")
self.collection = dbus.SessionBus().get_object(
"org.kde.amarok", "/Collection"
)
def sync_from_source(self, item):
path = displayable_path(item.path)
# amarok unfortunately doesn't allow searching for the full path, only
# for the patch relative to the mount point. But the full path is part
# of the result set. So query for the filename and then try to match
# the correct item from the results we get back
results = self.collection.Query(
self.query_xml.format(quoteattr(basename(path)))
)
for result in results:
if result["xesam:url"] != path:
continue
item.amarok_rating = result["xesam:userRating"]
item.amarok_score = result["xesam:autoRating"]
item.amarok_playcount = result["xesam:useCount"]
item.amarok_uid = result["xesam:id"].replace(
"amarok-sqltrackuid://", ""
)
if result["xesam:firstUsed"][0][0] != 0:
# These dates are stored as timestamps in amarok's db, but
# exposed over dbus as fixed integers in the current timezone.
first_played = datetime(
result["xesam:firstUsed"][0][0],
result["xesam:firstUsed"][0][1],
result["xesam:firstUsed"][0][2],
result["xesam:firstUsed"][1][0],
result["xesam:firstUsed"][1][1],
result["xesam:firstUsed"][1][2],
)
if result["xesam:lastUsed"][0][0] != 0:
last_played = datetime(
result["xesam:lastUsed"][0][0],
result["xesam:lastUsed"][0][1],
result["xesam:lastUsed"][0][2],
result["xesam:lastUsed"][1][0],
result["xesam:lastUsed"][1][1],
result["xesam:lastUsed"][1][2],
)
else:
last_played = first_played
item.amarok_firstplayed = mktime(first_played.timetuple())
item.amarok_lastplayed = mktime(last_played.timetuple())
beetbox-beets-c1877b7/beetsplug/metasync/itunes.py 0000664 0000000 0000000 00000010672 15073551743 0022324 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Tom Jaspers.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Synchronize information from iTunes's library"""
import os
import plistlib
import shutil
import tempfile
from contextlib import contextmanager
from time import mktime
from urllib.parse import unquote, urlparse
from confuse import ConfigValueError
from beets import util
from beets.dbcore import types
from beets.util import bytestring_path, syspath
from beetsplug.metasync import MetaSource
@contextmanager
def create_temporary_copy(path):
temp_dir = bytestring_path(tempfile.mkdtemp())
temp_path = os.path.join(temp_dir, b"temp_itunes_lib")
shutil.copyfile(syspath(path), syspath(temp_path))
try:
yield temp_path
finally:
shutil.rmtree(syspath(temp_dir))
def _norm_itunes_path(path):
# Itunes prepends the location with 'file://' on posix systems,
# and with 'file://localhost/' on Windows systems.
# The actual path to the file is always saved as posix form
# E.g., 'file://Users/Music/bar' or 'file://localhost/G:/Music/bar'
# The entire path will also be capitalized (e.g., '/Music/Alt-J')
# Note that this means the path will always have a leading separator,
# which is unwanted in the case of Windows systems.
# E.g., '\\G:\\Music\\bar' needs to be stripped to 'G:\\Music\\bar'
return util.bytestring_path(
os.path.normpath(unquote(urlparse(path).path)).lstrip("\\")
).lower()
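# Illustrative result for a Windows-style entry (os.path.normpath makes the
# exact separators platform dependent):
#
#     _norm_itunes_path("file://localhost/G:/Music/Bar.mp3")
#     # -> b"g:\\music\\bar.mp3" on Windows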
class Itunes(MetaSource):
item_types = {
"itunes_rating": types.INTEGER, # 0..100 scale
"itunes_playcount": types.INTEGER,
"itunes_skipcount": types.INTEGER,
"itunes_lastplayed": types.DATE,
"itunes_lastskipped": types.DATE,
"itunes_dateadded": types.DATE,
}
def __init__(self, config, log):
super().__init__(config, log)
config.add({"itunes": {"library": "~/Music/iTunes/iTunes Library.xml"}})
# Load the iTunes library, which has to be the .xml one (not the .itl)
library_path = config["itunes"]["library"].as_filename()
try:
self._log.debug("loading iTunes library from {}", library_path)
with create_temporary_copy(library_path) as library_copy:
with open(library_copy, "rb") as library_copy_f:
raw_library = plistlib.load(library_copy_f)
except OSError as e:
raise ConfigValueError(f"invalid iTunes library: {e.strerror}")
except Exception:
# It's likely the user configured their '.itl' library (<> xml)
if os.path.splitext(library_path)[1].lower() != ".xml":
hint = (
": please ensure that the configured path"
" points to the .XML library"
)
else:
hint = ""
raise ConfigValueError(f"invalid iTunes library{hint}")
# Make the iTunes library queryable using the path
self.collection = {
_norm_itunes_path(track["Location"]): track
for track in raw_library["Tracks"].values()
if "Location" in track
}
def sync_from_source(self, item):
result = self.collection.get(util.bytestring_path(item.path).lower())
if not result:
self._log.warning("no iTunes match found for {}", item)
return
item.itunes_rating = result.get("Rating")
item.itunes_playcount = result.get("Play Count")
item.itunes_skipcount = result.get("Skip Count")
if result.get("Play Date UTC"):
item.itunes_lastplayed = mktime(
result.get("Play Date UTC").timetuple()
)
if result.get("Skip Date"):
item.itunes_lastskipped = mktime(
result.get("Skip Date").timetuple()
)
if result.get("Date Added"):
item.itunes_dateadded = mktime(result.get("Date Added").timetuple())
beetbox-beets-c1877b7/beetsplug/missing.py 0000664 0000000 0000000 00000017362 15073551743 0020646 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Pedro Silva.
# Copyright 2017, Quentin Young.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""List missing tracks."""
from collections import defaultdict
from collections.abc import Iterator
import musicbrainzngs
from musicbrainzngs.musicbrainz import MusicBrainzError
from beets import config, metadata_plugins
from beets.dbcore import types
from beets.library import Album, Item, Library
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, print_
MB_ARTIST_QUERY = r"mb_albumartistid::^\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$"
def _missing_count(album):
"""Return number of missing items in `album`."""
return (album.albumtotal or 0) - len(album.items())
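# For example, an album with albumtotal == 12 and 10 matching items in the
# library yields a missing count of 2.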
def _item(track_info, album_info, album_id):
"""Build and return `item` from `track_info` and `album info`
objects. `item` is missing what fields cannot be obtained from
MusicBrainz alone (encoder, rg_track_gain, rg_track_peak,
rg_album_gain, rg_album_peak, original_year, original_month,
original_day, length, bitrate, format, samplerate, bitdepth,
channels, mtime.)
"""
t = track_info
a = album_info
return Item(
**{
"album_id": album_id,
"album": a.album,
"albumartist": a.artist,
"albumartist_credit": a.artist_credit,
"albumartist_sort": a.artist_sort,
"albumdisambig": a.albumdisambig,
"albumstatus": a.albumstatus,
"albumtype": a.albumtype,
"artist": t.artist,
"artist_credit": t.artist_credit,
"artist_sort": t.artist_sort,
"asin": a.asin,
"catalognum": a.catalognum,
"comp": a.va,
"country": a.country,
"day": a.day,
"disc": t.medium,
"disctitle": t.disctitle,
"disctotal": a.mediums,
"label": a.label,
"language": a.language,
"length": t.length,
"mb_albumid": a.album_id,
"mb_artistid": t.artist_id,
"mb_releasegroupid": a.releasegroup_id,
"mb_trackid": t.track_id,
"media": t.media,
"month": a.month,
"script": a.script,
"title": t.title,
"track": t.index,
"tracktotal": len(a.tracks),
"year": a.year,
}
)
class MissingPlugin(BeetsPlugin):
"""List missing tracks"""
album_types = {
"missing": types.INTEGER,
}
def __init__(self):
super().__init__()
self.config.add(
{
"count": False,
"total": False,
"album": False,
}
)
self.album_template_fields["missing"] = _missing_count
self._command = Subcommand("missing", help=__doc__, aliases=["miss"])
self._command.parser.add_option(
"-c",
"--count",
dest="count",
action="store_true",
help="count missing tracks per album",
)
self._command.parser.add_option(
"-t",
"--total",
dest="total",
action="store_true",
help="count total of missing tracks",
)
self._command.parser.add_option(
"-a",
"--album",
dest="album",
action="store_true",
help="show missing albums for artist instead of tracks",
)
self._command.parser.add_format_option()
def commands(self):
def _miss(lib, opts, args):
self.config.set_args(opts)
albms = self.config["album"].get()
helper = self._missing_albums if albms else self._missing_tracks
helper(lib, args)
self._command.func = _miss
return [self._command]
def _missing_tracks(self, lib, query):
"""Print a listing of tracks missing from each album in the library
matching query.
"""
albums = lib.albums(query)
count = self.config["count"].get()
total = self.config["total"].get()
fmt = config["format_album" if count else "format_item"].get()
if total:
print(sum([_missing_count(a) for a in albums]))
return
# Default format string for count mode.
if count:
fmt += ": $missing"
for album in albums:
if count:
if _missing_count(album):
print_(format(album, fmt))
else:
for item in self._missing(album):
print_(format(item, fmt))
def _missing_albums(self, lib: Library, query: list[str]) -> None:
"""Print a listing of albums missing from each artist in the library
matching query.
"""
query.append(MB_ARTIST_QUERY)
# build dict mapping artist to set of their album ids in library
album_ids_by_artist = defaultdict(set)
for album in lib.albums(query):
# TODO(@snejus): Some releases have different `albumartist` for the
# same `mb_albumartistid`. Since we're grouping by the combination
# of these two fields, we end up processing the same
# `mb_albumartistid` multiple times: calling MusicBrainz API and
# reporting the same set of missing albums. Instead, we should
# group by `mb_albumartistid` field only.
artist = (album["albumartist"], album["mb_albumartistid"])
            album_ids_by_artist[artist].add(album.mb_releasegroupid)
total_missing = 0
calculating_total = self.config["total"].get()
for (artist, artist_id), album_ids in album_ids_by_artist.items():
try:
resp = musicbrainzngs.browse_release_groups(artist=artist_id)
except MusicBrainzError as err:
self._log.info(
"Couldn't fetch info for artist '{}' ({}) - '{}'",
artist,
artist_id,
err,
)
continue
missing_titles = [
f"{artist} - {rg['title']}"
for rg in resp["release-group-list"]
if rg["id"] not in album_ids
]
if calculating_total:
total_missing += len(missing_titles)
else:
for title in missing_titles:
print(title)
if calculating_total:
print(total_missing)
def _missing(self, album: Album) -> Iterator[Item]:
"""Query MusicBrainz to determine items missing from `album`."""
if len(album.items()) == album.albumtotal:
return
item_mbids = {x.mb_trackid for x in album.items()}
# fetch missing items
# TODO: Implement caching that without breaking other stuff
if album_info := metadata_plugins.album_for_id(album.mb_albumid):
for track_info in album_info.tracks:
if track_info.track_id not in item_mbids:
self._log.debug(
"track {.track_id} in album {.album_id}",
track_info,
album_info,
)
yield _item(track_info, album_info, album.id)
beetbox-beets-c1877b7/beetsplug/mpdstats.py 0000664 0000000 0000000 00000030215 15073551743 0021024 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Peter Schnebel and Johann Klähn.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import os
import time
import mpd
from beets import config, plugins, ui
from beets.dbcore import types
from beets.dbcore.query import PathQuery
from beets.util import displayable_path
# If we lose the connection, how many times do we want to retry and how
# much time should we wait between retries?
RETRIES = 10
RETRY_INTERVAL = 5
DUPLICATE_PLAY_THRESHOLD = 10.0
mpd_config = config["mpd"]
def is_url(path):
"""Try to determine if the path is an URL."""
if isinstance(path, bytes): # if it's bytes, then it's a path
return False
return path.split("://", 1)[0] in ["http", "https"]
class MPDClientWrapper:
def __init__(self, log):
self._log = log
self.music_directory = mpd_config["music_directory"].as_str()
self.strip_path = mpd_config["strip_path"].as_str()
# Ensure strip_path end with '/'
if not self.strip_path.endswith("/"):
self.strip_path += "/"
self._log.debug("music_directory: {.music_directory}", self)
self._log.debug("strip_path: {.strip_path}", self)
self.client = mpd.MPDClient()
def connect(self):
"""Connect to the MPD."""
host = mpd_config["host"].as_str()
port = mpd_config["port"].get(int)
if host[0] in ["/", "~"]:
host = os.path.expanduser(host)
self._log.info("connecting to {}:{}", host, port)
try:
self.client.connect(host, port)
except OSError as e:
raise ui.UserError(f"could not connect to MPD: {e}")
password = mpd_config["password"].as_str()
if password:
try:
self.client.password(password)
except mpd.CommandError as e:
raise ui.UserError(f"could not authenticate to MPD: {e}")
def disconnect(self):
"""Disconnect from the MPD."""
self.client.close()
self.client.disconnect()
def get(self, command, retries=RETRIES):
"""Wrapper for requests to the MPD server. Tries to re-connect if the
connection was lost (f.ex. during MPD's library refresh).
"""
try:
return getattr(self.client, command)()
except (OSError, mpd.ConnectionError) as err:
self._log.error("{}", err)
if retries <= 0:
                # out of retries: we couldn't reconnect in time :(
raise ui.UserError("communication with MPD server failed")
time.sleep(RETRY_INTERVAL)
try:
self.disconnect()
except mpd.ConnectionError:
pass
self.connect()
return self.get(command, retries=retries - 1)
def currentsong(self):
"""Return the path to the currently playing song, along with its
songid. Prefixes paths with the music_directory, to get the absolute
path.
In some cases, we need to remove the local path from MPD server,
we replace 'strip_path' with ''.
`strip_path` defaults to ''.
"""
result = None
entry = self.get("currentsong")
if "file" in entry:
if not is_url(entry["file"]):
file = entry["file"]
if file.startswith(self.strip_path):
file = file[len(self.strip_path) :]
result = os.path.join(self.music_directory, file)
else:
result = entry["file"]
self._log.debug("returning: {}", result)
return result, entry.get("id")
def status(self):
"""Return the current status of the MPD."""
return self.get("status")
def events(self):
"""Return list of events. This may block a long time while waiting for
an answer from MPD.
"""
return self.get("idle")
class MPDStats:
def __init__(self, lib, log):
self.lib = lib
self._log = log
self.do_rating = mpd_config["rating"].get(bool)
self.rating_mix = mpd_config["rating_mix"].get(float)
self.played_ratio_threshold = mpd_config["played_ratio_threshold"].get(
float
)
self.now_playing = None
self.mpd = MPDClientWrapper(log)
def rating(self, play_count, skip_count, rating, skipped):
"""Calculate a new rating for a song based on play count, skip count,
old rating and the fact if it was skipped or not.
"""
if skipped:
rolling = rating - rating / 2.0
else:
rolling = rating + (1.0 - rating) / 2.0
stable = (play_count + 1.0) / (play_count + skip_count + 2.0)
return self.rating_mix * stable + (1.0 - self.rating_mix) * rolling
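    # Worked example with the default rating_mix of 0.75: for a track with
    # play_count=2, skip_count=1, rating=0.5 that finished playing
    # (skipped=False):
    #
    #     rolling = 0.5 + (1.0 - 0.5) / 2.0    # 0.75
    #     stable  = (2 + 1.0) / (2 + 1 + 2.0)  # 0.6
    #     result  = 0.75 * 0.6 + 0.25 * 0.75   # 0.6375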
def get_item(self, path):
"""Return the beets item related to path."""
query = PathQuery("path", path)
item = self.lib.items(query).get()
if item:
return item
else:
self._log.info("item not found: {}", displayable_path(path))
def update_item(self, item, attribute, value=None, increment=None):
"""Update the beets item. Set attribute to value or increment the value
of attribute. If the increment argument is used the value is cast to
the corresponding type.
"""
if item is None:
return
if increment is not None:
item.load()
value = type(increment)(item.get(attribute, 0)) + increment
if value is not None:
item[attribute] = value
item.store()
self._log.debug(
"updated: {} = {} [{.filepath}]",
attribute,
item[attribute],
item,
)
def update_rating(self, item, skipped):
"""Update the rating for a beets item. The `item` can either be a
beets `Item` or None. If the item is None, nothing changes.
"""
if item is None:
return
item.load()
rating = self.rating(
int(item.get("play_count", 0)),
int(item.get("skip_count", 0)),
float(item.get("rating", 0.5)),
skipped,
)
self.update_item(item, "rating", rating)
def handle_song_change(self, song):
"""Determine if a song was skipped or not and update its attributes.
        To this end, the ratio of time actually played to the song's
        duration is computed; if it falls below the configured
        played_ratio_threshold, the song is considered skipped.
        Return whether the song was skipped.
"""
elapsed = song["elapsed_at_start"] + (time.time() - song["started"])
skipped = elapsed / song["duration"] < self.played_ratio_threshold
if skipped:
self.handle_skipped(song)
else:
self.handle_played(song)
if self.do_rating:
self.update_rating(song["beets_item"], skipped)
return skipped
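    # Worked example with the default played_ratio_threshold of 0.85: a
    # 200-second track started from the beginning and changed after 150
    # seconds has elapsed / duration == 0.75 < 0.85, so it counts as skipped.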
def handle_played(self, song):
"""Updates the play count of a song."""
self.update_item(song["beets_item"], "play_count", increment=1)
self._log.info("played {}", displayable_path(song["path"]))
def handle_skipped(self, song):
"""Updates the skip count of a song."""
self.update_item(song["beets_item"], "skip_count", increment=1)
self._log.info("skipped {}", displayable_path(song["path"]))
def on_stop(self, status):
self._log.info("stop")
# if the current song stays the same it means that we stopped on the
# current track and should not record a skip.
if self.now_playing and self.now_playing["id"] != status.get("songid"):
self.handle_song_change(self.now_playing)
self.now_playing = None
def on_pause(self, status):
self._log.info("pause")
self.now_playing = None
def on_play(self, status):
path, songid = self.mpd.currentsong()
if not path:
return
played, duration = map(int, status["time"].split(":", 1))
if self.now_playing:
if self.now_playing["path"] != path:
self.handle_song_change(self.now_playing)
else:
# In case we got mpd play event with same song playing
# multiple times,
# assume low diff means redundant second play event
# after natural song start.
diff = abs(time.time() - self.now_playing["started"])
if diff <= DUPLICATE_PLAY_THRESHOLD:
return
if self.now_playing["path"] == path and played == 0:
self.handle_song_change(self.now_playing)
if is_url(path):
self._log.info("playing stream {}", displayable_path(path))
self.now_playing = None
return
self._log.info("playing {}", displayable_path(path))
self.now_playing = {
"started": time.time(),
"elapsed_at_start": played,
"duration": duration,
"path": path,
"id": songid,
"beets_item": self.get_item(path),
}
self.update_item(
self.now_playing["beets_item"],
"last_played",
value=int(time.time()),
)
def run(self):
self.mpd.connect()
events = ["player"]
while True:
if "player" in events:
status = self.mpd.status()
handler = getattr(self, f"on_{status['state']}", None)
if handler:
handler(status)
else:
self._log.debug('unhandled status "{}"', status)
events = self.mpd.events()
class MPDStatsPlugin(plugins.BeetsPlugin):
item_types = {
"play_count": types.INTEGER,
"skip_count": types.INTEGER,
"last_played": types.DATE,
"rating": types.FLOAT,
}
def __init__(self):
super().__init__()
mpd_config.add(
{
"music_directory": config["directory"].as_filename(),
"strip_path": "",
"rating": True,
"rating_mix": 0.75,
"host": os.environ.get("MPD_HOST", "localhost"),
"port": int(os.environ.get("MPD_PORT", 6600)),
"password": "",
"played_ratio_threshold": 0.85,
}
)
mpd_config["password"].redact = True
def commands(self):
cmd = ui.Subcommand(
"mpdstats", help="run a MPD client to gather play statistics"
)
cmd.parser.add_option(
"--host",
dest="host",
type="string",
help="set the hostname of the server to connect to",
)
cmd.parser.add_option(
"--port",
dest="port",
type="int",
help="set the port of the MPD server to connect to",
)
cmd.parser.add_option(
"--password",
dest="password",
type="string",
help="set the password of the MPD server to connect to",
)
def func(lib, opts, args):
mpd_config.set_args(opts)
# Overrides for MPD settings.
if opts.host:
mpd_config["host"] = opts.host
if opts.port:
mpd_config["port"] = int(opts.port)
if opts.password:
mpd_config["password"] = opts.password
try:
MPDStats(lib, self._log).run()
except KeyboardInterrupt:
pass
cmd.func = func
return [cmd]
beetbox-beets-c1877b7/beetsplug/mpdupdate.py 0000664 0000000 0000000 00000010006 15073551743 0021144 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates an MPD index whenever the library is changed.
Put something like the following in your config.yaml to configure:
mpd:
host: localhost
port: 6600
password: seekrit
"""
import os
import socket
from beets import config
from beets.plugins import BeetsPlugin
# No need to introduce a dependency on an MPD library for such a
# simple use case. Here's a simple socket abstraction to make things
# easier.
class BufferedSocket:
"""Socket abstraction that allows reading by line."""
def __init__(self, host, port, sep=b"\n"):
if host[0] in ["/", "~"]:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(os.path.expanduser(host))
else:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
self.buf = b""
self.sep = sep
def readline(self):
while self.sep not in self.buf:
data = self.sock.recv(1024)
if not data:
break
self.buf += data
if self.sep in self.buf:
res, self.buf = self.buf.split(self.sep, 1)
return res + self.sep
else:
return b""
def send(self, data):
self.sock.send(data)
def close(self):
self.sock.close()
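# A minimal sketch of the exchange this class supports (the host, port,
# and server replies below are illustrative):
#
#   s = BufferedSocket("localhost", 6600)
#   s.readline()   # greeting, e.g. b"OK MPD 0.23.5\n"
#   s.send(b"update\n")
#   s.readline()   # e.g. b"updating_db: 1\n" on success
#   s.close()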
class MPDUpdatePlugin(BeetsPlugin):
def __init__(self):
super().__init__()
config["mpd"].add(
{
"host": os.environ.get("MPD_HOST", "localhost"),
"port": int(os.environ.get("MPD_PORT", 6600)),
"password": "",
}
)
config["mpd"]["password"].redact = True
# For backwards compatibility, use any values from the
# plugin-specific "mpdupdate" section.
for key in config["mpd"].keys():
if self.config[key].exists():
config["mpd"][key] = self.config[key].get()
self.register_listener("database_change", self.db_change)
def db_change(self, lib, model):
self.register_listener("cli_exit", self.update)
def update(self, lib):
self.update_mpd(
config["mpd"]["host"].as_str(),
config["mpd"]["port"].get(int),
config["mpd"]["password"].as_str(),
)
def update_mpd(self, host="localhost", port=6600, password=None):
"""Sends the "update" command to the MPD server indicated,
possibly authenticating with a password first.
"""
self._log.info("Updating MPD database...")
try:
s = BufferedSocket(host, port)
except OSError:
self._log.warning("MPD connection failed", exc_info=True)
return
resp = s.readline()
if b"OK MPD" not in resp:
self._log.warning("MPD connection failed: {0!r}", resp)
return
if password:
s.send(f'password "{password}"\n'.encode())
resp = s.readline()
if b"OK" not in resp:
self._log.warning("Authentication failed: {0!r}", resp)
s.send(b"close\n")
s.close()
return
s.send(b"update\n")
resp = s.readline()
if b"updating_db" not in resp:
self._log.warning("Update failed: {0!r}", resp)
s.send(b"close\n")
s.close()
self._log.info("Database updated.")
beetbox-beets-c1877b7/beetsplug/musicbrainz.py 0000664 0000000 0000000 00000075455 15073551743 0021532 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Searches for albums in the MusicBrainz database."""
from __future__ import annotations
import traceback
from collections import Counter
from contextlib import suppress
from functools import cached_property
from itertools import product
from typing import TYPE_CHECKING, Any, Iterable, Sequence
from urllib.parse import urljoin
import musicbrainzngs
from confuse.exceptions import NotFoundError
import beets
import beets.autotag.hooks
from beets import config, plugins, util
from beets.metadata_plugins import MetadataSourcePlugin
from beets.util.id_extractors import extract_release_id
if TYPE_CHECKING:
from typing import Literal
from beets.library import Item
from ._typing import JSONDict
VARIOUS_ARTISTS_ID = "89ad4ac3-39f7-470e-963a-56509c546377"
BASE_URL = "https://musicbrainz.org/"
SKIPPED_TRACKS = ["[data track]"]
FIELDS_TO_MB_KEYS = {
"barcode": "barcode",
"catalognum": "catno",
"country": "country",
"label": "label",
"media": "format",
"year": "date",
}
musicbrainzngs.set_useragent("beets", beets.__version__, "https://beets.io/")
class MusicBrainzAPIError(util.HumanReadableError):
"""An error while talking to MusicBrainz. The `query` field is the
parameter to the action and may have any type.
"""
def __init__(self, reason, verb, query, tb=None):
self.query = query
if isinstance(reason, musicbrainzngs.WebServiceError):
reason = "MusicBrainz not reachable"
super().__init__(reason, verb, tb)
def get_message(self):
return f"{self._reasonstr()} in {self.verb} with query {self.query!r}"
RELEASE_INCLUDES = list(
{
"artists",
"media",
"recordings",
"release-groups",
"labels",
"artist-credits",
"aliases",
"recording-level-rels",
"work-rels",
"work-level-rels",
"artist-rels",
"isrcs",
"url-rels",
"release-rels",
"tags",
}
& set(musicbrainzngs.VALID_INCLUDES["release"])
)
TRACK_INCLUDES = list(
{
"artists",
"aliases",
"isrcs",
"work-level-rels",
"artist-rels",
}
& set(musicbrainzngs.VALID_INCLUDES["recording"])
)
BROWSE_INCLUDES = [
"artist-credits",
"work-rels",
"artist-rels",
"recording-rels",
"release-rels",
]
if "work-level-rels" in musicbrainzngs.VALID_BROWSE_INCLUDES["recording"]:
BROWSE_INCLUDES.append("work-level-rels")
BROWSE_CHUNKSIZE = 100
BROWSE_MAXTRACKS = 500
def _preferred_alias(aliases: list[JSONDict]):
"""Given an list of alias structures for an artist credit, select
and return the user's preferred alias alias or None if no matching
alias is found.
"""
if not aliases:
return
# Only consider aliases that have locales set.
valid_aliases = [a for a in aliases if "locale" in a]
# Get any ignored alias types and lower case them to prevent case issues
ignored_alias_types = config["import"]["ignored_alias_types"].as_str_seq()
ignored_alias_types = [a.lower() for a in ignored_alias_types]
# Search configured locales in order.
for locale in config["import"]["languages"].as_str_seq():
# Find matching primary aliases for this locale that are not
# being ignored
matches = []
for alias in valid_aliases:
if (
alias["locale"] == locale
and "primary" in alias
and alias.get("type", "").lower() not in ignored_alias_types
):
matches.append(alias)
# Skip to the next locale if we have no matches
if not matches:
continue
return matches[0]
def _multi_artist_credit(
credit: list[JSONDict], include_join_phrase: bool
) -> tuple[list[str], list[str], list[str]]:
"""Given a list representing an ``artist-credit`` block, accumulate
data into a triple of joined artist name lists: canonical, sort, and
credit.
"""
artist_parts = []
artist_sort_parts = []
artist_credit_parts = []
for el in credit:
if isinstance(el, str):
# Join phrase.
if include_join_phrase:
artist_parts.append(el)
artist_credit_parts.append(el)
artist_sort_parts.append(el)
else:
alias = _preferred_alias(el["artist"].get("alias-list", ()))
# An artist.
if alias:
cur_artist_name = alias["alias"]
else:
cur_artist_name = el["artist"]["name"]
artist_parts.append(cur_artist_name)
# Artist sort name.
if alias:
artist_sort_parts.append(alias["sort-name"])
elif "sort-name" in el["artist"]:
artist_sort_parts.append(el["artist"]["sort-name"])
else:
artist_sort_parts.append(cur_artist_name)
# Artist credit.
if "name" in el:
artist_credit_parts.append(el["name"])
else:
artist_credit_parts.append(cur_artist_name)
return (
artist_parts,
artist_sort_parts,
artist_credit_parts,
)
def track_url(trackid: str) -> str:
return urljoin(BASE_URL, f"recording/{trackid}")
def _flatten_artist_credit(credit: list[JSONDict]) -> tuple[str, str, str]:
"""Given a list representing an ``artist-credit`` block, flatten the
data into a triple of joined artist name strings: canonical, sort, and
credit.
"""
artist_parts, artist_sort_parts, artist_credit_parts = _multi_artist_credit(
credit, include_join_phrase=True
)
return (
"".join(artist_parts),
"".join(artist_sort_parts),
"".join(artist_credit_parts),
)
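# Illustrative example (alias handling elided): an ``artist-credit`` like
#   [{"artist": {"name": "Foo"}}, " feat. ", {"artist": {"name": "Bar"}}]
# flattens to ("Foo feat. Bar", ...), because join phrases are plain
# strings interleaved with the artist dicts.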
def _artist_ids(credit: list[JSONDict]) -> list[str]:
"""
Given a list representing an ``artist-credit``,
return a list of artist IDs
"""
artist_ids: list[str] = []
for el in credit:
if isinstance(el, dict):
artist_ids.append(el["artist"]["id"])
return artist_ids
def _get_related_artist_names(relations, relation_type):
"""Given a list representing the artist relationships extract the names of
the remixers and concatenate them.
"""
related_artists = []
for relation in relations:
if relation["type"] == relation_type:
related_artists.append(relation["artist"]["name"])
return ", ".join(related_artists)
def album_url(albumid: str) -> str:
return urljoin(BASE_URL, f"release/{albumid}")
def _preferred_release_event(
release: dict[str, Any],
) -> tuple[str | None, str | None]:
"""Given a release, select and return the user's preferred release
event as a tuple of (country, release_date). Fall back to the
default release event if a preferred event is not found.
"""
preferred_countries: Sequence[str] = config["match"]["preferred"][
"countries"
].as_str_seq()
for country in preferred_countries:
for event in release.get("release-event-list", {}):
try:
if country in event["area"]["iso-3166-1-code-list"]:
return country, event["date"]
except KeyError:
pass
return release.get("country"), release.get("date")
def _set_date_str(
info: beets.autotag.hooks.AlbumInfo,
date_str: str,
original: bool = False,
):
"""Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo
object, set the object's release date fields appropriately. If
`original`, then set the original_year, etc., fields.
"""
if date_str:
date_parts = date_str.split("-")
for key in ("year", "month", "day"):
if date_parts:
date_part = date_parts.pop(0)
try:
date_num = int(date_part)
except ValueError:
continue
if original:
key = f"original_{key}"
setattr(info, key, date_num)
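# For example, a partial date string like "2007-04" sets info.year = 2007
# and info.month = 4 and leaves info.day untouched; with original=True the
# original_year/original_month fields are set instead.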
def _is_translation(r):
_trans_key = "transl-tracklisting"
return r["type"] == _trans_key and r["direction"] == "backward"
def _find_actual_release_from_pseudo_release(
pseudo_rel: JSONDict,
) -> JSONDict | None:
try:
relations = pseudo_rel["release"]["release-relation-list"]
except KeyError:
return None
# Currently we only support translations and transliterations.
translations = [r for r in relations if _is_translation(r)]
if not translations:
return None
actual_id = translations[0]["target"]
return musicbrainzngs.get_release_by_id(actual_id, RELEASE_INCLUDES)
def _merge_pseudo_and_actual_album(
pseudo: beets.autotag.hooks.AlbumInfo, actual: beets.autotag.hooks.AlbumInfo
) -> beets.autotag.hooks.AlbumInfo | None:
"""
Merge a pseudo-release with its actual release.
This implementation is naive: it doesn't overwrite fields
like status or ids.
According to the ticket PICARD-145, the main release id should be
used, but that ticket has been in limbo for over a decade now. It
also suggests introducing a `musicbrainz_pseudoreleaseid` tag, but
since that field can't be found in any official Picard docs, we have
not implemented it for now.
"""
merged = pseudo.copy()
from_actual = {
k: actual[k]
for k in [
"media",
"mediums",
"country",
"catalognum",
"year",
"month",
"day",
"original_year",
"original_month",
"original_day",
"label",
"barcode",
"asin",
"style",
"genre",
]
}
merged.update(from_actual)
return merged
class MusicBrainzPlugin(MetadataSourcePlugin):
def __init__(self):
"""Set up the python-musicbrainz-ngs module according to settings
from the beets configuration. This should be called at startup.
"""
super().__init__()
self.config.add(
{
"host": "musicbrainz.org",
"https": False,
"ratelimit": 1,
"ratelimit_interval": 1,
"genres": False,
"external_ids": {
"discogs": False,
"bandcamp": False,
"spotify": False,
"deezer": False,
"tidal": False,
},
"extra_tags": [],
},
)
# TODO: Remove in 3.0.0
with suppress(NotFoundError):
self.config["search_limit"] = self.config["match"][
"searchlimit"
].get()
self._log.warning(
"'musicbrainz.searchlimit' option is deprecated and will be "
"removed in 3.0.0. Use 'musicbrainz.search_limit' instead."
)
hostname = self.config["host"].as_str()
https = self.config["https"].get(bool)
# Only call set_hostname when a custom server is configured. Since
# musicbrainz-ngs connects to musicbrainz.org with HTTPS by default
if hostname != "musicbrainz.org":
musicbrainzngs.set_hostname(hostname, https)
musicbrainzngs.set_rate_limit(
self.config["ratelimit_interval"].as_number(),
self.config["ratelimit"].get(int),
)
def track_info(
self,
recording: JSONDict,
index: int | None = None,
medium: int | None = None,
medium_index: int | None = None,
medium_total: int | None = None,
) -> beets.autotag.hooks.TrackInfo:
"""Translates a MusicBrainz recording result dictionary into a beets
``TrackInfo`` object. Three parameters are optional and are used
only for tracks that appear on releases (non-singletons): ``index``,
the overall track number; ``medium``, the disc number;
``medium_index``, the track's index on its medium; ``medium_total``,
the number of tracks on the medium. Each number is a 1-based index.
"""
info = beets.autotag.hooks.TrackInfo(
title=recording["title"],
track_id=recording["id"],
index=index,
medium=medium,
medium_index=medium_index,
medium_total=medium_total,
data_source=self.data_source,
data_url=track_url(recording["id"]),
)
if recording.get("artist-credit"):
# Get the artist names.
(
info.artist,
info.artist_sort,
info.artist_credit,
) = _flatten_artist_credit(recording["artist-credit"])
(
info.artists,
info.artists_sort,
info.artists_credit,
) = _multi_artist_credit(
recording["artist-credit"], include_join_phrase=False
)
info.artists_ids = _artist_ids(recording["artist-credit"])
info.artist_id = info.artists_ids[0]
if recording.get("artist-relation-list"):
info.remixer = _get_related_artist_names(
recording["artist-relation-list"], relation_type="remixer"
)
if recording.get("length"):
info.length = int(recording["length"]) / 1000.0
info.trackdisambig = recording.get("disambiguation")
if recording.get("isrc-list"):
info.isrc = ";".join(recording["isrc-list"])
lyricist = []
composer = []
composer_sort = []
for work_relation in recording.get("work-relation-list", ()):
if work_relation["type"] != "performance":
continue
info.work = work_relation["work"]["title"]
info.mb_workid = work_relation["work"]["id"]
if "disambiguation" in work_relation["work"]:
info.work_disambig = work_relation["work"]["disambiguation"]
for artist_relation in work_relation["work"].get(
"artist-relation-list", ()
):
if "type" in artist_relation:
type = artist_relation["type"]
if type == "lyricist":
lyricist.append(artist_relation["artist"]["name"])
elif type == "composer":
composer.append(artist_relation["artist"]["name"])
composer_sort.append(
artist_relation["artist"]["sort-name"]
)
if lyricist:
info.lyricist = ", ".join(lyricist)
if composer:
info.composer = ", ".join(composer)
info.composer_sort = ", ".join(composer_sort)
arranger = []
for artist_relation in recording.get("artist-relation-list", ()):
if "type" in artist_relation:
type = artist_relation["type"]
if type == "arranger":
arranger.append(artist_relation["artist"]["name"])
if arranger:
info.arranger = ", ".join(arranger)
# Supplementary fields provided by plugins
extra_trackdatas = plugins.send("mb_track_extract", data=recording)
for extra_trackdata in extra_trackdatas:
info.update(extra_trackdata)
return info
def album_info(self, release: JSONDict) -> beets.autotag.hooks.AlbumInfo:
"""Takes a MusicBrainz release result dictionary and returns a beets
AlbumInfo object containing the interesting data about that release.
"""
# Get artist name using join phrases.
artist_name, artist_sort_name, artist_credit_name = (
_flatten_artist_credit(release["artist-credit"])
)
(
artists_names,
artists_sort_names,
artists_credit_names,
) = _multi_artist_credit(
release["artist-credit"], include_join_phrase=False
)
ntracks = sum(len(m["track-list"]) for m in release["medium-list"])
# The MusicBrainz API omits 'artist-relation-list' and 'work-relation-list'
# when the release has more than 500 tracks. So we use browse_recordings
# on chunks of tracks to recover the same information in this case.
if ntracks > BROWSE_MAXTRACKS:
self._log.debug("Album {} has too many tracks", release["id"])
recording_list = []
for i in range(0, ntracks, BROWSE_CHUNKSIZE):
self._log.debug("Retrieving tracks starting at {}", i)
recording_list.extend(
musicbrainzngs.browse_recordings(
release=release["id"],
limit=BROWSE_CHUNKSIZE,
includes=BROWSE_INCLUDES,
offset=i,
)["recording-list"]
)
track_map = {r["id"]: r for r in recording_list}
for medium in release["medium-list"]:
for recording in medium["track-list"]:
recording_info = track_map[recording["recording"]["id"]]
recording["recording"] = recording_info
# Basic info.
track_infos = []
index = 0
for medium in release["medium-list"]:
disctitle = medium.get("title")
format = medium.get("format")
if format in config["match"]["ignored_media"].as_str_seq():
continue
all_tracks = medium["track-list"]
if (
"data-track-list" in medium
and not config["match"]["ignore_data_tracks"]
):
all_tracks += medium["data-track-list"]
track_count = len(all_tracks)
if "pregap" in medium:
all_tracks.insert(0, medium["pregap"])
for track in all_tracks:
if (
"title" in track["recording"]
and track["recording"]["title"] in SKIPPED_TRACKS
):
continue
if (
"video" in track["recording"]
and track["recording"]["video"] == "true"
and config["match"]["ignore_video_tracks"]
):
continue
# Basic information from the recording.
index += 1
ti = self.track_info(
track["recording"],
index,
int(medium["position"]),
int(track["position"]),
track_count,
)
ti.release_track_id = track["id"]
ti.disctitle = disctitle
ti.media = format
ti.track_alt = track["number"]
# Prefer track data, where present, over recording data.
if track.get("title"):
ti.title = track["title"]
if track.get("artist-credit"):
# Get the artist names.
(
ti.artist,
ti.artist_sort,
ti.artist_credit,
) = _flatten_artist_credit(track["artist-credit"])
(
ti.artists,
ti.artists_sort,
ti.artists_credit,
) = _multi_artist_credit(
track["artist-credit"], include_join_phrase=False
)
ti.artists_ids = _artist_ids(track["artist-credit"])
ti.artist_id = ti.artists_ids[0]
if track.get("length"):
ti.length = int(track["length"]) / (1000.0)
track_infos.append(ti)
album_artist_ids = _artist_ids(release["artist-credit"])
info = beets.autotag.hooks.AlbumInfo(
album=release["title"],
album_id=release["id"],
artist=artist_name,
artist_id=album_artist_ids[0],
artists=artists_names,
artists_ids=album_artist_ids,
tracks=track_infos,
mediums=len(release["medium-list"]),
artist_sort=artist_sort_name,
artists_sort=artists_sort_names,
artist_credit=artist_credit_name,
artists_credit=artists_credit_names,
data_source=self.data_source,
data_url=album_url(release["id"]),
barcode=release.get("barcode"),
)
info.va = info.artist_id == VARIOUS_ARTISTS_ID
if info.va:
info.artist = config["va_name"].as_str()
info.asin = release.get("asin")
info.releasegroup_id = release["release-group"]["id"]
info.albumstatus = release.get("status")
if release["release-group"].get("title"):
info.release_group_title = release["release-group"].get("title")
# Get the disambiguation strings at the release and release group level.
if release["release-group"].get("disambiguation"):
info.releasegroupdisambig = release["release-group"].get(
"disambiguation"
)
if release.get("disambiguation"):
info.albumdisambig = release.get("disambiguation")
# Get the "classic" Release type. This data comes from a legacy API
# feature before MusicBrainz supported multiple release types.
if "type" in release["release-group"]:
reltype = release["release-group"]["type"]
if reltype:
info.albumtype = reltype.lower()
# Set the new-style "primary" and "secondary" release types.
albumtypes = []
if "primary-type" in release["release-group"]:
rel_primarytype = release["release-group"]["primary-type"]
if rel_primarytype:
albumtypes.append(rel_primarytype.lower())
if "secondary-type-list" in release["release-group"]:
if release["release-group"]["secondary-type-list"]:
for sec_type in release["release-group"]["secondary-type-list"]:
albumtypes.append(sec_type.lower())
info.albumtypes = albumtypes
# Release events.
info.country, release_date = _preferred_release_event(release)
release_group_date = release["release-group"].get("first-release-date")
if not release_date:
# Fall back if release-specific date is not available.
release_date = release_group_date
if release_date:
_set_date_str(info, release_date, False)
_set_date_str(info, release_group_date, True)
# Label name.
if release.get("label-info-list"):
label_info = release["label-info-list"][0]
if label_info.get("label"):
label = label_info["label"]["name"]
if label != "[no label]":
info.label = label
info.catalognum = label_info.get("catalog-number")
# Text representation data.
if release.get("text-representation"):
rep = release["text-representation"]
info.script = rep.get("script")
info.language = rep.get("language")
# Media (format).
if release["medium-list"]:
# If all media are the same, use that medium name
if len({m.get("format") for m in release["medium-list"]}) == 1:
info.media = release["medium-list"][0].get("format")
# Otherwise, let's just call it "Media"
else:
info.media = "Media"
if self.config["genres"]:
sources = [
release["release-group"].get("tag-list", []),
release.get("tag-list", []),
]
genres: Counter[str] = Counter()
for source in sources:
for genreitem in source:
genres[genreitem["name"]] += int(genreitem["count"])
info.genre = "; ".join(
genre
for genre, _count in sorted(genres.items(), key=lambda g: -g[1])
)
# We might find links to external sources (Discogs, Bandcamp, ...)
external_ids = self.config["external_ids"].get()
wanted_sources = {
site for site, wanted in external_ids.items() if wanted
}
if wanted_sources and (url_rels := release.get("url-relation-list")):
urls = {}
for source, url in product(wanted_sources, url_rels):
if f"{source}.com" in (target := url["target"]):
urls[source] = target
self._log.debug(
"Found link to {} release via MusicBrainz",
source.capitalize(),
)
for source, url in urls.items():
setattr(
info, f"{source}_album_id", extract_release_id(source, url)
)
extra_albumdatas = plugins.send("mb_album_extract", data=release)
for extra_albumdata in extra_albumdatas:
info.update(extra_albumdata)
return info
@cached_property
def extra_mb_field_by_tag(self) -> dict[str, str]:
"""Map configured extra tags to their MusicBrainz API field names.
Process user configuration to determine which additional MusicBrainz
fields should be included in search queries.
"""
mb_field_by_tag = {
t: FIELDS_TO_MB_KEYS[t]
for t in self.config["extra_tags"].as_str_seq()
if t in FIELDS_TO_MB_KEYS
}
if mb_field_by_tag:
self._log.debug("Additional search terms: {}", mb_field_by_tag)
return mb_field_by_tag
def get_album_criteria(
self, items: Sequence[Item], artist: str, album: str, va_likely: bool
) -> dict[str, str]:
criteria = {
"release": album,
"alias": album,
"tracks": str(len(items)),
} | ({"arid": VARIOUS_ARTISTS_ID} if va_likely else {"artist": artist})
for tag, mb_field in self.extra_mb_field_by_tag.items():
most_common, _ = util.plurality(i.get(tag) for i in items)
value = str(most_common)
if tag == "catalognum":
value = value.replace(" ", "")
criteria[mb_field] = value
return criteria
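# Illustrative result (values are hypothetical): with extra_tags
# ["year", "catalognum"], two items, artist "Foo" and album "Bar", this
# could yield {"release": "Bar", "alias": "Bar", "tracks": "2",
# "artist": "Foo", "date": "2007", "catno": "ABC123"}; note that
# catalog numbers have their spaces stripped.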
def _search_api(
self,
query_type: Literal["recording", "release"],
filters: dict[str, str],
) -> list[JSONDict]:
"""Perform MusicBrainz API search and return results.
Execute a search against the MusicBrainz API for recordings or releases
using the provided criteria. Handles API errors by converting them into
MusicBrainzAPIError exceptions with contextual information.
"""
filters = {
k: _v for k, v in filters.items() if (_v := v.lower().strip())
}
self._log.debug(
"Searching for MusicBrainz {}s with: {!r}", query_type, filters
)
try:
method = getattr(musicbrainzngs, f"search_{query_type}s")
res = method(limit=self.config["search_limit"].get(), **filters)
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(
exc, f"{query_type} search", filters, traceback.format_exc()
)
return res[f"{query_type}-list"]
def candidates(
self,
items: Sequence[Item],
artist: str,
album: str,
va_likely: bool,
) -> Iterable[beets.autotag.hooks.AlbumInfo]:
criteria = self.get_album_criteria(items, artist, album, va_likely)
release_ids = (r["id"] for r in self._search_api("release", criteria))
yield from filter(None, map(self.album_for_id, release_ids))
def item_candidates(
self, item: Item, artist: str, title: str
) -> Iterable[beets.autotag.hooks.TrackInfo]:
criteria = {"artist": artist, "recording": title, "alias": title}
yield from filter(
None, map(self.track_info, self._search_api("recording", criteria))
)
def album_for_id(
self, album_id: str
) -> beets.autotag.hooks.AlbumInfo | None:
"""Fetches an album by its MusicBrainz ID and returns an AlbumInfo
object or None if the album is not found. May raise a
MusicBrainzAPIError.
"""
self._log.debug("Requesting MusicBrainz release {}", album_id)
if not (albumid := self._extract_id(album_id)):
self._log.debug("Invalid MBID ({}).", album_id)
return None
try:
res = musicbrainzngs.get_release_by_id(albumid, RELEASE_INCLUDES)
# resolve linked release relations
actual_res = None
if res["release"].get("status") == "Pseudo-Release":
actual_res = _find_actual_release_from_pseudo_release(res)
except musicbrainzngs.ResponseError:
self._log.debug("Album ID match failed.")
return None
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(
exc, "get release by ID", albumid, traceback.format_exc()
)
# release is potentially a pseudo release
release = self.album_info(res["release"])
# should be None unless we're dealing with a pseudo release
if actual_res is not None:
actual_release = self.album_info(actual_res["release"])
return _merge_pseudo_and_actual_album(release, actual_release)
else:
return release
def track_for_id(
self, track_id: str
) -> beets.autotag.hooks.TrackInfo | None:
"""Fetches a track by its MusicBrainz ID. Returns a TrackInfo object
or None if no track is found. May raise a MusicBrainzAPIError.
"""
if not (trackid := self._extract_id(track_id)):
self._log.debug("Invalid MBID ({}).", track_id)
return None
try:
res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES)
except musicbrainzngs.ResponseError:
self._log.debug("Track ID match failed.")
return None
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(
exc, "get recording by ID", trackid, traceback.format_exc()
)
return self.track_info(res["recording"])
beetbox-beets-c1877b7/beetsplug/parentwork.py 0000664 0000000 0000000 00000020032 15073551743 0021355 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2017, Dorian Soergel.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Gets parent work, its disambiguation and id, composer, composer sort name
and work composition date
"""
import musicbrainzngs
from beets import ui
from beets.plugins import BeetsPlugin
def direct_parent_id(mb_workid, work_date=None):
"""Given a Musicbrainz work id, find the id one of the works the work is
part of and the first composition date it encounters.
"""
work_info = musicbrainzngs.get_work_by_id(
mb_workid, includes=["work-rels", "artist-rels"]
)
if "artist-relation-list" in work_info["work"] and work_date is None:
for artist in work_info["work"]["artist-relation-list"]:
if artist["type"] == "composer":
if "end" in artist.keys():
work_date = artist["end"]
if "work-relation-list" in work_info["work"]:
for direct_parent in work_info["work"]["work-relation-list"]:
if (
direct_parent["type"] == "parts"
and direct_parent.get("direction") == "backward"
):
direct_id = direct_parent["work"]["id"]
return direct_id, work_date
return None, work_date
def work_parent_id(mb_workid):
"""Find the parent work id and composition date of a work given its id."""
work_date = None
while True:
new_mb_workid, work_date = direct_parent_id(mb_workid, work_date)
if not new_mb_workid:
return mb_workid, work_date
mb_workid = new_mb_workid
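# For instance, starting from the work id of a single movement, the loop
# follows "parts" relations upward (movement -> suite -> cycle, say) and
# stops at the topmost work, which has no further parent.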
def find_parentwork_info(mb_workid):
"""Get the MusicBrainz information dict about a parent work, including
the artist relations, and the composition date for a work's parent work.
"""
parent_id, work_date = work_parent_id(mb_workid)
work_info = musicbrainzngs.get_work_by_id(
parent_id, includes=["artist-rels"]
)
return work_info, work_date
class ParentWorkPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"auto": False,
"force": False,
}
)
if self.config["auto"]:
self.import_stages = [self.imported]
def commands(self):
def func(lib, opts, args):
self.config.set_args(opts)
force_parent = self.config["force"].get(bool)
write = ui.should_write()
for item in lib.items(args):
changed = self.find_work(item, force_parent, verbose=True)
if changed:
item.store()
if write:
item.try_write()
command = ui.Subcommand(
"parentwork", help="fetch parent works, composers and dates"
)
command.parser.add_option(
"-f",
"--force",
dest="force",
action="store_true",
default=None,
help="re-fetch when parent work is already present",
)
command.func = func
return [command]
def imported(self, session, task):
"""Import hook for fetching parent works automatically."""
force_parent = self.config["force"].get(bool)
for item in task.imported_items():
self.find_work(item, force_parent, verbose=False)
item.store()
def get_info(self, item, work_info):
"""Given the parent work info dict, fetch parent_composer,
parent_composer_sort, parentwork, parentwork_disambig, mb_workid and
composer_ids.
"""
parent_composer = []
parent_composer_sort = []
parentwork_info = {}
composer_exists = False
if "artist-relation-list" in work_info["work"]:
for artist in work_info["work"]["artist-relation-list"]:
if artist["type"] == "composer":
composer_exists = True
parent_composer.append(artist["artist"]["name"])
parent_composer_sort.append(artist["artist"]["sort-name"])
if "end" in artist.keys():
parentwork_info["parentwork_date"] = artist["end"]
parentwork_info["parent_composer"] = ", ".join(parent_composer)
parentwork_info["parent_composer_sort"] = ", ".join(
parent_composer_sort
)
if not composer_exists:
self._log.debug(
"no composer for {}; add one at "
"https://musicbrainz.org/work/{}",
item,
work_info["work"]["id"],
)
parentwork_info["parentwork"] = work_info["work"]["title"]
parentwork_info["mb_parentworkid"] = work_info["work"]["id"]
if "disambiguation" in work_info["work"]:
parentwork_info["parentwork_disambig"] = work_info["work"][
"disambiguation"
]
else:
parentwork_info["parentwork_disambig"] = None
return parentwork_info
def find_work(self, item, force, verbose):
"""Finds the parent work of a recording and populates the tags
accordingly.
The parent work is found recursively, by finding the direct parent
repeatedly until there are no more links in the chain. We return the
final, topmost work in the chain.
Namely, the tags parentwork, parentwork_disambig, mb_parentworkid,
parent_composer, parent_composer_sort and work_date are populated.
"""
if not item.mb_workid:
self._log.info(
"No work for {0}, add one at https://musicbrainz.org/recording/{0.mb_trackid}",
item,
)
return
hasparent = hasattr(item, "parentwork")
work_changed = True
if hasattr(item, "parentwork_workid_current"):
work_changed = item.parentwork_workid_current != item.mb_workid
if force or not hasparent or work_changed:
try:
work_info, work_date = find_parentwork_info(item.mb_workid)
except musicbrainzngs.musicbrainz.WebServiceError as e:
self._log.debug("error fetching work: {}", e)
return
parent_info = self.get_info(item, work_info)
parent_info["parentwork_workid_current"] = item.mb_workid
if "parent_composer" in parent_info:
self._log.debug(
"Work fetched: {} - {}",
parent_info["parentwork"],
parent_info["parent_composer"],
)
else:
self._log.debug(
"Work fetched: {} - no parent composer",
parent_info["parentwork"],
)
elif hasparent:
self._log.debug("{}: Work present, skipping", item)
return
# apply all non-null values to the item
for key, value in parent_info.items():
if value:
item[key] = value
if work_date:
item["work_date"] = work_date
if verbose:
return ui.show_model_changes(
item,
fields=[
"parentwork",
"parentwork_disambig",
"mb_parentworkid",
"parent_composer",
"parent_composer_sort",
"work_date",
"parentwork_workid_current",
"parentwork_date",
],
)
beetbox-beets-c1877b7/beetsplug/permissions.py 0000664 0000000 0000000 00000010145 15073551743 0021540 0 ustar 00root root 0000000 0000000 """Fixes file permissions after the file gets written on import. Put something
like the following in your config.yaml to configure:
permissions:
file: 644
dir: 755
"""
import os
import stat
from beets import config
from beets.plugins import BeetsPlugin
from beets.util import ancestry, displayable_path, syspath
def convert_perm(perm):
"""Convert a string to an integer, interpreting the text as octal.
Or, if `perm` is an integer, reinterpret it as an octal number that
has been "misinterpreted" as decimal.
"""
if isinstance(perm, int):
perm = str(perm)
return int(perm, 8)
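# For example, the YAML string "644" and the bare integer 644 both map to
# the same mode: convert_perm("644") == convert_perm(644) == 0o644 == 420.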
def check_permissions(path, permission):
"""Check whether the file's permissions equal the given vector.
Return a boolean.
"""
return oct(stat.S_IMODE(os.stat(syspath(path)).st_mode)) == oct(permission)
def assert_permissions(path, permission, log):
"""Check whether the file's permissions are as expected, otherwise,
log a warning message. Return a boolean indicating the match, like
`check_permissions`.
"""
if not check_permissions(path, permission):
log.warning("could not set permissions on {}", displayable_path(path))
log.debug(
"set permissions to {}, but permissions are now {}",
permission,
os.stat(syspath(path)).st_mode & 0o777,
)
def dirs_in_library(library, item):
"""Creates a list of ancestor directories in the beets library path."""
return [
ancestor for ancestor in ancestry(item) if ancestor.startswith(library)
][1:]
class Permissions(BeetsPlugin):
def __init__(self):
super().__init__()
# Adding defaults.
self.config.add(
{
"file": "644",
"dir": "755",
}
)
self.register_listener("item_imported", self.fix)
self.register_listener("album_imported", self.fix)
self.register_listener("art_set", self.fix_art)
def fix(self, lib, item=None, album=None):
"""Fix the permissions for an imported Item or Album."""
files = []
dirs = set()
if item:
files.append(item.path)
dirs.update(dirs_in_library(lib.directory, item.path))
elif album:
for album_item in album.items():
files.append(album_item.path)
dirs.update(dirs_in_library(lib.directory, album_item.path))
self.set_permissions(files=files, dirs=dirs)
def fix_art(self, album):
"""Fix the permission for Album art file."""
if album.artpath:
self.set_permissions(files=[album.artpath])
def set_permissions(self, files=(), dirs=()):
# Get the configured permissions. The user can specify this either a
# string (in YAML quotes) or, for convenience, as an integer so the
# quotes can be omitted. In the latter case, we need to reinterpret the
# integer as octal, not decimal.
file_perm = config["permissions"]["file"].get()
dir_perm = config["permissions"]["dir"].get()
file_perm = convert_perm(file_perm)
dir_perm = convert_perm(dir_perm)
for path in files:
# Changing permissions on the destination file.
self._log.debug(
"setting file permissions on {}",
displayable_path(path),
)
if not check_permissions(path, file_perm):
os.chmod(syspath(path), file_perm)
# Checks if the destination path has the permissions configured.
assert_permissions(path, file_perm, self._log)
# Change permissions for the directories.
for path in dirs:
# Changing permissions on the destination directory.
self._log.debug(
"setting directory permissions on {}",
displayable_path(path),
)
if not check_permissions(path, dir_perm):
os.chmod(syspath(path), dir_perm)
# Checks if the destination path has the permissions configured.
assert_permissions(path, dir_perm, self._log)
beetbox-beets-c1877b7/beetsplug/play.py 0000664 0000000 0000000 00000017005 15073551743 0020134 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, David Hamp-Gonsalves
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Send the results of a query to the configured music player as a playlist."""
import shlex
import subprocess
from os.path import relpath
from beets import config, ui, util
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets.ui.commands import PromptChoice
from beets.util import get_temp_filename
# Indicate where arguments should be inserted into the command string.
# If this is missing, they're placed at the end.
ARGS_MARKER = "$args"
def play(
command_str,
selection,
paths,
open_args,
log,
item_type="track",
keep_open=False,
):
"""Play items in paths with command_str and optional arguments. If
keep_open, return to beets, otherwise exit once command runs.
"""
# Print number of tracks or albums to be played, log command to be run.
item_type += "s" if len(selection) > 1 else ""
ui.print_(f"Playing {len(selection)} {item_type}.")
log.debug("executing command: {} {!r}", command_str, open_args)
try:
if keep_open:
command = shlex.split(command_str)
command = command + open_args
subprocess.call(command)
else:
util.interactive_open(open_args, command_str)
except OSError as exc:
raise ui.UserError(f"Could not play the query: {exc}")
class PlayPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
config["play"].add(
{
"command": None,
"use_folders": False,
"relative_to": None,
"raw": False,
"warning_threshold": 100,
"bom": False,
}
)
self.register_listener(
"before_choose_candidate", self.before_choose_candidate_listener
)
def commands(self):
play_command = Subcommand(
"play", help="send music to a player as a playlist"
)
play_command.parser.add_album_option()
play_command.parser.add_option(
"-A",
"--args",
action="store",
help="add additional arguments to the command",
)
play_command.parser.add_option(
"-y",
"--yes",
action="store_true",
help="skip the warning threshold",
)
play_command.func = self._play_command
return [play_command]
def _play_command(self, lib, opts, args):
"""The CLI command function for `beet play`. Create a list of paths
from query, determine if tracks or albums are to be played.
"""
use_folders = config["play"]["use_folders"].get(bool)
relative_to = config["play"]["relative_to"].get()
if relative_to:
relative_to = util.normpath(relative_to)
# Perform search by album and add folders rather than tracks to
# playlist.
if opts.album:
selection = lib.albums(args)
paths = []
sort = lib.get_default_album_sort()
for album in selection:
if use_folders:
paths.append(album.item_dir())
else:
paths.extend(item.path for item in sort.sort(album.items()))
item_type = "album"
# Perform item query and add tracks to playlist.
else:
selection = lib.items(args)
paths = [item.path for item in selection]
item_type = "track"
if relative_to:
paths = [relpath(path, relative_to) for path in paths]
if not selection:
ui.print_(ui.colorize("text_warning", f"No {item_type} to play."))
return
open_args = self._playlist_or_paths(paths)
command_str = self._command_str(opts.args)
# Check if the selection exceeds configured threshold. If True,
# cancel, otherwise proceed with play command.
if opts.yes or not self._exceeds_threshold(
selection, command_str, open_args, item_type
):
play(command_str, selection, paths, open_args, self._log, item_type)
def _command_str(self, args=None):
"""Create a command string from the config command and optional args."""
command_str = config["play"]["command"].get()
if not command_str:
return util.open_anything()
# Add optional arguments to the player command.
if args:
if ARGS_MARKER in command_str:
return command_str.replace(ARGS_MARKER, args)
else:
return f"{command_str} {args}"
else:
# Don't include the marker in the command.
return command_str.replace(f" {ARGS_MARKER}", "")
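# For example, with command "mpv $args" and args "--shuffle", the result
# is "mpv --shuffle"; with no args the " $args" marker is dropped, and a
# command without the marker gets any args appended at the end.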
def _playlist_or_paths(self, paths):
"""Return either the raw paths of items or a playlist of the items."""
if config["play"]["raw"]:
return paths
else:
return [self._create_tmp_playlist(paths)]
def _exceeds_threshold(
self, selection, command_str, open_args, item_type="track"
):
"""Prompt user whether to abort if playlist exceeds threshold. If
True, cancel playback. If False, execute play command.
"""
warning_threshold = config["play"]["warning_threshold"].get(int)
# Warn user before playing any huge playlists.
if warning_threshold and len(selection) > warning_threshold:
if len(selection) > 1:
item_type += "s"
ui.print_(
ui.colorize(
"text_warning",
f"You are about to queue {len(selection)} {item_type}.",
)
)
if ui.input_options(("Continue", "Abort")) == "a":
return True
return False
def _create_tmp_playlist(self, paths_list):
"""Create a temporary .m3u file. Return the filename."""
utf8_bom = config["play"]["bom"].get(bool)
filename = get_temp_filename(__name__, suffix=".m3u")
with open(filename, "wb") as m3u:
if utf8_bom:
m3u.write(b"\xef\xbb\xbf")
for item in paths_list:
m3u.write(item + b"\n")
return filename
def before_choose_candidate_listener(self, session, task):
"""Append a "Play" choice to the interactive importer prompt."""
return [PromptChoice("y", "plaY", self.importer_play)]
def importer_play(self, session, task):
"""Get items from current import task and send to play function."""
selection = task.items
paths = [item.path for item in selection]
open_args = self._playlist_or_paths(paths)
command_str = self._command_str()
if not self._exceeds_threshold(selection, command_str, open_args):
play(
command_str,
selection,
paths,
open_args,
self._log,
keep_open=True,
)
beetbox-beets-c1877b7/beetsplug/playlist.py 0000664 0000000 0000000 00000015442 15073551743 0021033 0 ustar 00root root 0000000 0000000 # This file is part of beets.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import os
import tempfile
from collections.abc import Sequence
from pathlib import Path
import beets
from beets.dbcore.query import BLOB_TYPE, InQuery
from beets.util import path_as_posix
def is_m3u_file(path: str) -> bool:
return Path(path).suffix.lower() in {".m3u", ".m3u8"}
class PlaylistQuery(InQuery[bytes]):
"""Matches files listed by a playlist file."""
@property
def subvals(self) -> Sequence[BLOB_TYPE]:
return [BLOB_TYPE(p) for p in self.pattern]
def __init__(self, _, pattern: str, __):
config = beets.config["playlist"]
# Get the full path to the playlist
playlist_paths = (
pattern,
os.path.abspath(
os.path.join(
config["playlist_dir"].as_filename(),
f"{pattern}.m3u",
)
),
)
paths = []
for playlist_path in playlist_paths:
if not is_m3u_file(playlist_path):
# This is not an M3U playlist; skip this candidate.
continue
try:
f = open(beets.util.syspath(playlist_path), mode="rb")
except OSError:
continue
if config["relative_to"].get() == "library":
relative_to = beets.config["directory"].as_filename()
elif config["relative_to"].get() == "playlist":
relative_to = os.path.dirname(playlist_path)
else:
relative_to = config["relative_to"].as_filename()
relative_to = beets.util.bytestring_path(relative_to)
for line in f:
if line[0] == "#":
# ignore comments, and extm3u extension
continue
paths.append(
beets.util.normpath(
os.path.join(relative_to, line.rstrip())
)
)
f.close()
break
super().__init__("path", paths)
class PlaylistPlugin(beets.plugins.BeetsPlugin):
item_queries = {"playlist": PlaylistQuery}
def __init__(self):
super().__init__()
self.config.add(
{
"auto": False,
"playlist_dir": ".",
"relative_to": "library",
"forward_slash": False,
}
)
self.playlist_dir = self.config["playlist_dir"].as_filename()
self.changes = {}
if self.config["relative_to"].get() == "library":
self.relative_to = beets.util.bytestring_path(
beets.config["directory"].as_filename()
)
elif self.config["relative_to"].get() != "playlist":
self.relative_to = beets.util.bytestring_path(
self.config["relative_to"].as_filename()
)
else:
self.relative_to = None
if self.config["auto"]:
self.register_listener("item_moved", self.item_moved)
self.register_listener("item_removed", self.item_removed)
self.register_listener("cli_exit", self.cli_exit)
def item_moved(self, item, source, destination):
self.changes[source] = destination
def item_removed(self, item):
if not os.path.exists(beets.util.syspath(item.path)):
self.changes[item.path] = None
def cli_exit(self, lib):
for playlist in self.find_playlists():
self._log.info("Updating playlist: {}", playlist)
base_dir = beets.util.bytestring_path(
self.relative_to
if self.relative_to
else os.path.dirname(playlist)
)
try:
self.update_playlist(playlist, base_dir)
except beets.util.FilesystemError:
self._log.error("Failed to update playlist: {}", playlist)
def find_playlists(self):
"""Find M3U playlists in the playlist directory."""
playlist_dir = beets.util.syspath(self.playlist_dir)
try:
dir_contents = os.listdir(playlist_dir)
except OSError:
self._log.warning(
"Unable to open playlist directory {.playlist_dir}", self
)
return
for filename in dir_contents:
if is_m3u_file(filename):
yield os.path.join(self.playlist_dir, filename)
def update_playlist(self, filename, base_dir):
"""Find M3U playlists in the specified directory."""
changes = 0
deletions = 0
with tempfile.NamedTemporaryFile(mode="w+b", delete=False) as tempfp:
new_playlist = tempfp.name
with open(filename, mode="rb") as fp:
for line in fp:
original_path = line.rstrip(b"\r\n")
# Ensure that path from playlist is absolute
is_relative = not os.path.isabs(line)
if is_relative:
lookup = os.path.join(base_dir, original_path)
else:
lookup = original_path
try:
new_path = self.changes[beets.util.normpath(lookup)]
except KeyError:
if self.config["forward_slash"]:
line = path_as_posix(line)
tempfp.write(line)
else:
if new_path is None:
# Item has been deleted
deletions += 1
continue
changes += 1
if is_relative:
new_path = os.path.relpath(new_path, base_dir)
line = line.replace(original_path, new_path)
if self.config["forward_slash"]:
line = path_as_posix(line)
tempfp.write(line)
if changes or deletions:
self._log.info(
"Updated playlist {} ({} changes, {} deletions)",
filename,
changes,
deletions,
)
beets.util.copy(new_playlist, filename, replace=True)
beets.util.remove(new_playlist)
beetbox-beets-c1877b7/beetsplug/plexupdate.py 0000664 0000000 0000000 00000007012 15073551743 0021337 0 ustar 00root root 0000000 0000000 """Updates a Plex library whenever the beets library is changed.
Plex Home users enter the Plex Token to enable updating.
Put something like the following in your config.yaml to configure:
plex:
host: localhost
port: 32400
token: token
"""
from urllib.parse import urlencode, urljoin
from xml.etree import ElementTree
import requests
from beets import config
from beets.plugins import BeetsPlugin
def get_music_section(
host, port, token, library_name, secure, ignore_cert_errors
):
"""Getting the section key for the music library in Plex."""
api_endpoint = append_token("library/sections", token)
url = urljoin(f"{get_protocol(secure)}://{host}:{port}", api_endpoint)
# Sends request.
r = requests.get(
url,
verify=not ignore_cert_errors,
timeout=10,
)
# Parse xml tree and extract music section key.
tree = ElementTree.fromstring(r.content)
for child in tree.findall("Directory"):
if child.get("title") == library_name:
return child.get("key")
def update_plex(host, port, token, library_name, secure, ignore_cert_errors):
"""Ignore certificate errors if configured to."""
if ignore_cert_errors:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
"""Sends request to the Plex api to start a library refresh.
"""
# Get the section key and build the url.
section_key = get_music_section(
host, port, token, library_name, secure, ignore_cert_errors
)
api_endpoint = f"library/sections/{section_key}/refresh"
api_endpoint = append_token(api_endpoint, token)
url = urljoin(f"{get_protocol(secure)}://{host}:{port}", api_endpoint)
# Sends request and returns requests object.
r = requests.get(
url,
verify=not ignore_cert_errors,
timeout=10,
)
return r
def append_token(url, token):
"""Appends the Plex Home token to the api call if required."""
if token:
url += f"?{urlencode({'X-Plex-Token': token})}"
return url
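# For example (token value is illustrative):
#   append_token("library/sections", "abc123")
#   -> "library/sections?X-Plex-Token=abc123"
# An empty token leaves the URL unchanged.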
def get_protocol(secure):
if secure:
return "https"
else:
return "http"
class PlexUpdate(BeetsPlugin):
def __init__(self):
super().__init__()
# Adding defaults.
config["plex"].add(
{
"host": "localhost",
"port": 32400,
"token": "",
"library_name": "Music",
"secure": False,
"ignore_cert_errors": False,
}
)
config["plex"]["token"].redact = True
self.register_listener("database_change", self.listen_for_db_change)
def listen_for_db_change(self, lib, model):
"""Listens for beets db change and register the update for the end"""
self.register_listener("cli_exit", self.update)
def update(self, lib):
"""When the client exists try to send refresh request to Plex server."""
self._log.info("Updating Plex library...")
# Try to send update request.
try:
update_plex(
config["plex"]["host"].get(),
config["plex"]["port"].get(),
config["plex"]["token"].get(),
config["plex"]["library_name"].get(),
config["plex"]["secure"].get(bool),
config["plex"]["ignore_cert_errors"].get(bool),
)
self._log.info("... started.")
except requests.exceptions.RequestException:
self._log.warning("Update failed.")
beetbox-beets-c1877b7/beetsplug/random.py 0000664 0000000 0000000 00000003603 15073551743 0020446 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Philippe Mongeau.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Get a random song or album from the library."""
from beets.plugins import BeetsPlugin
from beets.random import random_objs
from beets.ui import Subcommand, print_
def random_func(lib, opts, args):
"""Select some random items or albums and print the results."""
# Fetch all the objects matching the query into a list.
if opts.album:
objs = list(lib.albums(args))
else:
objs = list(lib.items(args))
# Print a random subset.
objs = random_objs(
objs, opts.album, opts.number, opts.time, opts.equal_chance
)
for obj in objs:
print_(format(obj))
random_cmd = Subcommand("random", help="choose a random track or album")
random_cmd.parser.add_option(
"-n",
"--number",
action="store",
type="int",
help="number of objects to choose",
default=1,
)
random_cmd.parser.add_option(
"-e",
"--equal-chance",
action="store_true",
help="each artist has the same chance",
)
random_cmd.parser.add_option(
"-t",
"--time",
action="store",
type="float",
help="total length in minutes of objects to choose",
)
random_cmd.parser.add_all_common_options()
random_cmd.func = random_func
class Random(BeetsPlugin):
def commands(self):
return [random_cmd]
beetbox-beets-c1877b7/beetsplug/replace.py 0000664 0000000 0000000 00000007566 15073551743 0020615 0 ustar 00root root 0000000 0000000 import shutil
from pathlib import Path
import mediafile
from beets import ui, util
from beets.library import Item, Library
from beets.plugins import BeetsPlugin
class ReplacePlugin(BeetsPlugin):
def commands(self):
cmd = ui.Subcommand(
"replace", help="replace audio file while keeping tags"
)
cmd.func = self.run
return [cmd]
def run(self, lib: Library, args: list[str]) -> None:
if len(args) < 2:
raise ui.UserError("Usage: beet replace ")
new_file_path: Path = Path(args[-1])
item_query: list[str] = args[:-1]
self.file_check(new_file_path)
item_list = list(lib.items(item_query))
if not item_list:
raise ui.UserError("No matching songs found.")
song = self.select_song(item_list)
if not song:
ui.print_("Operation cancelled.")
return
if not self.confirm_replacement(new_file_path, song):
ui.print_("Aborting replacement.")
return
self.replace_file(new_file_path, song)
def file_check(self, filepath: Path) -> None:
"""Check if the file exists and is supported"""
if not filepath.is_file():
raise ui.UserError(
f"'{util.displayable_path(filepath)}' is not a valid file."
)
try:
mediafile.MediaFile(util.syspath(filepath))
except mediafile.FileTypeError as fte:
raise ui.UserError(fte)
def select_song(self, items: list[Item]):
"""Present a menu of matching songs and get user selection."""
ui.print_("\nMatching songs:")
for i, item in enumerate(items, 1):
ui.print_(f"{i}. {util.displayable_path(item)}")
while True:
try:
index = int(
input(
f"Which song would you like to replace? "
f"[1-{len(items)}] (0 to cancel): "
)
)
if index == 0:
return None
if 1 <= index <= len(items):
return items[index - 1]
ui.print_(
f"Invalid choice. Please enter a number "
f"between 1 and {len(items)}."
)
except ValueError:
ui.print_("Invalid input. Please type in a number.")
def confirm_replacement(self, new_file_path: Path, song: Item):
"""Get user confirmation for the replacement."""
original_file_path: Path = Path(song.path.decode())
if not original_file_path.exists():
raise ui.UserError("The original song file was not found.")
ui.print_(
f"\nReplacing: {util.displayable_path(new_file_path)} "
f"-> {util.displayable_path(original_file_path)}"
)
decision: str = (
input("Are you sure you want to replace this track? (y/N): ")
.strip()
.casefold()
)
return decision in {"yes", "y"}
def replace_file(self, new_file_path: Path, song: Item) -> None:
"""Replace the existing file with the new one."""
original_file_path = Path(song.path.decode())
dest = original_file_path.with_suffix(new_file_path.suffix)
try:
shutil.move(util.syspath(new_file_path), util.syspath(dest))
except Exception as e:
raise ui.UserError(f"Error replacing file: {e}")
if (
new_file_path.suffix != original_file_path.suffix
and original_file_path.exists()
):
try:
original_file_path.unlink()
except Exception as e:
raise ui.UserError(f"Could not delete original file: {e}")
song.path = str(dest).encode()
song.store()
ui.print_("Replacement successful.")
beetbox-beets-c1877b7/beetsplug/replaygain.py 0000664 0000000 0000000 00000151602 15073551743 0021324 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Fabrice Laporte, Yevgeny Bezman, and Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import annotations
import collections
import enum
import math
import os
import queue
import signal
import subprocess
import sys
import warnings
from abc import ABC, abstractmethod
from dataclasses import dataclass
from multiprocessing.pool import ThreadPool
from threading import Event, Thread
from typing import TYPE_CHECKING, Any, Callable, TypeVar
from beets import ui
from beets.plugins import BeetsPlugin
from beets.util import command_output, displayable_path, syspath
if TYPE_CHECKING:
import optparse
from collections.abc import Sequence
from logging import Logger
from confuse import ConfigView
from beets.importer import ImportSession, ImportTask
from beets.library import Album, Item, Library
# Utilities.
class ReplayGainError(Exception):
"""Raised when a local (to a track or an album) error occurs in one
of the backends.
"""
class FatalReplayGainError(Exception):
"""Raised when a fatal error occurs in one of the backends."""
class FatalGstreamerPluginReplayGainError(FatalReplayGainError):
"""Raised when a fatal error occurs in the GStreamerBackend when
loading the required plugins."""
def call(args: list[str], log: Logger, **kwargs: Any):
"""Execute the command and return its output or raise a
ReplayGainError on failure.
"""
try:
return command_output(args, **kwargs)
except subprocess.CalledProcessError as e:
log.debug(e.output.decode("utf8", "ignore"))
raise ReplayGainError(f"{args[0]} exited with status {e.returncode}")
def db_to_lufs(db: float) -> float:
"""Convert db to LUFS.
According to https://wiki.hydrogenaud.io/index.php?title=
ReplayGain_2.0_specification#Reference_level
"""
return db - 107
def lufs_to_db(db: float) -> float:
"""Convert LUFS to db.
According to https://wiki.hydrogenaud.io/index.php?title=
ReplayGain_2.0_specification#Reference_level
"""
return db + 107
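# Quick sanity check for the two conversions above (assuming the ReplayGain
# 2.0 reference level of 89 dB and the EBU R128 target of -23 LUFS):
#
#     db_to_lufs(89)    # -> -18.0 (LUFS)
#     lufs_to_db(-23)   # -> 84.0 (dB)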
# Backend base and plumbing classes.
@dataclass
class Gain:
# gain: in LU to reference level
gain: float
# peak: part of full scale (FS is 1.0)
peak: float
class PeakMethod(enum.Enum):
true = 1
sample = 2
class RgTask:
"""State and methods for a single replaygain calculation (rg version).
Bundles the state (parameters and results) of a single replaygain
calculation (either for one item, one disk, or one full album).
This class provides methods to store the resulting gains and peaks as plain
old rg tags.
"""
def __init__(
self,
items: Sequence[Item],
album: Album | None,
target_level: float,
peak_method: PeakMethod | None,
backend_name: str,
log: Logger,
):
self.items = items
self.album = album
self.target_level = target_level
self.peak_method = peak_method
self.backend_name = backend_name
self._log = log
self.album_gain: Gain | None = None
self.track_gains: list[Gain] | None = None
def _store_track_gain(self, item: Item, track_gain: Gain):
"""Store track gain for a single item in the database."""
item.rg_track_gain = track_gain.gain
item.rg_track_peak = track_gain.peak
item.store()
self._log.debug(
"applied track gain {0.rg_track_gain} LU, peak {0.rg_track_peak} of FS",
item,
)
def _store_album_gain(self, item: Item, album_gain: Gain):
"""Store album gain for a single item in the database.
The caller needs to ensure that `self.album_gain is not None`.
"""
item.rg_album_gain = album_gain.gain
item.rg_album_peak = album_gain.peak
item.store()
self._log.debug(
"applied album gain {0.rg_album_gain} LU, peak {0.rg_album_peak} of FS",
item,
)
def _store_track(self, write: bool):
"""Store track gain for the first track of the task in the database."""
item = self.items[0]
if self.track_gains is None or len(self.track_gains) != 1:
# In some cases, backends fail to produce a valid
# `track_gains` without throwing FatalReplayGainError
# => raise non-fatal exception & continue
raise ReplayGainError(
f"ReplayGain backend `{self.backend_name}` failed for track"
f" {item}"
)
self._store_track_gain(item, self.track_gains[0])
if write:
item.try_write()
self._log.debug("done analyzing {}", item)
def _store_album(self, write: bool):
"""Store track/album gains for all tracks of the task in the database."""
if (
self.album_gain is None
or self.track_gains is None
or len(self.track_gains) != len(self.items)
):
# In some cases, backends fail to produce a valid
# `album_gain` without throwing FatalReplayGainError
# => raise non-fatal exception & continue
raise ReplayGainError(
f"ReplayGain backend `{self.backend_name}` failed "
f"for some tracks in album {self.album}"
)
for item, track_gain in zip(self.items, self.track_gains):
self._store_track_gain(item, track_gain)
self._store_album_gain(item, self.album_gain)
if write:
item.try_write()
self._log.debug("done analyzing {}", item)
def store(self, write: bool):
"""Store computed gains for the items of this task in the database."""
if self.album is not None:
self._store_album(write)
else:
self._store_track(write)
class R128Task(RgTask):
"""State and methods for a single replaygain calculation (r128 version).
Bundles the state (parameters and results) of a single replaygain
calculation (either for one item, one disk, or one full album).
This class provides methods to store the resulting gains and peaks as R128
tags.
"""
def __init__(
self,
items: Sequence[Item],
album: Album | None,
target_level: float,
backend_name: str,
log: Logger,
):
# R128_* tags do not store the track/album peak
super().__init__(items, album, target_level, None, backend_name, log)
def _store_track_gain(self, item: Item, track_gain: Gain):
item.r128_track_gain = track_gain.gain
item.store()
self._log.debug("applied r128 track gain {.r128_track_gain} LU", item)
def _store_album_gain(self, item: Item, album_gain: Gain):
"""
The caller needs to ensure that `self.album_gain is not None`.
"""
item.r128_album_gain = album_gain.gain
item.store()
self._log.debug("applied r128 album gain {.r128_album_gain} LU", item)
AnyRgTask = TypeVar("AnyRgTask", bound=RgTask)
class Backend(ABC):
"""An abstract class representing engine for calculating RG values."""
NAME = ""
do_parallel = False
def __init__(self, config: ConfigView, log: Logger):
"""Initialize the backend with the configuration view for the
plugin.
"""
self._log = log
@abstractmethod
def compute_track_gain(self, task: AnyRgTask) -> AnyRgTask:
"""Computes the track gain for the tracks belonging to `task`, and sets
the `track_gains` attribute on the task. Returns `task`.
"""
raise NotImplementedError()
@abstractmethod
def compute_album_gain(self, task: AnyRgTask) -> AnyRgTask:
"""Computes the album gain for the album belonging to `task`, and sets
the `album_gain` attribute on the task. Returns `task`.
"""
raise NotImplementedError()
# ffmpeg backend
class FfmpegBackend(Backend):
"""A replaygain backend using ffmpeg's ebur128 filter."""
NAME = "ffmpeg"
do_parallel = True
def __init__(self, config: ConfigView, log: Logger):
super().__init__(config, log)
self._ffmpeg_path = "ffmpeg"
# check that ffmpeg is installed
try:
ffmpeg_version_out = call([self._ffmpeg_path, "-version"], log)
except OSError:
raise FatalReplayGainError(
f"could not find ffmpeg at {self._ffmpeg_path}"
)
incompatible_ffmpeg = True
for line in ffmpeg_version_out.stdout.splitlines():
if line.startswith(b"configuration:"):
if b"--enable-libebur128" in line:
incompatible_ffmpeg = False
if line.startswith(b"libavfilter"):
version = line.split(b" ", 1)[1].split(b"/", 1)[0].split(b".")
version = tuple(map(int, version))
if version >= (6, 67, 100):
incompatible_ffmpeg = False
if incompatible_ffmpeg:
raise FatalReplayGainError(
"Installed FFmpeg version does not support ReplayGain."
"calculation. Either libavfilter version 6.67.100 or above or"
"the --enable-libebur128 configuration option is required."
)
def compute_track_gain(self, task: AnyRgTask) -> AnyRgTask:
"""Computes the track gain for the tracks belonging to `task`, and sets
the `track_gains` attribute on the task. Returns `task`.
"""
task.track_gains = [
self._analyse_item(
item,
task.target_level,
task.peak_method,
count_blocks=False,
)[0] # take only the gain, discarding number of gating blocks
for item in task.items
]
return task
def compute_album_gain(self, task: AnyRgTask) -> AnyRgTask:
"""Computes the album gain for the album belonging to `task`, and sets
the `album_gain` attribute on the task. Returns `task`.
"""
target_level_lufs = db_to_lufs(task.target_level)
# analyse tracks
# Gives a list of tuples (track_gain, track_n_blocks)
track_results: list[tuple[Gain, int]] = [
self._analyse_item(
item,
task.target_level,
task.peak_method,
count_blocks=True,
)
for item in task.items
]
track_gains: list[Gain] = [tg for tg, _nb in track_results]
# Album peak is maximum track peak
album_peak = max(tg.peak for tg in track_gains)
# Total number of BS.1770 gating blocks
n_blocks = sum(nb for _tg, nb in track_results)
def sum_of_track_powers(track_gain: Gain, track_n_blocks: int):
# convert `LU to target_level` -> LUFS
loudness = target_level_lufs - track_gain.gain
# This reverses ITU-R BS.1770-4 p. 6 equation (5) to convert
# from loudness to power. The result is the average gating
# block power.
power = 10 ** ((loudness + 0.691) / 10)
# Multiply that average power by the number of gating blocks to get
# the sum of all block powers in this track.
return track_n_blocks * power
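        # Put differently, ITU-R BS.1770-4 eq. (5) defines
        #     loudness = -0.691 + 10 * log10(mean_block_power)
        # so inverting it gives
        #     mean_block_power = 10 ** ((loudness + 0.691) / 10),
        # which `sum_of_track_powers` scales by the track's block count.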
# calculate album gain
if n_blocks > 0:
# Sum over all tracks to get the sum of BS.1770 gating block powers
# for the entire album.
sum_powers = sum(
sum_of_track_powers(tg, nb) for tg, nb in track_results
)
# compare ITU-R BS.1770-4 p. 6 equation (5)
# Album gain is the replaygain of the concatenation of all tracks.
album_gain = -0.691 + 10 * math.log10(sum_powers / n_blocks)
else:
album_gain = -70
# convert LUFS -> `LU to target_level`
album_gain = target_level_lufs - album_gain
self._log.debug(
"{.album}: gain {} LU, peak {}", task, album_gain, album_peak
)
task.album_gain = Gain(album_gain, album_peak)
task.track_gains = track_gains
return task
def _construct_cmd(
self, item: Item, peak_method: PeakMethod | None
) -> list[str]:
"""Construct the shell command to analyse items."""
return [
self._ffmpeg_path,
"-nostats",
"-hide_banner",
"-i",
str(item.filepath),
"-map",
"a:0",
"-filter",
f"ebur128=peak={'none' if peak_method is None else peak_method.name}",
"-f",
"null",
"-",
]
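    # For illustration (hypothetical path), with the default true-peak method
    # `_construct_cmd` yields a command equivalent to:
    #
    #     ffmpeg -nostats -hide_banner -i /music/a.flac -map a:0 \
    #         -filter ebur128=peak=true -f null -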
def _analyse_item(
self,
item: Item,
target_level: float,
peak_method: PeakMethod | None,
count_blocks: bool = True,
) -> tuple[Gain, int]:
"""Analyse item. Return a pair of a Gain object and the number
of gating blocks above the threshold.
If `count_blocks` is False, the number of gating blocks returned
will be 0.
"""
target_level_lufs = db_to_lufs(target_level)
# call ffmpeg
self._log.debug("analyzing {}", item)
cmd = self._construct_cmd(item, peak_method)
self._log.debug("executing {}", " ".join(map(displayable_path, cmd)))
output = call(cmd, self._log).stderr.splitlines()
# parse output
if peak_method is None:
peak = 0.0
else:
line_peak = self._find_line(
output,
# `peak_method` is non-`None` in this arm of the conditional
f" {peak_method.name.capitalize()} peak:".encode(),
start_line=len(output) - 1,
step_size=-1,
)
peak = self._parse_float(
output[
self._find_line(
output,
b" Peak:",
line_peak,
)
]
)
# convert TPFS -> part of FS
peak = 10 ** (peak / 20)
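            # e.g. a true peak of -1.0 dBTP becomes 10 ** (-1 / 20) ~= 0.89 FS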
line_integrated_loudness = self._find_line(
output,
b" Integrated loudness:",
start_line=len(output) - 1,
step_size=-1,
)
gain = self._parse_float(
output[
self._find_line(
output,
b" I:",
line_integrated_loudness,
)
]
)
# convert LUFS -> LU from target level
gain = target_level_lufs - gain
# count BS.1770 gating blocks
n_blocks = 0
if count_blocks:
gating_threshold = self._parse_float(
output[
self._find_line(
output,
b" Threshold:",
start_line=line_integrated_loudness,
)
]
)
for line in output:
if not line.startswith(b"[Parsed_ebur128"):
continue
if line.endswith(b"Summary:"):
continue
line = line.split(b"M:", 1)
if len(line) < 2:
continue
if self._parse_float(b"M: " + line[1]) >= gating_threshold:
n_blocks += 1
self._log.debug(
"{}: {} blocks over {} LUFS", item, n_blocks, gating_threshold
)
self._log.debug("{}: gain {} LU, peak {}", item, gain, peak)
return Gain(gain, peak), n_blocks
def _find_line(
self,
output: Sequence[bytes],
search: bytes,
start_line: int = 0,
step_size: int = 1,
) -> int:
"""Return index of line beginning with `search`.
Begins searching at index `start_line` in `output`.
"""
end_index = len(output) if step_size > 0 else -1
for i in range(start_line, end_index, step_size):
if output[i].startswith(search):
return i
raise ReplayGainError(
f"ffmpeg output: missing {search!r} after line {start_line}"
)
def _parse_float(self, line: bytes) -> float:
"""Extract a float from a key value pair in `line`.
This format is expected: /[^:]:[[:space:]]*value.*/, where `value` is
the float.
"""
# extract value
parts = line.split(b":", 1)
if len(parts) < 2:
raise ReplayGainError(
f"ffmpeg output: expected key value pair, found {line!r}"
)
value = parts[1].lstrip()
# strip unit
value = value.split(b" ", 1)[0]
# cast value to float
try:
return float(value)
except ValueError:
raise ReplayGainError(
f"ffmpeg output: expected float value, found {value!r}"
)
# mpgain/aacgain CLI tool backend.
class CommandBackend(Backend):
NAME = "command"
do_parallel = True
def __init__(self, config: ConfigView, log: Logger):
super().__init__(config, log)
config.add(
{
"command": "",
"noclip": True,
}
)
self.command: str = config["command"].as_str()
if self.command:
# Explicit executable path.
if not os.path.isfile(self.command):
raise FatalReplayGainError(
f"replaygain command does not exist: {self.command}"
)
else:
# Check whether the program is in $PATH.
for cmd in ("mp3gain", "aacgain"):
try:
call([cmd, "-v"], self._log)
self.command = cmd
except OSError:
pass
if not self.command:
raise FatalReplayGainError(
"no replaygain command found: install mp3gain or aacgain"
)
self.noclip = config["noclip"].get(bool)
def compute_track_gain(self, task: AnyRgTask) -> AnyRgTask:
"""Computes the track gain for the tracks belonging to `task`, and sets
the `track_gains` attribute on the task. Returns `task`.
"""
supported_items = list(filter(self.format_supported, task.items))
output = self.compute_gain(supported_items, task.target_level, False)
task.track_gains = output
return task
def compute_album_gain(self, task: AnyRgTask) -> AnyRgTask:
"""Computes the album gain for the album belonging to `task`, and sets
the `album_gain` attribute on the task. Returns `task`.
"""
# TODO: What should be done when not all tracks in the album are
# supported?
supported_items = list(filter(self.format_supported, task.items))
if len(supported_items) != len(task.items):
self._log.debug("tracks are of unsupported format")
task.album_gain = None
task.track_gains = None
return task
output = self.compute_gain(supported_items, task.target_level, True)
task.album_gain = output[-1]
task.track_gains = output[:-1]
return task
def format_supported(self, item: Item) -> bool:
"""Checks whether the given item is supported by the selected tool."""
if "mp3gain" in self.command and item.format != "MP3":
return False
elif "aacgain" in self.command and item.format not in ("MP3", "AAC"):
return False
return True
def compute_gain(
self,
items: Sequence[Item],
target_level: float,
is_album: bool,
) -> list[Gain]:
"""Computes the track or album gain of a list of items, returns
a list of TrackGain objects.
When computing album gain, the last TrackGain object returned is
the album gain
"""
if not items:
self._log.debug("no supported tracks to analyze")
return []
"""Compute ReplayGain values and return a list of results
dictionaries as given by `parse_tool_output`.
"""
# Construct shell command. The "-o" option makes the output
# easily parseable (tab-delimited). "-s s" forces gain
# recalculation even if tags are already present and disables
# tag-writing; this turns the mp3gain/aacgain tool into a gain
# calculator rather than a tag manipulator because we take care
# of changing tags ourselves.
cmd: list[str] = [self.command, "-o", "-s", "s"]
if self.noclip:
# Adjust to avoid clipping.
cmd = cmd + ["-k"]
else:
# Disable clipping warning.
cmd = cmd + ["-c"]
cmd = cmd + ["-d", str(int(target_level - 89))]
cmd = cmd + [syspath(i.path) for i in items]
self._log.debug("analyzing {} files", len(items))
self._log.debug("executing {}", " ".join(map(displayable_path, cmd)))
output = call(cmd, self._log).stdout
self._log.debug("analysis finished")
return self.parse_tool_output(
output, len(items) + (1 if is_album else 0)
)
def parse_tool_output(self, text: bytes, num_lines: int) -> list[Gain]:
"""Given the tab-delimited output from an invocation of mp3gain
or aacgain, parse the text and return a list of dictionaries
containing information about each analyzed file.
"""
out = []
for line in text.split(b"\n")[1 : num_lines + 1]:
parts = line.split(b"\t")
if len(parts) != 6 or parts[0] == b"File":
self._log.debug("bad tool output: {}", text)
raise ReplayGainError("mp3gain failed")
# _file = parts[0]
# _mp3gain = int(parts[1])
gain = float(parts[2])
peak = float(parts[3]) / (1 << 15)
# _maxgain = int(parts[4])
# _mingain = int(parts[5])
out.append(Gain(gain, peak))
return out
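    # Illustrative (hypothetical) `mp3gain -o` output parsed above -- a header
    # row, then one tab-delimited row per file; parts[3] (max amplitude) is
    # divided by 2**15 to yield the peak as a fraction of full scale:
    #
    #     File\tMP3 gain\tdB gain\tMax Amplitude\tMax global_gain\tMin global_gain
    #     a.mp3\t-4\t-6.0\t44561.0\t178\t111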
# GStreamer-based backend.
class GStreamerBackend(Backend):
NAME = "gstreamer"
def __init__(self, config: ConfigView, log: Logger):
super().__init__(config, log)
self._import_gst()
        # Initialize a GStreamer pipeline of the form filesrc ->
        # decodebin -> audioconvert -> audioresample -> rganalysis ->
        # fakesink. The connection between decodebin and audioconvert is
# handled dynamically after decodebin figures out the type of
# the input file.
self._src = self.Gst.ElementFactory.make("filesrc", "src")
self._decbin = self.Gst.ElementFactory.make("decodebin", "decbin")
self._conv = self.Gst.ElementFactory.make("audioconvert", "conv")
self._res = self.Gst.ElementFactory.make("audioresample", "res")
self._rg = self.Gst.ElementFactory.make("rganalysis", "rg")
if (
self._src is None
or self._decbin is None
or self._conv is None
or self._res is None
or self._rg is None
):
raise FatalGstreamerPluginReplayGainError(
"Failed to load required GStreamer plugins"
)
# We check which files need gain ourselves, so all files given
        # to rganalysis should have their gain computed, even if it
# already exists.
self._rg.set_property("forced", True)
self._sink = self.Gst.ElementFactory.make("fakesink", "sink")
self._pipe = self.Gst.Pipeline()
self._pipe.add(self._src)
self._pipe.add(self._decbin)
self._pipe.add(self._conv)
self._pipe.add(self._res)
self._pipe.add(self._rg)
self._pipe.add(self._sink)
self._src.link(self._decbin)
self._conv.link(self._res)
self._res.link(self._rg)
self._rg.link(self._sink)
self._bus = self._pipe.get_bus()
self._bus.add_signal_watch()
self._bus.connect("message::eos", self._on_eos)
self._bus.connect("message::error", self._on_error)
self._bus.connect("message::tag", self._on_tag)
# Needed for handling the dynamic connection between decodebin
# and audioconvert
self._decbin.connect("pad-added", self._on_pad_added)
self._decbin.connect("pad-removed", self._on_pad_removed)
self._main_loop = self.GLib.MainLoop()
self._files: list[bytes] = []
def _import_gst(self):
"""Import the necessary GObject-related modules and assign `Gst`
and `GObject` fields on this object.
"""
try:
import gi
except ImportError:
raise FatalReplayGainError(
"Failed to load GStreamer: python-gi not found"
)
try:
gi.require_version("Gst", "1.0")
except ValueError as e:
raise FatalReplayGainError(f"Failed to load GStreamer 1.0: {e}")
from gi.repository import GLib, GObject, Gst
# Calling GObject.threads_init() is not needed for
# PyGObject 3.10.2+
with warnings.catch_warnings():
warnings.simplefilter("ignore")
GObject.threads_init()
Gst.init([sys.argv[0]])
self.GObject = GObject
self.GLib = GLib
self.Gst = Gst
def compute(self, items: Sequence[Item], target_level: float, album: bool):
if len(items) == 0:
return
self._error = None
self._files = [i.path for i in items]
# FIXME: Turn this into DefaultDict[bytes, Gain]
self._file_tags: collections.defaultdict[bytes, dict[str, float]] = (
collections.defaultdict(dict)
)
self._rg.set_property("reference-level", target_level)
if album:
self._rg.set_property("num-tracks", len(self._files))
if self._set_first_file():
self._main_loop.run()
if self._error is not None:
raise self._error
def compute_track_gain(self, task: AnyRgTask) -> AnyRgTask:
"""Computes the track gain for the tracks belonging to `task`, and sets
the `track_gains` attribute on the task. Returns `task`.
"""
self.compute(task.items, task.target_level, False)
if len(self._file_tags) != len(task.items):
raise ReplayGainError("Some tracks did not receive tags")
ret = []
for item in task.items:
ret.append(
Gain(
self._file_tags[item.path]["TRACK_GAIN"],
self._file_tags[item.path]["TRACK_PEAK"],
)
)
task.track_gains = ret
return task
def compute_album_gain(self, task: AnyRgTask) -> AnyRgTask:
"""Computes the album gain for the album belonging to `task`, and sets
the `album_gain` attribute on the task. Returns `task`.
"""
items = list(task.items)
self.compute(items, task.target_level, True)
if len(self._file_tags) != len(items):
raise ReplayGainError("Some items in album did not receive tags")
# Collect track gains.
track_gains = []
for item in items:
try:
gain = self._file_tags[item.path]["TRACK_GAIN"]
peak = self._file_tags[item.path]["TRACK_PEAK"]
except KeyError:
raise ReplayGainError("results missing for track")
track_gains.append(Gain(gain, peak))
# Get album gain information from the last track.
last_tags = self._file_tags[items[-1].path]
try:
gain = last_tags["ALBUM_GAIN"]
peak = last_tags["ALBUM_PEAK"]
except KeyError:
raise ReplayGainError("results missing for album")
task.album_gain = Gain(gain, peak)
task.track_gains = track_gains
return task
def close(self):
self._bus.remove_signal_watch()
def _on_eos(self, bus, message):
# A file finished playing in all elements of the pipeline. The
# RG tags have already been propagated. If we don't have a next
# file, we stop processing.
if not self._set_next_file():
self._pipe.set_state(self.Gst.State.NULL)
self._main_loop.quit()
def _on_error(self, bus, message):
self._pipe.set_state(self.Gst.State.NULL)
self._main_loop.quit()
err, debug = message.parse_error()
f = self._src.get_property("location")
# A GStreamer error, either an unsupported format or a bug.
self._error = ReplayGainError(
f"Error {err!r} - {debug!r} on file {f!r}"
)
def _on_tag(self, bus, message):
tags = message.parse_tag()
def handle_tag(taglist, tag, userdata):
            # The rganalysis element provides both the existing tags for
            # files and the newly computed tags. To ensure we store the
            # computed tags, we overwrite the RG values received a second
            # time.
if tag == self.Gst.TAG_TRACK_GAIN:
self._file_tags[self._file]["TRACK_GAIN"] = taglist.get_double(
tag
)[1]
elif tag == self.Gst.TAG_TRACK_PEAK:
self._file_tags[self._file]["TRACK_PEAK"] = taglist.get_double(
tag
)[1]
elif tag == self.Gst.TAG_ALBUM_GAIN:
self._file_tags[self._file]["ALBUM_GAIN"] = taglist.get_double(
tag
)[1]
elif tag == self.Gst.TAG_ALBUM_PEAK:
self._file_tags[self._file]["ALBUM_PEAK"] = taglist.get_double(
tag
)[1]
elif tag == self.Gst.TAG_REFERENCE_LEVEL:
self._file_tags[self._file]["REFERENCE_LEVEL"] = (
taglist.get_double(tag)[1]
)
tags.foreach(handle_tag, None)
def _set_first_file(self) -> bool:
if len(self._files) == 0:
return False
self._file = self._files.pop(0)
self._pipe.set_state(self.Gst.State.NULL)
self._src.set_property("location", os.fsdecode(syspath(self._file)))
self._pipe.set_state(self.Gst.State.PLAYING)
return True
def _set_file(self) -> bool:
"""Initialize the filesrc element with the next file to be analyzed."""
# No more files, we're done
if len(self._files) == 0:
return False
self._file = self._files.pop(0)
# Ensure the filesrc element received the paused state of the
# pipeline in a blocking manner
self._src.sync_state_with_parent()
self._src.get_state(self.Gst.CLOCK_TIME_NONE)
# Ensure the decodebin element receives the paused state of the
# pipeline in a blocking manner
self._decbin.sync_state_with_parent()
self._decbin.get_state(self.Gst.CLOCK_TIME_NONE)
# Disconnect the decodebin element from the pipeline, set its
        # state to READY to clear it.
self._decbin.unlink(self._conv)
self._decbin.set_state(self.Gst.State.READY)
# Set a new file on the filesrc element, can only be done in the
# READY state
self._src.set_state(self.Gst.State.READY)
self._src.set_property("location", os.fsdecode(syspath(self._file)))
self._decbin.link(self._conv)
self._pipe.set_state(self.Gst.State.READY)
return True
def _set_next_file(self) -> bool:
"""Set the next file to be analyzed while keeping the pipeline
in the PAUSED state so that the rganalysis element can correctly
handle album gain.
"""
# A blocking pause
self._pipe.set_state(self.Gst.State.PAUSED)
self._pipe.get_state(self.Gst.CLOCK_TIME_NONE)
# Try setting the next file
ret = self._set_file()
if ret:
# Seek to the beginning in order to clear the EOS state of the
# various elements of the pipeline
self._pipe.seek_simple(
self.Gst.Format.TIME, self.Gst.SeekFlags.FLUSH, 0
)
self._pipe.set_state(self.Gst.State.PLAYING)
return ret
def _on_pad_added(self, decbin, pad):
sink_pad = self._conv.get_compatible_pad(pad, None)
assert sink_pad is not None
pad.link(sink_pad)
def _on_pad_removed(self, decbin, pad):
# Called when the decodebin element is disconnected from the
# rest of the pipeline while switching input files
peer = pad.get_peer()
assert peer is None
class AudioToolsBackend(Backend):
"""ReplayGain backend that uses `Python Audio Tools
`_ and its capabilities to read more
file formats and compute ReplayGain values using it replaygain module.
"""
NAME = "audiotools"
def __init__(self, config: ConfigView, log: Logger):
super().__init__(config, log)
self._import_audiotools()
def _import_audiotools(self):
"""Check whether it's possible to import the necessary modules.
There is no check on the file formats at runtime.
        :raises :exc:`FatalReplayGainError`: if the modules cannot be imported
"""
try:
import audiotools
import audiotools.replaygain
except ImportError:
raise FatalReplayGainError(
"Failed to load audiotools: audiotools not found"
)
self._mod_audiotools = audiotools
self._mod_replaygain = audiotools.replaygain
def open_audio_file(self, item: Item):
"""Open the file to read the PCM stream from the using
``item.path``.
:return: the audiofile instance
:rtype: :class:`audiotools.AudioFile`
:raises :exc:`ReplayGainError`: if the file is not found or the
file format is not supported
"""
try:
audiofile = self._mod_audiotools.open(
os.fsdecode(syspath(item.path))
)
except OSError:
raise ReplayGainError(f"File {item.filepath} was not found")
except self._mod_audiotools.UnsupportedFile:
raise ReplayGainError(f"Unsupported file type {item.format}")
return audiofile
def init_replaygain(self, audiofile, item: Item):
"""Return an initialized :class:`audiotools.replaygain.ReplayGain`
instance, which requires the sample rate of the song(s) on which
        the ReplayGain values will be computed. The item is passed so that,
        if the sample rate is invalid, the item's stored sample rate can be
        logged.
        :return: initialized replaygain object
:rtype: :class:`audiotools.replaygain.ReplayGain`
:raises: :exc:`ReplayGainError` if the sample rate is invalid
"""
try:
rg = self._mod_replaygain.ReplayGain(audiofile.sample_rate())
except ValueError:
raise ReplayGainError(f"Unsupported sample rate {item.samplerate}")
return rg
def compute_track_gain(self, task: AnyRgTask) -> AnyRgTask:
"""Computes the track gain for the tracks belonging to `task`, and sets
the `track_gains` attribute on the task. Returns `task`.
"""
gains = [
self._compute_track_gain(i, task.target_level) for i in task.items
]
task.track_gains = gains
return task
def _with_target_level(self, gain: float, target_level: float):
"""Return `gain` relative to `target_level`.
Assumes `gain` is relative to 89 db.
"""
return gain + (target_level - 89)
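    # e.g. a gain of +2.0 dB relative to the 89 dB reference becomes
    # 2.0 + (84 - 89) = -3.0 dB for target_level=84.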
def _title_gain(self, rg, audiofile, target_level: float):
"""Get the gain result pair from PyAudioTools using the `ReplayGain`
instance `rg` for the given `audiofile`.
Wraps `rg.title_gain(audiofile.to_pcm())` and throws a
`ReplayGainError` when the library fails.
"""
try:
# The method needs an audiotools.PCMReader instance that can
# be obtained from an audiofile instance.
gain, peak = rg.title_gain(audiofile.to_pcm())
except ValueError as exc:
# `audiotools.replaygain` can raise a `ValueError` if the sample
# rate is incorrect.
self._log.debug("error in rg.title_gain() call: {}", exc)
raise ReplayGainError("audiotools audio data error")
return self._with_target_level(gain, target_level), peak
def _compute_track_gain(self, item: Item, target_level: float):
"""Compute ReplayGain value for the requested item.
:rtype: :class:`Gain`
"""
audiofile = self.open_audio_file(item)
rg = self.init_replaygain(audiofile, item)
# Each call to title_gain on a ReplayGain object returns peak and gain
# of the track.
rg_track_gain, rg_track_peak = self._title_gain(
rg, audiofile, target_level
)
self._log.debug(
"ReplayGain for track {0.artist} - {0.title}: {1:.2f}, {2:.2f}",
item,
rg_track_gain,
rg_track_peak,
)
return Gain(gain=rg_track_gain, peak=rg_track_peak)
def compute_album_gain(self, task: AnyRgTask) -> AnyRgTask:
"""Computes the album gain for the album belonging to `task`, and sets
the `album_gain` attribute on the task. Returns `task`.
"""
# The first item is taken and opened to get the sample rate to
# initialize the replaygain object. The object is used for all the
# tracks in the album to get the album values.
item = list(task.items)[0]
audiofile = self.open_audio_file(item)
rg = self.init_replaygain(audiofile, item)
track_gains = []
for item in task.items:
audiofile = self.open_audio_file(item)
rg_track_gain, rg_track_peak = self._title_gain(
rg, audiofile, task.target_level
)
track_gains.append(Gain(gain=rg_track_gain, peak=rg_track_peak))
self._log.debug(
"ReplayGain for track {}: {.2f}, {.2f}",
item,
rg_track_gain,
rg_track_peak,
)
# After getting the values for all tracks, it's possible to get the
# album values.
rg_album_gain, rg_album_peak = rg.album_gain()
rg_album_gain = self._with_target_level(
rg_album_gain, task.target_level
)
self._log.debug(
"ReplayGain for album {.items[0].album}: {.2f}, {.2f}",
task,
rg_album_gain,
rg_album_peak,
)
task.album_gain = Gain(gain=rg_album_gain, peak=rg_album_peak)
task.track_gains = track_gains
return task
class ExceptionWatcher(Thread):
"""Monitors a queue for exceptions asynchronously.
Once an exception occurs, raise it and execute a callback.
"""
def __init__(
self, queue: queue.Queue[Exception], callback: Callable[[], None]
):
self._queue = queue
self._callback = callback
self._stopevent = Event()
Thread.__init__(self)
def run(self):
while not self._stopevent.is_set():
try:
exc = self._queue.get_nowait()
self._callback()
raise exc
except queue.Empty:
# No exceptions yet, loop back to check
# whether `_stopevent` is set
pass
def join(self, timeout: float | None = None):
self._stopevent.set()
Thread.join(self, timeout)
# Main plugin logic.
BACKEND_CLASSES: list[type[Backend]] = [
CommandBackend,
GStreamerBackend,
AudioToolsBackend,
FfmpegBackend,
]
BACKENDS: dict[str, type[Backend]] = {b.NAME: b for b in BACKEND_CLASSES}
class ReplayGainPlugin(BeetsPlugin):
"""Provides ReplayGain analysis."""
pool: ThreadPool | None = None
def __init__(self) -> None:
super().__init__()
# default backend is 'command' for backward-compatibility.
self.config.add(
{
"overwrite": False,
"auto": True,
"backend": "command",
"threads": os.cpu_count(),
"parallel_on_import": False,
"per_disc": False,
"peak": "true",
"targetlevel": 89,
"r128": ["Opus"],
"r128_targetlevel": lufs_to_db(-23),
}
)
# FIXME: Consider renaming the configuration option and deprecating the
# old name 'overwrite'.
self.force_on_import: bool = self.config["overwrite"].get(bool)
# Remember which backend is used for CLI feedback
self.backend_name = self.config["backend"].as_str()
if self.backend_name not in BACKENDS:
raise ui.UserError(
f"Selected ReplayGain backend {self.backend_name} is not"
f" supported. Please select one of: {', '.join(BACKENDS)}"
)
# FIXME: Consider renaming the configuration option to 'peak_method'
# and deprecating the old name 'peak'.
peak_method = self.config["peak"].as_str()
if peak_method not in PeakMethod.__members__:
raise ui.UserError(
f"Selected ReplayGain peak method {peak_method} is not"
" supported. Please select one of:"
f" {', '.join(PeakMethod.__members__)}"
)
# This only applies to plain old rg tags, r128 doesn't store peak
# values.
self.peak_method = PeakMethod[peak_method]
# On-import analysis.
if self.config["auto"]:
self.register_listener("import_begin", self.import_begin)
self.register_listener("import", self.import_end)
self.import_stages = [self.imported]
# Formats to use R128.
self.r128_whitelist = self.config["r128"].as_str_seq()
try:
self.backend_instance = BACKENDS[self.backend_name](
self.config, self._log
)
except (ReplayGainError, FatalReplayGainError) as e:
raise ui.UserError(f"replaygain initialization failed: {e}")
def should_use_r128(self, item: Item) -> bool:
"""Checks the plugin setting to decide whether the calculation
should be done using the EBU R128 standard and use R128_ tags instead.
"""
return item.format in self.r128_whitelist
@staticmethod
def has_r128_track_data(item: Item) -> bool:
return item.r128_track_gain is not None
@staticmethod
def has_rg_track_data(item: Item) -> bool:
return item.rg_track_gain is not None and item.rg_track_peak is not None
def track_requires_gain(self, item: Item) -> bool:
if self.should_use_r128(item):
if not self.has_r128_track_data(item):
return True
else:
if not self.has_rg_track_data(item):
return True
return False
@staticmethod
def has_r128_album_data(item: Item) -> bool:
return (
item.r128_track_gain is not None
and item.r128_album_gain is not None
)
@staticmethod
def has_rg_album_data(item: Item) -> bool:
return item.rg_album_gain is not None and item.rg_album_peak is not None
def album_requires_gain(self, album: Album) -> bool:
# Skip calculating gain only when *all* files don't need
# recalculation. This way, if any file among an album's tracks
# needs recalculation, we still get an accurate album gain
# value.
for item in album.items():
if self.should_use_r128(item):
if not self.has_r128_album_data(item):
return True
else:
if not self.has_rg_album_data(item):
return True
return False
def create_task(
self,
items: Sequence[Item],
use_r128: bool,
album: Album | None = None,
) -> RgTask:
if use_r128:
return R128Task(
items,
album,
self.config["r128_targetlevel"].as_number(),
self.backend_instance.NAME,
self._log,
)
else:
return RgTask(
items,
album,
self.config["targetlevel"].as_number(),
self.peak_method,
self.backend_instance.NAME,
self._log,
)
def handle_album(self, album: Album, write: bool, force: bool = False):
"""Compute album and track replay gain store it in all of the
album's items.
If ``write`` is truthy then ``item.write()`` is called for each
item. If replay gain information is already present in all
items, nothing is done.
"""
if not force and not self.album_requires_gain(album):
self._log.info("Skipping album {}", album)
return
items_iter = iter(album.items())
use_r128 = self.should_use_r128(next(items_iter))
if any(use_r128 != self.should_use_r128(i) for i in items_iter):
self._log.error(
"Cannot calculate gain for album {} (incompatible formats)",
album,
)
return
self._log.info("analyzing {}", album)
discs: dict[int, list[Item]] = {}
if self.config["per_disc"].get(bool):
for item in album.items():
if discs.get(item.disc) is None:
discs[item.disc] = []
discs[item.disc].append(item)
else:
discs[1] = album.items()
def store_cb(task: RgTask):
task.store(write)
for discnumber, items in discs.items():
task = self.create_task(items, use_r128, album=album)
try:
self._apply(
self.backend_instance.compute_album_gain,
args=[task],
kwds={},
callback=store_cb,
)
except ReplayGainError as e:
self._log.info("ReplayGain error: {}", e)
except FatalReplayGainError as e:
raise ui.UserError(f"Fatal replay gain error: {e}")
def handle_track(self, item: Item, write: bool, force: bool = False):
"""Compute track replay gain and store it in the item.
If ``write`` is truthy then ``item.write()`` is called to write
the data to disk. If replay gain information is already present
in the item, nothing is done.
"""
if not force and not self.track_requires_gain(item):
self._log.info("Skipping track {}", item)
return
use_r128 = self.should_use_r128(item)
def store_cb(task: RgTask):
task.store(write)
task = self.create_task([item], use_r128)
try:
self._apply(
self.backend_instance.compute_track_gain,
args=[task],
kwds={},
callback=store_cb,
)
except ReplayGainError as e:
self._log.info("ReplayGain error: {}", e)
except FatalReplayGainError as e:
raise ui.UserError(f"Fatal replay gain error: {e}")
def open_pool(self, threads: int):
"""Open a `ThreadPool` instance in `self.pool`"""
if self.pool is None and self.backend_instance.do_parallel:
self.pool = ThreadPool(threads)
self.exc_queue: queue.Queue[Exception] = queue.Queue()
signal.signal(signal.SIGINT, self._interrupt)
self.exc_watcher = ExceptionWatcher(
self.exc_queue, # threads push exceptions here
self.terminate_pool, # abort once an exception occurs
)
self.exc_watcher.start()
def _apply(
self,
func: Callable[..., AnyRgTask],
args: list[Any],
kwds: dict[str, Any],
callback: Callable[[AnyRgTask], Any],
):
if self.pool is not None:
def handle_exc(exc):
"""Handle exceptions in the async work."""
if isinstance(exc, ReplayGainError):
self._log.info(exc.args[0]) # Log non-fatal exceptions.
else:
self.exc_queue.put(exc)
self.pool.apply_async(
func, args, kwds, callback, error_callback=handle_exc
)
else:
callback(func(*args, **kwds))
def terminate_pool(self):
"""Forcibly terminate the `ThreadPool` instance in `self.pool`
Sends SIGTERM to all processes.
"""
if self.pool is not None:
self.pool.terminate()
self.pool.join()
# Terminating the processes leaves the ExceptionWatcher's queues
# in an unknown state, so don't wait for it.
# self.exc_watcher.join()
self.pool = None
def _interrupt(self, signal, frame):
try:
self._log.info("interrupted")
self.terminate_pool()
sys.exit(0)
except SystemExit:
# Silence raised SystemExit ~ exit(0)
pass
def close_pool(self):
"""Regularly close the `ThreadPool` instance in `self.pool`."""
if self.pool is not None:
self.pool.close()
self.pool.join()
self.exc_watcher.join()
self.pool = None
def import_begin(self, session: ImportSession):
"""Handle `import_begin` event -> open pool"""
threads: int = self.config["threads"].get(int)
if (
self.config["parallel_on_import"]
and self.config["auto"]
and threads
):
self.open_pool(threads)
def import_end(self, paths):
"""Handle `import` event -> close pool"""
self.close_pool()
def imported(self, session: ImportSession, task: ImportTask):
"""Add replay gain info to items or albums of ``task``."""
if self.config["auto"]:
if task.is_album:
self.handle_album(task.album, False, self.force_on_import)
else:
# Should be a SingletonImportTask
assert hasattr(task, "item")
self.handle_track(task.item, False, self.force_on_import)
def command_func(
self,
lib: Library,
opts: optparse.Values,
args: list[str],
):
try:
write = ui.should_write(opts.write)
force = opts.force
# Bypass self.open_pool() if called with `--threads 0`
if opts.threads != 0:
threads: int = opts.threads or self.config["threads"].get(int)
self.open_pool(threads)
if opts.album:
albums = lib.albums(args)
self._log.info(
f"Analyzing {len(albums)} albums ~"
f" {self.backend_name} backend..."
)
for album in albums:
self.handle_album(album, write, force)
else:
items = lib.items(args)
self._log.info(
f"Analyzing {len(items)} tracks ~"
f" {self.backend_name} backend..."
)
for item in items:
self.handle_track(item, write, force)
self.close_pool()
except (SystemExit, KeyboardInterrupt):
# Silence interrupt exceptions
pass
def commands(self) -> list[ui.Subcommand]:
"""Return the "replaygain" ui subcommand."""
cmd = ui.Subcommand("replaygain", help="analyze for ReplayGain")
cmd.parser.add_album_option()
cmd.parser.add_option(
"-t",
"--threads",
dest="threads",
type=int,
help=(
"change the number of threads, defaults to maximum available"
" processors"
),
)
cmd.parser.add_option(
"-f",
"--force",
dest="force",
action="store_true",
default=False,
help=(
"analyze all files, including those that already have"
" ReplayGain metadata"
),
)
cmd.parser.add_option(
"-w",
"--write",
default=None,
action="store_true",
help="write new metadata to files' tags",
)
cmd.parser.add_option(
"-W",
"--nowrite",
dest="write",
action="store_false",
help="don't write metadata (opposite of -w)",
)
cmd.func = self.command_func
return [cmd]
beetbox-beets-c1877b7/beetsplug/rewrite.py 0000664 0000000 0000000 00000005256 15073551743 0020655 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Uses user-specified rewriting rules to canonicalize names for path
formats.
"""
import re
from collections import defaultdict
from beets import library, ui
from beets.plugins import BeetsPlugin
def rewriter(field, rules):
"""Create a template field function that rewrites the given field
with the given rewriting rules. ``rules`` must be a list of
(pattern, replacement) pairs.
"""
def fieldfunc(item):
value = item._values_fixed[field]
for pattern, replacement in rules:
if pattern.match(value.lower()):
# Rewrite activated.
return replacement
# Not activated; return original value.
return value
return fieldfunc
class RewritePlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({})
# Gather all the rewrite rules for each field.
rules = defaultdict(list)
for key, view in self.config.items():
value = view.as_str()
try:
fieldname, pattern = key.split(None, 1)
except ValueError:
raise ui.UserError("invalid rewrite specification")
if fieldname not in library.Item._fields:
raise ui.UserError(
f"invalid field name ({fieldname}) in rewriter"
)
self._log.debug("adding template field {}", key)
pattern = re.compile(pattern.lower())
rules[fieldname].append((pattern, value))
if fieldname == "artist":
# Special case for the artist field: apply the same
# rewrite for "albumartist" as well.
rules["albumartist"].append((pattern, value))
# Replace each template field with the new rewriter function.
for fieldname, fieldrules in rules.items():
getter = rewriter(fieldname, fieldrules)
self.template_fields[fieldname] = getter
if fieldname in library.Album._fields:
self.album_template_fields[fieldname] = getter
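# Illustrative configuration consumed by RewritePlugin.__init__: each key is
# "<field> <regex>" and each value is the canonical replacement, e.g.
#
#     rewrite:
#         artist ODB: Ol' Dirty Bastard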
beetbox-beets-c1877b7/beetsplug/scrub.py 0000664 0000000 0000000 00000011773 15073551743 0020313 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Cleans extraneous metadata from files' tags via a command or
automatically whenever tags are written.
"""
import mediafile
import mutagen
from beets import config, ui, util
from beets.plugins import BeetsPlugin
_MUTAGEN_FORMATS = {
"asf": "ASF",
"apev2": "APEv2File",
"flac": "FLAC",
"id3": "ID3FileType",
"mp3": "MP3",
"mp4": "MP4",
"oggflac": "OggFLAC",
"oggspeex": "OggSpeex",
"oggtheora": "OggTheora",
"oggvorbis": "OggVorbis",
"oggopus": "OggOpus",
"trueaudio": "TrueAudio",
"wavpack": "WavPack",
"monkeysaudio": "MonkeysAudio",
"optimfrog": "OptimFROG",
}
class ScrubPlugin(BeetsPlugin):
"""Removes extraneous metadata from files' tags."""
def __init__(self):
super().__init__()
self.config.add(
{
"auto": True,
}
)
if self.config["auto"]:
self.register_listener("import_task_files", self.import_task_files)
def commands(self):
def scrub_func(lib, opts, args):
# Walk through matching files and remove tags.
for item in lib.items(args):
self._log.info("scrubbing: {.filepath}", item)
self._scrub_item(item, opts.write)
scrub_cmd = ui.Subcommand("scrub", help="clean audio tags")
scrub_cmd.parser.add_option(
"-W",
"--nowrite",
dest="write",
action="store_false",
default=True,
help="leave tags empty",
)
scrub_cmd.func = scrub_func
return [scrub_cmd]
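    # Usage sketch: `beet scrub QUERY` scrubs matching files and then rewrites
    # tags from the database; `beet scrub -W QUERY` leaves the tags empty.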
@staticmethod
def _mutagen_classes():
"""Get a list of file type classes from the Mutagen module."""
classes = []
for modname, clsname in _MUTAGEN_FORMATS.items():
mod = __import__(f"mutagen.{modname}", fromlist=[clsname])
classes.append(getattr(mod, clsname))
return classes
def _scrub(self, path):
"""Remove all tags from a file."""
for cls in self._mutagen_classes():
# Try opening the file with this type, but just skip in the
# event of any error.
try:
f = cls(util.syspath(path))
except Exception:
continue
if f.tags is None:
continue
# Remove the tag for this type.
try:
f.delete()
except NotImplementedError:
# Some Mutagen metadata subclasses (namely, ASFTag) do not
# support .delete(), presumably because it is impossible to
# remove them. In this case, we just remove all the tags.
for tag in f.keys():
del f[tag]
f.save()
except (OSError, mutagen.MutagenError) as exc:
self._log.error(
"could not scrub {}: {}", util.displayable_path(path), exc
)
def _scrub_item(self, item, restore):
"""Remove tags from an Item's associated file and, if `restore`
is enabled, write the database's tags back to the file.
"""
# Get album art if we need to restore it.
if restore:
try:
mf = mediafile.MediaFile(
util.syspath(item.path), config["id3v23"].get(bool)
)
except mediafile.UnreadableFileError as exc:
self._log.error("could not open file to scrub: {}", exc)
return
images = mf.images
# Remove all tags.
self._scrub(item.path)
# Restore tags, if enabled.
if restore:
self._log.debug("writing new tags after scrub")
item.try_write()
if images:
self._log.debug("restoring art")
try:
mf = mediafile.MediaFile(
util.syspath(item.path), config["id3v23"].get(bool)
)
mf.images = images
mf.save()
except mediafile.UnreadableFileError as exc:
self._log.error("could not write tags: {}", exc)
def import_task_files(self, session, task):
"""Automatically scrub imported files."""
for item in task.imported_items():
self._log.debug("auto-scrubbing {.filepath}", item)
self._scrub_item(item, ui.should_write())
beetbox-beets-c1877b7/beetsplug/smartplaylist.py 0000664 0000000 0000000 00000032025 15073551743 0022076 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Dang Mai .
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Generates smart playlists based on beets queries."""
import os
from urllib.parse import quote
from urllib.request import pathname2url
from beets import ui
from beets.dbcore import OrQuery
from beets.dbcore.query import MultipleSort, ParsingError
from beets.library import Album, Item, parse_query_string
from beets.plugins import BeetsPlugin
from beets.plugins import send as send_event
from beets.util import (
bytestring_path,
displayable_path,
mkdirall,
normpath,
path_as_posix,
sanitize_path,
syspath,
)
class SmartPlaylistPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"relative_to": None,
"playlist_dir": ".",
"auto": True,
"playlists": [],
"uri_format": None,
"fields": [],
"forward_slash": False,
"prefix": "",
"urlencode": False,
"pretend_paths": False,
"output": "m3u",
}
)
self.config["prefix"].redact = True # May contain username/password.
self._matched_playlists = None
self._unmatched_playlists = None
if self.config["auto"]:
self.register_listener("database_change", self.db_change)
def commands(self):
spl_update = ui.Subcommand(
"splupdate",
help="update the smart playlists. Playlist names may be "
"passed as arguments.",
)
spl_update.parser.add_option(
"-p",
"--pretend",
action="store_true",
help="display query results but don't write playlist files.",
)
spl_update.parser.add_option(
"--pretend-paths",
action="store_true",
dest="pretend_paths",
help="in pretend mode, log the playlist item URIs/paths.",
)
spl_update.parser.add_option(
"-d",
"--playlist-dir",
dest="playlist_dir",
metavar="PATH",
type="string",
help="directory to write the generated playlist files to.",
)
spl_update.parser.add_option(
"--relative-to",
dest="relative_to",
metavar="PATH",
type="string",
help="generate playlist item paths relative to this path.",
)
spl_update.parser.add_option(
"--prefix",
type="string",
help="prepend string to every path in the playlist file.",
)
spl_update.parser.add_option(
"--forward-slash",
action="store_true",
dest="forward_slash",
help="force forward slash in paths within playlists.",
)
spl_update.parser.add_option(
"--urlencode",
action="store_true",
help="URL-encode all paths.",
)
spl_update.parser.add_option(
"--uri-format",
dest="uri_format",
type="string",
help="playlist item URI template, e.g. http://beets:8337/item/$id/file.",
)
spl_update.parser.add_option(
"--output",
type="string",
help="specify the playlist format: m3u|extm3u.",
)
spl_update.func = self.update_cmd
return [spl_update]
def update_cmd(self, lib, opts, args):
self.build_queries()
if args:
args = set(args)
for a in list(args):
if not a.endswith(".m3u"):
args.add(f"{a}.m3u")
playlists = {
(name, q, a_q)
for name, q, a_q in self._unmatched_playlists
if name in args
}
if not playlists:
unmatched = [name for name, _, _ in self._unmatched_playlists]
raise ui.UserError(
f"No playlist matching any of {unmatched} found"
)
self._matched_playlists = playlists
self._unmatched_playlists -= playlists
else:
self._matched_playlists = self._unmatched_playlists
self.__apply_opts_to_config(opts)
self.update_playlists(lib, opts.pretend)
def __apply_opts_to_config(self, opts):
for k, v in opts.__dict__.items():
if v is not None and k in self.config:
self.config[k] = v
def build_queries(self):
"""
Instantiate queries for the playlists.
        Each playlist has two queries: one for items and one for albums, each
        with a sort. We must also remember its name. _unmatched_playlists is a
        set of tuples (name, (q, q_sort), (album_q, album_q_sort)).
        sort may be any sort, or NullSort, or None. None and NullSort are
        equivalent and both evaluate to False.
        More precisely:
        - it will be NullSort when a playlist query ('query' or 'album_query')
          is a single item or a list with one element
        - it will be None when there are multiple items in a query
"""
self._unmatched_playlists = set()
self._matched_playlists = set()
for playlist in self.config["playlists"].get(list):
if "name" not in playlist:
self._log.warning("playlist configuration is missing name")
continue
playlist_data = (playlist["name"],)
try:
for key, model_cls in (("query", Item), ("album_query", Album)):
qs = playlist.get(key)
if qs is None:
query_and_sort = None, None
elif isinstance(qs, str):
query_and_sort = parse_query_string(qs, model_cls)
elif len(qs) == 1:
query_and_sort = parse_query_string(qs[0], model_cls)
else:
# multiple queries and sorts
queries, sorts = zip(
*(parse_query_string(q, model_cls) for q in qs)
)
query = OrQuery(queries)
final_sorts = []
for s in sorts:
if s:
if isinstance(s, MultipleSort):
final_sorts += s.sorts
else:
final_sorts.append(s)
if not final_sorts:
sort = None
elif len(final_sorts) == 1:
(sort,) = final_sorts
else:
sort = MultipleSort(final_sorts)
query_and_sort = query, sort
playlist_data += (query_and_sort,)
except ParsingError as exc:
self._log.warning(
"invalid query in playlist {}: {}", playlist["name"], exc
)
continue
self._unmatched_playlists.add(playlist_data)
def matches(self, model, query, album_query):
if album_query and isinstance(model, Album):
return album_query.match(model)
if query and isinstance(model, Item):
return query.match(model)
return False
def db_change(self, lib, model):
if self._unmatched_playlists is None:
self.build_queries()
for playlist in self._unmatched_playlists:
n, (q, _), (a_q, _) = playlist
if self.matches(model, q, a_q):
self._log.debug("{} will be updated because of {}", n, model)
self._matched_playlists.add(playlist)
self.register_listener("cli_exit", self.update_playlists)
self._unmatched_playlists -= self._matched_playlists
def update_playlists(self, lib, pretend=False):
if pretend:
self._log.info(
"Showing query results for {} smart playlists...",
len(self._matched_playlists),
)
else:
self._log.info(
"Updating {} smart playlists...", len(self._matched_playlists)
)
playlist_dir = self.config["playlist_dir"].as_filename()
playlist_dir = bytestring_path(playlist_dir)
tpl = self.config["uri_format"].get()
prefix = bytestring_path(self.config["prefix"].as_str())
relative_to = self.config["relative_to"].get()
if relative_to:
relative_to = normpath(relative_to)
# Maps playlist filenames to lists of track filenames.
m3us = {}
for playlist in self._matched_playlists:
name, (query, q_sort), (album_query, a_q_sort) = playlist
if pretend:
self._log.info("Results for playlist {}:", name)
else:
self._log.info("Creating playlist {}", name)
items = []
if query:
items.extend(lib.items(query, q_sort))
if album_query:
for album in lib.albums(album_query, a_q_sort):
items.extend(album.items())
# As we allow tags in the m3u names, we'll need to iterate through
# the items and generate the correct m3u file names.
for item in items:
m3u_name = item.evaluate_template(name, True)
m3u_name = sanitize_path(m3u_name, lib.replacements)
if m3u_name not in m3us:
m3us[m3u_name] = []
item_uri = item.path
if tpl:
item_uri = tpl.replace("$id", str(item.id)).encode("utf-8")
else:
if relative_to:
item_uri = os.path.relpath(item_uri, relative_to)
if self.config["forward_slash"].get():
item_uri = path_as_posix(item_uri)
if self.config["urlencode"]:
item_uri = bytestring_path(pathname2url(item_uri))
item_uri = prefix + item_uri
if item_uri not in m3us[m3u_name]:
m3us[m3u_name].append(PlaylistItem(item, item_uri))
if pretend and self.config["pretend_paths"]:
print(displayable_path(item_uri))
elif pretend:
print(item)
if not pretend:
# Write all of the accumulated track lists to files.
for m3u in m3us:
m3u_path = normpath(
os.path.join(playlist_dir, bytestring_path(m3u))
)
mkdirall(m3u_path)
pl_format = self.config["output"].get()
                if pl_format not in ("m3u", "extm3u"):
                    raise Exception(
                        f"Unsupported output format '{pl_format}' provided! "
                        "Supported: m3u, extm3u"
                    )
extm3u = pl_format == "extm3u"
with open(syspath(m3u_path), "wb") as f:
keys = []
if extm3u:
keys = self.config["fields"].get(list)
f.write(b"#EXTM3U\n")
for entry in m3us[m3u]:
item = entry.item
comment = ""
if extm3u:
attr = [(k, entry.item[k]) for k in keys]
al = [
f' {key}="{quote(str(value), safe="/:")}"'
for key, value in attr
]
attrs = "".join(al)
comment = (
f"#EXTINF:{int(item.length)}{attrs},"
f"{item.artist} - {item.title}\n"
)
f.write(comment.encode("utf-8") + entry.uri + b"\n")
# Send an event when playlists were updated.
send_event("smartplaylist_update")
if pretend:
self._log.info(
"Displayed results for {} playlists",
len(self._matched_playlists),
)
else:
self._log.info("{} playlists updated", len(self._matched_playlists))
class PlaylistItem:
def __init__(self, item, uri):
self.item = item
self.uri = uri
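# A minimal, self-contained sketch of the "#EXTINF" line written above when
# ``output: extm3u`` is configured. The field values here are invented for
# illustration; the plugin reads them from beets Items.
if __name__ == "__main__":
    from urllib.parse import quote

    length, artist, title = 241, "Beethoven", "Für Elise"
    fields = {"id": 123, "genre": "classical"}
    attrs = "".join(
        f' {key}="{quote(str(value), safe="/:")}"'
        for key, value in fields.items()
    )
    print(f"#EXTINF:{length}{attrs},{artist} - {title}")
    # -> #EXTINF:241 id="123" genre="classical",Beethoven - Für Elise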
beetbox-beets-c1877b7/beetsplug/sonosupdate.py 0000664 0000000 0000000 00000003110 15073551743 0021523 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2018, Tobias Sauerwein.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates a Sonos library whenever the beets library is changed.
This is based on the Kodi Update plugin.
"""
import soco
from beets.plugins import BeetsPlugin
class SonosUpdate(BeetsPlugin):
def __init__(self):
super().__init__()
self.register_listener("database_change", self.listen_for_db_change)
def listen_for_db_change(self, lib, model):
"""Listens for beets db change and register the update"""
self.register_listener("cli_exit", self.update)
def update(self, lib):
"""When the client exists try to send refresh request to a Sonos
controller.
"""
self._log.info("Requesting a Sonos library update...")
device = soco.discovery.any_soco()
if device:
device.music_library.start_library_update()
else:
self._log.warning("Could not find a Sonos device.")
return
self._log.info("Sonos update triggered")
beetbox-beets-c1877b7/beetsplug/spotify.py 0000664 0000000 0000000 00000062555 15073551743 0020676 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2019, Rahul Ahuja.
# Copyright 2022, Alok Saboo.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Spotify release and track search support to the autotagger, along with
Spotify playlist construction.
"""
from __future__ import annotations
import base64
import collections
import json
import re
import time
import webbrowser
from typing import TYPE_CHECKING, Any, Literal, Sequence, Union
import confuse
import requests
from beets import ui
from beets.autotag.hooks import AlbumInfo, TrackInfo
from beets.dbcore import types
from beets.library import Library
from beets.metadata_plugins import (
IDResponse,
SearchApiMetadataSourcePlugin,
SearchFilter,
)
if TYPE_CHECKING:
from beets.library import Library
from beetsplug._typing import JSONDict
DEFAULT_WAITING_TIME = 5
class SearchResponseAlbums(IDResponse):
"""A response returned by the Spotify API.
    We only use ``items`` and disregard the pagination information,
    i.e. res["albums"]["items"][0].
There are more fields in the response, but we only type
the ones we currently use.
see https://developer.spotify.com/documentation/web-api/reference/search
"""
album_type: str
available_markets: Sequence[str]
name: str
class SearchResponseTracks(IDResponse):
"""A track response returned by the Spotify API."""
album: SearchResponseAlbums
available_markets: Sequence[str]
popularity: int
name: str
class APIError(Exception):
pass
class SpotifyPlugin(
SearchApiMetadataSourcePlugin[
Union[SearchResponseAlbums, SearchResponseTracks]
]
):
item_types = {
"spotify_track_popularity": types.INTEGER,
"spotify_acousticness": types.FLOAT,
"spotify_danceability": types.FLOAT,
"spotify_energy": types.FLOAT,
"spotify_instrumentalness": types.FLOAT,
"spotify_key": types.FLOAT,
"spotify_liveness": types.FLOAT,
"spotify_loudness": types.FLOAT,
"spotify_mode": types.INTEGER,
"spotify_speechiness": types.FLOAT,
"spotify_tempo": types.FLOAT,
"spotify_time_signature": types.INTEGER,
"spotify_valence": types.FLOAT,
"spotify_updated": types.DATE,
}
# Base URLs for the Spotify API
# Documentation: https://developer.spotify.com/web-api
oauth_token_url = "https://accounts.spotify.com/api/token"
open_track_url = "https://open.spotify.com/track/"
search_url = "https://api.spotify.com/v1/search"
album_url = "https://api.spotify.com/v1/albums/"
track_url = "https://api.spotify.com/v1/tracks/"
audio_features_url = "https://api.spotify.com/v1/audio-features/"
spotify_audio_features = {
"acousticness": "spotify_acousticness",
"danceability": "spotify_danceability",
"energy": "spotify_energy",
"instrumentalness": "spotify_instrumentalness",
"key": "spotify_key",
"liveness": "spotify_liveness",
"loudness": "spotify_loudness",
"mode": "spotify_mode",
"speechiness": "spotify_speechiness",
"tempo": "spotify_tempo",
"time_signature": "spotify_time_signature",
"valence": "spotify_valence",
}
def __init__(self):
super().__init__()
self.config.add(
{
"mode": "list",
"tiebreak": "popularity",
"show_failures": False,
"region_filter": None,
"regex": [],
"client_id": "4e414367a1d14c75a5c5129a627fcab8",
"client_secret": "f82bdc09b2254f1a8286815d02fd46dc",
"tokenfile": "spotify_token.json",
}
)
self.config["client_id"].redact = True
self.config["client_secret"].redact = True
self.setup()
def setup(self):
"""Retrieve previously saved OAuth token or generate a new one."""
try:
with open(self._tokenfile()) as f:
token_data = json.load(f)
except OSError:
self._authenticate()
else:
self.access_token = token_data["access_token"]
def _tokenfile(self) -> str:
"""Get the path to the JSON file for storing the OAuth token."""
return self.config["tokenfile"].get(confuse.Filename(in_app_dir=True))
def _authenticate(self) -> None:
"""Request an access token via the Client Credentials Flow:
https://developer.spotify.com/documentation/general/guides/authorization-guide/#client-credentials-flow
"""
c_id: str = self.config["client_id"].as_str()
c_secret: str = self.config["client_secret"].as_str()
headers = {
"Authorization": (
"Basic"
f" {base64.b64encode(f'{c_id}:{c_secret}'.encode()).decode()}"
)
}
response = requests.post(
self.oauth_token_url,
data={"grant_type": "client_credentials"},
headers=headers,
timeout=10,
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
raise ui.UserError(
f"Spotify authorization failed: {e}\n{response.text}"
)
self.access_token = response.json()["access_token"]
# Save the token for later use.
self._log.debug("{0.data_source} access token: {0.access_token}", self)
with open(self._tokenfile(), "w") as f:
json.dump({"access_token": self.access_token}, f)
def _handle_response(
self,
method: Literal["get", "post", "put", "delete"],
url: str,
params: Any = None,
retry_count: int = 0,
max_retries: int = 3,
) -> JSONDict:
"""Send a request, reauthenticating if necessary.
:param method: HTTP method to use for the request.
:param url: URL for the new :class:`Request` object.
    :param params: (optional) dictionary or list of tuples to send
        in the query string for the :class:`Request`.
"""
if retry_count > max_retries:
raise APIError("Maximum retries reached.")
try:
response = requests.request(
method,
url,
headers={"Authorization": f"Bearer {self.access_token}"},
params=params,
timeout=10,
)
response.raise_for_status()
return response.json()
except requests.exceptions.ReadTimeout:
self._log.error("ReadTimeout.")
raise APIError("Request timed out.")
except requests.exceptions.ConnectionError as e:
self._log.error("Network error: {}", e)
raise APIError("Network error.")
except requests.exceptions.RequestException as e:
if e.response is None:
self._log.error("Request failed: {}", e)
raise APIError("Request failed.")
if e.response.status_code == 401:
self._log.debug(
"{.data_source} access token has expired. Reauthenticating.",
self,
)
self._authenticate()
return self._handle_response(
method,
url,
params=params,
retry_count=retry_count + 1,
)
elif e.response.status_code == 404:
raise APIError(
f"API Error: {e.response.status_code}\n"
f"URL: {url}\nparams: {params}"
)
elif e.response.status_code == 429:
seconds = e.response.headers.get(
"Retry-After", DEFAULT_WAITING_TIME
)
self._log.debug(
"Too many API requests. Retrying after {} seconds.", seconds
)
time.sleep(int(seconds) + 1)
return self._handle_response(
method,
url,
params=params,
retry_count=retry_count + 1,
)
elif e.response.status_code == 503:
self._log.error("Service Unavailable.")
raise APIError("Service Unavailable.")
elif e.response.status_code == 502:
self._log.error("Bad Gateway.")
raise APIError("Bad Gateway.")
            else:
                # ``e.response is None`` was already handled above, so a
                # response object is guaranteed to exist here.
                raise APIError(
                    f"{self.data_source} API error:\n{e.response.text}\n"
                    f"URL:\n{url}\nparams:\n{params}"
                )
def album_for_id(self, album_id: str) -> AlbumInfo | None:
"""Fetch an album by its Spotify ID or URL and return an
AlbumInfo object or None if the album is not found.
:param album_id: Spotify ID or URL for the album
:type album_id: str
:return: AlbumInfo object for album
:rtype: beets.autotag.hooks.AlbumInfo or None
"""
if not (spotify_id := self._extract_id(album_id)):
return None
album_data = self._handle_response(
"get", f"{self.album_url}{spotify_id}"
)
if album_data["name"] == "":
self._log.debug("Album removed from Spotify: {}", album_id)
return None
artist, artist_id = self.get_artist(album_data["artists"])
date_parts = [
int(part) for part in album_data["release_date"].split("-")
]
release_date_precision = album_data["release_date_precision"]
if release_date_precision == "day":
year, month, day = date_parts
elif release_date_precision == "month":
year, month = date_parts
day = None
elif release_date_precision == "year":
year = date_parts[0]
month = None
day = None
else:
raise ui.UserError(
"Invalid `release_date_precision` returned "
f"by {self.data_source} API: '{release_date_precision}'"
)
tracks_data = album_data["tracks"]
tracks_items = tracks_data["items"]
while tracks_data["next"]:
tracks_data = self._handle_response("get", tracks_data["next"])
tracks_items.extend(tracks_data["items"])
tracks = []
medium_totals: dict[int | None, int] = collections.defaultdict(int)
for i, track_data in enumerate(tracks_items, start=1):
track = self._get_track(track_data)
track.index = i
medium_totals[track.medium] += 1
tracks.append(track)
for track in tracks:
track.medium_total = medium_totals[track.medium]
return AlbumInfo(
album=album_data["name"],
album_id=spotify_id,
spotify_album_id=spotify_id,
artist=artist,
artist_id=artist_id,
spotify_artist_id=artist_id,
tracks=tracks,
albumtype=album_data["album_type"],
va=len(album_data["artists"]) == 1
and artist.lower() == "various artists",
year=year,
month=month,
day=day,
label=album_data["label"],
mediums=max(filter(None, medium_totals.keys())),
data_source=self.data_source,
data_url=album_data["external_urls"]["spotify"],
)
def _get_track(self, track_data: JSONDict) -> TrackInfo:
"""Convert a Spotify track object dict to a TrackInfo object.
:param track_data: Simplified track object
(https://developer.spotify.com/documentation/web-api/reference/object-model/#track-object-simplified)
:return: TrackInfo object for track
"""
artist, artist_id = self.get_artist(track_data["artists"])
# Get album information for spotify tracks
try:
album = track_data["album"]["name"]
except (KeyError, TypeError):
album = None
return TrackInfo(
title=track_data["name"],
track_id=track_data["id"],
spotify_track_id=track_data["id"],
artist=artist,
album=album,
artist_id=artist_id,
spotify_artist_id=artist_id,
length=track_data["duration_ms"] / 1000,
index=track_data["track_number"],
medium=track_data["disc_number"],
medium_index=track_data["track_number"],
data_source=self.data_source,
data_url=track_data["external_urls"]["spotify"],
)
def track_for_id(self, track_id: str) -> None | TrackInfo:
"""Fetch a track by its Spotify ID or URL.
Returns a TrackInfo object or None if the track is not found.
"""
if not (spotify_id := self._extract_id(track_id)):
self._log.debug("Invalid Spotify ID: {}", track_id)
return None
if not (
track_data := self._handle_response(
"get", f"{self.track_url}{spotify_id}"
)
):
self._log.debug("Track not found: {}", track_id)
return None
track = self._get_track(track_data)
# Get album's tracks to set `track.index` (position on the entire
# release) and `track.medium_total` (total number of tracks on
# the track's disc).
album_data = self._handle_response(
"get", f"{self.album_url}{track_data['album']['id']}"
)
medium_total = 0
for i, track_data in enumerate(album_data["tracks"]["items"], start=1):
if track_data["disc_number"] == track.medium:
medium_total += 1
if track_data["id"] == track.track_id:
track.index = i
track.medium_total = medium_total
return track
def _search_api(
self,
query_type: Literal["album", "track"],
filters: SearchFilter,
query_string: str = "",
) -> Sequence[SearchResponseAlbums | SearchResponseTracks]:
"""Query the Spotify Search API for the specified ``query_string``,
applying the provided ``filters``.
:param query_type: Item type to search across. Valid types are:
'album', 'artist', 'playlist', and 'track'.
:param filters: Field filters to apply.
:param query_string: Additional query to include in the search.
"""
query = self._construct_search_query(
filters=filters, query_string=query_string
)
self._log.debug("Searching {.data_source} for '{}'", self, query)
try:
response = self._handle_response(
"get",
self.search_url,
params={
"q": query,
"type": query_type,
"limit": self.config["search_limit"].get(),
},
)
except APIError as e:
self._log.debug("Spotify API error: {}", e)
return ()
response_data = response.get(f"{query_type}s", {}).get("items", [])
self._log.debug(
"Found {} result(s) from {.data_source} for '{}'",
len(response_data),
self,
query,
)
return response_data
def commands(self) -> list[ui.Subcommand]:
# autotagger import command
def queries(lib, opts, args):
success = self._parse_opts(opts)
if success:
results = self._match_library_tracks(lib, args)
self._output_match_results(results)
spotify_cmd = ui.Subcommand(
"spotify", help=f"build a {self.data_source} playlist"
)
spotify_cmd.parser.add_option(
"-m",
"--mode",
action="store",
help=(
f'"open" to open {self.data_source} with playlist, '
'"list" to print (default)'
),
)
spotify_cmd.parser.add_option(
"-f",
"--show-failures",
action="store_true",
dest="show_failures",
help=f"list tracks that did not match a {self.data_source} ID",
)
spotify_cmd.func = queries
# spotifysync command
sync_cmd = ui.Subcommand(
"spotifysync", help="fetch track attributes from Spotify"
)
sync_cmd.parser.add_option(
"-f",
"--force",
dest="force_refetch",
action="store_true",
default=False,
help="re-download data when already present",
)
def func(lib, opts, args):
items = lib.items(args)
self._fetch_info(items, ui.should_write(), opts.force_refetch)
sync_cmd.func = func
return [spotify_cmd, sync_cmd]
def _parse_opts(self, opts):
if opts.mode:
self.config["mode"].set(opts.mode)
if opts.show_failures:
self.config["show_failures"].set(True)
if self.config["mode"].get() not in ["list", "open"]:
self._log.warning(
"{} is not a valid mode", self.config["mode"].get()
)
return False
self.opts = opts
return True
def _match_library_tracks(self, library: Library, keywords: str):
"""Get a list of simplified track object dicts for library tracks
matching the specified ``keywords``.
:param library: beets library object to query.
:param keywords: Query to match library items against.
:return: List of simplified track object dicts for library items
matching the specified query.
"""
results = []
failures = []
items = library.items(keywords)
if not items:
self._log.debug(
"Your beets query returned no items, skipping {.data_source}.",
self,
)
return
self._log.info("Processing {} tracks...", len(items))
for item in items:
# Apply regex transformations if provided
for regex in self.config["regex"].get():
if (
not regex["field"]
or not regex["search"]
or not regex["replace"]
):
continue
value = item[regex["field"]]
item[regex["field"]] = re.sub(
regex["search"], regex["replace"], value
)
artist = item["artist"] or item["albumartist"]
album = item["album"]
query_string = item["title"]
# Query the Web API for each track, look for the items' JSON data
query_filters: SearchFilter = {}
if artist:
query_filters["artist"] = artist
if album:
query_filters["album"] = album
response_data_tracks = self._search_api(
query_type="track",
query_string=query_string,
filters=query_filters,
)
if not response_data_tracks:
query = self._construct_search_query(
query_string=query_string, filters=query_filters
)
failures.append(query)
continue
# Apply market filter if requested
region_filter: str = self.config["region_filter"].get()
if region_filter:
response_data_tracks = [
track_data
for track_data in response_data_tracks
if region_filter in track_data["available_markets"]
]
if (
len(response_data_tracks) == 1
or self.config["tiebreak"].get() == "first"
):
self._log.debug(
"{.data_source} track(s) found, count: {}",
self,
len(response_data_tracks),
)
chosen_result = response_data_tracks[0]
else:
# Use the popularity filter
self._log.debug(
"Most popular track chosen, count: {}",
len(response_data_tracks),
)
chosen_result = max(
response_data_tracks,
key=lambda x: x[
# We are sure this is a track response!
"popularity" # type: ignore[typeddict-item]
],
)
results.append(chosen_result)
failure_count = len(failures)
if failure_count > 0:
if self.config["show_failures"].get():
self._log.info(
"{} track(s) did not match a {.data_source} ID:",
failure_count,
self,
)
for track in failures:
self._log.info("track: {}", track)
self._log.info("")
else:
self._log.warning(
"{} track(s) did not match a {.data_source} ID:\n"
"use --show-failures to display",
failure_count,
self,
)
return results
def _output_match_results(self, results):
"""Open a playlist or print Spotify URLs for the provided track
object dicts.
:param results: List of simplified track object dicts
(https://developer.spotify.com/documentation/web-api/reference/object-model/#track-object-simplified)
:type results: list[dict]
"""
if results:
spotify_ids = [track_data["id"] for track_data in results]
if self.config["mode"].get() == "open":
self._log.info(
"Attempting to open {.data_source} with playlist", self
)
spotify_url = (
f"spotify:trackset:Playlist:{','.join(spotify_ids)}"
)
webbrowser.open(spotify_url)
else:
for spotify_id in spotify_ids:
print(f"{self.open_track_url}{spotify_id}")
else:
self._log.warning(
"No {.data_source} tracks found from beets query", self
)
def _fetch_info(self, items, write, force):
"""Obtain track information from Spotify."""
self._log.debug("Total {} tracks", len(items))
for index, item in enumerate(items, start=1):
self._log.info(
"Processing {}/{} tracks - {} ", index, len(items), item
)
# If we're not forcing re-downloading for all tracks, check
# whether the popularity data is already present
if not force:
if "spotify_track_popularity" in item:
self._log.debug("Popularity already present for: {}", item)
continue
try:
spotify_track_id = item.spotify_track_id
except AttributeError:
self._log.debug("No track_id present for: {}", item)
continue
popularity, isrc, ean, upc = self.track_info(spotify_track_id)
item["spotify_track_popularity"] = popularity
item["isrc"] = isrc
item["ean"] = ean
item["upc"] = upc
audio_features = self.track_audio_features(spotify_track_id)
if audio_features is None:
self._log.info("No audio features found for: {}", item)
else:
for feature, value in audio_features.items():
if feature in self.spotify_audio_features:
item[self.spotify_audio_features[feature]] = value
item["spotify_updated"] = time.time()
item.store()
if write:
item.try_write()
def track_info(self, track_id: str):
"""Fetch a track's popularity and external IDs using its Spotify ID."""
track_data = self._handle_response("get", f"{self.track_url}{track_id}")
external_ids = track_data.get("external_ids", {})
popularity = track_data.get("popularity")
self._log.debug(
"track_popularity: {} and track_isrc: {}",
popularity,
external_ids.get("isrc"),
)
return (
popularity,
external_ids.get("isrc"),
external_ids.get("ean"),
external_ids.get("upc"),
)
def track_audio_features(self, track_id: str):
"""Fetch track audio features by its Spotify ID."""
try:
return self._handle_response(
"get", f"{self.audio_features_url}{track_id}"
)
except APIError as e:
self._log.debug("Spotify API error: {}", e)
return None
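# A minimal standalone sketch of the Client Credentials Flow performed by
# ``_authenticate`` above. The credentials are placeholders; substitute a
# real client ID and secret registered with Spotify.
if __name__ == "__main__":
    client_id = "your-client-id"
    client_secret = "your-client-secret"
    basic = base64.b64encode(f"{client_id}:{client_secret}".encode()).decode()
    resp = requests.post(
        "https://accounts.spotify.com/api/token",
        data={"grant_type": "client_credentials"},
        headers={"Authorization": f"Basic {basic}"},
        timeout=10,
    )
    resp.raise_for_status()
    print(resp.json()["access_token"])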
beetbox-beets-c1877b7/beetsplug/subsonicplaylist.py 0000664 0000000 0000000 00000014505 15073551743 0022600 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2019, Joris Jensen
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import random
import string
from hashlib import md5
from urllib.parse import urlencode
from xml.etree import ElementTree
import requests
from beets.dbcore import AndQuery
from beets.dbcore.query import MatchQuery
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
__author__ = "https://github.com/MrNuggelz"
def filter_to_be_removed(items, keys):
    """Return the ``items`` whose (artist, album, title) triple does not
    appear in ``keys``. Both branches compute the same result; they just
    iterate over the smaller of the two collections.
    """
if len(items) > len(keys):
dont_remove = []
for artist, album, title in keys:
for item in items:
if (
artist == item["artist"]
and album == item["album"]
and title == item["title"]
):
dont_remove.append(item)
return [item for item in items if item not in dont_remove]
else:
def to_be_removed(item):
for artist, album, title in keys:
if (
artist == item["artist"]
and album == item["album"]
and title == item["title"]
):
return False
return True
return [item for item in items if to_be_removed(item)]
class SubsonicPlaylistPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"delete": False,
"playlist_ids": [],
"playlist_names": [],
"username": "",
"password": "",
}
)
self.config["password"].redact = True
def update_tags(self, playlist_dict, lib):
with lib.transaction():
for query, playlist_tag in playlist_dict.items():
query = AndQuery(
[
MatchQuery("artist", query[0]),
MatchQuery("album", query[1]),
MatchQuery("title", query[2]),
]
)
items = lib.items(query)
if not items:
                    self._log.warning(
"{} | track not found ({})", playlist_tag, query
)
continue
for item in items:
item.subsonic_playlist = playlist_tag
item.try_sync(write=True, move=False)
def get_playlist(self, playlist_id):
xml = self.send("getPlaylist", {"id": playlist_id}).text
playlist = ElementTree.fromstring(xml)[0]
if playlist.attrib.get("code", "200") != "200":
alt_error = "error getting playlist, but no error message found"
            self._log.warning(playlist.attrib.get("message", alt_error))
return
name = playlist.attrib.get("name", "undefined")
tracks = [
(t.attrib["artist"], t.attrib["album"], t.attrib["title"])
for t in playlist
]
return name, tracks
def commands(self):
def build_playlist(lib, opts, args):
self.config.set_args(opts)
ids = self.config["playlist_ids"].as_str_seq()
if self.config["playlist_names"].as_str_seq():
playlists = ElementTree.fromstring(
self.send("getPlaylists").text
)[0]
if playlists.attrib.get("code", "200") != "200":
alt_error = (
"error getting playlists, but no error message found"
)
                    self._log.warning(playlists.attrib.get("message", alt_error))
return
for name in self.config["playlist_names"].as_str_seq():
for playlist in playlists:
if name == playlist.attrib["name"]:
ids.append(playlist.attrib["id"])
playlist_dict = self.get_playlists(ids)
# delete old tags
if self.config["delete"]:
existing = list(lib.items('subsonic_playlist:";"'))
to_be_removed = filter_to_be_removed(
existing, playlist_dict.keys()
)
for item in to_be_removed:
item["subsonic_playlist"] = ""
with lib.transaction():
item.try_sync(write=True, move=False)
self.update_tags(playlist_dict, lib)
subsonicplaylist_cmds = Subcommand(
"subsonicplaylist", help="import a subsonic playlist"
)
subsonicplaylist_cmds.parser.add_option(
"-d",
"--delete",
action="store_true",
help="delete tag from items not in any playlist anymore",
)
subsonicplaylist_cmds.func = build_playlist
return [subsonicplaylist_cmds]
def generate_token(self):
salt = "".join(random.choices(string.ascii_lowercase + string.digits))
return (
md5((self.config["password"].get() + salt).encode()).hexdigest(),
salt,
)
def send(self, endpoint, params=None):
if params is None:
params = {}
        token, salt = self.generate_token()
        params["u"] = self.config["username"].as_str()
        params["t"] = token
        params["s"] = salt
params["v"] = "1.12.0"
params["c"] = "beets"
resp = requests.get(
f"{self.config['base_url'].get()}/rest/{endpoint}?{urlencode(params)}",
timeout=10,
)
return resp
def get_playlists(self, ids):
output = {}
for playlist_id in ids:
name, tracks = self.get_playlist(playlist_id)
for track in tracks:
if track not in output:
output[track] = ";"
output[track] += f"{name};"
return output
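# A minimal sketch of the Subsonic token scheme used by ``generate_token``
# above: t = md5(password + salt). The values are invented for illustration.
if __name__ == "__main__":
    password, salt = "sesame", "c19b2d"
    token = md5((password + salt).encode()).hexdigest()
    print({"t": token, "s": salt})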
beetbox-beets-c1877b7/beetsplug/subsonicupdate.py 0000664 0000000 0000000 00000012242 15073551743 0022215 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates Subsonic library on Beets import
Your Beets configuration file should contain
a "subsonic" section like the following:
subsonic:
url: https://mydomain.com:443/subsonic
user: username
pass: password
auth: token
For older Subsonic versions, token authentication
is not supported, use password instead:
subsonic:
url: https://mydomain.com:443/subsonic
user: username
pass: password
auth: pass
"""
import hashlib
import random
import string
from binascii import hexlify
import requests
from beets.plugins import BeetsPlugin
__author__ = "https://github.com/maffo999"
class SubsonicUpdate(BeetsPlugin):
def __init__(self):
super().__init__("subsonic")
# Set default configuration values
self.config.add(
{
"user": "admin",
"pass": "admin",
"url": "http://localhost:4040",
"auth": "token",
}
)
self.config["user"].redact = True
self.config["pass"].redact = True
self.register_listener("database_change", self.db_change)
self.register_listener("smartplaylist_update", self.spl_update)
def db_change(self, lib, model):
self.register_listener("cli_exit", self.start_scan)
def spl_update(self):
self.register_listener("cli_exit", self.start_scan)
def __create_token(self):
"""Create salt and token from given password.
:return: The generated salt and hashed token
"""
password = self.config["pass"].as_str()
# Pick the random sequence and salt the password
r = string.ascii_letters + string.digits
salt = "".join([random.choice(r) for _ in range(6)])
salted_password = f"{password}{salt}"
token = hashlib.md5(salted_password.encode("utf-8")).hexdigest()
# Put together the payload of the request to the server and the URL
return salt, token
def __format_url(self, endpoint):
"""Get the Subsonic URL to trigger the given endpoint.
Uses either the url config option or the deprecated host, port,
and context_path config options together.
:return: Endpoint for updating Subsonic
"""
url = self.config["url"].as_str()
if url and url.endswith("/"):
url = url[:-1]
# @deprecated("Use url config option instead")
if not url:
host = self.config["host"].as_str()
port = self.config["port"].get(int)
context_path = self.config["contextpath"].as_str()
if context_path == "/":
context_path = ""
url = f"http://{host}:{port}{context_path}"
return f"{url}/rest/{endpoint}"
def start_scan(self):
user = self.config["user"].as_str()
auth = self.config["auth"].as_str()
url = self.__format_url("startScan")
self._log.debug("URL is {}", url)
self._log.debug("auth type is {.config[auth]}", self)
if auth == "token":
salt, token = self.__create_token()
payload = {
"u": user,
"t": token,
"s": salt,
"v": "1.13.0", # Subsonic 5.3 and newer
"c": "beets",
"f": "json",
}
elif auth == "password":
password = self.config["pass"].as_str()
encpass = hexlify(password.encode()).decode()
payload = {
"u": user,
"p": f"enc:{encpass}",
"v": "1.12.0",
"c": "beets",
"f": "json",
}
else:
return
try:
response = requests.get(
url,
params=payload,
timeout=10,
)
json = response.json()
if (
response.status_code == 200
and json["subsonic-response"]["status"] == "ok"
):
count = json["subsonic-response"]["scanStatus"]["count"]
self._log.info("Updating Subsonic; scanning {} tracks", count)
elif (
response.status_code == 200
and json["subsonic-response"]["status"] == "failed"
):
self._log.error(
"Error: {[subsonic-response][error][message]}", json
)
else:
self._log.error("Error: {}", json)
except Exception as error:
self._log.error("Error: {}", error)
beetbox-beets-c1877b7/beetsplug/substitute.py 0000664 0000000 0000000 00000003355 15073551743 0021405 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2023, Daniele Ferone.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The substitute plugin module.
Uses user-specified substitution rules to canonicalize names for path formats.
"""
import re
from beets.plugins import BeetsPlugin
class Substitute(BeetsPlugin):
"""The substitute plugin class.
    Creates a template field function that rewrites the given field value
    using the configured substitution rules: a list of (pattern,
    replacement) pairs. A usage sketch appears at the end of this module.
"""
def tmpl_substitute(self, text):
"""Do the actual replacing."""
if text:
for pattern, replacement in self.substitute_rules:
text = pattern.sub(replacement, text)
return text
else:
return ""
def __init__(self):
"""Initialize the substitute plugin.
Get the configuration, register template function and create list of
substitute rules.
"""
super().__init__()
self.template_funcs["substitute"] = self.tmpl_substitute
self.substitute_rules = [
(re.compile(key, flags=re.IGNORECASE), value)
for key, value in self.config.flatten().items()
]
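# A minimal sketch of the substitution mechanism configured above. The rule
# shown is an invented example; real rules come from the plugin's config.
if __name__ == "__main__":
    rules = [(re.compile("^the beatles$", flags=re.IGNORECASE), "Beatles, The")]
    text = "The Beatles"
    for pattern, replacement in rules:
        text = pattern.sub(replacement, text)
    print(text)  # -> Beatles, The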
beetbox-beets-c1877b7/beetsplug/the.py 0000664 0000000 0000000 00000006224 15073551743 0017750 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Blemjhoo Tezoulbr .
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Moves patterns in path formats (suitable for moving articles)."""
import re
from beets.plugins import BeetsPlugin
__author__ = "baobab@heresiarch.info"
__version__ = "1.1"
PATTERN_THE = "^the\\s"
PATTERN_A = "^[a][n]?\\s"
FORMAT = "{}, {}"
class ThePlugin(BeetsPlugin):
patterns: list[str] = []
def __init__(self):
super().__init__()
self.template_funcs["the"] = self.the_template_func
self.config.add(
{
"the": True,
"a": True,
"format": "{}, {}",
"strip": False,
"patterns": [],
}
)
self.patterns = self.config["patterns"].as_str_seq()
for p in self.patterns:
if p:
try:
re.compile(p)
except re.error:
self._log.error("invalid pattern: {}", p)
else:
if not (p.startswith("^") or p.endswith("$")):
self._log.warning(
'warning: "{}" will not match string start/end',
p,
)
if self.config["a"]:
self.patterns = [PATTERN_A] + self.patterns
if self.config["the"]:
self.patterns = [PATTERN_THE] + self.patterns
if not self.patterns:
self._log.warning("no patterns defined!")
def unthe(self, text, pattern):
"""Moves pattern in the path format string or strips it
text -- text to handle
pattern -- regexp pattern (case ignore is already on)
strip -- if True, pattern will be removed
"""
if text:
            regex = re.compile(pattern, flags=re.IGNORECASE)
            try:
                article = regex.findall(text)[0]
            except IndexError:
                return text
            rest = regex.sub("", text).strip()
            if self.config["strip"]:
                return rest
            fmt = self.config["format"].as_str()
            return fmt.format(rest, article.strip()).strip()
else:
return ""
def the_template_func(self, text):
if not self.patterns:
return text
if text:
for p in self.patterns:
r = self.unthe(text, p)
if r != text:
self._log.debug('"{}" -> "{}"', text, r)
break
return r
else:
return ""
beetbox-beets-c1877b7/beetsplug/thumbnails.py 0000664 0000000 0000000 00000022762 15073551743 0021343 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Bruno Cauet
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Create freedesktop.org-compliant thumbnails for album folders
This plugin is POSIX-only.
Spec: standards.freedesktop.org/thumbnail-spec/latest/index.html
"""
import ctypes
import ctypes.util
import os
import shutil
from hashlib import md5
from pathlib import PurePosixPath
from xdg import BaseDirectory
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets.util import bytestring_path, displayable_path, syspath
from beets.util.artresizer import ArtResizer
BASE_DIR = os.path.join(BaseDirectory.xdg_cache_home, "thumbnails")
NORMAL_DIR = bytestring_path(os.path.join(BASE_DIR, "normal"))
LARGE_DIR = bytestring_path(os.path.join(BASE_DIR, "large"))
class ThumbnailsPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"auto": True,
"force": False,
"dolphin": False,
}
)
if self.config["auto"] and self._check_local_ok():
self.register_listener("art_set", self.process_album)
def commands(self):
thumbnails_command = Subcommand(
"thumbnails", help="Create album thumbnails"
)
thumbnails_command.parser.add_option(
"-f",
"--force",
dest="force",
action="store_true",
default=False,
help="force regeneration of thumbnails deemed fine (existing & "
"recent enough)",
)
thumbnails_command.parser.add_option(
"--dolphin",
dest="dolphin",
action="store_true",
default=False,
help="create Dolphin-compatible thumbnail information (for KDE)",
)
thumbnails_command.func = self.process_query
return [thumbnails_command]
def process_query(self, lib, opts, args):
self.config.set_args(opts)
if self._check_local_ok():
for album in lib.albums(args):
self.process_album(album)
def _check_local_ok(self):
"""Check that everything is ready:
- local capability to resize images
- thumbnail dirs exist (create them if needed)
- detect whether we'll use PIL or IM
- detect whether we'll use GIO or Python to get URIs
"""
if not ArtResizer.shared.local:
self._log.warning(
"No local image resizing capabilities, "
"cannot generate thumbnails"
)
return False
for dir in (NORMAL_DIR, LARGE_DIR):
if not os.path.exists(syspath(dir)):
os.makedirs(syspath(dir))
if not ArtResizer.shared.can_write_metadata:
raise RuntimeError(
f"Thumbnails: ArtResizer backend {ArtResizer.shared.method}"
f" unexpectedly cannot write image metadata."
)
self._log.debug("using {.shared.method} to write metadata", ArtResizer)
uri_getter = GioURI()
if not uri_getter.available:
uri_getter = PathlibURI()
self._log.debug("using {.name} to compute URIs", uri_getter)
self.get_uri = uri_getter.uri
return True
def process_album(self, album):
"""Produce thumbnails for the album folder."""
self._log.debug("generating thumbnail for {}", album)
if not album.artpath:
self._log.info("album {} has no art", album)
return
if self.config["dolphin"]:
self.make_dolphin_cover_thumbnail(album)
size = ArtResizer.shared.get_size(album.artpath)
if not size:
self._log.warning(
"problem getting the picture size for {.artpath}", album
)
return
wrote = True
if max(size) >= 256:
wrote &= self.make_cover_thumbnail(album, 256, LARGE_DIR)
wrote &= self.make_cover_thumbnail(album, 128, NORMAL_DIR)
if wrote:
self._log.info("wrote thumbnail for {}", album)
else:
self._log.info("nothing to do for {}", album)
def make_cover_thumbnail(self, album, size, target_dir):
"""Make a thumbnail of given size for `album` and put it in
`target_dir`.
"""
target = os.path.join(target_dir, self.thumbnail_file_name(album.path))
if (
os.path.exists(syspath(target))
and os.stat(syspath(target)).st_mtime
> os.stat(syspath(album.artpath)).st_mtime
):
if self.config["force"]:
self._log.debug(
"found a suitable {0}x{0} thumbnail for {1}, "
"forcing regeneration",
size,
album,
)
else:
self._log.debug(
"{0}x{0} thumbnail for {1} exists and is recent enough",
size,
album,
)
return False
resized = ArtResizer.shared.resize(size, album.artpath, target)
self.add_tags(album, resized)
shutil.move(syspath(resized), syspath(target))
return True
def thumbnail_file_name(self, path):
"""Compute the thumbnail file name
See https://standards.freedesktop.org/thumbnail-spec/latest/x227.html
"""
uri = self.get_uri(path)
hash = md5(uri.encode("utf-8")).hexdigest()
return bytestring_path(f"{hash}.png")
def add_tags(self, album, image_path):
"""Write required metadata to the thumbnail
See https://standards.freedesktop.org/thumbnail-spec/latest/x142.html
"""
mtime = os.stat(syspath(album.artpath)).st_mtime
metadata = {
"Thumb::URI": self.get_uri(album.artpath),
"Thumb::MTime": str(mtime),
}
try:
ArtResizer.shared.write_metadata(image_path, metadata)
except Exception:
self._log.exception(
"could not write metadata to {}", displayable_path(image_path)
)
def make_dolphin_cover_thumbnail(self, album):
outfilename = os.path.join(album.path, b".directory")
if os.path.exists(syspath(outfilename)):
return
artfile = os.path.split(album.artpath)[1]
with open(syspath(outfilename), "w") as f:
f.write("[Desktop Entry]\n")
f.write(f"Icon=./{artfile.decode('utf-8')}")
f.close()
self._log.debug("Wrote file {}", displayable_path(outfilename))
class URIGetter:
available = False
name = "Abstract base"
def uri(self, path):
raise NotImplementedError()
class PathlibURI(URIGetter):
available = True
name = "Python Pathlib"
def uri(self, path):
return PurePosixPath(os.fsdecode(path)).as_uri()
def copy_c_string(c_string):
"""Copy a `ctypes.POINTER(ctypes.c_char)` value into a new Python
string and return it. The old memory is then safe to free.
"""
# This is a pretty dumb way to get a string copy, but it seems to
# work. A more surefire way would be to allocate a ctypes buffer and copy
# the data with `memcpy` or somesuch.
return ctypes.cast(c_string, ctypes.c_char_p).value
class GioURI(URIGetter):
"""Use gio URI function g_file_get_uri. Paths must be utf-8 encoded."""
name = "GIO"
def __init__(self):
self.libgio = self.get_library()
self.available = bool(self.libgio)
if self.available:
self.libgio.g_type_init() # for glib < 2.36
self.libgio.g_file_new_for_path.argtypes = [ctypes.c_char_p]
self.libgio.g_file_new_for_path.restype = ctypes.c_void_p
self.libgio.g_file_get_uri.argtypes = [ctypes.c_void_p]
self.libgio.g_file_get_uri.restype = ctypes.POINTER(ctypes.c_char)
self.libgio.g_object_unref.argtypes = [ctypes.c_void_p]
def get_library(self):
lib_name = ctypes.util.find_library("gio-2")
try:
if not lib_name:
return False
return ctypes.cdll.LoadLibrary(lib_name)
except OSError:
return False
def uri(self, path):
g_file_ptr = self.libgio.g_file_new_for_path(path)
if not g_file_ptr:
raise RuntimeError(
f"No gfile pointer received for {displayable_path(path)}"
)
try:
uri_ptr = self.libgio.g_file_get_uri(g_file_ptr)
finally:
self.libgio.g_object_unref(g_file_ptr)
if not uri_ptr:
self.libgio.g_free(uri_ptr)
raise RuntimeError(
f"No URI received from the gfile pointer for {displayable_path(path)}"
)
try:
uri = copy_c_string(uri_ptr)
finally:
self.libgio.g_free(uri_ptr)
try:
return os.fsdecode(uri)
except UnicodeDecodeError:
raise RuntimeError(f"Could not decode filename from GIO: {uri!r}")
beetbox-beets-c1877b7/beetsplug/types.py 0000664 0000000 0000000 00000003055 15073551743 0020333 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from confuse import ConfigValueError
from beets.dbcore import types
from beets.plugins import BeetsPlugin
class TypesPlugin(BeetsPlugin):
@property
def item_types(self):
return self._types()
@property
def album_types(self):
return self._types()
def _types(self):
if not self.config.exists():
return {}
mytypes = {}
for key, value in self.config.items():
if value.get() == "int":
mytypes[key] = types.INTEGER
elif value.get() == "float":
mytypes[key] = types.FLOAT
elif value.get() == "bool":
mytypes[key] = types.BOOLEAN
elif value.get() == "date":
mytypes[key] = types.DATE
else:
raise ConfigValueError(
f"unknown type '{value}' for the '{key}' field"
)
return mytypes
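# A hypothetical configuration illustrating the mapping implemented above.
# The field names are invented; the type names (int, float, bool, date) are
# the only ones ``_types`` accepts:
#
#   types:
#       rating: int
#       danceable: float
#       liked: bool
#       last_played: date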
beetbox-beets-c1877b7/beetsplug/unimported.py 0000664 0000000 0000000 00000004670 15073551743 0021361 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2019, Joris Jensen
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""
List all files in the library folder which are not listed in the
beets library database, including art files
"""
import os
from beets import util
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, print_
__author__ = "https://github.com/MrNuggelz"
class Unimported(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add({"ignore_extensions": [], "ignore_subdirectories": []})
def commands(self):
def print_unimported(lib, opts, args):
ignore_exts = [
f".{x}".encode()
for x in self.config["ignore_extensions"].as_str_seq()
]
ignore_dirs = [
os.path.join(lib.directory, x.encode())
for x in self.config["ignore_subdirectories"].as_str_seq()
]
in_folder = set()
for root, _, files in os.walk(lib.directory):
# do not traverse if root is a child of an ignored directory
if any(root.startswith(ignored) for ignored in ignore_dirs):
continue
for file in files:
# ignore files with ignored extensions
if any(file.endswith(ext) for ext in ignore_exts):
continue
in_folder.add(os.path.join(root, file))
in_library = {x.path for x in lib.items()}
art_files = {x.artpath for x in lib.albums()}
for f in in_folder - in_library - art_files:
print_(util.displayable_path(f))
unimported = Subcommand(
"unimported",
help="list all files in the library folder which are not listed"
" in the beets library database",
)
unimported.func = print_unimported
return [unimported]
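# A hypothetical configuration for the two options read above; extensions
# and subdirectories are given as sequences:
#
#   unimported:
#       ignore_extensions: jpg png
#       ignore_subdirectories: NonMusic data temp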
beetbox-beets-c1877b7/beetsplug/web/ 0000775 0000000 0000000 00000000000 15073551743 0017367 5 ustar 00root root 0000000 0000000 beetbox-beets-c1877b7/beetsplug/web/__init__.py 0000664 0000000 0000000 00000036113 15073551743 0021504 0 ustar 00root root 0000000 0000000 # This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A Web interface to beets."""
import base64
import json
import os
import flask
from flask import g, jsonify
from unidecode import unidecode
from werkzeug.routing import BaseConverter, PathConverter
import beets.library
from beets import ui, util
from beets.dbcore.query import PathQuery
from beets.plugins import BeetsPlugin
# Utilities.
def _rep(obj, expand=False):
"""Get a flat -- i.e., JSON-ish -- representation of a beets Item or
Album object. For Albums, `expand` dictates whether tracks are
included.
"""
out = dict(obj)
if isinstance(obj, beets.library.Item):
if app.config.get("INCLUDE_PATHS", False):
out["path"] = util.displayable_path(out["path"])
else:
del out["path"]
# Filter all bytes attributes and convert them to strings.
for key, value in out.items():
if isinstance(out[key], bytes):
out[key] = base64.b64encode(value).decode("ascii")
# Get the size (in bytes) of the backing file. This is useful
# for the Tomahawk resolver API.
try:
out["size"] = os.path.getsize(util.syspath(obj.path))
except OSError:
out["size"] = 0
return out
elif isinstance(obj, beets.library.Album):
if app.config.get("INCLUDE_PATHS", False):
out["artpath"] = util.displayable_path(out["artpath"])
else:
del out["artpath"]
if expand:
out["items"] = [_rep(item) for item in obj.items()]
return out
def json_generator(items, root, expand=False):
"""Generator that dumps list of beets Items or Albums as JSON
:param root: root key for JSON
:param items: list of :class:`Item` or :class:`Album` to dump
:param expand: If true every :class:`Album` contains its items in the json
representation
:returns: generator that yields strings
"""
yield f'{{"{root}":['
first = True
for item in items:
if first:
first = False
else:
yield ","
yield json.dumps(_rep(item, expand=expand))
yield "]}"
def is_expand():
"""Returns whether the current request is for an expanded response."""
return flask.request.args.get("expand") is not None
def is_delete():
"""Returns whether the current delete request should remove the selected
files.
"""
return flask.request.args.get("delete") is not None
def get_method():
"""Returns the HTTP method of the current request."""
return flask.request.method
def resource(name, patchable=False):
"""Decorates a function to handle RESTful HTTP requests for a resource."""
def make_responder(retriever):
def responder(ids):
entities = [retriever(id) for id in ids]
entities = [entity for entity in entities if entity]
if get_method() == "DELETE":
if app.config.get("READONLY", True):
return flask.abort(405)
for entity in entities:
entity.remove(delete=is_delete())
return flask.make_response(jsonify({"deleted": True}), 200)
elif get_method() == "PATCH" and patchable:
if app.config.get("READONLY", True):
return flask.abort(405)
for entity in entities:
entity.update(flask.request.get_json())
entity.try_sync(True, False) # write, don't move
if len(entities) == 1:
return flask.jsonify(_rep(entities[0], expand=is_expand()))
elif entities:
return app.response_class(
json_generator(entities, root=name),
mimetype="application/json",
)
elif get_method() == "GET":
if len(entities) == 1:
return flask.jsonify(_rep(entities[0], expand=is_expand()))
elif entities:
return app.response_class(
json_generator(entities, root=name),
mimetype="application/json",
)
else:
return flask.abort(404)
else:
return flask.abort(405)
responder.__name__ = f"get_{name}"
return responder
return make_responder
def resource_query(name, patchable=False):
"""Decorates a function to handle RESTful HTTP queries for resources."""
def make_responder(query_func):
def responder(queries):
entities = query_func(queries)
if get_method() == "DELETE":
if app.config.get("READONLY", True):
return flask.abort(405)
for entity in entities:
entity.remove(delete=is_delete())
return flask.make_response(jsonify({"deleted": True}), 200)
elif get_method() == "PATCH" and patchable:
if app.config.get("READONLY", True):
return flask.abort(405)
for entity in entities:
entity.update(flask.request.get_json())
entity.try_sync(True, False) # write, don't move
return app.response_class(
json_generator(entities, root=name),
mimetype="application/json",
)
elif get_method() == "GET":
return app.response_class(
json_generator(
entities, root="results", expand=is_expand()
),
mimetype="application/json",
)
else:
return flask.abort(405)
responder.__name__ = f"query_{name}"
return responder
return make_responder
def resource_list(name):
"""Decorates a function to handle RESTful HTTP request for a list of
resources.
"""
def make_responder(list_all):
def responder():
return app.response_class(
json_generator(list_all(), root=name, expand=is_expand()),
mimetype="application/json",
)
responder.__name__ = f"all_{name}"
return responder
return make_responder
def _get_unique_table_field_values(model, field, sort_field):
"""retrieve all unique values belonging to a key from a model"""
if field not in model.all_keys() or sort_field not in model.all_keys():
raise KeyError
with g.lib.transaction() as tx:
        # Double-quote the identifiers so SQLite treats them as column and
        # table names rather than string literals.
        rows = tx.query(
            f'SELECT DISTINCT "{field}" FROM "{model._table}"'
            f' ORDER BY "{sort_field}"'
        )
return [row[0] for row in rows]
class IdListConverter(BaseConverter):
"""Converts comma separated lists of ids in urls to integer lists."""
def to_python(self, value):
ids = []
for id in value.split(","):
try:
ids.append(int(id))
except ValueError:
pass
return ids
def to_url(self, value):
return ",".join(str(v) for v in value)
class QueryConverter(PathConverter):
"""Converts slash separated lists of queries in the url to string list."""
def to_python(self, value):
queries = value.split("/")
"""Do not do path substitution on regex value tests"""
return [
query if "::" in query else query.replace("\\", os.sep)
for query in queries
]
def to_url(self, value):
return "/".join([v.replace(os.sep, "\\") for v in value])
class EverythingConverter(PathConverter):
part_isolating = False
regex = ".*?"
# Flask setup.
app = flask.Flask(__name__)
app.url_map.converters["idlist"] = IdListConverter
app.url_map.converters["query"] = QueryConverter
app.url_map.converters["everything"] = EverythingConverter
@app.before_request
def before_request():
g.lib = app.config["lib"]
# Items.
@app.route("/item/", methods=["GET", "DELETE", "PATCH"])
@resource("items", patchable=True)
def get_item(id):
return g.lib.get_item(id)
@app.route("/item/")
@app.route("/item/query/")
@resource_list("items")
def all_items():
return g.lib.items()
@app.route("/item//file")
def item_file(item_id):
item = g.lib.get_item(item_id)
item_path = util.syspath(item.path)
base_filename = os.path.basename(item_path)
try:
# Imitate http.server behaviour
base_filename.encode("latin-1", "strict")
except UnicodeError:
safe_filename = unidecode(base_filename)
else:
safe_filename = base_filename
response = flask.send_file(
item_path, as_attachment=True, download_name=safe_filename
)
return response
@app.route("/item/query/", methods=["GET", "DELETE", "PATCH"])
@resource_query("items", patchable=True)
def item_query(queries):
return g.lib.items(queries)
@app.route("/item/path/")
def item_at_path(path):
query = PathQuery("path", path.encode("utf-8"))
item = g.lib.items(query).get()
if item:
return flask.jsonify(_rep(item))
else:
return flask.abort(404)
@app.route("/item/values/")
def item_unique_field_values(key):
sort_key = flask.request.args.get("sort_key", key)
try:
values = _get_unique_table_field_values(
beets.library.Item, key, sort_key
)
except KeyError:
return flask.abort(404)
return flask.jsonify(values=values)
# Albums.
@app.route("/album/", methods=["GET", "DELETE"])
@resource("albums")
def get_album(id):
return g.lib.get_album(id)
@app.route("/album/")
@app.route("/album/query/")
@resource_list("albums")
def all_albums():
return g.lib.albums()
@app.route("/album/query/", methods=["GET", "DELETE"])
@resource_query("albums")
def album_query(queries):
return g.lib.albums(queries)
@app.route("/album//art")
def album_art(album_id):
album = g.lib.get_album(album_id)
if album and album.artpath:
return flask.send_file(album.artpath.decode())
else:
return flask.abort(404)
@app.route("/album/values/")
def album_unique_field_values(key):
sort_key = flask.request.args.get("sort_key", key)
try:
values = _get_unique_table_field_values(
beets.library.Album, key, sort_key
)
except KeyError:
return flask.abort(404)
return flask.jsonify(values=values)
# Artists.
@app.route("/artist/")
def all_artists():
with g.lib.transaction() as tx:
rows = tx.query("SELECT DISTINCT albumartist FROM albums")
all_artists = [row[0] for row in rows]
return flask.jsonify(artist_names=all_artists)
# Library information.
@app.route("/stats")
def stats():
with g.lib.transaction() as tx:
item_rows = tx.query("SELECT COUNT(*) FROM items")
album_rows = tx.query("SELECT COUNT(*) FROM albums")
return flask.jsonify(
{
"items": item_rows[0][0],
"albums": album_rows[0][0],
}
)
# UI.
@app.route("/")
def home():
return flask.render_template("index.html")
# Plugin hook.
class WebPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
"host": "127.0.0.1",
"port": 8337,
"cors": "",
"cors_supports_credentials": False,
"reverse_proxy": False,
"include_paths": False,
"readonly": True,
}
)
def commands(self):
cmd = ui.Subcommand("web", help="start a Web interface")
cmd.parser.add_option(
"-d",
"--debug",
action="store_true",
default=False,
help="debug mode",
)
def func(lib, opts, args):
if args:
self.config["host"] = args.pop(0)
if args:
self.config["port"] = int(args.pop(0))
app.config["lib"] = lib
# Normalizes json output
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = False
app.config["INCLUDE_PATHS"] = self.config["include_paths"]
app.config["READONLY"] = self.config["readonly"]
# Enable CORS if required.
if self.config["cors"]:
self._log.info(
"Enabling CORS with origin: {}", self.config["cors"]
)
from flask_cors import CORS
app.config["CORS_ALLOW_HEADERS"] = "Content-Type"
app.config["CORS_RESOURCES"] = {
r"/*": {"origins": self.config["cors"].get(str)}
}
CORS(
app,
supports_credentials=self.config[
"cors_supports_credentials"
].get(bool),
)
# Allow serving behind a reverse proxy
if self.config["reverse_proxy"]:
app.wsgi_app = ReverseProxied(app.wsgi_app)
# Start the web application.
app.run(
host=self.config["host"].as_str(),
port=self.config["port"].get(int),
debug=opts.debug,
threaded=True,
)
cmd.func = func
return [cmd]
class ReverseProxied:
"""Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
From: http://flask.pocoo.org/snippets/35/
:param app: the WSGI application
"""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get("HTTP_X_SCRIPT_NAME", "")
if script_name:
environ["SCRIPT_NAME"] = script_name
path_info = environ["PATH_INFO"]
if path_info.startswith(script_name):
environ["PATH_INFO"] = path_info[len(script_name) :]
scheme = environ.get("HTTP_X_SCHEME", "")
if scheme:
environ["wsgi.url_scheme"] = scheme
return self.app(environ, start_response)
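# Illustrative walk-through, assuming the nginx configuration from the
# docstring: a request for /myprefix/item/3/file arrives with the header
# X-Script-Name: /myprefix, so SCRIPT_NAME becomes "/myprefix" and
# PATH_INFO is trimmed to "/item/3/file" before Flask routes it.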
beetbox-beets-c1877b7/beetsplug/web/static/ 0000775 0000000 0000000 00000000000 15073551743 0020656 5 ustar 00root root 0000000 0000000 beetbox-beets-c1877b7/beetsplug/web/static/backbone.js 0000664 0000000 0000000 00000123141 15073551743 0022762 0 ustar 00root root 0000000 0000000 // Backbone.js 0.5.3
// (c) 2010 Jeremy Ashkenas, DocumentCloud Inc.
// Backbone may be freely distributed under the MIT license.
// For all details and documentation:
// http://documentcloud.github.com/backbone
(function(){
// Initial Setup
// -------------
// Save a reference to the global object.
var root = this;
// Save the previous value of the `Backbone` variable.
var previousBackbone = root.Backbone;
// The top-level namespace. All public Backbone classes and modules will
// be attached to this. Exported for both CommonJS and the browser.
var Backbone;
if (typeof exports !== 'undefined') {
Backbone = exports;
} else {
Backbone = root.Backbone = {};
}
// Current version of the library. Keep in sync with `package.json`.
Backbone.VERSION = '0.5.3';
// Require Underscore, if we're on the server, and it's not already present.
var _ = root._;
if (!_ && (typeof require !== 'undefined')) _ = require('underscore')._;
// For Backbone's purposes, jQuery or Zepto owns the `$` variable.
var $ = root.jQuery || root.Zepto;
// Runs Backbone.js in *noConflict* mode, returning the `Backbone` variable
// to its previous owner. Returns a reference to this Backbone object.
Backbone.noConflict = function() {
root.Backbone = previousBackbone;
return this;
};
// Turn on `emulateHTTP` to support legacy HTTP servers. Setting this option will
// fake `"PUT"` and `"DELETE"` requests via the `_method` parameter and set a
// `X-Http-Method-Override` header.
Backbone.emulateHTTP = false;
// Turn on `emulateJSON` to support legacy servers that can't deal with direct
// `application/json` requests ... will encode the body as
// `application/x-www-form-urlencoded` instead and will send the model in a
// form param named `model`.
Backbone.emulateJSON = false;
// Backbone.Events
// -----------------
// A module that can be mixed in to *any object* in order to provide it with
// custom events. You may `bind` or `unbind` a callback function to an event;
// `trigger`-ing an event fires all callbacks in succession.
//
// var object = {};
// _.extend(object, Backbone.Events);
// object.bind('expand', function(){ alert('expanded'); });
// object.trigger('expand');
//
Backbone.Events = {
// Bind an event, specified by a string name, `ev`, to a `callback` function.
// Passing `"all"` will bind the callback to all events fired.
bind : function(ev, callback, context) {
var calls = this._callbacks || (this._callbacks = {});
var list = calls[ev] || (calls[ev] = []);
list.push([callback, context]);
return this;
},
// Remove one or many callbacks. If `callback` is null, removes all
// callbacks for the event. If `ev` is null, removes all bound callbacks
// for all events.
unbind : function(ev, callback) {
var calls;
if (!ev) {
this._callbacks = {};
} else if (calls = this._callbacks) {
if (!callback) {
calls[ev] = [];
} else {
var list = calls[ev];
if (!list) return this;
for (var i = 0, l = list.length; i < l; i++) {
if (list[i] && callback === list[i][0]) {
list[i] = null;
break;
}
}
}
}
return this;
},
// Trigger an event, firing all bound callbacks. Callbacks are passed the
// same arguments as `trigger` is, apart from the event name.
// Listening for `"all"` passes the true event name as the first argument.
trigger : function(eventName) {
var list, calls, ev, callback, args;
var both = 2;
if (!(calls = this._callbacks)) return this;
while (both--) {
ev = both ? eventName : 'all';
if (list = calls[ev]) {
for (var i = 0, l = list.length; i < l; i++) {
if (!(callback = list[i])) {
list.splice(i, 1); i--; l--;
} else {
args = both ? Array.prototype.slice.call(arguments, 1) : arguments;
callback[0].apply(callback[1] || this, args);
}
}
}
}
return this;
}
};
// Backbone.Model
// --------------
// Create a new model, with defined attributes. A client id (`cid`)
// is automatically generated and assigned for you.
Backbone.Model = function(attributes, options) {
var defaults;
attributes || (attributes = {});
if (defaults = this.defaults) {
if (_.isFunction(defaults)) defaults = defaults.call(this);
attributes = _.extend({}, defaults, attributes);
}
this.attributes = {};
this._escapedAttributes = {};
this.cid = _.uniqueId('c');
this.set(attributes, {silent : true});
this._changed = false;
this._previousAttributes = _.clone(this.attributes);
if (options && options.collection) this.collection = options.collection;
this.initialize(attributes, options);
};
// Attach all inheritable methods to the Model prototype.
_.extend(Backbone.Model.prototype, Backbone.Events, {
// A snapshot of the model's previous attributes, taken immediately
// after the last `"change"` event was fired.
_previousAttributes : null,
// Has the item been changed since the last `"change"` event?
_changed : false,
// The default name for the JSON `id` attribute is `"id"`. MongoDB and
// CouchDB users may want to set this to `"_id"`.
idAttribute : 'id',
// Initialize is an empty function by default. Override it with your own
// initialization logic.
initialize : function(){},
// Return a copy of the model's `attributes` object.
toJSON : function() {
return _.clone(this.attributes);
},
// Get the value of an attribute.
get : function(attr) {
return this.attributes[attr];
},
// Get the HTML-escaped value of an attribute.
escape : function(attr) {
var html;
if (html = this._escapedAttributes[attr]) return html;
var val = this.attributes[attr];
return this._escapedAttributes[attr] = escapeHTML(val == null ? '' : '' + val);
},
// Returns `true` if the attribute contains a value that is not null
// or undefined.
has : function(attr) {
return this.attributes[attr] != null;
},
// Set a hash of model attributes on the object, firing `"change"` unless you
// choose to silence it.
set : function(attrs, options) {
// Extract attributes and options.
options || (options = {});
if (!attrs) return this;
if (attrs.attributes) attrs = attrs.attributes;
var now = this.attributes, escaped = this._escapedAttributes;
// Run validation.
if (!options.silent && this.validate && !this._performValidation(attrs, options)) return false;
// Check for changes of `id`.
if (this.idAttribute in attrs) this.id = attrs[this.idAttribute];
// We're about to start triggering change events.
var alreadyChanging = this._changing;
this._changing = true;
// Update attributes.
for (var attr in attrs) {
var val = attrs[attr];
if (!_.isEqual(now[attr], val)) {
now[attr] = val;
delete escaped[attr];
this._changed = true;
if (!options.silent) this.trigger('change:' + attr, this, val, options);
}
}
// Fire the `"change"` event, if the model has been changed.
if (!alreadyChanging && !options.silent && this._changed) this.change(options);
this._changing = false;
return this;
},
// Remove an attribute from the model, firing `"change"` unless you choose
// to silence it. `unset` is a noop if the attribute doesn't exist.
unset : function(attr, options) {
if (!(attr in this.attributes)) return this;
options || (options = {});
var value = this.attributes[attr];
// Run validation.
var validObj = {};
validObj[attr] = void 0;
if (!options.silent && this.validate && !this._performValidation(validObj, options)) return false;
// Remove the attribute.
delete this.attributes[attr];
delete this._escapedAttributes[attr];
if (attr == this.idAttribute) delete this.id;
this._changed = true;
if (!options.silent) {
this.trigger('change:' + attr, this, void 0, options);
this.change(options);
}
return this;
},
// Clear all attributes on the model, firing `"change"` unless you choose
// to silence it.
clear : function(options) {
options || (options = {});
var attr;
var old = this.attributes;
// Run validation.
var validObj = {};
for (attr in old) validObj[attr] = void 0;
if (!options.silent && this.validate && !this._performValidation(validObj, options)) return false;
this.attributes = {};
this._escapedAttributes = {};
this._changed = true;
if (!options.silent) {
for (attr in old) {
this.trigger('change:' + attr, this, void 0, options);
}
this.change(options);
}
return this;
},
// Fetch the model from the server. If the server's representation of the
// model differs from its current attributes, they will be overridden,
// triggering a `"change"` event.
fetch : function(options) {
options || (options = {});
var model = this;
var success = options.success;
options.success = function(resp, status, xhr) {
if (!model.set(model.parse(resp, xhr), options)) return false;
if (success) success(model, resp);
};
options.error = wrapError(options.error, model, options);
return (this.sync || Backbone.sync).call(this, 'read', this, options);
},
// Set a hash of model attributes, and sync the model to the server.
// If the server returns an attributes hash that differs, the model's
// state will be `set` again.
save : function(attrs, options) {
options || (options = {});
if (attrs && !this.set(attrs, options)) return false;
var model = this;
var success = options.success;
options.success = function(resp, status, xhr) {
if (!model.set(model.parse(resp, xhr), options)) return false;
if (success) success(model, resp, xhr);
};
options.error = wrapError(options.error, model, options);
var method = this.isNew() ? 'create' : 'update';
return (this.sync || Backbone.sync).call(this, method, this, options);
},
// Destroy this model on the server if it was already persisted. Upon success, the model is removed
// from its collection, if it has one.
destroy : function(options) {
options || (options = {});
if (this.isNew()) return this.trigger('destroy', this, this.collection, options);
var model = this;
var success = options.success;
options.success = function(resp) {
model.trigger('destroy', model, model.collection, options);
if (success) success(model, resp);
};
options.error = wrapError(options.error, model, options);
return (this.sync || Backbone.sync).call(this, 'delete', this, options);
},
// Default URL for the model's representation on the server -- if you're
// using Backbone's restful methods, override this to change the endpoint
// that will be called.
url : function() {
var base = getUrl(this.collection) || this.urlRoot || urlError();
if (this.isNew()) return base;
return base + (base.charAt(base.length - 1) == '/' ? '' : '/') + encodeURIComponent(this.id);
},
// **parse** converts a response into the hash of attributes to be `set` on
// the model. The default implementation is just to pass the response along.
parse : function(resp, xhr) {
return resp;
},
// Create a new model with identical attributes to this one.
clone : function() {
return new this.constructor(this);
},
// A model is new if it has never been saved to the server, and lacks an id.
isNew : function() {
return this.id == null;
},
// Call this method to manually fire a `change` event for this model.
// Calling this will cause all objects observing the model to update.
change : function(options) {
this.trigger('change', this, options);
this._previousAttributes = _.clone(this.attributes);
this._changed = false;
},
// Determine if the model has changed since the last `"change"` event.
// If you specify an attribute name, determine if that attribute has changed.
hasChanged : function(attr) {
if (attr) return this._previousAttributes[attr] != this.attributes[attr];
return this._changed;
},
// Return an object containing all the attributes that have changed, or false
// if there are no changed attributes. Useful for determining what parts of a
// view need to be updated and/or what attributes need to be persisted to
// the server.
changedAttributes : function(now) {
now || (now = this.attributes);
var old = this._previousAttributes;
var changed = false;
for (var attr in now) {
if (!_.isEqual(old[attr], now[attr])) {
changed = changed || {};
changed[attr] = now[attr];
}
}
return changed;
},
// Get the previous value of an attribute, recorded at the time the last
// `"change"` event was fired.
previous : function(attr) {
if (!attr || !this._previousAttributes) return null;
return this._previousAttributes[attr];
},
// Get all of the attributes of the model at the time of the previous
// `"change"` event.
previousAttributes : function() {
return _.clone(this._previousAttributes);
},
// Run validation against a set of incoming attributes, returning `true`
// if all is well. If a specific `error` callback has been passed,
// call that instead of firing the general `"error"` event.
_performValidation : function(attrs, options) {
var error = this.validate(attrs);
if (error) {
if (options.error) {
options.error(this, error, options);
} else {
this.trigger('error', this, error, options);
}
return false;
}
return true;
}
});
// Backbone.Collection
// -------------------
// Provides a standard collection class for our sets of models, ordered
// or unordered. If a `comparator` is specified, the Collection will maintain
// its models in sort order, as they're added and removed.
Backbone.Collection = function(models, options) {
options || (options = {});
if (options.comparator) this.comparator = options.comparator;
_.bindAll(this, '_onModelEvent', '_removeReference');
this._reset();
if (models) this.reset(models, {silent: true});
this.initialize.apply(this, arguments);
};
// Define the Collection's inheritable methods.
_.extend(Backbone.Collection.prototype, Backbone.Events, {
// The default model for a collection is just a **Backbone.Model**.
// This should be overridden in most cases.
model : Backbone.Model,
// Initialize is an empty function by default. Override it with your own
// initialization logic.
initialize : function(){},
// The JSON representation of a Collection is an array of the
// models' attributes.
toJSON : function() {
return this.map(function(model){ return model.toJSON(); });
},
// Add a model, or list of models to the set. Pass **silent** to avoid
// firing the `added` event for every new model.
add : function(models, options) {
if (_.isArray(models)) {
for (var i = 0, l = models.length; i < l; i++) {
this._add(models[i], options);
}
} else {
this._add(models, options);
}
return this;
},
// Remove a model, or a list of models from the set. Pass silent to avoid
// firing the `removed` event for every model removed.
remove : function(models, options) {
if (_.isArray(models)) {
for (var i = 0, l = models.length; i < l; i++) {
this._remove(models[i], options);
}
} else {
this._remove(models, options);
}
return this;
},
// Get a model from the set by id.
get : function(id) {
if (id == null) return null;
return this._byId[id.id != null ? id.id : id];
},
// Get a model from the set by client id.
getByCid : function(cid) {
return cid && this._byCid[cid.cid || cid];
},
// Get the model at the given index.
at: function(index) {
return this.models[index];
},
// Force the collection to re-sort itself. You don't need to call this under normal
// circumstances, as the set will maintain sort order as each item is added.
sort : function(options) {
options || (options = {});
if (!this.comparator) throw new Error('Cannot sort a set without a comparator');
this.models = this.sortBy(this.comparator);
if (!options.silent) this.trigger('reset', this, options);
return this;
},
// Pluck an attribute from each model in the collection.
pluck : function(attr) {
return _.map(this.models, function(model){ return model.get(attr); });
},
// When you have more items than you want to add or remove individually,
// you can reset the entire set with a new list of models, without firing
// any `added` or `removed` events. Fires `reset` when finished.
reset : function(models, options) {
models || (models = []);
options || (options = {});
this.each(this._removeReference);
this._reset();
this.add(models, {silent: true});
if (!options.silent) this.trigger('reset', this, options);
return this;
},
// Fetch the default set of models for this collection, resetting the
// collection when they arrive. If `add: true` is passed, appends the
// models to the collection instead of resetting.
fetch : function(options) {
options || (options = {});
var collection = this;
var success = options.success;
options.success = function(resp, status, xhr) {
collection[options.add ? 'add' : 'reset'](collection.parse(resp, xhr), options);
if (success) success(collection, resp);
};
options.error = wrapError(options.error, collection, options);
return (this.sync || Backbone.sync).call(this, 'read', this, options);
},
// Create a new instance of a model in this collection. After the model
// has been created on the server, it will be added to the collection.
// Returns the model, or 'false' if validation on a new model fails.
create : function(model, options) {
var coll = this;
options || (options = {});
model = this._prepareModel(model, options);
if (!model) return false;
var success = options.success;
options.success = function(nextModel, resp, xhr) {
coll.add(nextModel, options);
if (success) success(nextModel, resp, xhr);
};
model.save(null, options);
return model;
},
// **parse** converts a response into a list of models to be added to the
// collection. The default implementation is just to pass it through.
parse : function(resp, xhr) {
return resp;
},
// Proxy to _'s chain. Can't be proxied the same way the rest of the
// underscore methods are proxied because it relies on the underscore
// constructor.
chain: function () {
return _(this.models).chain();
},
// Reset all internal state. Called when the collection is reset.
_reset : function(options) {
this.length = 0;
this.models = [];
this._byId = {};
this._byCid = {};
},
// Prepare a model to be added to this collection
_prepareModel: function(model, options) {
if (!(model instanceof Backbone.Model)) {
var attrs = model;
model = new this.model(attrs, {collection: this});
if (model.validate && !model._performValidation(attrs, options)) model = false;
} else if (!model.collection) {
model.collection = this;
}
return model;
},
// Internal implementation of adding a single model to the set, updating
// hash indexes for `id` and `cid` lookups.
// Returns the model, or 'false' if validation on a new model fails.
_add : function(model, options) {
options || (options = {});
model = this._prepareModel(model, options);
if (!model) return false;
var already = this.getByCid(model);
if (already) throw new Error(["Can't add the same model to a set twice", already.id]);
this._byId[model.id] = model;
this._byCid[model.cid] = model;
var index = options.at != null ? options.at :
this.comparator ? this.sortedIndex(model, this.comparator) :
this.length;
this.models.splice(index, 0, model);
model.bind('all', this._onModelEvent);
this.length++;
if (!options.silent) model.trigger('add', model, this, options);
return model;
},
// Internal implementation of removing a single model from the set, updating
// hash indexes for `id` and `cid` lookups.
_remove : function(model, options) {
options || (options = {});
model = this.getByCid(model) || this.get(model);
if (!model) return null;
delete this._byId[model.id];
delete this._byCid[model.cid];
this.models.splice(this.indexOf(model), 1);
this.length--;
if (!options.silent) model.trigger('remove', model, this, options);
this._removeReference(model);
return model;
},
// Internal method to remove a model's ties to a collection.
_removeReference : function(model) {
if (this == model.collection) {
delete model.collection;
}
model.unbind('all', this._onModelEvent);
},
// Internal method called every time a model in the set fires an event.
// Sets need to update their indexes when models change ids. All other
// events simply proxy through. "add" and "remove" events that originate
// in other collections are ignored.
_onModelEvent : function(ev, model, collection, options) {
if ((ev == 'add' || ev == 'remove') && collection != this) return;
if (ev == 'destroy') {
this._remove(model, options);
}
if (model && ev === 'change:' + model.idAttribute) {
delete this._byId[model.previous(model.idAttribute)];
this._byId[model.id] = model;
}
this.trigger.apply(this, arguments);
}
});
// Underscore methods that we want to implement on the Collection.
var methods = ['forEach', 'each', 'map', 'reduce', 'reduceRight', 'find', 'detect',
'filter', 'select', 'reject', 'every', 'all', 'some', 'any', 'include',
'contains', 'invoke', 'max', 'min', 'sortBy', 'sortedIndex', 'toArray', 'size',
'first', 'rest', 'last', 'without', 'indexOf', 'lastIndexOf', 'isEmpty', 'groupBy'];
// Mix in each Underscore method as a proxy to `Collection#models`.
_.each(methods, function(method) {
Backbone.Collection.prototype[method] = function() {
return _[method].apply(_, [this.models].concat(_.toArray(arguments)));
};
});
// Backbone.Router
// -------------------
// Routers map faux-URLs to actions, and fire events when routes are
// matched. Creating a new one sets its `routes` hash, if not set statically.
Backbone.Router = function(options) {
options || (options = {});
if (options.routes) this.routes = options.routes;
this._bindRoutes();
this.initialize.apply(this, arguments);
};
// Cached regular expressions for matching named param parts and splatted
// parts of route strings.
var namedParam = /:([\w\d]+)/g;
var splatParam = /\*([\w\d]+)/g;
var escapeRegExp = /[-[\]{}()+?.,\\^$|#\s]/g;
// Set up all inheritable **Backbone.Router** properties and methods.
_.extend(Backbone.Router.prototype, Backbone.Events, {
// Initialize is an empty function by default. Override it with your own
// initialization logic.
initialize : function(){},
// Manually bind a single named route to a callback. For example:
//
// this.route('search/:query/p:num', 'search', function(query, num) {
// ...
// });
//
route : function(route, name, callback) {
Backbone.history || (Backbone.history = new Backbone.History);
if (!_.isRegExp(route)) route = this._routeToRegExp(route);
Backbone.history.route(route, _.bind(function(fragment) {
var args = this._extractParameters(route, fragment);
callback.apply(this, args);
this.trigger.apply(this, ['route:' + name].concat(args));
}, this));
},
// Simple proxy to `Backbone.history` to save a fragment into the history.
navigate : function(fragment, triggerRoute) {
Backbone.history.navigate(fragment, triggerRoute);
},
// Bind all defined routes to `Backbone.history`. We have to reverse the
// order of the routes here to support behavior where the most general
// routes can be defined at the bottom of the route map.
_bindRoutes : function() {
if (!this.routes) return;
var routes = [];
for (var route in this.routes) {
routes.unshift([route, this.routes[route]]);
}
for (var i = 0, l = routes.length; i < l; i++) {
this.route(routes[i][0], routes[i][1], this[routes[i][1]]);
}
},
// Convert a route string into a regular expression, suitable for matching
// against the current location hash.
_routeToRegExp : function(route) {
route = route.replace(escapeRegExp, "\\$&")
.replace(namedParam, "([^\/]*)")
.replace(splatParam, "(.*?)");
return new RegExp('^' + route + '$');
},
// Given a route, and a URL fragment that it matches, return the array of
// extracted parameters.
_extractParameters : function(route, fragment) {
return route.exec(fragment).slice(1);
}
});
// Backbone.History
// ----------------
// Handles cross-browser history management, based on URL fragments. If the
// browser does not support `onhashchange`, falls back to polling.
Backbone.History = function() {
this.handlers = [];
_.bindAll(this, 'checkUrl');
};
// Cached regex for cleaning hashes.
var hashStrip = /^#*/;
// Cached regex for detecting MSIE.
var isExplorer = /msie [\w.]+/;
// Has the history handling already been started?
var historyStarted = false;
// Set up all inheritable **Backbone.History** properties and methods.
_.extend(Backbone.History.prototype, {
// The default interval to poll for hash changes, if necessary, is
// twenty times a second.
interval: 50,
// Get the cross-browser normalized URL fragment, either from the URL,
// the hash, or the override.
getFragment : function(fragment, forcePushState) {
if (fragment == null) {
if (this._hasPushState || forcePushState) {
fragment = window.location.pathname;
var search = window.location.search;
if (search) fragment += search;
if (fragment.indexOf(this.options.root) == 0) fragment = fragment.substr(this.options.root.length);
} else {
fragment = window.location.hash;
}
}
return decodeURIComponent(fragment.replace(hashStrip, ''));
},
// Start the hash change handling, returning `true` if the current URL matches
// an existing route, and `false` otherwise.
start : function(options) {
// Figure out the initial configuration. Do we need an iframe?
// Is pushState desired ... is it available?
if (historyStarted) throw new Error("Backbone.history has already been started");
this.options = _.extend({}, {root: '/'}, this.options, options);
this._wantsPushState = !!this.options.pushState;
this._hasPushState = !!(this.options.pushState && window.history && window.history.pushState);
var fragment = this.getFragment();
var docMode = document.documentMode;
var oldIE = (isExplorer.exec(navigator.userAgent.toLowerCase()) && (!docMode || docMode <= 7));
if (oldIE) {
this.iframe = $('<iframe src="javascript:0" tabindex="-1" />').hide().appendTo('body')[0].contentWindow;
this.navigate(fragment);
}
// Depending on whether we're using pushState or hashes, and whether
// 'onhashchange' is supported, determine how we check the URL state.
if (this._hasPushState) {
$(window).bind('popstate', this.checkUrl);
} else if ('onhashchange' in window && !oldIE) {
$(window).bind('hashchange', this.checkUrl);
} else {
setInterval(this.checkUrl, this.interval);
}
// Determine if we need to change the base url, for a pushState link
// opened by a non-pushState browser.
this.fragment = fragment;
historyStarted = true;
var loc = window.location;
var atRoot = loc.pathname == this.options.root;
if (this._wantsPushState && !this._hasPushState && !atRoot) {
this.fragment = this.getFragment(null, true);
window.location.replace(this.options.root + '#' + this.fragment);
// Return immediately as browser will do redirect to new url
return true;
} else if (this._wantsPushState && this._hasPushState && atRoot && loc.hash) {
this.fragment = loc.hash.replace(hashStrip, '');
window.history.replaceState({}, document.title, loc.protocol + '//' + loc.host + this.options.root + this.fragment);
}
if (!this.options.silent) {
return this.loadUrl();
}
},
// Add a route to be tested when the fragment changes. Routes added later may
// override previous routes.
route : function(route, callback) {
this.handlers.unshift({route : route, callback : callback});
},
// Checks the current URL to see if it has changed, and if it has,
// calls `loadUrl`, normalizing across the hidden iframe.
checkUrl : function(e) {
var current = this.getFragment();
if (current == this.fragment && this.iframe) current = this.getFragment(this.iframe.location.hash);
if (current == this.fragment || current == decodeURIComponent(this.fragment)) return false;
if (this.iframe) this.navigate(current);
this.loadUrl() || this.loadUrl(window.location.hash);
},
// Attempt to load the current URL fragment. If a route succeeds with a
// match, returns `true`. If no defined routes matches the fragment,
// returns `false`.
loadUrl : function(fragmentOverride) {
var fragment = this.fragment = this.getFragment(fragmentOverride);
var matched = _.any(this.handlers, function(handler) {
if (handler.route.test(fragment)) {
handler.callback(fragment);
return true;
}
});
return matched;
},
// Save a fragment into the hash history. You are responsible for properly
// URL-encoding the fragment in advance. This does not trigger
// a `hashchange` event.
navigate : function(fragment, triggerRoute) {
var frag = (fragment || '').replace(hashStrip, '');
if (this.fragment == frag || this.fragment == decodeURIComponent(frag)) return;
if (this._hasPushState) {
var loc = window.location;
if (frag.indexOf(this.options.root) != 0) frag = this.options.root + frag;
this.fragment = frag;
window.history.pushState({}, document.title, loc.protocol + '//' + loc.host + frag);
} else {
window.location.hash = this.fragment = frag;
if (this.iframe && (frag != this.getFragment(this.iframe.location.hash))) {
this.iframe.document.open().close();
this.iframe.location.hash = frag;
}
}
if (triggerRoute) this.loadUrl(fragment);
}
});
// Backbone.View
// -------------
// Creating a Backbone.View creates its initial element outside of the DOM,
// if an existing element is not provided...
Backbone.View = function(options) {
this.cid = _.uniqueId('view');
this._configure(options || {});
this._ensureElement();
this.delegateEvents();
this.initialize.apply(this, arguments);
};
// Element lookup, scoped to DOM elements within the current view.
// This should be preferred to global lookups, if you're dealing with
// a specific view.
var selectorDelegate = function(selector) {
return $(selector, this.el);
};
// Cached regex to split keys for `delegate`.
var eventSplitter = /^(\S+)\s*(.*)$/;
// List of view options to be merged as properties.
var viewOptions = ['model', 'collection', 'el', 'id', 'attributes', 'className', 'tagName'];
// Set up all inheritable **Backbone.View** properties and methods.
_.extend(Backbone.View.prototype, Backbone.Events, {
// The default `tagName` of a View's element is `"div"`.
tagName : 'div',
// Attach the `selectorDelegate` function as the `$` property.
$ : selectorDelegate,
// Initialize is an empty function by default. Override it with your own
// initialization logic.
initialize : function(){},
// **render** is the core function that your view should override, in order
// to populate its element (`this.el`), with the appropriate HTML. The
// convention is for **render** to always return `this`.
render : function() {
return this;
},
// Remove this view from the DOM. Note that the view isn't present in the
// DOM by default, so calling this method may be a no-op.
remove : function() {
$(this.el).remove();
return this;
},
// For small amounts of DOM Elements, where a full-blown template isn't
// needed, use **make** to manufacture elements, one at a time.
//
// var el = this.make('li', {'class': 'row'}, this.model.escape('title'));
//
make : function(tagName, attributes, content) {
var el = document.createElement(tagName);
if (attributes) $(el).attr(attributes);
if (content) $(el).html(content);
return el;
},
// Set callbacks, where `this.callbacks` is a hash of
//
// *{"event selector": "callback"}*
//
// {
// 'mousedown .title': 'edit',
// 'click .button': 'save'
// }
//
// pairs. Callbacks will be bound to the view, with `this` set properly.
// Uses event delegation for efficiency.
// Omitting the selector binds the event to `this.el`.
// This only works for delegate-able events: not `focus`, `blur`, and
// not `change`, `submit`, and `reset` in Internet Explorer.
delegateEvents : function(events) {
if (!(events || (events = this.events))) return;
if (_.isFunction(events)) events = events.call(this);
$(this.el).unbind('.delegateEvents' + this.cid);
for (var key in events) {
var method = this[events[key]];
if (!method) throw new Error('Event "' + events[key] + '" does not exist');
var match = key.match(eventSplitter);
var eventName = match[1], selector = match[2];
method = _.bind(method, this);
eventName += '.delegateEvents' + this.cid;
if (selector === '') {
$(this.el).bind(eventName, method);
} else {
$(this.el).delegate(selector, eventName, method);
}
}
},
// Performs the initial configuration of a View with a set of options.
// Keys with special meaning *(model, collection, id, className)*, are
// attached directly to the view.
_configure : function(options) {
if (this.options) options = _.extend({}, this.options, options);
for (var i = 0, l = viewOptions.length; i < l; i++) {
var attr = viewOptions[i];
if (options[attr]) this[attr] = options[attr];
}
this.options = options;
},
// Ensure that the View has a DOM element to render into.
// If `this.el` is a string, pass it through `$()`, take the first
// matching element, and re-assign it to `el`. Otherwise, create
// an element from the `id`, `className` and `tagName` properties.
_ensureElement : function() {
if (!this.el) {
var attrs = this.attributes || {};
if (this.id) attrs.id = this.id;
if (this.className) attrs['class'] = this.className;
this.el = this.make(this.tagName, attrs);
} else if (_.isString(this.el)) {
this.el = $(this.el).get(0);
}
}
});
// The self-propagating extend function that Backbone classes use.
var extend = function (protoProps, classProps) {
var child = inherits(this, protoProps, classProps);
child.extend = this.extend;
return child;
};
// Set up inheritance for the model, collection, and view.
Backbone.Model.extend = Backbone.Collection.extend =
Backbone.Router.extend = Backbone.View.extend = extend;
// Map from CRUD to HTTP for our default `Backbone.sync` implementation.
var methodMap = {
'create': 'POST',
'update': 'PUT',
'delete': 'DELETE',
'read' : 'GET'
};
// Backbone.sync
// -------------
// Override this function to change the manner in which Backbone persists
// models to the server. You will be passed the type of request, and the
// model in question. By default, makes a RESTful Ajax request
// to the model's `url()`. Some possible customizations could be:
//
// * Use `setTimeout` to batch rapid-fire updates into a single request.
// * Send up the models as XML instead of JSON.
// * Persist models via WebSockets instead of Ajax.
//
// Turn on `Backbone.emulateHTTP` in order to send `PUT` and `DELETE` requests
// as `POST`, with a `_method` parameter containing the true HTTP method,
// as well as all requests with the body as `application/x-www-form-urlencoded` instead of
// `application/json` with the model in a param named `model`.
// Useful when interfacing with server-side languages like **PHP** that make
// it difficult to read the body of `PUT` requests.
Backbone.sync = function(method, model, options) {
var type = methodMap[method];
// Default JSON-request options.
var params = _.extend({
type: type,
dataType: 'json'
}, options);
// Ensure that we have a URL.
if (!params.url) {
params.url = getUrl(model) || urlError();
}
// Ensure that we have the appropriate request data.
if (!params.data && model && (method == 'create' || method == 'update')) {
params.contentType = 'application/json';
params.data = JSON.stringify(model.toJSON());
}
// For older servers, emulate JSON by encoding the request into an HTML-form.
if (Backbone.emulateJSON) {
params.contentType = 'application/x-www-form-urlencoded';
params.data = params.data ? {model : params.data} : {};
}
// For older servers, emulate HTTP by mimicking the HTTP method with `_method`
// And an `X-HTTP-Method-Override` header.
if (Backbone.emulateHTTP) {
if (type === 'PUT' || type === 'DELETE') {
if (Backbone.emulateJSON) params.data._method = type;
params.type = 'POST';
params.beforeSend = function(xhr) {
xhr.setRequestHeader('X-HTTP-Method-Override', type);
};
}
}
// Don't process data on a non-GET request.
if (params.type !== 'GET' && !Backbone.emulateJSON) {
params.processData = false;
}
// Make the request.
return $.ajax(params);
};
// Helpers
// -------
// Shared empty constructor function to aid in prototype-chain creation.
var ctor = function(){};
// Helper function to correctly set up the prototype chain, for subclasses.
// Similar to `goog.inherits`, but uses a hash of prototype properties and
// class properties to be extended.
var inherits = function(parent, protoProps, staticProps) {
var child;
// The constructor function for the new subclass is either defined by you
// (the "constructor" property in your `extend` definition), or defaulted
// by us to simply call `super()`.
if (protoProps && protoProps.hasOwnProperty('constructor')) {
child = protoProps.constructor;
} else {
child = function(){ return parent.apply(this, arguments); };
}
// Inherit class (static) properties from parent.
_.extend(child, parent);
// Set the prototype chain to inherit from `parent`, without calling
// `parent`'s constructor function.
ctor.prototype = parent.prototype;
child.prototype = new ctor();
// Add prototype properties (instance properties) to the subclass,
// if supplied.
if (protoProps) _.extend(child.prototype, protoProps);
// Add static properties to the constructor function, if supplied.
if (staticProps) _.extend(child, staticProps);
// Correctly set child's `prototype.constructor`.
child.prototype.constructor = child;
// Set a convenience property in case the parent's prototype is needed later.
child.__super__ = parent.prototype;
return child;
};
// Helper function to get a URL from a Model or Collection as a property
// or as a function.
var getUrl = function(object) {
if (!(object && object.url)) return null;
return _.isFunction(object.url) ? object.url() : object.url;
};
// Throw an error when a URL is needed, and none is supplied.
var urlError = function() {
throw new Error('A "url" property or function must be specified');
};
// Wrap an optional error callback with a fallback error event.
var wrapError = function(onError, model, options) {
return function(resp) {
if (onError) {
onError(model, resp, options);
} else {
model.trigger('error', model, resp, options);
}
};
};
// Helper function to escape a string for HTML rendering.
var escapeHTML = function(string) {
return string.replace(/&(?!\w+;|#\d+;|#x[\da-f]+;)/gi, '&').replace(//g, '>').replace(/"/g, '"').replace(/'/g, ''').replace(/\//g,'/');
};
}).call(this);
beetbox-beets-c1877b7/beetsplug/web/static/beets.css 0000664 0000000 0000000 00000005607 15073551743 0022502 0 ustar 00root root 0000000 0000000 body {
font-family: Helvetica, Arial, sans-serif;
}
#header {
position: fixed;
left: 0;
right: 0;
top: 0;
height: 36px;
color: white;
cursor: default;
/* shadowy border */
box-shadow: 0 0 20px #999;
-webkit-box-shadow: 0 0 20px #999;
-moz-box-shadow: 0 0 20px #999;
/* background gradient */
background: #0e0e0e;
background: -moz-linear-gradient(top, #6b6b6b 0%, #0e0e0e 100%);
background: -webkit-linear-gradient(top, #6b6b6b 0%,#0e0e0e 100%);
}
#header h1 {
font-size: 1.1em;
font-weight: bold;
color: white;
margin: 0.35em;
float: left;
}
#entities {
width: 17em;
position: fixed;
top: 36px;
left: 0;
bottom: 0;
margin: 0;
z-index: 1;
background: #dde4eb;
/* shadowy border */
box-shadow: 0 0 20px #666;
-webkit-box-shadow: 0 0 20px #666;
-moz-box-shadow: 0 0 20px #666;
}
#queryForm {
display: block;
text-align: center;
margin: 0.25em 0;
}
#query {
width: 95%;
font-size: 1em;
}
#entities ul {
width: 17em;
position: fixed;
top: 36px;
left: 0;
bottom: 0;
margin: 2.2em 0 0 0;
padding: 0;
overflow-y: auto;
overflow-x: hidden;
}
#entities ul li {
list-style: none;
padding: 4px 8px;
margin: 0;
cursor: default;
}
#entities ul li.selected {
background: #7abcff;
background: -moz-linear-gradient(top, #7abcff 0%, #60abf8 44%, #4096ee 100%);
background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,#7abcff), color-stop(44%,#60abf8), color-stop(100%,#4096ee));
color: white;
}
#entities ul li .playing {
margin-left: 5px;
font-size: 0.9em;
}
#main-detail, #extra-detail {
position: fixed;
left: 17em;
margin: 1.0em 0 0 1.5em;
}
#main-detail {
top: 36px;
height: 98px;
}
#main-detail .artist, #main-detail .album, #main-detail .title {
display: block;
}
#main-detail .title {
font-size: 1.3em;
font-weight: bold;
}
#main-detail .albumtitle {
font-style: italic;
}
#extra-detail {
overflow-x: hidden;
overflow-y: auto;
top: 134px;
bottom: 0;
right: 0;
}
/*Fix for correctly displaying line breaks in lyrics*/
#extra-detail .lyrics {
white-space: pre-wrap;
}
#extra-detail dl dt, #extra-detail dl dd {
list-style: none;
margin: 0;
padding: 0;
}
#extra-detail dl dt {
width: 10em;
float: left;
text-align: right;
font-weight: bold;
clear: both;
}
#extra-detail dl dd {
margin-left: 10.5em;
}
#player {
float: left;
width: 150px;
height: 36px;
}
#player .play, #player .pause, #player .disabled {
-webkit-appearance: none;
font-size: 1em;
font-family: Helvetica, Arial, sans-serif;
background: none;
border: none;
color: white;
padding: 5px;
margin: 0;
text-align: center;
width: 36px;
height: 36px;
}
#player .disabled {
color: #666;
}
beetbox-beets-c1877b7/beetsplug/web/static/beets.js 0000664 0000000 0000000 00000022710 15073551743 0022320 0 ustar 00root root 0000000 0000000 // Format times as minutes and seconds.
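// For example, timeFormat(75) yields '1:15'; undefined or NaN input
// falls back to '0:00'.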
var timeFormat = function(secs) {
if (secs == undefined || isNaN(secs)) {
return '0:00';
}
secs = Math.round(secs);
var mins = '' + Math.floor(secs / 60);
secs = '' + (secs % 60);
if (secs.length < 2) {
secs = '0' + secs;
}
return mins + ':' + secs;
}
// jQuery extension encapsulating event hookups for audio element controls.
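// Usage (as at the bottom of this file): $('#player').player();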
$.fn.player = function(debug) {
// Selected element should contain an HTML5 Audio element.
var audio = $('audio', this).get(0);
// Control elements that may be present, identified by class.
var playBtn = $('.play', this);
var pauseBtn = $('.pause', this);
var disabledInd = $('.disabled', this);
var timesEl = $('.times', this);
var curTimeEl = $('.currentTime', this);
var totalTimeEl = $('.totalTime', this);
var sliderPlayedEl = $('.slider .played', this);
var sliderLoadedEl = $('.slider .loaded', this);
// Button events.
playBtn.click(function() {
audio.play();
});
pauseBtn.click(function(ev) {
audio.pause();
});
// Utilities.
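// timePercent maps a playback position to a CSS width percentage,
// e.g. timePercent(30, 120) yields '25%'; invalid input falls back to 0.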
var timePercent = function(cur, total) {
if (cur == undefined || isNaN(cur) ||
total == undefined || isNaN(total) || total == 0) {
return 0;
}
var ratio = cur / total;
if (ratio > 1.0) {
ratio = 1.0;
}
return (Math.round(ratio * 10000) / 100) + '%';
}
// Event helpers.
var dbg = function(msg) {
if (debug)
console.log(msg);
}
var showState = function() {
if (audio.duration == undefined || isNaN(audio.duration)) {
playBtn.hide();
pauseBtn.hide();
disabledInd.show();
timesEl.hide();
} else if (audio.paused) {
playBtn.show();
pauseBtn.hide();
disabledInd.hide();
timesEl.show();
} else {
playBtn.hide();
pauseBtn.show();
disabledInd.hide();
timesEl.show();
}
}
var showTimes = function() {
curTimeEl.text(timeFormat(audio.currentTime));
totalTimeEl.text(timeFormat(audio.duration));
sliderPlayedEl.css('width',
timePercent(audio.currentTime, audio.duration));
// last time buffered
var bufferEnd = 0;
for (var i = 0; i < audio.buffered.length; ++i) {
if (audio.buffered.end(i) > bufferEnd)
bufferEnd = audio.buffered.end(i);
}
sliderLoadedEl.css('width',
timePercent(bufferEnd, audio.duration));
}
// Initialize controls.
showState();
showTimes();
// Bind events.
$('audio', this).bind({
playing: function() {
dbg('playing');
showState();
},
pause: function() {
dbg('pause');
showState();
},
ended: function() {
dbg('ended');
showState();
},
progress: function() {
dbg('progress ' + audio.buffered);
},
timeupdate: function() {
dbg('timeupdate ' + audio.currentTime);
showTimes();
},
durationchange: function() {
dbg('durationchange ' + audio.duration);
showState();
showTimes();
},
loadeddata: function() {
dbg('loadeddata');
},
loadedmetadata: function() {
dbg('loadedmetadata');
}
});
}
// Simple selection disable for jQuery.
// Cut-and-paste from:
// https://stackoverflow.com/questions/2700000
$.fn.disableSelection = function() {
$(this).attr('unselectable', 'on')
.css('-moz-user-select', 'none')
.each(function() {
this.onselectstart = function() { return false; };
});
};
$(function() {
// Routes.
var BeetsRouter = Backbone.Router.extend({
routes: {
"item/query/:query": "itemQuery",
},
itemQuery: function(query) {
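// Each whitespace-separated term becomes one URL path segment, so a
// query like "artist:foo love" is fetched as item/query/artist%3Afoo/love.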
var queryURL = query.split(/\s+/).map(encodeURIComponent).join('/');
$.getJSON('item/query/' + queryURL, function(data) {
var models = _.map(
data['results'],
function(d) { return new Item(d); }
);
var results = new Items(models);
app.showItems(results);
});
}
});
var router = new BeetsRouter();
// Model.
var Item = Backbone.Model.extend({
urlRoot: 'item'
});
var Items = Backbone.Collection.extend({
model: Item
});
// Item views.
var ItemEntryView = Backbone.View.extend({
tagName: "li",
template: _.template($('#item-entry-template').html()),
events: {
'click': 'select',
'dblclick': 'play'
},
initialize: function() {
this.playing = false;
},
render: function() {
$(this.el).html(this.template(this.model.toJSON()));
this.setPlaying(this.playing);
return this;
},
select: function() {
app.selectItem(this);
},
play: function() {
app.playItem(this.model);
},
setPlaying: function(val) {
this.playing = val;
if (val)
this.$('.playing').show();
else
this.$('.playing').hide();
}
});
//Holds Title, Artist, Album etc.
var ItemMainDetailView = Backbone.View.extend({
tagName: "div",
template: _.template($('#item-main-detail-template').html()),
events: {
'click .play': 'play',
},
render: function() {
$(this.el).html(this.template(this.model.toJSON()));
return this;
},
play: function() {
app.playItem(this.model);
}
});
// Holds Track no., Format, MusicBrainz link, Lyrics, Comments etc.
var ItemExtraDetailView = Backbone.View.extend({
tagName: "div",
template: _.template($('#item-extra-detail-template').html()),
render: function() {
$(this.el).html(this.template(this.model.toJSON()));
return this;
}
});
// Main app view.
var AppView = Backbone.View.extend({
el: $('body'),
events: {
'submit #queryForm': 'querySubmit',
},
querySubmit: function(ev) {
ev.preventDefault();
router.navigate('item/query/' + encodeURIComponent($('#query').val()), true);
},
initialize: function() {
this.playingItem = null;
this.shownItems = null;
// Not sure why these events won't bind automatically.
this.$('audio').bind({
'play': _.bind(this.audioPlay, this),
'pause': _.bind(this.audioPause, this),
'ended': _.bind(this.audioEnded, this)
});
},
showItems: function(items) {
this.shownItems = items;
$('#results').empty();
items.each(function(item) {
var view = new ItemEntryView({model: item});
item.entryView = view;
$('#results').append(view.render().el);
});
},
selectItem: function(view) {
// Mark row as selected.
$('#results li').removeClass("selected");
$(view.el).addClass("selected");
// Show main and extra detail.
var mainDetailView = new ItemMainDetailView({model: view.model});
$('#main-detail').empty().append(mainDetailView.render().el);
var extraDetailView = new ItemExtraDetailView({model: view.model});
$('#extra-detail').empty().append(extraDetailView.render().el);
},
playItem: function(item) {
var url = 'item/' + item.get('id') + '/file';
$('#player audio').attr('src', url);
$('#player audio').get(0).play().then(() => {
this.updateMediaSession(item);
});
if (this.playingItem != null) {
this.playingItem.entryView.setPlaying(false);
}
item.entryView.setPlaying(true);
this.playingItem = item;
},
updateMediaSession: function (item) {
if ("mediaSession" in navigator) {
const album_id = item.get("album_id");
const album_art_url = "album/" + album_id + "/art";
navigator.mediaSession.metadata = new MediaMetadata({
title: item.get("title"),
artist: item.get("artist"),
album: item.get("album"),
artwork: [
{ src: album_art_url, sizes: "96x96" },
{ src: album_art_url, sizes: "128x128" },
{ src: album_art_url, sizes: "192x192" },
{ src: album_art_url, sizes: "256x256" },
{ src: album_art_url, sizes: "384x384" },
{ src: album_art_url, sizes: "512x512" },
],
});
}
},
audioPause: function() {
this.playingItem.entryView.setPlaying(false);
},
audioPlay: function() {
if (this.playingItem != null)
this.playingItem.entryView.setPlaying(true);
},
audioEnded: function() {
this.playingItem.entryView.setPlaying(false);
// Try to play the next track.
var idx = this.shownItems.indexOf(this.playingItem);
if (idx == -1) {
// Not in current list.
return;
}
var nextIdx = idx + 1;
if (nextIdx >= this.shownItems.size()) {
// End of list.
return;
}
this.playItem(this.shownItems.at(nextIdx));
}
});
var app = new AppView();
// App setup.
Backbone.history.start({pushState: false});
// Disable selection on UI elements.
$('#entities ul').disableSelection();
$('#header').disableSelection();
// Audio player setup.
$('#player').player();
});
beetbox-beets-c1877b7/beetsplug/web/static/jquery.js 0000664 0000000 0000000 00000744653 15073551743 0022556 0 ustar 00root root 0000000 0000000 /*!
* jQuery JavaScript Library v1.7.1
* http://jquery.com/
*
* Copyright 2011, John Resig
* Dual licensed under the MIT or GPL Version 2 licenses.
* http://jquery.org/license
*
* Includes Sizzle.js
* http://sizzlejs.com/
* Copyright 2011, The Dojo Foundation
* Released under the MIT, BSD, and GPL Licenses.
*
* Date: Mon Nov 21 21:11:03 2011 -0500
*/
(function( window, undefined ) {
// Use the correct document accordingly with window argument (sandbox)
var document = window.document,
navigator = window.navigator,
location = window.location;
var jQuery = (function() {
// Define a local copy of jQuery
var jQuery = function( selector, context ) {
// The jQuery object is actually just the init constructor 'enhanced'
return new jQuery.fn.init( selector, context, rootjQuery );
},
// Map over jQuery in case of overwrite
_jQuery = window.jQuery,
// Map over the $ in case of overwrite
_$ = window.$,
// A central reference to the root jQuery(document)
rootjQuery,
// A simple way to check for HTML strings or ID strings
// Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
quickExpr = /^(?:[^#<]*(<[\w\W]+>)[^>]*$|#([\w\-]*)$)/,
// Check if a string has a non-whitespace character in it
rnotwhite = /\S/,
// Used for trimming whitespace
trimLeft = /^\s+/,
trimRight = /\s+$/,
// Match a standalone tag
rsingleTag = /^<(\w+)\s*\/?>(?:<\/\1>)?$/,
// JSON RegExp
rvalidchars = /^[\],:{}\s]*$/,
rvalidescape = /\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,
rvalidtokens = /"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,
rvalidbraces = /(?:^|:|,)(?:\s*\[)+/g,
// Useragent RegExp
rwebkit = /(webkit)[ \/]([\w.]+)/,
ropera = /(opera)(?:.*version)?[ \/]([\w.]+)/,
rmsie = /(msie) ([\w.]+)/,
rmozilla = /(mozilla)(?:.*? rv:([\w.]+))?/,
// Matches dashed string for camelizing
rdashAlpha = /-([a-z]|[0-9])/ig,
rmsPrefix = /^-ms-/,
// Used by jQuery.camelCase as callback to replace()
fcamelCase = function( all, letter ) {
return ( letter + "" ).toUpperCase();
},
// Keep a UserAgent string for use with jQuery.browser
userAgent = navigator.userAgent,
// For matching the engine and version of the browser
browserMatch,
// The deferred used on DOM ready
readyList,
// The ready event handler
DOMContentLoaded,
// Save a reference to some core methods
toString = Object.prototype.toString,
hasOwn = Object.prototype.hasOwnProperty,
push = Array.prototype.push,
slice = Array.prototype.slice,
trim = String.prototype.trim,
indexOf = Array.prototype.indexOf,
// [[Class]] -> type pairs
class2type = {};
jQuery.fn = jQuery.prototype = {
constructor: jQuery,
init: function( selector, context, rootjQuery ) {
var match, elem, ret, doc;
// Handle $(""), $(null), or $(undefined)
if ( !selector ) {
return this;
}
// Handle $(DOMElement)
if ( selector.nodeType ) {
this.context = this[0] = selector;
this.length = 1;
return this;
}
// The body element only exists once, optimize finding it
if ( selector === "body" && !context && document.body ) {
this.context = document;
this[0] = document.body;
this.selector = selector;
this.length = 1;
return this;
}
// Handle HTML strings
if ( typeof selector === "string" ) {
// Are we dealing with HTML string or an ID?
if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) {
// Assume that strings that start and end with <> are HTML and skip the regex check
match = [ null, selector, null ];
} else {
match = quickExpr.exec( selector );
}
// Verify a match, and that no context was specified for #id
if ( match && (match[1] || !context) ) {
// HANDLE: $(html) -> $(array)
if ( match[1] ) {
context = context instanceof jQuery ? context[0] : context;
doc = ( context ? context.ownerDocument || context : document );
// If a single string is passed in and it's a single tag
// just do a createElement and skip the rest
ret = rsingleTag.exec( selector );
if ( ret ) {
if ( jQuery.isPlainObject( context ) ) {
selector = [ document.createElement( ret[1] ) ];
jQuery.fn.attr.call( selector, context, true );
} else {
selector = [ doc.createElement( ret[1] ) ];
}
} else {
ret = jQuery.buildFragment( [ match[1] ], [ doc ] );
selector = ( ret.cacheable ? jQuery.clone(ret.fragment) : ret.fragment ).childNodes;
}
return jQuery.merge( this, selector );
// HANDLE: $("#id")
} else {
elem = document.getElementById( match[2] );
// Check parentNode to catch when Blackberry 4.6 returns
// nodes that are no longer in the document #6963
if ( elem && elem.parentNode ) {
// Handle the case where IE and Opera return items
// by name instead of ID
if ( elem.id !== match[2] ) {
return rootjQuery.find( selector );
}
// Otherwise, we inject the element directly into the jQuery object
this.length = 1;
this[0] = elem;
}
this.context = document;
this.selector = selector;
return this;
}
// HANDLE: $(expr, $(...))
} else if ( !context || context.jquery ) {
return ( context || rootjQuery ).find( selector );
// HANDLE: $(expr, context)
// (which is just equivalent to: $(context).find(expr)
} else {
return this.constructor( context ).find( selector );
}
// HANDLE: $(function)
// Shortcut for document ready
} else if ( jQuery.isFunction( selector ) ) {
return rootjQuery.ready( selector );
}
if ( selector.selector !== undefined ) {
this.selector = selector.selector;
this.context = selector.context;
}
return jQuery.makeArray( selector, this );
},
// Start with an empty selector
selector: "",
// The current version of jQuery being used
jquery: "1.7.1",
// The default length of a jQuery object is 0
length: 0,
// The number of elements contained in the matched element set
size: function() {
return this.length;
},
toArray: function() {
return slice.call( this, 0 );
},
// Get the Nth element in the matched element set OR
// Get the whole matched element set as a clean array
get: function( num ) {
return num == null ?
// Return a 'clean' array
this.toArray() :
// Return just the object
( num < 0 ? this[ this.length + num ] : this[ num ] );
},
// Take an array of elements and push it onto the stack
// (returning the new matched element set)
pushStack: function( elems, name, selector ) {
// Build a new jQuery matched element set
var ret = this.constructor();
if ( jQuery.isArray( elems ) ) {
push.apply( ret, elems );
} else {
jQuery.merge( ret, elems );
}
// Add the old object onto the stack (as a reference)
ret.prevObject = this;
ret.context = this.context;
if ( name === "find" ) {
ret.selector = this.selector + ( this.selector ? " " : "" ) + selector;
} else if ( name ) {
ret.selector = this.selector + "." + name + "(" + selector + ")";
}
// Return the newly-formed element set
return ret;
},
// Execute a callback for every element in the matched set.
// (You can seed the arguments with an array of args, but this is
// only used internally.)
each: function( callback, args ) {
return jQuery.each( this, callback, args );
},
ready: function( fn ) {
// Attach the listeners
jQuery.bindReady();
// Add the callback
readyList.add( fn );
return this;
},
eq: function( i ) {
i = +i;
return i === -1 ?
this.slice( i ) :
this.slice( i, i + 1 );
},
first: function() {
return this.eq( 0 );
},
last: function() {
return this.eq( -1 );
},
slice: function() {
return this.pushStack( slice.apply( this, arguments ),
"slice", slice.call(arguments).join(",") );
},
map: function( callback ) {
return this.pushStack( jQuery.map(this, function( elem, i ) {
return callback.call( elem, i, elem );
}));
},
end: function() {
return this.prevObject || this.constructor(null);
},
// For internal use only.
// Behaves like an Array's method, not like a jQuery method.
push: push,
sort: [].sort,
splice: [].splice
};
// Give the init function the jQuery prototype for later instantiation
jQuery.fn.init.prototype = jQuery.fn;
jQuery.extend = jQuery.fn.extend = function() {
var options, name, src, copy, copyIsArray, clone,
target = arguments[0] || {},
i = 1,
length = arguments.length,
deep = false;
// Handle a deep copy situation
if ( typeof target === "boolean" ) {
deep = target;
target = arguments[1] || {};
// skip the boolean and the target
i = 2;
}
// Handle case when target is a string or something (possible in deep copy)
if ( typeof target !== "object" && !jQuery.isFunction(target) ) {
target = {};
}
// extend jQuery itself if only one argument is passed
if ( length === i ) {
target = this;
--i;
}
for ( ; i < length; i++ ) {
// Only deal with non-null/undefined values
if ( (options = arguments[ i ]) != null ) {
// Extend the base object
for ( name in options ) {
src = target[ name ];
copy = options[ name ];
// Prevent never-ending loop
if ( target === copy ) {
continue;
}
// Recurse if we're merging plain objects or arrays
if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) {
if ( copyIsArray ) {
copyIsArray = false;
clone = src && jQuery.isArray(src) ? src : [];
} else {
clone = src && jQuery.isPlainObject(src) ? src : {};
}
// Never move original objects, clone them
target[ name ] = jQuery.extend( deep, clone, copy );
// Don't bring in undefined values
} else if ( copy !== undefined ) {
target[ name ] = copy;
}
}
}
}
// Return the modified object
return target;
};
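// Illustrative usage of jQuery.extend (editor's sketch, not library code):
//
//   var defaults = { size: 10, colors: { fg: "black", bg: "white" } };
//   var options  = { colors: { fg: "red" } };
//
//   // Shallow merge: options.colors replaces defaults.colors wholesale,
//   // so bg is lost
//   jQuery.extend( {}, defaults, options );
//
//   // Deep merge (leading `true`): nested plain objects are merged
//   // recursively, so the result keeps bg: "white" alongside fg: "red"
//   jQuery.extend( true, {}, defaults, options );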
jQuery.extend({
noConflict: function( deep ) {
if ( window.$ === jQuery ) {
window.$ = _$;
}
if ( deep && window.jQuery === jQuery ) {
window.jQuery = _jQuery;
}
return jQuery;
},
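// Illustrative (editor's sketch): hand $ back to another library while
// keeping a private alias to jQuery:
//
//   var jq = jQuery.noConflict();        // window.$ is restored
//   jq( "div" ).hide();                  // jQuery still usable via the alias
//   var jq2 = jQuery.noConflict( true ); // also restores window.jQuery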
// Is the DOM ready to be used? Set to true once it occurs.
isReady: false,
// A counter to track how many items to wait for before
// the ready event fires. See #6781
readyWait: 1,
// Hold (or release) the ready event
holdReady: function( hold ) {
if ( hold ) {
jQuery.readyWait++;
} else {
jQuery.ready( true );
}
},
// Handle when the DOM is ready
ready: function( wait ) {
// Either a released hold or a DOMready/load event and not yet ready
if ( (wait === true && !--jQuery.readyWait) || (wait !== true && !jQuery.isReady) ) {
// Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443).
if ( !document.body ) {
return setTimeout( jQuery.ready, 1 );
}
// Remember that the DOM is ready
jQuery.isReady = true;
// If a normal DOM Ready event fired, decrement, and wait if need be
if ( wait !== true && --jQuery.readyWait > 0 ) {
return;
}
// If there are functions bound, execute them
readyList.fireWith( document, [ jQuery ] );
// Trigger any bound ready events
if ( jQuery.fn.trigger ) {
jQuery( document ).trigger( "ready" ).off( "ready" );
}
}
},
bindReady: function() {
if ( readyList ) {
return;
}
readyList = jQuery.Callbacks( "once memory" );
// Catch cases where $(document).ready() is called after the
// browser event has already occurred.
if ( document.readyState === "complete" ) {
// Handle it asynchronously to allow scripts the opportunity to delay ready
return setTimeout( jQuery.ready, 1 );
}
// Mozilla, Opera and webkit nightlies currently support this event
if ( document.addEventListener ) {
// Use the handy event callback
document.addEventListener( "DOMContentLoaded", DOMContentLoaded, false );
// A fallback to window.onload, that will always work
window.addEventListener( "load", jQuery.ready, false );
// If IE event model is used
} else if ( document.attachEvent ) {
// ensure firing before onload,
// maybe late but safe also for iframes
document.attachEvent( "onreadystatechange", DOMContentLoaded );
// A fallback to window.onload, that will always work
window.attachEvent( "onload", jQuery.ready );
// If IE and not a frame
// continually check to see if the document is ready
var toplevel = false;
try {
toplevel = window.frameElement == null;
} catch(e) {}
if ( document.documentElement.doScroll && toplevel ) {
doScrollCheck();
}
}
},
// See test/unit/core.js for details concerning isFunction.
// Since version 1.3, DOM methods and functions like alert
// aren't supported. They return false on IE (#2968).
isFunction: function( obj ) {
return jQuery.type(obj) === "function";
},
isArray: Array.isArray || function( obj ) {
return jQuery.type(obj) === "array";
},
// A crude way of determining if an object is a window
isWindow: function( obj ) {
return obj && typeof obj === "object" && "setInterval" in obj;
},
isNumeric: function( obj ) {
return !isNaN( parseFloat(obj) ) && isFinite( obj );
},
type: function( obj ) {
return obj == null ?
String( obj ) :
class2type[ toString.call(obj) ] || "object";
},
isPlainObject: function( obj ) {
// Must be an Object.
// Because of IE, we also have to check the presence of the constructor property.
// Make sure that DOM nodes and window objects don't pass through, as well
if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) {
return false;
}
try {
// Not own constructor property must be Object
if ( obj.constructor &&
!hasOwn.call(obj, "constructor") &&
!hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) {
return false;
}
} catch ( e ) {
// IE8,9 Will throw exceptions on certain host objects #9897
return false;
}
// Own properties are enumerated firstly, so to speed up,
// if last one is own, then all properties are own.
var key;
for ( key in obj ) {}
return key === undefined || hasOwn.call( obj, key );
},
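// Illustrative results of the checks above (editor's sketch):
//
//   jQuery.isPlainObject( {} );            // true  -- object literal
//   jQuery.isPlainObject( new Object() );  // true  -- Object-constructed
//   jQuery.isPlainObject( new Date() );    // false -- custom constructor
//   jQuery.isPlainObject( document.body ); // false -- DOM node (nodeType)
//   jQuery.isPlainObject( window );        // false -- window object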
isEmptyObject: function( obj ) {
for ( var name in obj ) {
return false;
}
return true;
},
error: function( msg ) {
throw new Error( msg );
},
parseJSON: function( data ) {
if ( typeof data !== "string" || !data ) {
return null;
}
// Make sure leading/trailing whitespace is removed (IE can't handle it)
data = jQuery.trim( data );
// Attempt to parse using the native JSON parser first
if ( window.JSON && window.JSON.parse ) {
return window.JSON.parse( data );
}
// Make sure the incoming data is actual JSON
// Logic borrowed from http://json.org/json2.js
if ( rvalidchars.test( data.replace( rvalidescape, "@" )
.replace( rvalidtokens, "]" )
.replace( rvalidbraces, "")) ) {
return ( new Function( "return " + data ) )();
}
jQuery.error( "Invalid JSON: " + data );
},
// Cross-browser xml parsing
parseXML: function( data ) {
var xml, tmp;
try {
if ( window.DOMParser ) { // Standard
tmp = new DOMParser();
xml = tmp.parseFromString( data , "text/xml" );
} else { // IE
xml = new ActiveXObject( "Microsoft.XMLDOM" );
xml.async = "false";
xml.loadXML( data );
}
} catch( e ) {
xml = undefined;
}
if ( !xml || !xml.documentElement || xml.getElementsByTagName( "parsererror" ).length ) {
jQuery.error( "Invalid XML: " + data );
}
return xml;
},
noop: function() {},
// Evaluates a script in a global context
// Workarounds based on findings by Jim Driscoll
// http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context
globalEval: function( data ) {
if ( data && rnotwhite.test( data ) ) {
// We use execScript on Internet Explorer
// We use an anonymous function so that context is window
// rather than jQuery in Firefox
( window.execScript || function( data ) {
window[ "eval" ].call( window, data );
} )( data );
}
},
// Convert dashed to camelCase; used by the css and data modules
// Microsoft forgot to hump their vendor prefix (#9572)
camelCase: function( string ) {
return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase );
},
nodeName: function( elem, name ) {
return elem.nodeName && elem.nodeName.toUpperCase() === name.toUpperCase();
},
// args is for internal usage only
each: function( object, callback, args ) {
var name, i = 0,
length = object.length,
isObj = length === undefined || jQuery.isFunction( object );
if ( args ) {
if ( isObj ) {
for ( name in object ) {
if ( callback.apply( object[ name ], args ) === false ) {
break;
}
}
} else {
for ( ; i < length; ) {
if ( callback.apply( object[ i++ ], args ) === false ) {
break;
}
}
}
// A special, fast, case for the most common use of each
} else {
if ( isObj ) {
for ( name in object ) {
if ( callback.call( object[ name ], name, object[ name ] ) === false ) {
break;
}
}
} else {
for ( ; i < length; ) {
if ( callback.call( object[ i ], i, object[ i++ ] ) === false ) {
break;
}
}
}
}
return object;
},
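// Illustrative (editor's sketch): jQuery.each walks arrays by index and
// plain objects by key; returning false from the callback stops iteration:
//
//   jQuery.each( [ "a", "b", "c" ], function( i, value ) {
//       if ( value === "b" ) {
//           return false;    // loop ends here; "c" is never visited
//       }
//   });
//
//   jQuery.each( { x: 1, y: 2 }, function( key, value ) {
//       // invoked with ("x", 1) then ("y", 2); `this` is the value
//   });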
// Use native String.trim function wherever possible
trim: trim ?
function( text ) {
return text == null ?
"" :
trim.call( text );
} :
// Otherwise use our own trimming functionality
function( text ) {
return text == null ?
"" :
text.toString().replace( trimLeft, "" ).replace( trimRight, "" );
},
// results is for internal usage only
makeArray: function( array, results ) {
var ret = results || [];
if ( array != null ) {
// The window, strings (and functions) also have 'length'
// Tweaked logic slightly to handle Blackberry 4.7 RegExp issues #6930
var type = jQuery.type( array );
if ( array.length == null || type === "string" || type === "function" || type === "regexp" || jQuery.isWindow( array ) ) {
push.call( ret, array );
} else {
jQuery.merge( ret, array );
}
}
return ret;
},
inArray: function( elem, array, i ) {
var len;
if ( array ) {
if ( indexOf ) {
return indexOf.call( array, elem, i );
}
len = array.length;
i = i ? i < 0 ? Math.max( 0, len + i ) : i : 0;
for ( ; i < len; i++ ) {
// Skip accessing in sparse arrays
if ( i in array && array[ i ] === elem ) {
return i;
}
}
}
return -1;
},
merge: function( first, second ) {
var i = first.length,
j = 0;
if ( typeof second.length === "number" ) {
for ( var l = second.length; j < l; j++ ) {
first[ i++ ] = second[ j ];
}
} else {
while ( second[j] !== undefined ) {
first[ i++ ] = second[ j++ ];
}
}
first.length = i;
return first;
},
grep: function( elems, callback, inv ) {
var ret = [], retVal;
inv = !!inv;
// Go through the array, only saving the items
// that pass the validator function
for ( var i = 0, length = elems.length; i < length; i++ ) {
retVal = !!callback( elems[ i ], i );
if ( inv !== retVal ) {
ret.push( elems[ i ] );
}
}
return ret;
},
// arg is for internal usage only
map: function( elems, callback, arg ) {
var value, key, ret = [],
i = 0,
length = elems.length,
// jquery objects are treated as arrays
isArray = elems instanceof jQuery || length !== undefined && typeof length === "number" && ( ( length > 0 && elems[ 0 ] && elems[ length -1 ] ) || length === 0 || jQuery.isArray( elems ) ) ;
// Go through the array, translating each of the items to their new values
if ( isArray ) {
for ( ; i < length; i++ ) {
value = callback( elems[ i ], i, arg );
if ( value != null ) {
ret[ ret.length ] = value;
}
}
// Go through every key on the object, translating each value
} else {
for ( key in elems ) {
value = callback( elems[ key ], key, arg );
if ( value != null ) {
ret[ ret.length ] = value;
}
}
}
// Flatten any nested arrays
return ret.concat.apply( [], ret );
},
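// Illustrative (editor's sketch): unlike Array#map, null/undefined results
// are dropped and array results are flattened one level by the final concat:
//
//   jQuery.map( [ 1, 2, 3 ], function( n ) {
//       return n % 2 ? null : [ n, n * 10 ];
//   });
//   // => [ 2, 20 ]  (the odd items vanish; the inner array is flattened)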
// A global GUID counter for objects
guid: 1,
// Bind a function to a context, optionally partially applying any
// arguments.
proxy: function( fn, context ) {
if ( typeof context === "string" ) {
var tmp = fn[ context ];
context = fn;
fn = tmp;
}
// Quick check to determine if target is callable, in the spec
// this throws a TypeError, but we will just return undefined.
if ( !jQuery.isFunction( fn ) ) {
return undefined;
}
// Simulated bind
var args = slice.call( arguments, 2 ),
proxy = function() {
return fn.apply( context, args.concat( slice.call( arguments ) ) );
};
// Set the guid of unique handler to the same of original handler, so it can be removed
proxy.guid = fn.guid = fn.guid || proxy.guid || jQuery.guid++;
return proxy;
},
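// Illustrative (editor's sketch): fix `this` for a method used as a handler;
// the shared guid means the original function can still be passed to
// unbinding APIs to remove the proxied handler:
//
//   var counter = {
//       count: 0,
//       tick: function() { this.count++; }
//   };
//   var tick = jQuery.proxy( counter, "tick" );
//   tick();   // counter.count === 1 even though tick() was called bare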
// Multifunctional method to get and set values on a collection
// The value/s can optionally be executed if it's a function
access: function( elems, key, value, exec, fn, pass ) {
var length = elems.length;
// Setting many attributes
if ( typeof key === "object" ) {
for ( var k in key ) {
jQuery.access( elems, k, key[k], exec, fn, value );
}
return elems;
}
// Setting one attribute
if ( value !== undefined ) {
// Optionally, function values get executed if exec is true
exec = !pass && exec && jQuery.isFunction(value);
for ( var i = 0; i < length; i++ ) {
fn( elems[i], key, exec ? value.call( elems[i], i, fn( elems[i], key ) ) : value, pass );
}
return elems;
}
// Getting an attribute
return length ? fn( elems[0], key ) : undefined;
},
now: function() {
return ( new Date() ).getTime();
},
// Use of jQuery.browser is frowned upon.
// More details: http://docs.jquery.com/Utilities/jQuery.browser
uaMatch: function( ua ) {
ua = ua.toLowerCase();
var match = rwebkit.exec( ua ) ||
ropera.exec( ua ) ||
rmsie.exec( ua ) ||
ua.indexOf("compatible") < 0 && rmozilla.exec( ua ) ||
[];
return { browser: match[1] || "", version: match[2] || "0" };
},
sub: function() {
function jQuerySub( selector, context ) {
return new jQuerySub.fn.init( selector, context );
}
jQuery.extend( true, jQuerySub, this );
jQuerySub.superclass = this;
jQuerySub.fn = jQuerySub.prototype = this();
jQuerySub.fn.constructor = jQuerySub;
jQuerySub.sub = this.sub;
jQuerySub.fn.init = function init( selector, context ) {
if ( context && context instanceof jQuery && !(context instanceof jQuerySub) ) {
context = jQuerySub( context );
}
return jQuery.fn.init.call( this, selector, context, rootjQuerySub );
};
jQuerySub.fn.init.prototype = jQuerySub.fn;
var rootjQuerySub = jQuerySub(document);
return jQuerySub;
},
browser: {}
});
// Populate the class2type map
jQuery.each("Boolean Number String Function Array Date RegExp Object".split(" "), function(i, name) {
class2type[ "[object " + name + "]" ] = name.toLowerCase();
});
browserMatch = jQuery.uaMatch( userAgent );
if ( browserMatch.browser ) {
jQuery.browser[ browserMatch.browser ] = true;
jQuery.browser.version = browserMatch.version;
}
// Deprecated, use jQuery.browser.webkit instead
if ( jQuery.browser.webkit ) {
jQuery.browser.safari = true;
}
// IE doesn't match non-breaking spaces with \s
if ( rnotwhite.test( "\xA0" ) ) {
trimLeft = /^[\s\xA0]+/;
trimRight = /[\s\xA0]+$/;
}
// All jQuery objects should point back to these
rootjQuery = jQuery(document);
// Cleanup functions for the document ready method
if ( document.addEventListener ) {
DOMContentLoaded = function() {
document.removeEventListener( "DOMContentLoaded", DOMContentLoaded, false );
jQuery.ready();
};
} else if ( document.attachEvent ) {
DOMContentLoaded = function() {
// Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443).
if ( document.readyState === "complete" ) {
document.detachEvent( "onreadystatechange", DOMContentLoaded );
jQuery.ready();
}
};
}
// The DOM ready check for Internet Explorer
function doScrollCheck() {
if ( jQuery.isReady ) {
return;
}
try {
// If IE is used, use the trick by Diego Perini
// http://javascript.nwbox.com/IEContentLoaded/
document.documentElement.doScroll("left");
} catch(e) {
setTimeout( doScrollCheck, 1 );
return;
}
// and execute any waiting functions
jQuery.ready();
}
return jQuery;
})();
// String to Object flags format cache
var flagsCache = {};
// Convert String-formatted flags into Object-formatted ones and store in cache
function createFlags( flags ) {
var object = flagsCache[ flags ] = {},
i, length;
flags = flags.split( /\s+/ );
for ( i = 0, length = flags.length; i < length; i++ ) {
object[ flags[i] ] = true;
}
return object;
}
/*
* Create a callback list using the following parameters:
*
* flags: an optional list of space-separated flags that will change how
* the callback list behaves
*
* By default a callback list will act like an event callback list and can be
* "fired" multiple times.
*
* Possible flags:
*
* once: will ensure the callback list can only be fired once (like a Deferred)
*
* memory: will keep track of previous values and will call any callback added
* after the list has been fired right away with the latest "memorized"
* values (like a Deferred)
*
* unique: will ensure a callback can only be added once (no duplicate in the list)
*
* stopOnFalse: interrupt callings when a callback returns false
*
*/
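// Illustrative use of the flags documented above (editor's sketch):
//
//   var callbacks = jQuery.Callbacks( "once memory" );
//   callbacks.add(function( msg ) { console.log( msg ); });
//   callbacks.fire( "hello" );    // logs "hello"
//   callbacks.fire( "world" );    // ignored: "once" allows a single fire
//   callbacks.add(function( msg ) {
//       console.log( msg + "!" ); // logs "hello!" right away: "memory"
//   });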
jQuery.Callbacks = function( flags ) {
// Convert flags from String-formatted to Object-formatted
// (we check in cache first)
flags = flags ? ( flagsCache[ flags ] || createFlags( flags ) ) : {};
var // Actual callback list
list = [],
// Stack of fire calls for repeatable lists
stack = [],
// Last fire value (for non-forgettable lists)
memory,
// Flag to know if list is currently firing
firing,
// First callback to fire (used internally by add and fireWith)
firingStart,
// End of the loop when firing
firingLength,
// Index of currently firing callback (modified by remove if needed)
firingIndex,
// Add one or several callbacks to the list
add = function( args ) {
var i,
length,
elem,
type,
actual;
for ( i = 0, length = args.length; i < length; i++ ) {
elem = args[ i ];
type = jQuery.type( elem );
if ( type === "array" ) {
// Inspect recursively
add( elem );
} else if ( type === "function" ) {
// Add if not in unique mode and callback is not in
if ( !flags.unique || !self.has( elem ) ) {
list.push( elem );
}
}
}
},
// Fire callbacks
fire = function( context, args ) {
args = args || [];
memory = !flags.memory || [ context, args ];
firing = true;
firingIndex = firingStart || 0;
firingStart = 0;
firingLength = list.length;
for ( ; list && firingIndex < firingLength; firingIndex++ ) {
if ( list[ firingIndex ].apply( context, args ) === false && flags.stopOnFalse ) {
memory = true; // Mark as halted
break;
}
}
firing = false;
if ( list ) {
if ( !flags.once ) {
if ( stack && stack.length ) {
memory = stack.shift();
self.fireWith( memory[ 0 ], memory[ 1 ] );
}
} else if ( memory === true ) {
self.disable();
} else {
list = [];
}
}
},
// Actual Callbacks object
self = {
// Add a callback or a collection of callbacks to the list
add: function() {
if ( list ) {
var length = list.length;
add( arguments );
// Do we need to add the callbacks to the
// current firing batch?
if ( firing ) {
firingLength = list.length;
// With memory, if we're not firing then
// we should call right away, unless previous
// firing was halted (stopOnFalse)
} else if ( memory && memory !== true ) {
firingStart = length;
fire( memory[ 0 ], memory[ 1 ] );
}
}
return this;
},
// Remove a callback from the list
remove: function() {
if ( list ) {
var args = arguments,
argIndex = 0,
argLength = args.length;
for ( ; argIndex < argLength ; argIndex++ ) {
for ( var i = 0; i < list.length; i++ ) {
if ( args[ argIndex ] === list[ i ] ) {
// Handle firingIndex and firingLength
if ( firing ) {
if ( i <= firingLength ) {
firingLength--;
if ( i <= firingIndex ) {
firingIndex--;
}
}
}
// Remove the element
list.splice( i--, 1 );
// If the unique flag is set then
// we only need to do this once
if ( flags.unique ) {
break;
}
}
}
}
}
return this;
},
// Control if a given callback is in the list
has: function( fn ) {
if ( list ) {
var i = 0,
length = list.length;
for ( ; i < length; i++ ) {
if ( fn === list[ i ] ) {
return true;
}
}
}
return false;
},
// Remove all callbacks from the list
empty: function() {
list = [];
return this;
},
// Have the list do nothing anymore
disable: function() {
list = stack = memory = undefined;
return this;
},
// Is it disabled?
disabled: function() {
return !list;
},
// Lock the list in its current state
lock: function() {
stack = undefined;
if ( !memory || memory === true ) {
self.disable();
}
return this;
},
// Is it locked?
locked: function() {
return !stack;
},
// Call all callbacks with the given context and arguments
fireWith: function( context, args ) {
if ( stack ) {
if ( firing ) {
if ( !flags.once ) {
stack.push( [ context, args ] );
}
} else if ( !( flags.once && memory ) ) {
fire( context, args );
}
}
return this;
},
// Call all the callbacks with the given arguments
fire: function() {
self.fireWith( this, arguments );
return this;
},
// To know if the callbacks have already been called at least once
fired: function() {
return !!memory;
}
};
return self;
};
var // Static reference to slice
sliceDeferred = [].slice;
jQuery.extend({
Deferred: function( func ) {
var doneList = jQuery.Callbacks( "once memory" ),
failList = jQuery.Callbacks( "once memory" ),
progressList = jQuery.Callbacks( "memory" ),
state = "pending",
lists = {
resolve: doneList,
reject: failList,
notify: progressList
},
promise = {
done: doneList.add,
fail: failList.add,
progress: progressList.add,
state: function() {
return state;
},
// Deprecated
isResolved: doneList.fired,
isRejected: failList.fired,
then: function( doneCallbacks, failCallbacks, progressCallbacks ) {
deferred.done( doneCallbacks ).fail( failCallbacks ).progress( progressCallbacks );
return this;
},
always: function() {
deferred.done.apply( deferred, arguments ).fail.apply( deferred, arguments );
return this;
},
pipe: function( fnDone, fnFail, fnProgress ) {
return jQuery.Deferred(function( newDefer ) {
jQuery.each( {
done: [ fnDone, "resolve" ],
fail: [ fnFail, "reject" ],
progress: [ fnProgress, "notify" ]
}, function( handler, data ) {
var fn = data[ 0 ],
action = data[ 1 ],
returned;
if ( jQuery.isFunction( fn ) ) {
deferred[ handler ](function() {
returned = fn.apply( this, arguments );
if ( returned && jQuery.isFunction( returned.promise ) ) {
returned.promise().then( newDefer.resolve, newDefer.reject, newDefer.notify );
} else {
newDefer[ action + "With" ]( this === deferred ? newDefer : this, [ returned ] );
}
});
} else {
deferred[ handler ]( newDefer[ action ] );
}
});
}).promise();
},
// Get a promise for this deferred
// If obj is provided, the promise aspect is added to the object
promise: function( obj ) {
if ( obj == null ) {
obj = promise;
} else {
for ( var key in promise ) {
obj[ key ] = promise[ key ];
}
}
return obj;
}
},
deferred = promise.promise({}),
key;
for ( key in lists ) {
deferred[ key ] = lists[ key ].fire;
deferred[ key + "With" ] = lists[ key ].fireWith;
}
// Handle state
deferred.done( function() {
state = "resolved";
}, failList.disable, progressList.lock ).fail( function() {
state = "rejected";
}, doneList.disable, progressList.lock );
// Call given func if any
if ( func ) {
func.call( deferred, deferred );
}
// All done!
return deferred;
},
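// Illustrative Deferred lifecycle (editor's sketch, not library code):
//
//   var dfd = jQuery.Deferred();
//   dfd.done(function( value ) { console.log( "resolved:", value ); })
//      .fail(function( err )   { console.log( "rejected:", err ); });
//   dfd.state();          // "pending"
//   dfd.resolve( 42 );    // logs "resolved: 42"; state() -> "resolved"
//   dfd.reject( "nope" ); // no effect -- the underlying lists are "once",
//                         // and resolving disabled the fail list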
// Deferred helper
when: function( firstParam ) {
var args = sliceDeferred.call( arguments, 0 ),
i = 0,
length = args.length,
pValues = new Array( length ),
count = length,
pCount = length,
deferred = length <= 1 && firstParam && jQuery.isFunction( firstParam.promise ) ?
firstParam :
jQuery.Deferred(),
promise = deferred.promise();
function resolveFunc( i ) {
return function( value ) {
args[ i ] = arguments.length > 1 ? sliceDeferred.call( arguments, 0 ) : value;
if ( !( --count ) ) {
deferred.resolveWith( deferred, args );
}
};
}
function progressFunc( i ) {
return function( value ) {
pValues[ i ] = arguments.length > 1 ? sliceDeferred.call( arguments, 0 ) : value;
deferred.notifyWith( promise, pValues );
};
}
if ( length > 1 ) {
for ( ; i < length; i++ ) {
if ( args[ i ] && args[ i ].promise && jQuery.isFunction( args[ i ].promise ) ) {
args[ i ].promise().then( resolveFunc(i), deferred.reject, progressFunc(i) );
} else {
--count;
}
}
if ( !count ) {
deferred.resolveWith( deferred, args );
}
} else if ( deferred !== firstParam ) {
deferred.resolveWith( deferred, length ? [ firstParam ] : [] );
}
return promise;
}
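// Illustrative (editor's sketch; the URLs are hypothetical): jQuery.when
// joins several promises, resolving once all resolve and rejecting as soon
// as any one rejects:
//
//   jQuery.when( jQuery.ajax( "/a" ), jQuery.ajax( "/b" ) )
//       .done(function( aArgs, bArgs ) {
//           // each argument holds [ data, statusText, jqXHR ] for one call
//       });
//
//   jQuery.when( 7 ).done(function( n ) {
//       // non-promise values resolve immediately: n === 7
//   });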
});
jQuery.support = (function() {
var support,
all,
a,
select,
opt,
input,
marginDiv,
fragment,
tds,
events,
eventName,
i,
isSupported,
div = document.createElement( "div" ),
documentElement = document.documentElement;
// Preliminary tests
div.setAttribute("className", "t");
div.innerHTML = " a ";
all = div.getElementsByTagName( "*" );
a = div.getElementsByTagName( "a" )[ 0 ];
// Can't get basic test support
if ( !all || !all.length || !a ) {
return {};
}
// First batch of supports tests
select = document.createElement( "select" );
opt = select.appendChild( document.createElement("option") );
input = div.getElementsByTagName( "input" )[ 0 ];
support = {
// IE strips leading whitespace when .innerHTML is used
leadingWhitespace: ( div.firstChild.nodeType === 3 ),
// Make sure that tbody elements aren't automatically inserted
// IE will insert them into empty tables
tbody: !div.getElementsByTagName("tbody").length,
// Make sure that link elements get serialized correctly by innerHTML
// This requires a wrapper element in IE
htmlSerialize: !!div.getElementsByTagName("link").length,
// Get the style information from getAttribute
// (IE uses .cssText instead)
style: /top/.test( a.getAttribute("style") ),
// Make sure that URLs aren't manipulated
// (IE normalizes it by default)
hrefNormalized: ( a.getAttribute("href") === "/a" ),
// Make sure that element opacity exists
// (IE uses filter instead)
// Use a regex to work around a WebKit issue. See #5145
opacity: /^0.55/.test( a.style.opacity ),
// Verify style float existence
// (IE uses styleFloat instead of cssFloat)
cssFloat: !!a.style.cssFloat,
// Make sure that if no value is specified for a checkbox
// that it defaults to "on".
// (WebKit defaults to "" instead)
checkOn: ( input.value === "on" ),
// Make sure that a selected-by-default option has a working selected property.
// (WebKit defaults to false instead of true, IE too, if it's in an optgroup)
optSelected: opt.selected,
// Test setAttribute on camelCase class. If it works, we need attrFixes when doing get/setAttribute (ie6/7)
getSetAttribute: div.className !== "t",
// Tests for enctype support on a form(#6743)
enctype: !!document.createElement("form").enctype,
// Makes sure cloning an html5 element does not cause problems
// Where outerHTML is undefined, this still works
html5Clone: document.createElement("nav").cloneNode( true ).outerHTML !== "<:nav>",
// Will be defined later
submitBubbles: true,
changeBubbles: true,
focusinBubbles: false,
deleteExpando: true,
noCloneEvent: true,
inlineBlockNeedsLayout: false,
shrinkWrapBlocks: false,
reliableMarginRight: true
};
// Make sure checked status is properly cloned
input.checked = true;
support.noCloneChecked = input.cloneNode( true ).checked;
// Make sure that the options inside disabled selects aren't marked as disabled
// (WebKit marks them as disabled)
select.disabled = true;
support.optDisabled = !opt.disabled;
// Test to see if it's possible to delete an expando from an element
// Fails in Internet Explorer
try {
delete div.test;
} catch( e ) {
support.deleteExpando = false;
}
if ( !div.addEventListener && div.attachEvent && div.fireEvent ) {
div.attachEvent( "onclick", function() {
// Cloning a node shouldn't copy over any
// bound event handlers (IE does this)
support.noCloneEvent = false;
});
div.cloneNode( true ).fireEvent( "onclick" );
}
// Check if a radio maintains its value
// after being appended to the DOM
input = document.createElement("input");
input.value = "t";
input.setAttribute("type", "radio");
support.radioValue = input.value === "t";
input.setAttribute("checked", "checked");
div.appendChild( input );
fragment = document.createDocumentFragment();
fragment.appendChild( div.lastChild );
// WebKit doesn't clone checked state correctly in fragments
support.checkClone = fragment.cloneNode( true ).cloneNode( true ).lastChild.checked;
// Check if a disconnected checkbox will retain its checked
// value of true after appended to the DOM (IE6/7)
support.appendChecked = input.checked;
fragment.removeChild( input );
fragment.appendChild( div );
div.innerHTML = "";
// Check if div with explicit width and no margin-right incorrectly
// gets computed margin-right based on width of container. For more
// info see bug #3333
// Fails in WebKit before Feb 2011 nightlies
// WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right
if ( window.getComputedStyle ) {
marginDiv = document.createElement( "div" );
marginDiv.style.width = "0";
marginDiv.style.marginRight = "0";
div.style.width = "2px";
div.appendChild( marginDiv );
support.reliableMarginRight =
( parseInt( ( window.getComputedStyle( marginDiv, null ) || { marginRight: 0 } ).marginRight, 10 ) || 0 ) === 0;
}
// Technique from Juriy Zaytsev
// http://perfectionkills.com/detecting-event-support-without-browser-sniffing/
// We only care about the case where non-standard event systems
// are used, namely in IE. Short-circuiting here helps us to
// avoid an eval call (in setAttribute) which can cause CSP
// to go haywire. See: https://developer.mozilla.org/en/Security/CSP
if ( div.attachEvent ) {
for( i in {
submit: 1,
change: 1,
focusin: 1
}) {
eventName = "on" + i;
isSupported = ( eventName in div );
if ( !isSupported ) {
div.setAttribute( eventName, "return;" );
isSupported = ( typeof div[ eventName ] === "function" );
}
support[ i + "Bubbles" ] = isSupported;
}
}
fragment.removeChild( div );
// Null elements to avoid leaks in IE
fragment = select = opt = marginDiv = div = input = null;
// Run tests that need a body at doc ready
jQuery(function() {
var container, outer, inner, table, td, offsetSupport,
conMarginTop, ptlm, vb, style, html,
body = document.getElementsByTagName("body")[0];
if ( !body ) {
// Return for frameset docs that don't have a body
return;
}
conMarginTop = 1;
ptlm = "position:absolute;top:0;left:0;width:1px;height:1px;margin:0;";
vb = "visibility:hidden;border:0;";
style = "style='" + ptlm + "border:5px solid #000;padding:0;'";
html = "" +
"";
container = document.createElement("div");
container.style.cssText = vb + "width:0;height:0;position:static;top:0;margin-top:" + conMarginTop + "px";
body.insertBefore( container, body.firstChild );
// Construct the test element
div = document.createElement("div");
container.appendChild( div );
// Check if table cells still have offsetWidth/Height when they are set
// to display:none and there are still other visible table cells in a
// table row; if so, offsetWidth/Height are not reliable for use when
// determining if an element has been hidden directly using
// display:none (it is still safe to use offsets if a parent element is
// hidden; don safety goggles and see bug #4512 for more information).
// (only IE 8 fails this test)
div.innerHTML = "";
tds = div.getElementsByTagName( "td" );
isSupported = ( tds[ 0 ].offsetHeight === 0 );
tds[ 0 ].style.display = "";
tds[ 1 ].style.display = "none";
// Check if empty table cells still have offsetWidth/Height
// (IE <= 8 fail this test)
support.reliableHiddenOffsets = isSupported && ( tds[ 0 ].offsetHeight === 0 );
// Figure out if the W3C box model works as expected
div.innerHTML = "";
div.style.width = div.style.paddingLeft = "1px";
jQuery.boxModel = support.boxModel = div.offsetWidth === 2;
if ( typeof div.style.zoom !== "undefined" ) {
// Check if natively block-level elements act like inline-block
// elements when setting their display to 'inline' and giving
// them layout
// (IE < 8 does this)
div.style.display = "inline";
div.style.zoom = 1;
support.inlineBlockNeedsLayout = ( div.offsetWidth === 2 );
// Check if elements with layout shrink-wrap their children
// (IE 6 does this)
div.style.display = "";
div.innerHTML = "
";
support.shrinkWrapBlocks = ( div.offsetWidth !== 2 );
}
div.style.cssText = ptlm + vb;
div.innerHTML = html;
outer = div.firstChild;
inner = outer.firstChild;
td = outer.nextSibling.firstChild.firstChild;
offsetSupport = {
doesNotAddBorder: ( inner.offsetTop !== 5 ),
doesAddBorderForTableAndCells: ( td.offsetTop === 5 )
};
inner.style.position = "fixed";
inner.style.top = "20px";
// safari subtracts parent border width here which is 5px
offsetSupport.fixedPosition = ( inner.offsetTop === 20 || inner.offsetTop === 15 );
inner.style.position = inner.style.top = "";
outer.style.overflow = "hidden";
outer.style.position = "relative";
offsetSupport.subtractsBorderForOverflowNotVisible = ( inner.offsetTop === -5 );
offsetSupport.doesNotIncludeMarginInBodyOffset = ( body.offsetTop !== conMarginTop );
body.removeChild( container );
div = container = null;
jQuery.extend( support, offsetSupport );
});
return support;
})();
var rbrace = /^(?:\{.*\}|\[.*\])$/,
rmultiDash = /([A-Z])/g;
jQuery.extend({
cache: {},
// Please use with caution
uuid: 0,
// Unique for each copy of jQuery on the page
// Non-digits removed to match rinlinejQuery
expando: "jQuery" + ( jQuery.fn.jquery + Math.random() ).replace( /\D/g, "" ),
// The following elements throw uncatchable exceptions if you
// attempt to add expando properties to them.
noData: {
"embed": true,
// Ban all objects except for Flash (which handle expandos)
"object": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",
"applet": true
},
hasData: function( elem ) {
elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ];
return !!elem && !isEmptyDataObject( elem );
},
data: function( elem, name, data, pvt /* Internal Use Only */ ) {
if ( !jQuery.acceptData( elem ) ) {
return;
}
var privateCache, thisCache, ret,
internalKey = jQuery.expando,
getByName = typeof name === "string",
// We have to handle DOM nodes and JS objects differently because IE6-7
// can't GC object references properly across the DOM-JS boundary
isNode = elem.nodeType,
// Only DOM nodes need the global jQuery cache; JS object data is
// attached directly to the object so GC can occur automatically
cache = isNode ? jQuery.cache : elem,
// Only defining an ID for JS objects if its cache already exists allows
// the code to shortcut on the same path as a DOM node with no cache
id = isNode ? elem[ internalKey ] : elem[ internalKey ] && internalKey,
isEvents = name === "events";
// Avoid doing any more work than we need to when trying to get data on an
// object that has no data at all
if ( (!id || !cache[id] || (!isEvents && !pvt && !cache[id].data)) && getByName && data === undefined ) {
return;
}
if ( !id ) {
// Only DOM nodes need a new unique ID for each element since their data
// ends up in the global cache
if ( isNode ) {
elem[ internalKey ] = id = ++jQuery.uuid;
} else {
id = internalKey;
}
}
if ( !cache[ id ] ) {
cache[ id ] = {};
// Avoids exposing jQuery metadata on plain JS objects when the object
// is serialized using JSON.stringify
if ( !isNode ) {
cache[ id ].toJSON = jQuery.noop;
}
}
// An object can be passed to jQuery.data instead of a key/value pair; this gets
// shallow copied over onto the existing cache
if ( typeof name === "object" || typeof name === "function" ) {
if ( pvt ) {
cache[ id ] = jQuery.extend( cache[ id ], name );
} else {
cache[ id ].data = jQuery.extend( cache[ id ].data, name );
}
}
privateCache = thisCache = cache[ id ];
// jQuery data() is stored in a separate object inside the object's internal data
// cache in order to avoid key collisions between internal data and user-defined
// data.
if ( !pvt ) {
if ( !thisCache.data ) {
thisCache.data = {};
}
thisCache = thisCache.data;
}
if ( data !== undefined ) {
thisCache[ jQuery.camelCase( name ) ] = data;
}
// Users should not attempt to inspect the internal events object using jQuery.data,
// it is undocumented and subject to change. But does anyone listen? No.
if ( isEvents && !thisCache[ name ] ) {
return privateCache.events;
}
// Check for both converted-to-camel and non-converted data property names
// If a data property was specified
if ( getByName ) {
// First Try to find as-is property data
ret = thisCache[ name ];
// Test for null|undefined property data
if ( ret == null ) {
// Try to find the camelCased property
ret = thisCache[ jQuery.camelCase( name ) ];
}
} else {
ret = thisCache;
}
return ret;
},
removeData: function( elem, name, pvt /* Internal Use Only */ ) {
if ( !jQuery.acceptData( elem ) ) {
return;
}
var thisCache, i, l,
// Reference to internal data cache key
internalKey = jQuery.expando,
isNode = elem.nodeType,
// See jQuery.data for more information
cache = isNode ? jQuery.cache : elem,
// See jQuery.data for more information
id = isNode ? elem[ internalKey ] : internalKey;
// If there is already no cache entry for this object, there is no
// purpose in continuing
if ( !cache[ id ] ) {
return;
}
if ( name ) {
thisCache = pvt ? cache[ id ] : cache[ id ].data;
if ( thisCache ) {
// Support array or space separated string names for data keys
if ( !jQuery.isArray( name ) ) {
// try the string as a key before any manipulation
if ( name in thisCache ) {
name = [ name ];
} else {
// split the camel cased version by spaces unless a key with the spaces exists
name = jQuery.camelCase( name );
if ( name in thisCache ) {
name = [ name ];
} else {
name = name.split( " " );
}
}
}
for ( i = 0, l = name.length; i < l; i++ ) {
delete thisCache[ name[i] ];
}
// If there is no data left in the cache, we want to continue
// and let the cache object itself get destroyed
if ( !( pvt ? isEmptyDataObject : jQuery.isEmptyObject )( thisCache ) ) {
return;
}
}
}
// See jQuery.data for more information
if ( !pvt ) {
delete cache[ id ].data;
// Don't destroy the parent cache unless the internal data object
// had been the only thing left in it
if ( !isEmptyDataObject(cache[ id ]) ) {
return;
}
}
// Browsers that fail expando deletion also refuse to delete expandos on
// the window, but it will allow it on all other JS objects; other browsers
// don't care
// Ensure that `cache` is not a window object #10080
if ( jQuery.support.deleteExpando || !cache.setInterval ) {
delete cache[ id ];
} else {
cache[ id ] = null;
}
// We destroyed the cache and need to eliminate the expando on the node to avoid
// false lookups in the cache for entries that no longer exist
if ( isNode ) {
// IE does not allow us to delete expando properties from nodes,
// nor does it have a removeAttribute function on Document nodes;
// we must handle all of these cases
if ( jQuery.support.deleteExpando ) {
delete elem[ internalKey ];
} else if ( elem.removeAttribute ) {
elem.removeAttribute( internalKey );
} else {
elem[ internalKey ] = null;
}
}
},
// For internal use only.
_data: function( elem, name, data ) {
return jQuery.data( elem, name, data, true );
},
// A method for determining if a DOM node can handle the data expando
acceptData: function( elem ) {
if ( elem.nodeName ) {
var match = jQuery.noData[ elem.nodeName.toLowerCase() ];
if ( match ) {
return !(match === true || elem.getAttribute("classid") !== match);
}
}
return true;
}
});
jQuery.fn.extend({
data: function( key, value ) {
var parts, attr, name,
data = null;
if ( typeof key === "undefined" ) {
if ( this.length ) {
data = jQuery.data( this[0] );
if ( this[0].nodeType === 1 && !jQuery._data( this[0], "parsedAttrs" ) ) {
attr = this[0].attributes;
for ( var i = 0, l = attr.length; i < l; i++ ) {
name = attr[i].name;
if ( name.indexOf( "data-" ) === 0 ) {
name = jQuery.camelCase( name.substring(5) );
dataAttr( this[0], name, data[ name ] );
}
}
jQuery._data( this[0], "parsedAttrs", true );
}
}
return data;
} else if ( typeof key === "object" ) {
return this.each(function() {
jQuery.data( this, key );
});
}
parts = key.split(".");
parts[1] = parts[1] ? "." + parts[1] : "";
if ( value === undefined ) {
data = this.triggerHandler("getData" + parts[1] + "!", [parts[0]]);
// Try to fetch any internally stored data first
if ( data === undefined && this.length ) {
data = jQuery.data( this[0], key );
data = dataAttr( this[0], key, data );
}
return data === undefined && parts[1] ?
this.data( parts[0] ) :
data;
} else {
return this.each(function() {
var self = jQuery( this ),
args = [ parts[0], value ];
self.triggerHandler( "setData" + parts[1] + "!", args );
jQuery.data( this, key, value );
self.triggerHandler( "changeData" + parts[1] + "!", args );
});
}
},
removeData: function( key ) {
return this.each(function() {
jQuery.removeData( this, key );
});
}
});
function dataAttr( elem, key, data ) {
// If nothing was found internally, try to fetch any
// data from the HTML5 data-* attribute
if ( data === undefined && elem.nodeType === 1 ) {
var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase();
data = elem.getAttribute( name );
if ( typeof data === "string" ) {
try {
data = data === "true" ? true :
data === "false" ? false :
data === "null" ? null :
jQuery.isNumeric( data ) ? parseFloat( data ) :
rbrace.test( data ) ? jQuery.parseJSON( data ) :
data;
} catch( e ) {}
// Make sure we set the data so it isn't changed later
jQuery.data( elem, key, data );
} else {
data = undefined;
}
}
return data;
}
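// Illustrative (editor's sketch; the markup is hypothetical): how HTML5
// data-* attributes surface through .data(), including the type coercion
// performed by dataAttr above:
//
//   <div id="box" data-max-size="7" data-opts='{"fit": true}'></div>
//
//   jQuery( "#box" ).data( "maxSize" ); // 7 (camelCased key, numeric cast)
//   jQuery( "#box" ).data( "opts" );    // { fit: true } (parsed as JSON)
//   jQuery( "#box" ).data( "nope" );    // undefined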
// checks a cache object for emptiness
function isEmptyDataObject( obj ) {
for ( var name in obj ) {
// if the public data object is empty, the private is still empty
if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) {
continue;
}
if ( name !== "toJSON" ) {
return false;
}
}
return true;
}
function handleQueueMarkDefer( elem, type, src ) {
var deferDataKey = type + "defer",
queueDataKey = type + "queue",
markDataKey = type + "mark",
defer = jQuery._data( elem, deferDataKey );
if ( defer &&
( src === "queue" || !jQuery._data(elem, queueDataKey) ) &&
( src === "mark" || !jQuery._data(elem, markDataKey) ) ) {
// Give room for hard-coded callbacks to fire first
// and eventually mark/queue something else on the element
setTimeout( function() {
if ( !jQuery._data( elem, queueDataKey ) &&
!jQuery._data( elem, markDataKey ) ) {
jQuery.removeData( elem, deferDataKey, true );
defer.fire();
}
}, 0 );
}
}
jQuery.extend({
_mark: function( elem, type ) {
if ( elem ) {
type = ( type || "fx" ) + "mark";
jQuery._data( elem, type, (jQuery._data( elem, type ) || 0) + 1 );
}
},
_unmark: function( force, elem, type ) {
if ( force !== true ) {
type = elem;
elem = force;
force = false;
}
if ( elem ) {
type = type || "fx";
var key = type + "mark",
count = force ? 0 : ( (jQuery._data( elem, key ) || 1) - 1 );
if ( count ) {
jQuery._data( elem, key, count );
} else {
jQuery.removeData( elem, key, true );
handleQueueMarkDefer( elem, type, "mark" );
}
}
},
queue: function( elem, type, data ) {
var q;
if ( elem ) {
type = ( type || "fx" ) + "queue";
q = jQuery._data( elem, type );
// Speed up dequeue by getting out quickly if this is just a lookup
if ( data ) {
if ( !q || jQuery.isArray(data) ) {
q = jQuery._data( elem, type, jQuery.makeArray(data) );
} else {
q.push( data );
}
}
return q || [];
}
},
dequeue: function( elem, type ) {
type = type || "fx";
var queue = jQuery.queue( elem, type ),
fn = queue.shift(),
hooks = {};
// If the fx queue is dequeued, always remove the progress sentinel
if ( fn === "inprogress" ) {
fn = queue.shift();
}
if ( fn ) {
// Add a progress sentinel to prevent the fx queue from being
// automatically dequeued
if ( type === "fx" ) {
queue.unshift( "inprogress" );
}
jQuery._data( elem, type + ".run", hooks );
fn.call( elem, function() {
jQuery.dequeue( elem, type );
}, hooks );
}
if ( !queue.length ) {
jQuery.removeData( elem, type + "queue " + type + ".run", true );
handleQueueMarkDefer( elem, type, "queue" );
}
}
});
jQuery.fn.extend({
queue: function( type, data ) {
if ( typeof type !== "string" ) {
data = type;
type = "fx";
}
if ( data === undefined ) {
return jQuery.queue( this[0], type );
}
return this.each(function() {
var queue = jQuery.queue( this, type, data );
if ( type === "fx" && queue[0] !== "inprogress" ) {
jQuery.dequeue( this, type );
}
});
},
dequeue: function( type ) {
return this.each(function() {
jQuery.dequeue( this, type );
});
},
// Based off of the plugin by Clint Helfers, with permission.
// http://blindsignals.com/index.php/2009/07/jquery-delay/
delay: function( time, type ) {
time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time;
type = type || "fx";
return this.queue( type, function( next, hooks ) {
var timeout = setTimeout( next, time );
hooks.stop = function() {
clearTimeout( timeout );
};
});
},
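// Illustrative (editor's sketch): insert a pause into the fx queue;
// named speeds from jQuery.fx.speeds ("slow", "fast") also work:
//
//   jQuery( "#box" ).slideDown( 200 ).delay( 500 ).fadeOut( 200 );
//   // fadeOut is dequeued only after the 500ms timeout fires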
clearQueue: function( type ) {
return this.queue( type || "fx", [] );
},
// Get a promise resolved when queues of a certain type
// are emptied (fx is the type by default)
promise: function( type, object ) {
if ( typeof type !== "string" ) {
object = type;
type = undefined;
}
type = type || "fx";
var defer = jQuery.Deferred(),
elements = this,
i = elements.length,
count = 1,
deferDataKey = type + "defer",
queueDataKey = type + "queue",
markDataKey = type + "mark",
tmp;
function resolve() {
if ( !( --count ) ) {
defer.resolveWith( elements, [ elements ] );
}
}
while( i-- ) {
if (( tmp = jQuery.data( elements[ i ], deferDataKey, undefined, true ) ||
( jQuery.data( elements[ i ], queueDataKey, undefined, true ) ||
jQuery.data( elements[ i ], markDataKey, undefined, true ) ) &&
jQuery.data( elements[ i ], deferDataKey, jQuery.Callbacks( "once memory" ), true ) )) {
count++;
tmp.add( resolve );
}
}
resolve();
return defer.promise();
}
});
var rclass = /[\n\t\r]/g,
rspace = /\s+/,
rreturn = /\r/g,
rtype = /^(?:button|input)$/i,
rfocusable = /^(?:button|input|object|select|textarea)$/i,
rclickable = /^a(?:rea)?$/i,
rboolean = /^(?:autofocus|autoplay|async|checked|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped|selected)$/i,
getSetAttribute = jQuery.support.getSetAttribute,
nodeHook, boolHook, fixSpecified;
jQuery.fn.extend({
attr: function( name, value ) {
return jQuery.access( this, name, value, true, jQuery.attr );
},
removeAttr: function( name ) {
return this.each(function() {
jQuery.removeAttr( this, name );
});
},
prop: function( name, value ) {
return jQuery.access( this, name, value, true, jQuery.prop );
},
removeProp: function( name ) {
name = jQuery.propFix[ name ] || name;
return this.each(function() {
// try/catch handles cases where IE balks (such as removing a property on window)
try {
this[ name ] = undefined;
delete this[ name ];
} catch( e ) {}
});
},
addClass: function( value ) {
var classNames, i, l, elem,
setClass, c, cl;
if ( jQuery.isFunction( value ) ) {
return this.each(function( j ) {
jQuery( this ).addClass( value.call(this, j, this.className) );
});
}
if ( value && typeof value === "string" ) {
classNames = value.split( rspace );
for ( i = 0, l = this.length; i < l; i++ ) {
elem = this[ i ];
if ( elem.nodeType === 1 ) {
if ( !elem.className && classNames.length === 1 ) {
elem.className = value;
} else {
setClass = " " + elem.className + " ";
for ( c = 0, cl = classNames.length; c < cl; c++ ) {
if ( !~setClass.indexOf( " " + classNames[ c ] + " " ) ) {
setClass += classNames[ c ] + " ";
}
}
elem.className = jQuery.trim( setClass );
}
}
}
}
return this;
},
removeClass: function( value ) {
var classNames, i, l, elem, className, c, cl;
if ( jQuery.isFunction( value ) ) {
return this.each(function( j ) {
jQuery( this ).removeClass( value.call(this, j, this.className) );
});
}
if ( (value && typeof value === "string") || value === undefined ) {
classNames = ( value || "" ).split( rspace );
for ( i = 0, l = this.length; i < l; i++ ) {
elem = this[ i ];
if ( elem.nodeType === 1 && elem.className ) {
if ( value ) {
className = (" " + elem.className + " ").replace( rclass, " " );
for ( c = 0, cl = classNames.length; c < cl; c++ ) {
className = className.replace(" " + classNames[ c ] + " ", " ");
}
elem.className = jQuery.trim( className );
} else {
elem.className = "";
}
}
}
}
return this;
},
toggleClass: function( value, stateVal ) {
var type = typeof value,
isBool = typeof stateVal === "boolean";
if ( jQuery.isFunction( value ) ) {
return this.each(function( i ) {
jQuery( this ).toggleClass( value.call(this, i, this.className, stateVal), stateVal );
});
}
return this.each(function() {
if ( type === "string" ) {
// toggle individual class names
var className,
i = 0,
self = jQuery( this ),
state = stateVal,
classNames = value.split( rspace );
while ( (className = classNames[ i++ ]) ) {
// check each className given, space separated list
state = isBool ? state : !self.hasClass( className );
self[ state ? "addClass" : "removeClass" ]( className );
}
} else if ( type === "undefined" || type === "boolean" ) {
if ( this.className ) {
// store className if set
jQuery._data( this, "__className__", this.className );
}
// toggle whole className
this.className = this.className || value === false ? "" : jQuery._data( this, "__className__" ) || "";
}
});
},
hasClass: function( selector ) {
var className = " " + selector + " ",
i = 0,
l = this.length;
for ( ; i < l; i++ ) {
if ( this[i].nodeType === 1 && (" " + this[i].className + " ").replace(rclass, " ").indexOf( className ) > -1 ) {
return true;
}
}
return false;
},
val: function( value ) {
var hooks, ret, isFunction,
elem = this[0];
if ( !arguments.length ) {
if ( elem ) {
hooks = jQuery.valHooks[ elem.nodeName.toLowerCase() ] || jQuery.valHooks[ elem.type ];
if ( hooks && "get" in hooks && (ret = hooks.get( elem, "value" )) !== undefined ) {
return ret;
}
ret = elem.value;
return typeof ret === "string" ?
// handle most common string cases
ret.replace(rreturn, "") :
// handle cases where value is null/undef or number
ret == null ? "" : ret;
}
return;
}
isFunction = jQuery.isFunction( value );
return this.each(function( i ) {
var self = jQuery(this), val;
if ( this.nodeType !== 1 ) {
return;
}
if ( isFunction ) {
val = value.call( this, i, self.val() );
} else {
val = value;
}
// Treat null/undefined as ""; convert numbers to string
if ( val == null ) {
val = "";
} else if ( typeof val === "number" ) {
val += "";
} else if ( jQuery.isArray( val ) ) {
val = jQuery.map(val, function ( value ) {
return value == null ? "" : value + "";
});
}
hooks = jQuery.valHooks[ this.nodeName.toLowerCase() ] || jQuery.valHooks[ this.type ];
// If set returns undefined, fall back to normal setting
if ( !hooks || !("set" in hooks) || hooks.set( this, val, "value" ) === undefined ) {
this.value = val;
}
});
}
});
jQuery.extend({
valHooks: {
option: {
get: function( elem ) {
// attributes.value is undefined in Blackberry 4.7,
// which uses .value instead. See #6932
var val = elem.attributes.value;
return !val || val.specified ? elem.value : elem.text;
}
},
select: {
get: function( elem ) {
var value, i, max, option,
index = elem.selectedIndex,
values = [],
options = elem.options,
one = elem.type === "select-one";
// Nothing was selected
if ( index < 0 ) {
return null;
}
// Loop through all the selected options
i = one ? index : 0;
max = one ? index + 1 : options.length;
for ( ; i < max; i++ ) {
option = options[ i ];
// Don't return options that are disabled or in a disabled optgroup
if ( option.selected && (jQuery.support.optDisabled ? !option.disabled : option.getAttribute("disabled") === null) &&
(!option.parentNode.disabled || !jQuery.nodeName( option.parentNode, "optgroup" )) ) {
// Get the specific value for the option
value = jQuery( option ).val();
// We don't need an array for one selects
if ( one ) {
return value;
}
// Multi-Selects return an array
values.push( value );
}
}
// Fixes Bug #2551 -- select.val() broken in IE after form.reset()
if ( one && !values.length && options.length ) {
return jQuery( options[ index ] ).val();
}
return values;
},
set: function( elem, value ) {
var values = jQuery.makeArray( value );
jQuery(elem).find("option").each(function() {
this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0;
});
if ( !values.length ) {
elem.selectedIndex = -1;
}
return values;
}
}
},
attrFn: {
val: true,
css: true,
html: true,
text: true,
data: true,
width: true,
height: true,
offset: true
},
attr: function( elem, name, value, pass ) {
var ret, hooks, notxml,
nType = elem.nodeType;
// don't get/set attributes on text, comment and attribute nodes
if ( !elem || nType === 3 || nType === 8 || nType === 2 ) {
return;
}
if ( pass && name in jQuery.attrFn ) {
return jQuery( elem )[ name ]( value );
}
// Fallback to prop when attributes are not supported
if ( typeof elem.getAttribute === "undefined" ) {
return jQuery.prop( elem, name, value );
}
notxml = nType !== 1 || !jQuery.isXMLDoc( elem );
// All attributes are lowercase
// Grab necessary hook if one is defined
if ( notxml ) {
name = name.toLowerCase();
hooks = jQuery.attrHooks[ name ] || ( rboolean.test( name ) ? boolHook : nodeHook );
}
if ( value !== undefined ) {
if ( value === null ) {
jQuery.removeAttr( elem, name );
return;
} else if ( hooks && "set" in hooks && notxml && (ret = hooks.set( elem, value, name )) !== undefined ) {
return ret;
} else {
elem.setAttribute( name, "" + value );
return value;
}
} else if ( hooks && "get" in hooks && notxml && (ret = hooks.get( elem, name )) !== null ) {
return ret;
} else {
ret = elem.getAttribute( name );
// Non-existent attributes return null, we normalize to undefined
return ret === null ?
undefined :
ret;
}
},
removeAttr: function( elem, value ) {
var propName, attrNames, name, l,
i = 0;
if ( value && elem.nodeType === 1 ) {
attrNames = value.toLowerCase().split( rspace );
l = attrNames.length;
for ( ; i < l; i++ ) {
name = attrNames[ i ];
if ( name ) {
propName = jQuery.propFix[ name ] || name;
// See #9699 for explanation of this approach (setting first, then removal)
jQuery.attr( elem, name, "" );
elem.removeAttribute( getSetAttribute ? name : propName );
// Set corresponding property to false for boolean attributes
if ( rboolean.test( name ) && propName in elem ) {
elem[ propName ] = false;
}
}
}
}
},
attrHooks: {
type: {
set: function( elem, value ) {
// We can't allow the type property to be changed (since it causes problems in IE)
if ( rtype.test( elem.nodeName ) && elem.parentNode ) {
jQuery.error( "type property can't be changed" );
} else if ( !jQuery.support.radioValue && value === "radio" && jQuery.nodeName(elem, "input") ) {
// Setting the type on a radio button after the value resets the value in IE6-9
// Reset value to its default in case type is set after value
// This is for element creation
var val = elem.value;
elem.setAttribute( "type", value );
if ( val ) {
elem.value = val;
}
return value;
}
}
},
// Use the value property for back compat
// Use the nodeHook for button elements in IE6/7 (#1954)
value: {
get: function( elem, name ) {
if ( nodeHook && jQuery.nodeName( elem, "button" ) ) {
return nodeHook.get( elem, name );
}
return name in elem ?
elem.value :
null;
},
set: function( elem, value, name ) {
if ( nodeHook && jQuery.nodeName( elem, "button" ) ) {
return nodeHook.set( elem, value, name );
}
// Does not return so that setAttribute is also used
elem.value = value;
}
}
},
propFix: {
tabindex: "tabIndex",
readonly: "readOnly",
"for": "htmlFor",
"class": "className",
maxlength: "maxLength",
cellspacing: "cellSpacing",
cellpadding: "cellPadding",
rowspan: "rowSpan",
colspan: "colSpan",
usemap: "useMap",
frameborder: "frameBorder",
contenteditable: "contentEditable"
},
prop: function( elem, name, value ) {
var ret, hooks, notxml,
nType = elem.nodeType;
// don't get/set properties on text, comment and attribute nodes
if ( !elem || nType === 3 || nType === 8 || nType === 2 ) {
return;
}
notxml = nType !== 1 || !jQuery.isXMLDoc( elem );
if ( notxml ) {
// Fix name and attach hooks
name = jQuery.propFix[ name ] || name;
hooks = jQuery.propHooks[ name ];
}
if ( value !== undefined ) {
if ( hooks && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ) {
return ret;
} else {
return ( elem[ name ] = value );
}
} else {
if ( hooks && "get" in hooks && (ret = hooks.get( elem, name )) !== null ) {
return ret;
} else {
return elem[ name ];
}
}
},
propHooks: {
tabIndex: {
get: function( elem ) {
// elem.tabIndex doesn't always return the correct value when it hasn't been explicitly set
// http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/
var attributeNode = elem.getAttributeNode("tabindex");
return attributeNode && attributeNode.specified ?
parseInt( attributeNode.value, 10 ) :
rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ?
0 :
undefined;
}
}
}
});
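// Illustrative (editor's sketch; the markup is hypothetical): attributes
// reflect the markup while properties reflect live DOM state:
//
//   <input id="cb" type="checkbox" checked="checked">
//
//   jQuery( "#cb" ).attr( "checked" );        // "checked" (boolean attribute)
//   jQuery( "#cb" ).prop( "checked" );        // true -- the live state
//   jQuery( "#cb" ).prop( "checked", false ); // actually unchecks the box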
// Add the tabIndex propHook to attrHooks for back-compat (different case is intentional)
jQuery.attrHooks.tabindex = jQuery.propHooks.tabIndex;
// Hook for boolean attributes
boolHook = {
get: function( elem, name ) {
// Align boolean attributes with corresponding properties
// Fall back to attribute presence where some booleans are not supported
var attrNode,
property = jQuery.prop( elem, name );
return property === true || typeof property !== "boolean" && ( attrNode = elem.getAttributeNode(name) ) && attrNode.nodeValue !== false ?
name.toLowerCase() :
undefined;
},
set: function( elem, value, name ) {
var propName;
if ( value === false ) {
// Remove boolean attributes when set to false
jQuery.removeAttr( elem, name );
} else {
// value is true since we know at this point its type is boolean and not false
// Set boolean attributes to the same name and set the DOM property
propName = jQuery.propFix[ name ] || name;
if ( propName in elem ) {
// Only set the IDL specifically if it already exists on the element
elem[ propName ] = true;
}
elem.setAttribute( name, name.toLowerCase() );
}
return name;
}
};
// IE6/7 do not support getting/setting some attributes with get/setAttribute
if ( !getSetAttribute ) {
fixSpecified = {
name: true,
id: true
};
// Use this for any attribute in IE6/7
// This fixes almost every IE6/7 issue
nodeHook = jQuery.valHooks.button = {
get: function( elem, name ) {
var ret;
ret = elem.getAttributeNode( name );
return ret && ( fixSpecified[ name ] ? ret.nodeValue !== "" : ret.specified ) ?
ret.nodeValue :
undefined;
},
set: function( elem, value, name ) {
// Set the existing or create a new attribute node
var ret = elem.getAttributeNode( name );
if ( !ret ) {
ret = document.createAttribute( name );
elem.setAttributeNode( ret );
}
return ( ret.nodeValue = value + "" );
}
};
// Apply the nodeHook to tabindex
jQuery.attrHooks.tabindex.set = nodeHook.set;
// Set width and height to auto instead of 0 on empty string (Bug #8150)
// This is for removals
jQuery.each([ "width", "height" ], function( i, name ) {
jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], {
set: function( elem, value ) {
if ( value === "" ) {
elem.setAttribute( name, "auto" );
return value;
}
}
});
});
// Set contenteditable to false on removals (#10429)
// Setting to empty string throws an error as an invalid value
jQuery.attrHooks.contenteditable = {
get: nodeHook.get,
set: function( elem, value, name ) {
if ( value === "" ) {
value = "false";
}
nodeHook.set( elem, value, name );
}
};
}
// Some attributes require a special call on IE
if ( !jQuery.support.hrefNormalized ) {
jQuery.each([ "href", "src", "width", "height" ], function( i, name ) {
jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], {
get: function( elem ) {
var ret = elem.getAttribute( name, 2 );
return ret === null ? undefined : ret;
}
});
});
}
if ( !jQuery.support.style ) {
jQuery.attrHooks.style = {
get: function( elem ) {
// Return undefined in the case of empty string
// Normalize to lowercase since IE uppercases css property names
return elem.style.cssText.toLowerCase() || undefined;
},
set: function( elem, value ) {
return ( elem.style.cssText = "" + value );
}
};
}
// Safari mis-reports the default selected property of an option
// Accessing the parent's selectedIndex property fixes it
if ( !jQuery.support.optSelected ) {
jQuery.propHooks.selected = jQuery.extend( jQuery.propHooks.selected, {
get: function( elem ) {
var parent = elem.parentNode;
if ( parent ) {
parent.selectedIndex;
// Make sure that it also works with optgroups, see #5701
if ( parent.parentNode ) {
parent.parentNode.selectedIndex;
}
}
return null;
}
});
}
// IE6/7 name the enctype property "encoding"
if ( !jQuery.support.enctype ) {
jQuery.propFix.enctype = "encoding";
}
// Radios and checkboxes getter/setter
if ( !jQuery.support.checkOn ) {
jQuery.each([ "radio", "checkbox" ], function() {
jQuery.valHooks[ this ] = {
get: function( elem ) {
// Handle the case where in Webkit "" is returned instead of "on" if a value isn't specified
return elem.getAttribute("value") === null ? "on" : elem.value;
}
};
});
}
jQuery.each([ "radio", "checkbox" ], function() {
jQuery.valHooks[ this ] = jQuery.extend( jQuery.valHooks[ this ], {
set: function( elem, value ) {
if ( jQuery.isArray( value ) ) {
return ( elem.checked = jQuery.inArray( jQuery(elem).val(), value ) >= 0 );
}
}
});
});
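// Illustrative sketch (not part of the library): the setter above is what
// powers the array form of .val() for groups of checkable inputs, e.g.
//   jQuery( "input[name=fruit]" ).val( [ "apple", "pear" ] );
//   // checks every box whose value is in the array, unchecks the rest
// ("fruit", "apple" and "pear" are hypothetical names/values)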
var rformElems = /^(?:textarea|input|select)$/i,
rtypenamespace = /^([^\.]*)?(?:\.(.+))?$/,
rhoverHack = /\bhover(\.\S+)?\b/,
rkeyEvent = /^key/,
rmouseEvent = /^(?:mouse|contextmenu)|click/,
rfocusMorph = /^(?:focusinfocus|focusoutblur)$/,
rquickIs = /^(\w*)(?:#([\w\-]+))?(?:\.([\w\-]+))?$/,
quickParse = function( selector ) {
var quick = rquickIs.exec( selector );
if ( quick ) {
// 0 1 2 3
// [ _, tag, id, class ]
quick[1] = ( quick[1] || "" ).toLowerCase();
quick[3] = quick[3] && new RegExp( "(?:^|\\s)" + quick[3] + "(?:\\s|$)" );
}
return quick;
},
quickIs = function( elem, m ) {
var attrs = elem.attributes || {};
return (
(!m[1] || elem.nodeName.toLowerCase() === m[1]) &&
(!m[2] || (attrs.id || {}).value === m[2]) &&
(!m[3] || m[3].test( (attrs[ "class" ] || {}).value ))
);
},
hoverHack = function( events ) {
return jQuery.event.special.hover ? events : events.replace( rhoverHack, "mouseenter$1 mouseleave$1" );
};
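// Illustrative sketch (not part of the library): quickParse turns a simple
// selector into the [ _, tag, id, class ] shape consumed by quickIs, e.g.
//   quickParse( "div#main.active" )
//   // -> [ "div#main.active", "div", "main", /(?:^|\s)active(?:\s|$)/ ]
// Selectors with attributes or combinators fail rquickIs and fall back to
// the full .is() check in dispatch().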
/*
* Helper functions for managing events -- not part of the public interface.
* Props to Dean Edwards' addEvent library for many of the ideas.
*/
jQuery.event = {
add: function( elem, types, handler, data, selector ) {
var elemData, eventHandle, events,
t, tns, type, namespaces, handleObj,
handleObjIn, quick, handlers, special;
// Don't attach events to noData or text/comment nodes (allow plain objects though)
if ( elem.nodeType === 3 || elem.nodeType === 8 || !types || !handler || !(elemData = jQuery._data( elem )) ) {
return;
}
// Caller can pass in an object of custom data in lieu of the handler
if ( handler.handler ) {
handleObjIn = handler;
handler = handleObjIn.handler;
}
// Make sure that the handler has a unique ID, used to find/remove it later
if ( !handler.guid ) {
handler.guid = jQuery.guid++;
}
// Init the element's event structure and main handler, if this is the first
events = elemData.events;
if ( !events ) {
elemData.events = events = {};
}
eventHandle = elemData.handle;
if ( !eventHandle ) {
elemData.handle = eventHandle = function( e ) {
// Discard the second event of a jQuery.event.trigger() and
// when an event is called after a page has unloaded
return typeof jQuery !== "undefined" && (!e || jQuery.event.triggered !== e.type) ?
jQuery.event.dispatch.apply( eventHandle.elem, arguments ) :
undefined;
};
// Add elem as a property of the handle fn to prevent a memory leak with IE non-native events
eventHandle.elem = elem;
}
// Handle multiple events separated by a space
// jQuery(...).bind("mouseover mouseout", fn);
types = jQuery.trim( hoverHack(types) ).split( " " );
for ( t = 0; t < types.length; t++ ) {
tns = rtypenamespace.exec( types[t] ) || [];
type = tns[1];
namespaces = ( tns[2] || "" ).split( "." ).sort();
// If event changes its type, use the special event handlers for the changed type
special = jQuery.event.special[ type ] || {};
// If selector defined, determine special event api type, otherwise given type
type = ( selector ? special.delegateType : special.bindType ) || type;
// Update special based on newly reset type
special = jQuery.event.special[ type ] || {};
// handleObj is passed to all event handlers
handleObj = jQuery.extend({
type: type,
origType: tns[1],
data: data,
handler: handler,
guid: handler.guid,
selector: selector,
quick: quickParse( selector ),
namespace: namespaces.join(".")
}, handleObjIn );
// Init the event handler queue if we're the first
handlers = events[ type ];
if ( !handlers ) {
handlers = events[ type ] = [];
handlers.delegateCount = 0;
// Only use addEventListener/attachEvent if the special events handler returns false
if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) {
// Bind the global event handler to the element
if ( elem.addEventListener ) {
elem.addEventListener( type, eventHandle, false );
} else if ( elem.attachEvent ) {
elem.attachEvent( "on" + type, eventHandle );
}
}
}
if ( special.add ) {
special.add.call( elem, handleObj );
if ( !handleObj.handler.guid ) {
handleObj.handler.guid = handler.guid;
}
}
// Add to the element's handler list, delegates in front
if ( selector ) {
handlers.splice( handlers.delegateCount++, 0, handleObj );
} else {
handlers.push( handleObj );
}
// Keep track of which events have ever been used, for event optimization
jQuery.event.global[ type ] = true;
}
// Nullify elem to prevent memory leaks in IE
elem = null;
},
global: {},
// Detach an event or set of events from an element
remove: function( elem, types, handler, selector, mappedTypes ) {
var elemData = jQuery.hasData( elem ) && jQuery._data( elem ),
t, tns, type, origType, namespaces, origCount,
j, events, special, handle, eventType, handleObj;
if ( !elemData || !(events = elemData.events) ) {
return;
}
// Once for each type.namespace in types; type may be omitted
types = jQuery.trim( hoverHack( types || "" ) ).split(" ");
for ( t = 0; t < types.length; t++ ) {
tns = rtypenamespace.exec( types[t] ) || [];
type = origType = tns[1];
namespaces = tns[2];
// Unbind all events (on this namespace, if provided) for the element
if ( !type ) {
for ( type in events ) {
jQuery.event.remove( elem, type + types[ t ], handler, selector, true );
}
continue;
}
special = jQuery.event.special[ type ] || {};
type = ( selector? special.delegateType : special.bindType ) || type;
eventType = events[ type ] || [];
origCount = eventType.length;
namespaces = namespaces ? new RegExp("(^|\\.)" + namespaces.split(".").sort().join("\\.(?:.*\\.)?") + "(\\.|$)") : null;
// Remove matching events
for ( j = 0; j < eventType.length; j++ ) {
handleObj = eventType[ j ];
if ( ( mappedTypes || origType === handleObj.origType ) &&
( !handler || handler.guid === handleObj.guid ) &&
( !namespaces || namespaces.test( handleObj.namespace ) ) &&
( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) {
eventType.splice( j--, 1 );
if ( handleObj.selector ) {
eventType.delegateCount--;
}
if ( special.remove ) {
special.remove.call( elem, handleObj );
}
}
}
// Remove generic event handler if we removed something and no more handlers exist
// (avoids potential for endless recursion during removal of special event handlers)
if ( eventType.length === 0 && origCount !== eventType.length ) {
if ( !special.teardown || special.teardown.call( elem, namespaces ) === false ) {
jQuery.removeEvent( elem, type, elemData.handle );
}
delete events[ type ];
}
}
// Remove the expando if it's no longer used
if ( jQuery.isEmptyObject( events ) ) {
handle = elemData.handle;
if ( handle ) {
handle.elem = null;
}
// removeData also checks for emptiness and clears the expando if empty
// so use it instead of delete
jQuery.removeData( elem, [ "events", "handle" ], true );
}
},
// Events that are safe to short-circuit if no handlers are attached.
// Native DOM events should not be added, they may have inline handlers.
customEvent: {
"getData": true,
"setData": true,
"changeData": true
},
trigger: function( event, data, elem, onlyHandlers ) {
// Don't do events on text and comment nodes
if ( elem && (elem.nodeType === 3 || elem.nodeType === 8) ) {
return;
}
// Event object or event type
var type = event.type || event,
namespaces = [],
cache, exclusive, i, cur, old, ontype, special, handle, eventPath, bubbleType;
// focus/blur morphs to focusin/out; ensure we're not firing them right now
if ( rfocusMorph.test( type + jQuery.event.triggered ) ) {
return;
}
if ( type.indexOf( "!" ) >= 0 ) {
// Exclusive events trigger only for the exact event (no namespaces)
type = type.slice(0, -1);
exclusive = true;
}
if ( type.indexOf( "." ) >= 0 ) {
// Namespaced trigger; create a regexp to match event type in handle()
namespaces = type.split(".");
type = namespaces.shift();
namespaces.sort();
}
if ( (!elem || jQuery.event.customEvent[ type ]) && !jQuery.event.global[ type ] ) {
// No jQuery handlers for this event type, and it can't have inline handlers
return;
}
// Caller can pass in an Event, Object, or just an event type string
event = typeof event === "object" ?
// jQuery.Event object
event[ jQuery.expando ] ? event :
// Object literal
new jQuery.Event( type, event ) :
// Just the event type (string)
new jQuery.Event( type );
event.type = type;
event.isTrigger = true;
event.exclusive = exclusive;
event.namespace = namespaces.join( "." );
event.namespace_re = event.namespace? new RegExp("(^|\\.)" + namespaces.join("\\.(?:.*\\.)?") + "(\\.|$)") : null;
ontype = type.indexOf( ":" ) < 0 ? "on" + type : "";
// Handle a global trigger
if ( !elem ) {
// TODO: Stop taunting the data cache; remove global events and always attach to document
cache = jQuery.cache;
for ( i in cache ) {
if ( cache[ i ].events && cache[ i ].events[ type ] ) {
jQuery.event.trigger( event, data, cache[ i ].handle.elem, true );
}
}
return;
}
// Clean up the event in case it is being reused
event.result = undefined;
if ( !event.target ) {
event.target = elem;
}
// Clone any incoming data and prepend the event, creating the handler arg list
data = data != null ? jQuery.makeArray( data ) : [];
data.unshift( event );
// Allow special events to draw outside the lines
special = jQuery.event.special[ type ] || {};
if ( special.trigger && special.trigger.apply( elem, data ) === false ) {
return;
}
// Determine event propagation path in advance, per W3C events spec (#9951)
// Bubble up to document, then to window; watch for a global ownerDocument var (#9724)
eventPath = [[ elem, special.bindType || type ]];
if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) {
bubbleType = special.delegateType || type;
cur = rfocusMorph.test( bubbleType + type ) ? elem : elem.parentNode;
old = null;
for ( ; cur; cur = cur.parentNode ) {
eventPath.push([ cur, bubbleType ]);
old = cur;
}
// Only add window if we got to document (e.g., not plain obj or detached DOM)
if ( old && old === elem.ownerDocument ) {
eventPath.push([ old.defaultView || old.parentWindow || window, bubbleType ]);
}
}
// Fire handlers on the event path
for ( i = 0; i < eventPath.length && !event.isPropagationStopped(); i++ ) {
cur = eventPath[i][0];
event.type = eventPath[i][1];
handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" );
if ( handle ) {
handle.apply( cur, data );
}
// Note that this is a bare JS function and not a jQuery handler
handle = ontype && cur[ ontype ];
if ( handle && jQuery.acceptData( cur ) && handle.apply( cur, data ) === false ) {
event.preventDefault();
}
}
event.type = type;
// If nobody prevented the default action, do it now
if ( !onlyHandlers && !event.isDefaultPrevented() ) {
if ( (!special._default || special._default.apply( elem.ownerDocument, data ) === false) &&
!(type === "click" && jQuery.nodeName( elem, "a" )) && jQuery.acceptData( elem ) ) {
// Call a native DOM method on the target with the same name as the event.
// Can't use an .isFunction() check here because IE6/7 fails that test.
// Don't do default actions on window, that's where global variables be (#6170)
// IE<9 dies on focus/blur to hidden element (#1486)
if ( ontype && elem[ type ] && ((type !== "focus" && type !== "blur") || event.target.offsetWidth !== 0) && !jQuery.isWindow( elem ) ) {
// Don't re-trigger an onFOO event when we call its FOO() method
old = elem[ ontype ];
if ( old ) {
elem[ ontype ] = null;
}
// Prevent re-triggering of the same event, since we already bubbled it above
jQuery.event.triggered = type;
elem[ type ]();
jQuery.event.triggered = undefined;
if ( old ) {
elem[ ontype ] = old;
}
}
}
}
return event.result;
},
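// Illustrative sketch (not part of the library, `el` has click handlers):
// the type parsing at the top of trigger() makes these calls differ:
//   jQuery( el ).trigger( "click.myns" ); // only handlers bound as "click.myns"
//   jQuery( el ).trigger( "click!" );     // exclusive: only un-namespaced handlers
//   jQuery( el ).trigger( "click" );      // all click handlers, then the native
//                                         // el.click() default action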
dispatch: function( event ) {
// Make a writable jQuery.Event from the native event object
event = jQuery.event.fix( event || window.event );
var handlers = ( (jQuery._data( this, "events" ) || {} )[ event.type ] || []),
delegateCount = handlers.delegateCount,
args = [].slice.call( arguments, 0 ),
run_all = !event.exclusive && !event.namespace,
handlerQueue = [],
i, j, cur, jqcur, ret, selMatch, matched, matches, handleObj, sel, related;
// Use the fix-ed jQuery.Event rather than the (read-only) native event
args[0] = event;
event.delegateTarget = this;
// Determine handlers that should run if there are delegated events
// Avoid disabled elements in IE (#6911) and non-left-click bubbling in Firefox (#3861)
if ( delegateCount && !event.target.disabled && !(event.button && event.type === "click") ) {
// Pregenerate a single jQuery object for reuse with .is()
jqcur = jQuery(this);
jqcur.context = this.ownerDocument || this;
for ( cur = event.target; cur != this; cur = cur.parentNode || this ) {
selMatch = {};
matches = [];
jqcur[0] = cur;
for ( i = 0; i < delegateCount; i++ ) {
handleObj = handlers[ i ];
sel = handleObj.selector;
if ( selMatch[ sel ] === undefined ) {
selMatch[ sel ] = (
handleObj.quick ? quickIs( cur, handleObj.quick ) : jqcur.is( sel )
);
}
if ( selMatch[ sel ] ) {
matches.push( handleObj );
}
}
if ( matches.length ) {
handlerQueue.push({ elem: cur, matches: matches });
}
}
}
// Add the remaining (directly-bound) handlers
if ( handlers.length > delegateCount ) {
handlerQueue.push({ elem: this, matches: handlers.slice( delegateCount ) });
}
// Run delegates first; they may want to stop propagation beneath us
for ( i = 0; i < handlerQueue.length && !event.isPropagationStopped(); i++ ) {
matched = handlerQueue[ i ];
event.currentTarget = matched.elem;
for ( j = 0; j < matched.matches.length && !event.isImmediatePropagationStopped(); j++ ) {
handleObj = matched.matches[ j ];
// Triggered event must either 1) be non-exclusive and have no namespace, or
// 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace).
if ( run_all || (!event.namespace && !handleObj.namespace) || event.namespace_re && event.namespace_re.test( handleObj.namespace ) ) {
event.data = handleObj.data;
event.handleObj = handleObj;
ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler )
.apply( matched.elem, args );
if ( ret !== undefined ) {
event.result = ret;
if ( ret === false ) {
event.preventDefault();
event.stopPropagation();
}
}
}
}
}
return event.result;
},
// Includes some event props shared by KeyEvent and MouseEvent
// *** attrChange attrName relatedNode srcElement are not normalized, non-W3C, deprecated, will be removed in 1.8 ***
props: "attrChange attrName relatedNode srcElement altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),
fixHooks: {},
keyHooks: {
props: "char charCode key keyCode".split(" "),
filter: function( event, original ) {
// Add which for key events
if ( event.which == null ) {
event.which = original.charCode != null ? original.charCode : original.keyCode;
}
return event;
}
},
mouseHooks: {
props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),
filter: function( event, original ) {
var eventDoc, doc, body,
button = original.button,
fromElement = original.fromElement;
// Calculate pageX/Y if missing and clientX/Y available
if ( event.pageX == null && original.clientX != null ) {
eventDoc = event.target.ownerDocument || document;
doc = eventDoc.documentElement;
body = eventDoc.body;
event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 );
event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 );
}
// Add relatedTarget, if necessary
if ( !event.relatedTarget && fromElement ) {
event.relatedTarget = fromElement === event.target ? original.toElement : fromElement;
}
// Add which for click: 1 === left; 2 === middle; 3 === right
// Note: button is not normalized, so don't use it
if ( !event.which && button !== undefined ) {
event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 2 : 0 ) ) );
}
return event;
}
},
fix: function( event ) {
if ( event[ jQuery.expando ] ) {
return event;
}
// Create a writable copy of the event object and normalize some properties
var i, prop,
originalEvent = event,
fixHook = jQuery.event.fixHooks[ event.type ] || {},
copy = fixHook.props ? this.props.concat( fixHook.props ) : this.props;
event = jQuery.Event( originalEvent );
for ( i = copy.length; i; ) {
prop = copy[ --i ];
event[ prop ] = originalEvent[ prop ];
}
// Fix target property, if necessary (#1925, IE 6/7/8 & Safari2)
if ( !event.target ) {
event.target = originalEvent.srcElement || document;
}
// Target should not be a text node (#504, Safari)
if ( event.target.nodeType === 3 ) {
event.target = event.target.parentNode;
}
// For mouse/key events; add metaKey if it's not there (#3368, IE6/7/8)
if ( event.metaKey === undefined ) {
event.metaKey = event.ctrlKey;
}
return fixHook.filter? fixHook.filter( event, originalEvent ) : event;
},
special: {
ready: {
// Make sure the ready event is setup
setup: jQuery.bindReady
},
load: {
// Prevent triggered image.load events from bubbling to window.load
noBubble: true
},
focus: {
delegateType: "focusin"
},
blur: {
delegateType: "focusout"
},
beforeunload: {
setup: function( data, namespaces, eventHandle ) {
// We only want to do this special case on windows
if ( jQuery.isWindow( this ) ) {
this.onbeforeunload = eventHandle;
}
},
teardown: function( namespaces, eventHandle ) {
if ( this.onbeforeunload === eventHandle ) {
this.onbeforeunload = null;
}
}
}
},
simulate: function( type, elem, event, bubble ) {
// Piggyback on a donor event to simulate a different one.
// Fake originalEvent to avoid donor's stopPropagation, but if the
// simulated event prevents default then we do the same on the donor.
var e = jQuery.extend(
new jQuery.Event(),
event,
{ type: type,
isSimulated: true,
originalEvent: {}
}
);
if ( bubble ) {
jQuery.event.trigger( e, null, elem );
} else {
jQuery.event.dispatch.call( elem, e );
}
if ( e.isDefaultPrevented() ) {
event.preventDefault();
}
}
};
// Some plugins are still using this alias, but it's undocumented/deprecated and will be removed.
// The 1.7 special event interface should provide all the hooks needed now.
jQuery.event.handle = jQuery.event.dispatch;
jQuery.removeEvent = document.removeEventListener ?
function( elem, type, handle ) {
if ( elem.removeEventListener ) {
elem.removeEventListener( type, handle, false );
}
} :
function( elem, type, handle ) {
if ( elem.detachEvent ) {
elem.detachEvent( "on" + type, handle );
}
};
jQuery.Event = function( src, props ) {
// Allow instantiation without the 'new' keyword
if ( !(this instanceof jQuery.Event) ) {
return new jQuery.Event( src, props );
}
// Event object
if ( src && src.type ) {
this.originalEvent = src;
this.type = src.type;
// Events bubbling up the document may have been marked as prevented
// by a handler lower down the tree; reflect the correct value.
this.isDefaultPrevented = ( src.defaultPrevented || src.returnValue === false ||
src.getPreventDefault && src.getPreventDefault() ) ? returnTrue : returnFalse;
// Event type
} else {
this.type = src;
}
// Put explicitly provided properties onto the event object
if ( props ) {
jQuery.extend( this, props );
}
// Create a timestamp if incoming event doesn't have one
this.timeStamp = src && src.timeStamp || jQuery.now();
// Mark it as fixed
this[ jQuery.expando ] = true;
};
function returnFalse() {
return false;
}
function returnTrue() {
return true;
}
// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding
// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html
jQuery.Event.prototype = {
preventDefault: function() {
this.isDefaultPrevented = returnTrue;
var e = this.originalEvent;
if ( !e ) {
return;
}
// if preventDefault exists run it on the original event
if ( e.preventDefault ) {
e.preventDefault();
// otherwise set the returnValue property of the original event to false (IE)
} else {
e.returnValue = false;
}
},
stopPropagation: function() {
this.isPropagationStopped = returnTrue;
var e = this.originalEvent;
if ( !e ) {
return;
}
// if stopPropagation exists run it on the original event
if ( e.stopPropagation ) {
e.stopPropagation();
}
// otherwise set the cancelBubble property of the original event to true (IE)
e.cancelBubble = true;
},
stopImmediatePropagation: function() {
this.isImmediatePropagationStopped = returnTrue;
this.stopPropagation();
},
isDefaultPrevented: returnFalse,
isPropagationStopped: returnFalse,
isImmediatePropagationStopped: returnFalse
};
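// Illustrative sketch (not part of the library): jQuery.Event objects can
// be built by hand and passed to .trigger(); the props argument fills in
// event fields ("#editor" is a hypothetical id):
//   var e = jQuery.Event( "keydown", { keyCode: 13, ctrlKey: true } );
//   jQuery( "#editor" ).trigger( e );
//   e.isDefaultPrevented(); // true if some handler called e.preventDefault()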
// Create mouseenter/leave events using mouseover/out and event-time checks
jQuery.each({
mouseenter: "mouseover",
mouseleave: "mouseout"
}, function( orig, fix ) {
jQuery.event.special[ orig ] = {
delegateType: fix,
bindType: fix,
handle: function( event ) {
var target = this,
related = event.relatedTarget,
handleObj = event.handleObj,
selector = handleObj.selector,
ret;
// For mouseenter/leave call the handler if related is outside the target.
// NB: No relatedTarget if the mouse left/entered the browser window
if ( !related || (related !== target && !jQuery.contains( target, related )) ) {
event.type = handleObj.origType;
ret = handleObj.handler.apply( this, arguments );
event.type = fix;
}
return ret;
}
};
});
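// Illustrative sketch (not part of the library): thanks to the mapping
// above, delegated mouseenter works even though the native event does not
// bubble; jQuery listens for mouseover and filters with jQuery.contains:
//   jQuery( "#list" ).on( "mouseenter", "li", function() {
//       jQuery( this ).addClass( "hover" ); // hypothetical id/class names
//   });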
// IE submit delegation
if ( !jQuery.support.submitBubbles ) {
jQuery.event.special.submit = {
setup: function() {
// Only need this for delegated form submit events
if ( jQuery.nodeName( this, "form" ) ) {
return false;
}
// Lazy-add a submit handler when a descendant form may potentially be submitted
jQuery.event.add( this, "click._submit keypress._submit", function( e ) {
// Node name check avoids a VML-related crash in IE (#9807)
var elem = e.target,
form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? elem.form : undefined;
if ( form && !form._submit_attached ) {
jQuery.event.add( form, "submit._submit", function( event ) {
// If form was submitted by the user, bubble the event up the tree
if ( this.parentNode && !event.isTrigger ) {
jQuery.event.simulate( "submit", this.parentNode, event, true );
}
});
form._submit_attached = true;
}
});
// return undefined since we don't need an event listener
},
teardown: function() {
// Only need this for delegated form submit events
if ( jQuery.nodeName( this, "form" ) ) {
return false;
}
// Remove delegated handlers; cleanData eventually reaps submit handlers attached above
jQuery.event.remove( this, "._submit" );
}
};
}
// IE change delegation and checkbox/radio fix
if ( !jQuery.support.changeBubbles ) {
jQuery.event.special.change = {
setup: function() {
if ( rformElems.test( this.nodeName ) ) {
// IE doesn't fire change on a check/radio until blur; trigger it on click
// after a propertychange. Eat the blur-change in special.change.handle.
// This still fires onchange a second time for check/radio after blur.
if ( this.type === "checkbox" || this.type === "radio" ) {
jQuery.event.add( this, "propertychange._change", function( event ) {
if ( event.originalEvent.propertyName === "checked" ) {
this._just_changed = true;
}
});
jQuery.event.add( this, "click._change", function( event ) {
if ( this._just_changed && !event.isTrigger ) {
this._just_changed = false;
jQuery.event.simulate( "change", this, event, true );
}
});
}
return false;
}
// Delegated event; lazy-add a change handler on descendant inputs
jQuery.event.add( this, "beforeactivate._change", function( e ) {
var elem = e.target;
if ( rformElems.test( elem.nodeName ) && !elem._change_attached ) {
jQuery.event.add( elem, "change._change", function( event ) {
if ( this.parentNode && !event.isSimulated && !event.isTrigger ) {
jQuery.event.simulate( "change", this.parentNode, event, true );
}
});
elem._change_attached = true;
}
});
},
handle: function( event ) {
var elem = event.target;
// Swallow native change events from checkbox/radio, we already triggered them above
if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) {
return event.handleObj.handler.apply( this, arguments );
}
},
teardown: function() {
jQuery.event.remove( this, "._change" );
return rformElems.test( this.nodeName );
}
};
}
// Create "bubbling" focus and blur events
if ( !jQuery.support.focusinBubbles ) {
jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) {
// Attach a single capturing handler while someone wants focusin/focusout
var attaches = 0,
handler = function( event ) {
jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true );
};
jQuery.event.special[ fix ] = {
setup: function() {
if ( attaches++ === 0 ) {
document.addEventListener( orig, handler, true );
}
},
teardown: function() {
if ( --attaches === 0 ) {
document.removeEventListener( orig, handler, true );
}
}
};
});
}
jQuery.fn.extend({
on: function( types, selector, data, fn, /*INTERNAL*/ one ) {
var origFn, type;
// Types can be a map of types/handlers
if ( typeof types === "object" ) {
// ( types-Object, selector, data )
if ( typeof selector !== "string" ) {
// ( types-Object, data )
data = selector;
selector = undefined;
}
for ( type in types ) {
this.on( type, selector, data, types[ type ], one );
}
return this;
}
if ( data == null && fn == null ) {
// ( types, fn )
fn = selector;
data = selector = undefined;
} else if ( fn == null ) {
if ( typeof selector === "string" ) {
// ( types, selector, fn )
fn = data;
data = undefined;
} else {
// ( types, data, fn )
fn = data;
data = selector;
selector = undefined;
}
}
if ( fn === false ) {
fn = returnFalse;
} else if ( !fn ) {
return this;
}
if ( one === 1 ) {
origFn = fn;
fn = function( event ) {
// Can use an empty set, since event contains the info
jQuery().off( event );
return origFn.apply( this, arguments );
};
// Use same guid so caller can remove using origFn
fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ );
}
return this.each( function() {
jQuery.event.add( this, types, fn, data, selector );
});
},
one: function( types, selector, data, fn ) {
return this.on.call( this, types, selector, data, fn, 1 );
},
off: function( types, selector, fn ) {
if ( types && types.preventDefault && types.handleObj ) {
// ( event ) dispatched jQuery.Event
var handleObj = types.handleObj;
jQuery( types.delegateTarget ).off(
handleObj.namespace? handleObj.type + "." + handleObj.namespace : handleObj.type,
handleObj.selector,
handleObj.handler
);
return this;
}
if ( typeof types === "object" ) {
// ( types-object [, selector] )
for ( var type in types ) {
this.off( type, selector, types[ type ] );
}
return this;
}
if ( selector === false || typeof selector === "function" ) {
// ( types [, fn] )
fn = selector;
selector = undefined;
}
if ( fn === false ) {
fn = returnFalse;
}
return this.each(function() {
jQuery.event.remove( this, types, fn, selector );
});
},
bind: function( types, data, fn ) {
return this.on( types, null, data, fn );
},
unbind: function( types, fn ) {
return this.off( types, null, fn );
},
live: function( types, data, fn ) {
jQuery( this.context ).on( types, this.selector, data, fn );
return this;
},
die: function( types, fn ) {
jQuery( this.context ).off( types, this.selector || "**", fn );
return this;
},
delegate: function( selector, types, data, fn ) {
return this.on( types, selector, data, fn );
},
undelegate: function( selector, types, fn ) {
// ( namespace ) or ( selector, types [, fn] )
return arguments.length == 1? this.off( selector, "**" ) : this.off( types, selector, fn );
},
trigger: function( type, data ) {
return this.each(function() {
jQuery.event.trigger( type, data, this );
});
},
triggerHandler: function( type, data ) {
if ( this[0] ) {
return jQuery.event.trigger( type, data, this[0], true );
}
},
toggle: function( fn ) {
// Save reference to arguments for access in closure
var args = arguments,
guid = fn.guid || jQuery.guid++,
i = 0,
toggler = function( event ) {
// Figure out which function to execute
var lastToggle = ( jQuery._data( this, "lastToggle" + fn.guid ) || 0 ) % i;
jQuery._data( this, "lastToggle" + fn.guid, lastToggle + 1 );
// Make sure that clicks stop
event.preventDefault();
// and execute the function
return args[ lastToggle ].apply( this, arguments ) || false;
};
// link all the functions, so any of them can unbind this click handler
toggler.guid = guid;
while ( i < args.length ) {
args[ i++ ].guid = guid;
}
return this.click( toggler );
},
hover: function( fnOver, fnOut ) {
return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver );
}
});
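// Illustrative sketch (not part of the library): every binding style above
// funnels into .on(), so these delegate the same handler ("#menu" and the
// handler fn are hypothetical):
//   jQuery( "#menu" ).on( "click", "a", fn );
//   jQuery( "#menu" ).delegate( "a", "click", fn ); // note the flipped args
//   jQuery( "a", "#menu" ).live( "click", fn );     // deprecated path
// .off( "click", "a", fn ) and .undelegate( "a", "click", fn ) unbind them.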
jQuery.each( ("blur focus focusin focusout load resize scroll unload click dblclick " +
"mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " +
"change select submit keydown keypress keyup error contextmenu").split(" "), function( i, name ) {
// Handle event binding
jQuery.fn[ name ] = function( data, fn ) {
if ( fn == null ) {
fn = data;
data = null;
}
return arguments.length > 0 ?
this.on( name, null, data, fn ) :
this.trigger( name );
};
if ( jQuery.attrFn ) {
jQuery.attrFn[ name ] = true;
}
if ( rkeyEvent.test( name ) ) {
jQuery.event.fixHooks[ name ] = jQuery.event.keyHooks;
}
if ( rmouseEvent.test( name ) ) {
jQuery.event.fixHooks[ name ] = jQuery.event.mouseHooks;
}
});
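// Illustrative sketch (not part of the library): the loop above generates
// one shorthand per event name, so for "click" ("#btn" is hypothetical):
//   jQuery( "#btn" ).click( fn ); // same as .on( "click", null, null, fn )
//   jQuery( "#btn" ).click();     // no args: same as .trigger( "click" )
// Key events additionally get keyHooks applied in fix(); mouse events
// get mouseHooks.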
/*!
* Sizzle CSS Selector Engine
* Copyright 2011, The Dojo Foundation
* Released under the MIT, BSD, and GPL Licenses.
* More information: http://sizzlejs.com/
*/
(function(){
var chunker = /((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,
expando = "sizcache" + (Math.random() + '').replace('.', ''),
done = 0,
toString = Object.prototype.toString,
hasDuplicate = false,
baseHasDuplicate = true,
rBackslash = /\\/g,
rReturn = /\r\n/g,
rNonWord = /\W/;
// Here we check if the JavaScript engine is using some sort of
// optimization where it does not always call our comparison
// function. If that is the case, discard the hasDuplicate value.
// Thus far that includes Google Chrome.
[0, 0].sort(function() {
baseHasDuplicate = false;
return 0;
});
var Sizzle = function( selector, context, results, seed ) {
results = results || [];
context = context || document;
var origContext = context;
if ( context.nodeType !== 1 && context.nodeType !== 9 ) {
return [];
}
if ( !selector || typeof selector !== "string" ) {
return results;
}
var m, set, checkSet, extra, ret, cur, pop, i,
prune = true,
contextXML = Sizzle.isXML( context ),
parts = [],
soFar = selector;
// Reset the position of the chunker regexp (start from head)
do {
chunker.exec( "" );
m = chunker.exec( soFar );
if ( m ) {
soFar = m[3];
parts.push( m[1] );
if ( m[2] ) {
extra = m[3];
break;
}
}
} while ( m );
if ( parts.length > 1 && origPOS.exec( selector ) ) {
if ( parts.length === 2 && Expr.relative[ parts[0] ] ) {
set = posProcess( parts[0] + parts[1], context, seed );
} else {
set = Expr.relative[ parts[0] ] ?
[ context ] :
Sizzle( parts.shift(), context );
while ( parts.length ) {
selector = parts.shift();
if ( Expr.relative[ selector ] ) {
selector += parts.shift();
}
set = posProcess( selector, set, seed );
}
}
} else {
// Take a shortcut and set the context if the root selector is an ID
// (but not if it'll be faster if the inner selector is an ID)
if ( !seed && parts.length > 1 && context.nodeType === 9 && !contextXML &&
Expr.match.ID.test(parts[0]) && !Expr.match.ID.test(parts[parts.length - 1]) ) {
ret = Sizzle.find( parts.shift(), context, contextXML );
context = ret.expr ?
Sizzle.filter( ret.expr, ret.set )[0] :
ret.set[0];
}
if ( context ) {
ret = seed ?
{ expr: parts.pop(), set: makeArray(seed) } :
Sizzle.find( parts.pop(), parts.length === 1 && (parts[0] === "~" || parts[0] === "+") && context.parentNode ? context.parentNode : context, contextXML );
set = ret.expr ?
Sizzle.filter( ret.expr, ret.set ) :
ret.set;
if ( parts.length > 0 ) {
checkSet = makeArray( set );
} else {
prune = false;
}
while ( parts.length ) {
cur = parts.pop();
pop = cur;
if ( !Expr.relative[ cur ] ) {
cur = "";
} else {
pop = parts.pop();
}
if ( pop == null ) {
pop = context;
}
Expr.relative[ cur ]( checkSet, pop, contextXML );
}
} else {
checkSet = parts = [];
}
}
if ( !checkSet ) {
checkSet = set;
}
if ( !checkSet ) {
Sizzle.error( cur || selector );
}
if ( toString.call(checkSet) === "[object Array]" ) {
if ( !prune ) {
results.push.apply( results, checkSet );
} else if ( context && context.nodeType === 1 ) {
for ( i = 0; checkSet[i] != null; i++ ) {
if ( checkSet[i] && (checkSet[i] === true || checkSet[i].nodeType === 1 && Sizzle.contains(context, checkSet[i])) ) {
results.push( set[i] );
}
}
} else {
for ( i = 0; checkSet[i] != null; i++ ) {
if ( checkSet[i] && checkSet[i].nodeType === 1 ) {
results.push( set[i] );
}
}
}
} else {
makeArray( checkSet, results );
}
if ( extra ) {
Sizzle( extra, origContext, results, seed );
Sizzle.uniqueSort( results );
}
return results;
};
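// Illustrative sketch (not part of the library): the chunker loop above
// splits a selector at top-level combinators and commas, so for
//   Sizzle( "div.item > a, span", document )
// parts become [ "div.item", ">", "a" ], "span" is stashed in `extra`,
// the parts are resolved right-to-left, and the recursion on `extra`
// is merged and uniqueSort()ed into the final results.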
Sizzle.uniqueSort = function( results ) {
if ( sortOrder ) {
hasDuplicate = baseHasDuplicate;
results.sort( sortOrder );
if ( hasDuplicate ) {
for ( var i = 1; i < results.length; i++ ) {
if ( results[i] === results[ i - 1 ] ) {
results.splice( i--, 1 );
}
}
}
}
return results;
};
Sizzle.matches = function( expr, set ) {
return Sizzle( expr, null, null, set );
};
Sizzle.matchesSelector = function( node, expr ) {
return Sizzle( expr, null, null, [node] ).length > 0;
};
Sizzle.find = function( expr, context, isXML ) {
var set, i, len, match, type, left;
if ( !expr ) {
return [];
}
for ( i = 0, len = Expr.order.length; i < len; i++ ) {
type = Expr.order[i];
if ( (match = Expr.leftMatch[ type ].exec( expr )) ) {
left = match[1];
match.splice( 1, 1 );
if ( left.substr( left.length - 1 ) !== "\\" ) {
match[1] = (match[1] || "").replace( rBackslash, "" );
set = Expr.find[ type ]( match, context, isXML );
if ( set != null ) {
expr = expr.replace( Expr.match[ type ], "" );
break;
}
}
}
}
if ( !set ) {
set = typeof context.getElementsByTagName !== "undefined" ?
context.getElementsByTagName( "*" ) :
[];
}
return { set: set, expr: expr };
};
Sizzle.filter = function( expr, set, inplace, not ) {
var match, anyFound,
type, found, item, filter, left,
i, pass,
old = expr,
result = [],
curLoop = set,
isXMLFilter = set && set[0] && Sizzle.isXML( set[0] );
while ( expr && set.length ) {
for ( type in Expr.filter ) {
if ( (match = Expr.leftMatch[ type ].exec( expr )) != null && match[2] ) {
filter = Expr.filter[ type ];
left = match[1];
anyFound = false;
match.splice(1,1);
if ( left.substr( left.length - 1 ) === "\\" ) {
continue;
}
if ( curLoop === result ) {
result = [];
}
if ( Expr.preFilter[ type ] ) {
match = Expr.preFilter[ type ]( match, curLoop, inplace, result, not, isXMLFilter );
if ( !match ) {
anyFound = found = true;
} else if ( match === true ) {
continue;
}
}
if ( match ) {
for ( i = 0; (item = curLoop[i]) != null; i++ ) {
if ( item ) {
found = filter( item, match, i, curLoop );
pass = not ^ found;
if ( inplace && found != null ) {
if ( pass ) {
anyFound = true;
} else {
curLoop[i] = false;
}
} else if ( pass ) {
result.push( item );
anyFound = true;
}
}
}
}
if ( found !== undefined ) {
if ( !inplace ) {
curLoop = result;
}
expr = expr.replace( Expr.match[ type ], "" );
if ( !anyFound ) {
return [];
}
break;
}
}
}
// Improper expression
if ( expr === old ) {
if ( anyFound == null ) {
Sizzle.error( expr );
} else {
break;
}
}
old = expr;
}
return curLoop;
};
Sizzle.error = function( msg ) {
throw new Error( "Syntax error, unrecognized expression: " + msg );
};
/**
* Utility function for retrieving the text value of an array of DOM nodes
* @param {Array|Element} elem
*/
var getText = Sizzle.getText = function( elem ) {
var i, node,
nodeType = elem.nodeType,
ret = "";
if ( nodeType ) {
if ( nodeType === 1 || nodeType === 9 ) {
// Use textContent || innerText for elements
if ( typeof elem.textContent === 'string' ) {
return elem.textContent;
} else if ( typeof elem.innerText === 'string' ) {
// Replace IE's carriage returns
return elem.innerText.replace( rReturn, '' );
} else {
// Traverse its children
for ( elem = elem.firstChild; elem; elem = elem.nextSibling) {
ret += getText( elem );
}
}
} else if ( nodeType === 3 || nodeType === 4 ) {
return elem.nodeValue;
}
} else {
// If no nodeType, this is expected to be an array
for ( i = 0; (node = elem[i]); i++ ) {
// Do not traverse comment nodes
if ( node.nodeType !== 8 ) {
ret += getText( node );
}
}
}
return ret;
};
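// Illustrative sketch (not part of the library): getText backs jQuery's
// .text() getter. For markup like <div id="d">a<!--x--><span>b</span></div>
// ("d" is a hypothetical id):
//   Sizzle.getText( document.getElementById( "d" ) ); // -> "ab"
// Element and text nodes are traversed; comment nodes contribute nothing.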
var Expr = Sizzle.selectors = {
order: [ "ID", "NAME", "TAG" ],
match: {
ID: /#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,
CLASS: /\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,
NAME: /\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/,
ATTR: /\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(?:(['"])(.*?)\3|(#?(?:[\w\u00c0-\uFFFF\-]|\\.)*)|)|)\s*\]/,
TAG: /^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/,
CHILD: /:(only|nth|last|first)-child(?:\(\s*(even|odd|(?:[+\-]?\d+|(?:[+\-]?\d*)?n\s*(?:[+\-]\s*\d+)?))\s*\))?/,
POS: /:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/,
PSEUDO: /:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/
},
leftMatch: {},
attrMap: {
"class": "className",
"for": "htmlFor"
},
attrHandle: {
href: function( elem ) {
return elem.getAttribute( "href" );
},
type: function( elem ) {
return elem.getAttribute( "type" );
}
},
relative: {
"+": function(checkSet, part){
var isPartStr = typeof part === "string",
isTag = isPartStr && !rNonWord.test( part ),
isPartStrNotTag = isPartStr && !isTag;
if ( isTag ) {
part = part.toLowerCase();
}
for ( var i = 0, l = checkSet.length, elem; i < l; i++ ) {
if ( (elem = checkSet[i]) ) {
while ( (elem = elem.previousSibling) && elem.nodeType !== 1 ) {}
checkSet[i] = isPartStrNotTag || elem && elem.nodeName.toLowerCase() === part ?
elem || false :
elem === part;
}
}
if ( isPartStrNotTag ) {
Sizzle.filter( part, checkSet, true );
}
},
">": function( checkSet, part ) {
var elem,
isPartStr = typeof part === "string",
i = 0,
l = checkSet.length;
if ( isPartStr && !rNonWord.test( part ) ) {
part = part.toLowerCase();
for ( ; i < l; i++ ) {
elem = checkSet[i];
if ( elem ) {
var parent = elem.parentNode;
checkSet[i] = parent.nodeName.toLowerCase() === part ? parent : false;
}
}
} else {
for ( ; i < l; i++ ) {
elem = checkSet[i];
if ( elem ) {
checkSet[i] = isPartStr ?
elem.parentNode :
elem.parentNode === part;
}
}
if ( isPartStr ) {
Sizzle.filter( part, checkSet, true );
}
}
},
"": function(checkSet, part, isXML){
var nodeCheck,
doneName = done++,
checkFn = dirCheck;
if ( typeof part === "string" && !rNonWord.test( part ) ) {
part = part.toLowerCase();
nodeCheck = part;
checkFn = dirNodeCheck;
}
checkFn( "parentNode", part, doneName, checkSet, nodeCheck, isXML );
},
"~": function( checkSet, part, isXML ) {
var nodeCheck,
doneName = done++,
checkFn = dirCheck;
if ( typeof part === "string" && !rNonWord.test( part ) ) {
part = part.toLowerCase();
nodeCheck = part;
checkFn = dirNodeCheck;
}
checkFn( "previousSibling", part, doneName, checkSet, nodeCheck, isXML );
}
},
find: {
ID: function( match, context, isXML ) {
if ( typeof context.getElementById !== "undefined" && !isXML ) {
var m = context.getElementById(match[1]);
// Check parentNode to catch when Blackberry 4.6 returns
// nodes that are no longer in the document #6963
return m && m.parentNode ? [m] : [];
}
},
NAME: function( match, context ) {
if ( typeof context.getElementsByName !== "undefined" ) {
var ret = [],
results = context.getElementsByName( match[1] );
for ( var i = 0, l = results.length; i < l; i++ ) {
if ( results[i].getAttribute("name") === match[1] ) {
ret.push( results[i] );
}
}
return ret.length === 0 ? null : ret;
}
},
TAG: function( match, context ) {
if ( typeof context.getElementsByTagName !== "undefined" ) {
return context.getElementsByTagName( match[1] );
}
}
},
preFilter: {
CLASS: function( match, curLoop, inplace, result, not, isXML ) {
match = " " + match[1].replace( rBackslash, "" ) + " ";
if ( isXML ) {
return match;
}
for ( var i = 0, elem; (elem = curLoop[i]) != null; i++ ) {
if ( elem ) {
if ( not ^ (elem.className && (" " + elem.className + " ").replace(/[\t\n\r]/g, " ").indexOf(match) >= 0) ) {
if ( !inplace ) {
result.push( elem );
}
} else if ( inplace ) {
curLoop[i] = false;
}
}
}
return false;
},
ID: function( match ) {
return match[1].replace( rBackslash, "" );
},
TAG: function( match, curLoop ) {
return match[1].replace( rBackslash, "" ).toLowerCase();
},
CHILD: function( match ) {
if ( match[1] === "nth" ) {
if ( !match[2] ) {
Sizzle.error( match[0] );
}
match[2] = match[2].replace(/^\+|\s*/g, '');
// parse equations like 'even', 'odd', '5', '2n', '3n+2', '4n-1', '-n+6'
var test = /(-?)(\d*)(?:n([+\-]?\d*))?/.exec(
match[2] === "even" && "2n" || match[2] === "odd" && "2n+1" ||
!/\D/.test( match[2] ) && "0n+" + match[2] || match[2]);
// calculate the numbers (first)n+(last) including if they are negative
match[2] = (test[1] + (test[2] || 1)) - 0;
match[3] = test[3] - 0;
}
else if ( match[2] ) {
Sizzle.error( match[0] );
}
// TODO: Move to normal caching system
match[0] = done++;
return match;
},
ATTR: function( match, curLoop, inplace, result, not, isXML ) {
var name = match[1] = match[1].replace( rBackslash, "" );
if ( !isXML && Expr.attrMap[name] ) {
match[1] = Expr.attrMap[name];
}
// Handle if an un-quoted value was used
match[4] = ( match[4] || match[5] || "" ).replace( rBackslash, "" );
if ( match[2] === "~=" ) {
match[4] = " " + match[4] + " ";
}
return match;
},
PSEUDO: function( match, curLoop, inplace, result, not ) {
if ( match[1] === "not" ) {
// If we're dealing with a complex expression, or a simple one
if ( ( chunker.exec(match[3]) || "" ).length > 1 || /^\w/.test(match[3]) ) {
match[3] = Sizzle(match[3], null, null, curLoop);
} else {
var ret = Sizzle.filter(match[3], curLoop, inplace, true ^ not);
if ( !inplace ) {
result.push.apply( result, ret );
}
return false;
}
} else if ( Expr.match.POS.test( match[0] ) || Expr.match.CHILD.test( match[0] ) ) {
return true;
}
return match;
},
POS: function( match ) {
match.unshift( true );
return match;
}
},
filters: {
enabled: function( elem ) {
return elem.disabled === false && elem.type !== "hidden";
},
disabled: function( elem ) {
return elem.disabled === true;
},
checked: function( elem ) {
return elem.checked === true;
},
selected: function( elem ) {
// Accessing this property makes selected-by-default
// options in Safari work properly
if ( elem.parentNode ) {
elem.parentNode.selectedIndex;
}
return elem.selected === true;
},
parent: function( elem ) {
return !!elem.firstChild;
},
empty: function( elem ) {
return !elem.firstChild;
},
has: function( elem, i, match ) {
return !!Sizzle( match[3], elem ).length;
},
header: function( elem ) {
return (/h\d/i).test( elem.nodeName );
},
text: function( elem ) {
var attr = elem.getAttribute( "type" ), type = elem.type;
// IE6 and 7 will map elem.type to 'text' for new HTML5 types (search, etc)
// use getAttribute instead to test this case
return elem.nodeName.toLowerCase() === "input" && "text" === type && ( attr === type || attr === null );
},
radio: function( elem ) {
return elem.nodeName.toLowerCase() === "input" && "radio" === elem.type;
},
checkbox: function( elem ) {
return elem.nodeName.toLowerCase() === "input" && "checkbox" === elem.type;
},
file: function( elem ) {
return elem.nodeName.toLowerCase() === "input" && "file" === elem.type;
},
password: function( elem ) {
return elem.nodeName.toLowerCase() === "input" && "password" === elem.type;
},
submit: function( elem ) {
var name = elem.nodeName.toLowerCase();
return (name === "input" || name === "button") && "submit" === elem.type;
},
image: function( elem ) {
return elem.nodeName.toLowerCase() === "input" && "image" === elem.type;
},
reset: function( elem ) {
var name = elem.nodeName.toLowerCase();
return (name === "input" || name === "button") && "reset" === elem.type;
},
button: function( elem ) {
var name = elem.nodeName.toLowerCase();
return name === "input" && "button" === elem.type || name === "button";
},
input: function( elem ) {
return (/input|select|textarea|button/i).test( elem.nodeName );
},
focus: function( elem ) {
return elem === elem.ownerDocument.activeElement;
}
},
setFilters: {
first: function( elem, i ) {
return i === 0;
},
last: function( elem, i, match, array ) {
return i === array.length - 1;
},
even: function( elem, i ) {
return i % 2 === 0;
},
odd: function( elem, i ) {
return i % 2 === 1;
},
lt: function( elem, i, match ) {
return i < match[3] - 0;
},
gt: function( elem, i, match ) {
return i > match[3] - 0;
},
nth: function( elem, i, match ) {
return match[3] - 0 === i;
},
eq: function( elem, i, match ) {
return match[3] - 0 === i;
}
},
filter: {
PSEUDO: function( elem, match, i, array ) {
var name = match[1],
filter = Expr.filters[ name ];
if ( filter ) {
return filter( elem, i, match, array );
} else if ( name === "contains" ) {
return (elem.textContent || elem.innerText || getText([ elem ]) || "").indexOf(match[3]) >= 0;
} else if ( name === "not" ) {
var not = match[3];
for ( var j = 0, l = not.length; j < l; j++ ) {
if ( not[j] === elem ) {
return false;
}
}
return true;
} else {
Sizzle.error( name );
}
},
CHILD: function( elem, match ) {
var first, last,
doneName, parent, cache,
count, diff,
type = match[1],
node = elem;
switch ( type ) {
case "only":
case "first":
while ( (node = node.previousSibling) ) {
if ( node.nodeType === 1 ) {
return false;
}
}
if ( type === "first" ) {
return true;
}
node = elem;
case "last":
while ( (node = node.nextSibling) ) {
if ( node.nodeType === 1 ) {
return false;
}
}
return true;
case "nth":
first = match[2];
last = match[3];
if ( first === 1 && last === 0 ) {
return true;
}
doneName = match[0];
parent = elem.parentNode;
if ( parent && (parent[ expando ] !== doneName || !elem.nodeIndex) ) {
count = 0;
for ( node = parent.firstChild; node; node = node.nextSibling ) {
if ( node.nodeType === 1 ) {
node.nodeIndex = ++count;
}
}
parent[ expando ] = doneName;
}
diff = elem.nodeIndex - last;
if ( first === 0 ) {
return diff === 0;
} else {
return ( diff % first === 0 && diff / first >= 0 );
}
}
},
ID: function( elem, match ) {
return elem.nodeType === 1 && elem.getAttribute("id") === match;
},
TAG: function( elem, match ) {
return (match === "*" && elem.nodeType === 1) || !!elem.nodeName && elem.nodeName.toLowerCase() === match;
},
CLASS: function( elem, match ) {
return (" " + (elem.className || elem.getAttribute("class")) + " ")
.indexOf( match ) > -1;
},
ATTR: function( elem, match ) {
var name = match[1],
result = Sizzle.attr ?
Sizzle.attr( elem, name ) :
Expr.attrHandle[ name ] ?
Expr.attrHandle[ name ]( elem ) :
elem[ name ] != null ?
elem[ name ] :
elem.getAttribute( name ),
value = result + "",
type = match[2],
check = match[4];
return result == null ?
type === "!=" :
!type && Sizzle.attr ?
result != null :
type === "=" ?
value === check :
type === "*=" ?
value.indexOf(check) >= 0 :
type === "~=" ?
(" " + value + " ").indexOf(check) >= 0 :
!check ?
value && result !== false :
type === "!=" ?
value !== check :
type === "^=" ?
value.indexOf(check) === 0 :
type === "$=" ?
value.substr(value.length - check.length) === check :
type === "|=" ?
value === check || value.substr(0, check.length + 1) === check + "-" :
false;
},
POS: function( elem, match, i, array ) {
var name = match[2],
filter = Expr.setFilters[ name ];
if ( filter ) {
return filter( elem, i, match, array );
}
}
}
};
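// Illustrative sketch (not part of the library): Expr.filters is the table
// behind custom pseudo-selectors, exposed by jQuery as jQuery.expr[":"].
// A hypothetical :data pseudo could be registered like this:
//   jQuery.expr[":"].data = function( elem, i, match ) {
//       return !!jQuery.data( elem, match[3] ); // match[3] is the argument
//   };
//   jQuery( "div:data(loaded)" ); // divs with a truthy "loaded" data entry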
var origPOS = Expr.match.POS,
fescape = function(all, num){
return "\\" + (num - 0 + 1);
};
for ( var type in Expr.match ) {
Expr.match[ type ] = new RegExp( Expr.match[ type ].source + (/(?![^\[]*\])(?![^\(]*\))/.source) );
Expr.leftMatch[ type ] = new RegExp( /(^(?:.|\r|\n)*?)/.source + Expr.match[ type ].source.replace(/\\(\d+)/g, fescape) );
}
var makeArray = function( array, results ) {
array = Array.prototype.slice.call( array, 0 );
if ( results ) {
results.push.apply( results, array );
return results;
}
return array;
};
// Perform a simple check to determine if the browser is capable of
// converting a NodeList to an array using builtin methods.
// Also verifies that the returned array holds DOM nodes
// (which is not the case in the Blackberry browser)
try {
Array.prototype.slice.call( document.documentElement.childNodes, 0 )[0].nodeType;
// Provide a fallback method if it does not work
} catch( e ) {
makeArray = function( array, results ) {
var i = 0,
ret = results || [];
if ( toString.call(array) === "[object Array]" ) {
Array.prototype.push.apply( ret, array );
} else {
if ( typeof array.length === "number" ) {
for ( var l = array.length; i < l; i++ ) {
ret.push( array[i] );
}
} else {
for ( ; array[i]; i++ ) {
ret.push( array[i] );
}
}
}
return ret;
};
}
var sortOrder, siblingCheck;
if ( document.documentElement.compareDocumentPosition ) {
sortOrder = function( a, b ) {
if ( a === b ) {
hasDuplicate = true;
return 0;
}
if ( !a.compareDocumentPosition || !b.compareDocumentPosition ) {
return a.compareDocumentPosition ? -1 : 1;
}
return a.compareDocumentPosition(b) & 4 ? -1 : 1;
};
} else {
sortOrder = function( a, b ) {
// The nodes are identical, we can exit early
if ( a === b ) {
hasDuplicate = true;
return 0;
// Fallback to using sourceIndex (in IE) if it's available on both nodes
} else if ( a.sourceIndex && b.sourceIndex ) {
return a.sourceIndex - b.sourceIndex;
}
var al, bl,
ap = [],
bp = [],
aup = a.parentNode,
bup = b.parentNode,
cur = aup;
// If the nodes are siblings (or identical) we can do a quick check
if ( aup === bup ) {
return siblingCheck( a, b );
// If no parents were found then the nodes are disconnected
} else if ( !aup ) {
return -1;
} else if ( !bup ) {
return 1;
}
// Otherwise they're somewhere else in the tree so we need
// to build up a full list of the parentNodes for comparison
while ( cur ) {
ap.unshift( cur );
cur = cur.parentNode;
}
cur = bup;
while ( cur ) {
bp.unshift( cur );
cur = cur.parentNode;
}
al = ap.length;
bl = bp.length;
// Start walking down the tree looking for a discrepancy
for ( var i = 0; i < al && i < bl; i++ ) {
if ( ap[i] !== bp[i] ) {
return siblingCheck( ap[i], bp[i] );
}
}
// We ended someplace up the tree so do a sibling check
return i === al ?
siblingCheck( a, bp[i], -1 ) :
siblingCheck( ap[i], b, 1 );
};
siblingCheck = function( a, b, ret ) {
if ( a === b ) {
return ret;
}
var cur = a.nextSibling;
while ( cur ) {
if ( cur === b ) {
return -1;
}
cur = cur.nextSibling;
}
return 1;
};
}
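// Illustrative sketch (not part of the library): uniqueSort relies on
// whichever comparator was just chosen, e.g.
//   var nodes = Sizzle( "div, div.item", document );
//   Sizzle.uniqueSort( nodes ); // document order, duplicates spliced out
// With compareDocumentPosition, the & 4 bit means "b follows a", so a
// sorts first; otherwise the parent-chain walk above decides the order.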
// Check to see if the browser returns elements by name when
// querying by getElementById (and provide a workaround)
(function(){
// We're going to inject a fake input element with a specified name
var form = document.createElement("div"),
id = "script" + (new Date()).getTime(),
root = document.documentElement;
form.innerHTML = "<a name='" + id + "'/>";
// Inject it into the root element, check its status, and remove it quickly
root.insertBefore( form, root.firstChild );
// The workaround has to do additional checks after a getElementById,
// which slows things down for other browsers (hence the branching)
if ( document.getElementById( id ) ) {
Expr.find.ID = function( match, context, isXML ) {
if ( typeof context.getElementById !== "undefined" && !isXML ) {
var m = context.getElementById(match[1]);
return m ?
m.id === match[1] || typeof m.getAttributeNode !== "undefined" && m.getAttributeNode("id").nodeValue === match[1] ?
[m] :
undefined :
[];
}
};
Expr.filter.ID = function( elem, match ) {
var node = typeof elem.getAttributeNode !== "undefined" && elem.getAttributeNode("id");
return elem.nodeType === 1 && node && node.nodeValue === match;
};
}
root.removeChild( form );
// release memory in IE
root = form = null;
})();
(function(){
// Check to see if the browser returns only elements
// when doing getElementsByTagName("*")
// Create a fake element
var div = document.createElement("div");
div.appendChild( document.createComment("") );
// Make sure no comments are found
if ( div.getElementsByTagName("*").length > 0 ) {
Expr.find.TAG = function( match, context ) {
var results = context.getElementsByTagName( match[1] );
// Filter out possible comments
if ( match[1] === "*" ) {
var tmp = [];
for ( var i = 0; results[i]; i++ ) {
if ( results[i].nodeType === 1 ) {
tmp.push( results[i] );
}
}
results = tmp;
}
return results;
};
}
// Check to see if an attribute returns normalized href attributes
div.innerHTML = "<a href='#'></a>";
if ( div.firstChild && typeof div.firstChild.getAttribute !== "undefined" &&
div.firstChild.getAttribute("href") !== "#" ) {
Expr.attrHandle.href = function( elem ) {
return elem.getAttribute( "href", 2 );
};
}
// release memory in IE
div = null;
})();
if ( document.querySelectorAll ) {
(function(){
var oldSizzle = Sizzle,
div = document.createElement("div"),
id = "__sizzle__";
div.innerHTML = "<p class='TEST'></p>";
// Safari can't handle uppercase or unicode characters when
// in quirks mode.
if ( div.querySelectorAll && div.querySelectorAll(".TEST").length === 0 ) {
return;
}
Sizzle = function( query, context, extra, seed ) {
context = context || document;
// Only use querySelectorAll on non-XML documents
// (ID selectors don't work in non-HTML documents)
if ( !seed && !Sizzle.isXML(context) ) {
// See if we find a selector to speed up
var match = /^(\w+$)|^\.([\w\-]+$)|^#([\w\-]+$)/.exec( query );
if ( match && (context.nodeType === 1 || context.nodeType === 9) ) {
// Speed-up: Sizzle("TAG")
if ( match[1] ) {
return makeArray( context.getElementsByTagName( query ), extra );
// Speed-up: Sizzle(".CLASS")
} else if ( match[2] && Expr.find.CLASS && context.getElementsByClassName ) {
return makeArray( context.getElementsByClassName( match[2] ), extra );
}
}
if ( context.nodeType === 9 ) {
// Speed-up: Sizzle("body")
// The body element only exists once, optimize finding it
if ( query === "body" && context.body ) {
return makeArray( [ context.body ], extra );
// Speed-up: Sizzle("#ID")
} else if ( match && match[3] ) {
var elem = context.getElementById( match[3] );
// Check parentNode to catch when Blackberry 4.6 returns
// nodes that are no longer in the document #6963
if ( elem && elem.parentNode ) {
// Handle the case where IE and Opera return items
// by name instead of ID
if ( elem.id === match[3] ) {
return makeArray( [ elem ], extra );
}
} else {
return makeArray( [], extra );
}
}
try {
return makeArray( context.querySelectorAll(query), extra );
} catch(qsaError) {}
// qSA works strangely on Element-rooted queries
// We can work around this by specifying an extra ID on the root
// and working up from there (Thanks to Andrew Dupont for the technique)
// IE 8 doesn't work on object elements
} else if ( context.nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) {
var oldContext = context,
old = context.getAttribute( "id" ),
nid = old || id,
hasParent = context.parentNode,
relativeHierarchySelector = /^\s*[+~]/.test( query );
if ( !old ) {
context.setAttribute( "id", nid );
} else {
nid = nid.replace( /'/g, "\\$&" );
}
if ( relativeHierarchySelector && hasParent ) {
context = context.parentNode;
}
try {
if ( !relativeHierarchySelector || hasParent ) {
return makeArray( context.querySelectorAll( "[id='" + nid + "'] " + query ), extra );
}
} catch(pseudoError) {
} finally {
if ( !old ) {
oldContext.removeAttribute( "id" );
}
}
}
}
return oldSizzle(query, context, extra, seed);
};
for ( var prop in oldSizzle ) {
Sizzle[ prop ] = oldSizzle[ prop ];
}
// release memory in IE
div = null;
})();
}
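// A sketch of the element-rooted qSA problem worked around above:
// querySelectorAll matches a selector against the whole document, so
//   div.querySelectorAll("div span")
// can return a span whose matching "div" ancestor lies outside the context
// element. Prefixing the query with a unique id on the context, roughly
//   context.querySelectorAll( "[id='__sizzle__'] " + query )
// pins the match to the intended subtree (the Andrew Dupont technique).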
(function(){
var html = document.documentElement,
matches = html.matchesSelector || html.mozMatchesSelector || html.webkitMatchesSelector || html.msMatchesSelector;
if ( matches ) {
// Check to see if it's possible to do matchesSelector
// on a disconnected node (IE 9 fails this)
var disconnectedMatch = !matches.call( document.createElement( "div" ), "div" ),
pseudoWorks = false;
try {
// This should fail with an exception
// Gecko does not error, returns false instead
matches.call( document.documentElement, "[test!='']:sizzle" );
} catch( pseudoError ) {
pseudoWorks = true;
}
Sizzle.matchesSelector = function( node, expr ) {
// Make sure that attribute selectors are quoted
expr = expr.replace(/\=\s*([^'"\]]*)\s*\]/g, "='$1']");
if ( !Sizzle.isXML( node ) ) {
try {
if ( pseudoWorks || !Expr.match.PSEUDO.test( expr ) && !/!=/.test( expr ) ) {
var ret = matches.call( node, expr );
// IE 9's matchesSelector returns false on disconnected nodes
if ( ret || !disconnectedMatch ||
// As well, disconnected nodes are said to be in a document
// fragment in IE 9, so check for that
node.document && node.document.nodeType !== 11 ) {
return ret;
}
}
} catch(e) {}
}
return Sizzle(expr, null, null, [node]).length > 0;
};
}
})();
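// Usage sketch: the wrapper above prefers the native call and falls back to a
// full Sizzle run whenever that would be unsafe, so both of these behave alike:
//   Sizzle.matchesSelector( document.body, "body" )   // native fast path
//   Sizzle.matchesSelector( elem, ":has(span)" )      // Sizzle fallback path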
(function(){
var div = document.createElement("div");
div.innerHTML = "<div class='test e'></div><div class='test'></div>";
// Opera can't find a second classname (in 9.6)
// Also, make sure that getElementsByClassName actually exists
if ( !div.getElementsByClassName || div.getElementsByClassName("e").length === 0 ) {
return;
}
// Safari caches class attributes, doesn't catch changes (in 3.2)
div.lastChild.className = "e";
if ( div.getElementsByClassName("e").length === 1 ) {
return;
}
Expr.order.splice(1, 0, "CLASS");
Expr.find.CLASS = function( match, context, isXML ) {
if ( typeof context.getElementsByClassName !== "undefined" && !isXML ) {
return context.getElementsByClassName(match[1]);
}
};
// release memory in IE
div = null;
})();
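// Sketch of the Safari 3.2 quirk tested just above: class lookups were cached
// against the class attribute, so after div.lastChild.className = "e" a buggy
// engine still returns the stale one-element result; in that case the early
// return leaves CLASS out of Expr.order entirely.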
function dirNodeCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) {
for ( var i = 0, l = checkSet.length; i < l; i++ ) {
var elem = checkSet[i];
if ( elem ) {
var match = false;
elem = elem[dir];
while ( elem ) {
if ( elem[ expando ] === doneName ) {
match = checkSet[elem.sizset];
break;
}
if ( elem.nodeType === 1 && !isXML ){
elem[ expando ] = doneName;
elem.sizset = i;
}
if ( elem.nodeName.toLowerCase() === cur ) {
match = elem;
break;
}
elem = elem[dir];
}
checkSet[i] = match;
}
}
}
function dirCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) {
for ( var i = 0, l = checkSet.length; i < l; i++ ) {
var elem = checkSet[i];
if ( elem ) {
var match = false;
elem = elem[dir];
while ( elem ) {
if ( elem[ expando ] === doneName ) {
match = checkSet[elem.sizset];
break;
}
if ( elem.nodeType === 1 ) {
if ( !isXML ) {
elem[ expando ] = doneName;
elem.sizset = i;
}
if ( typeof cur !== "string" ) {
if ( elem === cur ) {
match = true;
break;
}
} else if ( Sizzle.filter( cur, [elem] ).length > 0 ) {
match = elem;
break;
}
}
elem = elem[dir];
}
checkSet[i] = match;
}
}
}
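// Both walkers above memoize the traversal: each visited element is stamped
// with the pass id (elem[ expando ] = doneName) and its starting index
// (elem.sizset), so when a later element's walk reaches an already-stamped
// node it can reuse checkSet[ elem.sizset ] instead of repeating the climb.
// This keeps ancestor/sibling chains for selectors like "div p" close to
// linear on deep trees rather than quadratic.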
if ( document.documentElement.contains ) {
Sizzle.contains = function( a, b ) {
return a !== b && (a.contains ? a.contains(b) : true);
};
} else if ( document.documentElement.compareDocumentPosition ) {
Sizzle.contains = function( a, b ) {
return !!(a.compareDocumentPosition(b) & 16);
};
} else {
Sizzle.contains = function() {
return false;
};
}
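// Sketch of the bitmask branch: compareDocumentPosition returns a bit field
// in which DOCUMENT_POSITION_CONTAINED_BY is 16, so
//   a.compareDocumentPosition( b ) & 16   // truthy when b is inside a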
Sizzle.isXML = function( elem ) {
// documentElement is verified for cases where it doesn't yet exist
// (such as loading iframes in IE - #4833)
var documentElement = (elem ? elem.ownerDocument || elem : 0).documentElement;
return documentElement ? documentElement.nodeName !== "HTML" : false;
};
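// Sketch: an HTML document's root element is named "HTML", so for example
//   Sizzle.isXML( document.body )            // false in an ordinary page
//   Sizzle.isXML( xmlDoc.documentElement )   // true for a parsed XML document
// (xmlDoc here stands for any document whose root element is not "HTML")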
var posProcess = function( selector, context, seed ) {
var match,
tmpSet = [],
later = "",
root = context.nodeType ? [context] : context;
// Position selectors must be done after the filter
// And so must :not(positional) so we move all PSEUDOs to the end
while ( (match = Expr.match.PSEUDO.exec( selector )) ) {
later += match[0];
selector = selector.replace( Expr.match.PSEUDO, "" );
}
selector = Expr.relative[selector] ? selector + "*" : selector;
for ( var i = 0, l = root.length; i < l; i++ ) {
Sizzle( selector, root[i], tmpSet, seed );
}
return Sizzle.filter( later, tmpSet );
};
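// Sketch of why the pseudo split matters: for a query such as "div p:first",
// the loop above first gathers the plain "div p" matches from every root into
// tmpSet, and only then applies the accumulated ":first" to the combined set,
// since a positional pseudo must index the merged results, not each root's.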
// EXPOSE
// Override sizzle attribute retrieval
Sizzle.attr = jQuery.attr;
Sizzle.selectors.attrMap = {};
jQuery.find = Sizzle;
jQuery.expr = Sizzle.selectors;
jQuery.expr[":"] = jQuery.expr.filters;
jQuery.unique = Sizzle.uniqueSort;
jQuery.text = Sizzle.getText;
jQuery.isXMLDoc = Sizzle.isXML;
jQuery.contains = Sizzle.contains;
})();
var runtil = /Until$/,
rparentsprev = /^(?:parents|prevUntil|prevAll)/,
// Note: This RegExp should be improved, or likely pulled from Sizzle
rmultiselector = /,/,
isSimple = /^.[^:#\[\.,]*$/,
slice = Array.prototype.slice,
POS = jQuery.expr.match.POS,
// methods guaranteed to produce a unique set when starting from a unique set
guaranteedUnique = {
children: true,
contents: true,
next: true,
prev: true
};
jQuery.fn.extend({
find: function( selector ) {
var self = this,
i, l;
if ( typeof selector !== "string" ) {
return jQuery( selector ).filter(function() {
for ( i = 0, l = self.length; i < l; i++ ) {
if ( jQuery.contains( self[ i ], this ) ) {
return true;
}
}
});
}
var ret = this.pushStack( "", "find", selector ),
length, n, r;
for ( i = 0, l = this.length; i < l; i++ ) {
length = ret.length;
jQuery.find( selector, this[i], ret );
if ( i > 0 ) {
// Make sure that the results are unique
for ( n = length; n < ret.length; n++ ) {
for ( r = 0; r < length; r++ ) {
if ( ret[r] === ret[n] ) {
ret.splice(n--, 1);
break;
}
}
}
}
}
return ret;
},
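// Usage sketch: because of the dedup loop above, a multi-element set never
// yields the same descendant twice, e.g. with nested containers
//   jQuery("div").find("span")   // each span appears once even if several
//                                // selected divs contain it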
has: function( target ) {
var targets = jQuery( target );
return this.filter(function() {
for ( var i = 0, l = targets.length; i < l; i++ ) {
if ( jQuery.contains( this, targets[i] ) ) {
return true;
}
}
});
},
not: function( selector ) {
return this.pushStack( winnow(this, selector, false), "not", selector);
},
filter: function( selector ) {
return this.pushStack( winnow(this, selector, true), "filter", selector );
},
is: function( selector ) {
return !!selector && (
typeof selector === "string" ?
// If this is a positional selector, check membership in the returned set
// so $("p:first").is("p:last") won't return true for a doc with two "p".
POS.test( selector ) ?
jQuery( selector, this.context ).index( this[0] ) >= 0 :
jQuery.filter( selector, this ).length > 0 :
this.filter( selector ).length > 0 );
},
closest: function( selectors, context ) {
var ret = [], i, l, cur = this[0];
// Array (deprecated as of jQuery 1.7)
if ( jQuery.isArray( selectors ) ) {
var level = 1;
while ( cur && cur.ownerDocument && cur !== context ) {
for ( i = 0; i < selectors.length; i++ ) {
if ( jQuery( cur ).is( selectors[ i ] ) ) {
ret.push({ selector: selectors[ i ], elem: cur, level: level });
}
}
cur = cur.parentNode;
level++;
}
return ret;
}
// String
var pos = POS.test( selectors ) || typeof selectors !== "string" ?
jQuery( selectors, context || this.context ) :
0;
for ( i = 0, l = this.length; i < l; i++ ) {
cur = this[i];
while ( cur ) {
if ( pos ? pos.index(cur) > -1 : jQuery.find.matchesSelector(cur, selectors) ) {
ret.push( cur );
break;
} else {
cur = cur.parentNode;
if ( !cur || !cur.ownerDocument || cur === context || cur.nodeType === 11 ) {
break;
}
}
}
}
ret = ret.length > 1 ? jQuery.unique( ret ) : ret;
return this.pushStack( ret, "closest", selectors );
},
// Determine the position of an element within
// the matched set of elements
index: function( elem ) {
// No argument, return index in parent
if ( !elem ) {
return ( this[0] && this[0].parentNode ) ? this.prevAll().length : -1;
}
// index in selector
if ( typeof elem === "string" ) {
return jQuery.inArray( this[0], jQuery( elem ) );
}
// Locate the position of the desired element
return jQuery.inArray(
// If it receives a jQuery object, the first element is used
elem.jquery ? elem[0] : elem, this );
},
add: function( selector, context ) {
var set = typeof selector === "string" ?
jQuery( selector, context ) :
jQuery.makeArray( selector && selector.nodeType ? [ selector ] : selector ),
all = jQuery.merge( this.get(), set );
return this.pushStack( isDisconnected( set[0] ) || isDisconnected( all[0] ) ?
all :
jQuery.unique( all ) );
},
andSelf: function() {
return this.add( this.prevObject );
}
});
// A painfully simple check to see if an element is disconnected
// from a document (should be improved, where feasible).
function isDisconnected( node ) {
return !node || !node.parentNode || node.parentNode.nodeType === 11;
}
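// Sketch: nodeType 11 is a DocumentFragment, so both detached nodes and
// fragment children count as disconnected, e.g.
//   isDisconnected( document.createElement("div") )   // true: no parentNode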
jQuery.each({
parent: function( elem ) {
var parent = elem.parentNode;
return parent && parent.nodeType !== 11 ? parent : null;
},
parents: function( elem ) {
return jQuery.dir( elem, "parentNode" );
},
parentsUntil: function( elem, i, until ) {
return jQuery.dir( elem, "parentNode", until );
},
next: function( elem ) {
return jQuery.nth( elem, 2, "nextSibling" );
},
prev: function( elem ) {
return jQuery.nth( elem, 2, "previousSibling" );
},
nextAll: function( elem ) {
return jQuery.dir( elem, "nextSibling" );
},
prevAll: function( elem ) {
return jQuery.dir( elem, "previousSibling" );
},
nextUntil: function( elem, i, until ) {
return jQuery.dir( elem, "nextSibling", until );
},
prevUntil: function( elem, i, until ) {
return jQuery.dir( elem, "previousSibling", until );
},
siblings: function( elem ) {
return jQuery.sibling( elem.parentNode.firstChild, elem );
},
children: function( elem ) {
return jQuery.sibling( elem.firstChild );
},
contents: function( elem ) {
return jQuery.nodeName( elem, "iframe" ) ?
elem.contentDocument || elem.contentWindow.document :
jQuery.makeArray( elem.childNodes );
}
}, function( name, fn ) {
jQuery.fn[ name ] = function( until, selector ) {
var ret = jQuery.map( this, fn, until );
if ( !runtil.test( name ) ) {
selector = until;
}
if ( selector && typeof selector === "string" ) {
ret = jQuery.filter( selector, ret );
}
ret = this.length > 1 && !guaranteedUnique[ name ] ? jQuery.unique( ret ) : ret;
if ( (this.length > 1 || rmultiselector.test( selector )) && rparentsprev.test( name ) ) {
ret = ret.reverse();
}
return this.pushStack( ret, name, slice.call( arguments ).join(",") );
};
});
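// Usage sketch of the generated traversal methods: each entry above becomes a
// jQuery.fn method that maps its callback over the set, applies the optional
// selector filter, and re-reverses parents/prev* results for multi-element
// sets, e.g.
//   jQuery("li.active").nextAll("li")      // following <li> siblings
//   jQuery("span").parentsUntil("body")    // ancestors, stopping at <body>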
jQuery.extend({
filter: function( expr, elems, not ) {
if ( not ) {
expr = ":not(" + expr + ")";
}
return elems.length === 1 ?
jQuery.find.matchesSelector(elems[0], expr) ? [ elems[0] ] : [] :
jQuery.find.matches(expr, elems);
},
dir: function( elem, dir, until ) {
var matched = [],
cur = elem[ dir ];
while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) {
if ( cur.nodeType === 1 ) {
matched.push( cur );
}
cur = cur[dir];
}
return matched;
},
nth: function( cur, result, dir, elem ) {
result = result || 1;
var num = 0;
for ( ; cur; cur = cur[dir] ) {
if ( cur.nodeType === 1 && ++num === result ) {
break;
}
}
return cur;
},
sibling: function( n, elem ) {
var r = [];
for ( ; n; n = n.nextSibling ) {
if ( n.nodeType === 1 && n !== elem ) {
r.push( n );
}
}
return r;
}
});
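// Sketch: jQuery.dir is the generic walker behind parents(), nextAll() and
// friends, collecting element nodes along a single pointer until it hits a
// document or the optional "until" selector, e.g.
//   jQuery.dir( elem, "parentNode" )             // every ancestor element
//   jQuery.dir( elem, "nextSibling", ".stop" )   // following siblings up to .stop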
// Implement the identical functionality for filter and not
function winnow( elements, qualifier, keep ) {
// Can't pass null or undefined to indexOf in Firefox 4
// Set to 0 to skip string check
qualifier = qualifier || 0;
if ( jQuery.isFunction( qualifier ) ) {
return jQuery.grep(elements, function( elem, i ) {
var retVal = !!qualifier.call( elem, i, elem );
return retVal === keep;
});
} else if ( qualifier.nodeType ) {
return jQuery.grep(elements, function( elem, i ) {
return ( elem === qualifier ) === keep;
});
} else if ( typeof qualifier === "string" ) {
var filtered = jQuery.grep(elements, function( elem ) {
return elem.nodeType === 1;
});
if ( isSimple.test( qualifier ) ) {
return jQuery.filter(qualifier, filtered, !keep);
} else {
qualifier = jQuery.filter( qualifier, filtered );
}
}
return jQuery.grep(elements, function( elem, i ) {
return ( jQuery.inArray( elem, qualifier ) >= 0 ) === keep;
});
}
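// Sketch of the keep flag: filter() calls winnow( elems, qualifier, true ) and
// not() calls winnow( elems, qualifier, false ), so one routine answers both
// "what matches" and "what does not" for function, node and string qualifiers.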
function createSafeFragment( document ) {
var list = nodeNames.split( "|" ),
safeFrag = document.createDocumentFragment();
if ( safeFrag.createElement ) {
while ( list.length ) {
safeFrag.createElement(
list.pop()
);
}
}
return safeFrag;
}
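// Sketch of why the pre-registration above helps: IE 6-8 only parses HTML5
// elements it has been told about via createElement, so registering each
// nodeNames entry on the fragment lets markup such as "<article>...</article>"
// be injected there without IE flattening the unknown element and hoisting
// its children out of it.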
var nodeNames = "abbr|article|aside|audio|canvas|datalist|details|figcaption|figure|footer|" +
"header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",
rinlinejQuery = / jQuery\d+="(?:\d+|null)"/g,
rleadingWhitespace = /^\s+/,
rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,
rtagName = /<([\w:]+)/,
rtbody = /<tbody/i,
rhtml = /<|&#?\w+;/,
rnoInnerhtml = /<(?:script|style)/i,
rnocache = /<(?:script|object|embed|option|style)/i,
rnoshimcache = new RegExp("<(?:" + nodeNames + ")", "i"),
// checked="checked" or checked
rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
rscriptType = /\/(java|ecma)script/i,
rcleanScript = /^\s*<!(?:\[CDATA\[|\-\-)/,
wrapMap = {
option: [ 1, "<select multiple='multiple'>", "</select>" ],
legend: [ 1, "<fieldset>", "</fieldset>" ],
thead: [ 1, "<table>", "</table>" ],
tr: [ 2, "<table><tbody>", "</tbody></table>" ],
td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
col: [ 2, "<table><tbody></tbody><colgroup>", "</colgroup></table>" ],
area: [ 1, "<map>", "</map>" ],
_default: [ 0, "", "" ]
},
safeFragment = createSafeFragment( document );
wrapMap.optgroup = wrapMap.option;
wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
wrapMap.th = wrapMap.td;
// IE can't serialize <link> and <script> tags normally