pax_global_header 0000666 0000000 0000000 00000000064 15051100704 0014503 g ustar 00root root 0000000 0000000 52 comment=4679c9fbd8916630966118c6fcca865ce12d0238
python-lap-0.5.12/ 0000775 0000000 0000000 00000000000 15051100704 0013663 5 ustar 00root root 0000000 0000000 python-lap-0.5.12/.github/ 0000775 0000000 0000000 00000000000 15051100704 0015223 5 ustar 00root root 0000000 0000000 python-lap-0.5.12/.github/workflows/ 0000775 0000000 0000000 00000000000 15051100704 0017260 5 ustar 00root root 0000000 0000000 python-lap-0.5.12/.github/workflows/benchmark.yaml 0000664 0000000 0000000 00000001512 15051100704 0022075 0 ustar 00root root 0000000 0000000 name: Benchmark
# Manually-triggered benchmark across the full OS x Python support matrix.
on: [workflow_dispatch]
jobs:
  build_and_test:
    name: "${{ matrix.os }} + python ${{ matrix.python-version }}"
    runs-on: "${{ matrix.os }}"
    strategy:
      # Let the remaining matrix jobs finish even if one combination fails.
      fail-fast: false
      matrix:
        os: ["windows-latest", "ubuntu-latest", "macos-13"]
        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      # Builds lap from the repository HEAD; requires a C++ compiler on the runner.
      - name: Install lap from source
        run: pip install git+https://github.com/gatagat/lap.git
      # scipy is installed for the benchmark script — presumably used for
      # comparison against scipy.optimize; confirm in benchmark/benchmark_simple.py.
      - name: Install scipy
        run: pip install scipy
      - name: 📊 Run benchmark_simple.py
        run: |
          cd benchmark
          python benchmark_simple.py
python-lap-0.5.12/.github/workflows/prepublish.yaml 0000664 0000000 0000000 00000010332 15051100704 0022320 0 ustar 00root root 0000000 0000000 name: PyPI Build
on: [workflow_dispatch]
jobs:
build_windows_all:
name: Build windows wheels
runs-on: windows-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.x
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.21.3
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
env:
CIBW_BUILD_VERBOSITY: 1
CIBW_BUILD: "cp37-* cp38-* cp39-* cp310-* cp311-* cp312-* cp313-*"
CIBW_SKIP: "*-win32 pp*"
CIBW_ARCHS_WINDOWS: "AMD64 ARM64"
- name: Archive wheels
uses: actions/upload-artifact@v4
with:
name: windows-all-wheels
path: ./wheelhouse/*.whl
build_macos_intel:
name: Build macos-intel wheels
runs-on: macos-13
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.x
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.21.3
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
env:
CIBW_BUILD_VERBOSITY: 1
CIBW_BUILD: "cp37-* cp38-* cp39-* cp310-* cp311-* cp312-* cp313-*"
CIBW_SKIP: "pp*"
CIBW_ARCHS_MACOS: "x86_64"
- name: Archive wheels
uses: actions/upload-artifact@v4
with:
name: macos-intel-wheels
path: ./wheelhouse/*.whl
build_macos_arm:
name: Build macos-arm wheels
runs-on: macos-14
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.x
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.21.3
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
env:
CIBW_BUILD_VERBOSITY: 1
CIBW_BUILD: "cp37-* cp38-* cp39-* cp310-* cp311-* cp312-* cp313-*"
CIBW_SKIP: "pp*"
CIBW_ARCHS_MACOS: "arm64"
- name: Archive wheels
uses: actions/upload-artifact@v4
with:
name: macos-arm-wheels
path: ./wheelhouse/*.whl
build_linux_intel:
name: Build linux-intel wheels
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.x
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.21.3
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
env:
CIBW_BUILD_VERBOSITY: 1
CIBW_BUILD: "cp37-* cp38-* cp39-* cp310-* cp311-* cp312-* cp313-*"
CIBW_SKIP: "*-manylinux_i686 pp*"
CIBW_ARCHS_LINUX: "x86_64"
- name: Archive wheels
uses: actions/upload-artifact@v4
with:
name: linux-intel-wheels
path: ./wheelhouse/*.whl
build_linux_arm:
name: Build linux-arm wheels
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
platforms: arm64
- name: Build wheels
uses: pypa/cibuildwheel@v2.21.3
env:
CIBW_BUILD_VERBOSITY: 1
CIBW_BUILD: "cp37-* cp38-* cp39-* cp310-* cp311-* cp312-* cp313-*"
CIBW_ARCHS_LINUX: "aarch64"
- name: Archive wheels
uses: actions/upload-artifact@v4
with:
name: linux-arm-wheels
path: ./wheelhouse/*.whl
build_source:
name: Build source distribution
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.x
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install requirements
run: |
pip install "setuptools>=67.2.0"
pip install wheel build
- name: Build .tar.gz
run: python -m build --sdist
- name: Archive .tar.gz
uses: actions/upload-artifact@v4
with:
name: source-dist
path: dist/*.tar.gz
python-lap-0.5.12/.github/workflows/publish.yaml 0000664 0000000 0000000 00000011410 15051100704 0021607 0 ustar 00root root 0000000 0000000 name: Publish to PyPI
on:
push:
tags:
- "v*"
jobs:
build_windows_all:
name: Build windows wheels
runs-on: windows-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.x
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.21.3
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
env:
CIBW_BUILD_VERBOSITY: 1
CIBW_BUILD: "cp37-* cp38-* cp39-* cp310-* cp311-* cp312-* cp313-*"
CIBW_SKIP: "*-win32 pp*"
CIBW_ARCHS_WINDOWS: "AMD64 ARM64"
- name: Archive wheels
uses: actions/upload-artifact@v4
with:
name: pypi-windows-all-wheels
path: ./wheelhouse/*.whl
build_macos_intel:
name: Build macos-intel wheels
runs-on: macos-13
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.x
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.21.3
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
env:
CIBW_BUILD_VERBOSITY: 1
CIBW_BUILD: "cp37-* cp38-* cp39-* cp310-* cp311-* cp312-* cp313-*"
CIBW_SKIP: "pp*"
CIBW_ARCHS_MACOS: "x86_64"
- name: Archive wheels
uses: actions/upload-artifact@v4
with:
name: pypi-macos-intel-wheels
path: ./wheelhouse/*.whl
build_macos_arm:
name: Build macos-arm wheels
runs-on: macos-14
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.x
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.21.3
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
env:
CIBW_BUILD_VERBOSITY: 1
CIBW_BUILD: "cp37-* cp38-* cp39-* cp310-* cp311-* cp312-* cp313-*"
CIBW_SKIP: "pp*"
CIBW_ARCHS_MACOS: "arm64"
- name: Archive wheels
uses: actions/upload-artifact@v4
with:
name: pypi-macos-arm-wheels
path: ./wheelhouse/*.whl
build_linux_intel:
name: Build linux-intel wheels
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.x
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.21.3
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
env:
CIBW_BUILD_VERBOSITY: 1
CIBW_BUILD: "cp37-* cp38-* cp39-* cp310-* cp311-* cp312-* cp313-*"
CIBW_SKIP: "*-manylinux_i686 pp*"
CIBW_ARCHS_LINUX: "x86_64"
- name: Archive wheels
uses: actions/upload-artifact@v4
with:
name: pypi-linux-intel-wheels
path: ./wheelhouse/*.whl
build_linux_arm:
name: Build linux-arm wheels
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
platforms: arm64
- name: Build wheels
uses: pypa/cibuildwheel@v2.21.3
env:
CIBW_BUILD_VERBOSITY: 1
CIBW_BUILD: "cp37-* cp38-* cp39-* cp310-* cp311-* cp312-* cp313-*"
CIBW_ARCHS_LINUX: "aarch64"
- name: Archive wheels
uses: actions/upload-artifact@v4
with:
name: pypi-linux-arm-wheels
path: ./wheelhouse/*.whl
build_source:
name: Build source distribution
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.x
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install requirements
run: |
pip install "setuptools>=67.2.0"
pip install wheel build
- name: Build .tar.gz
run: python -m build --sdist
- name: Archive .tar.gz
uses: actions/upload-artifact@v4
with:
name: pypi-source-dist
path: dist/*.tar.gz
publish_pypi:
  name: Publish to PyPI
  # Block publication until every wheel build and the sdist have succeeded.
  needs: [build_windows_all, build_macos_intel, build_macos_arm, build_linux_intel, build_linux_arm, build_source]
  runs-on: ubuntu-latest
  environment: pypi
  permissions:
    # id-token: write enables OIDC — the pattern used by PyPI "trusted
    # publishing" with pypa/gh-action-pypi-publish (no stored API token).
    id-token: write
  steps:
    # Download every pypi-* artifact and merge them into a single dist/
    # directory, which is the default upload path of the publish action.
    - name: Collect and extract artifact
      uses: actions/download-artifact@v4
      with:
        path: dist
        pattern: pypi-*
        merge-multiple: true
    - name: Publish
      uses: pypa/gh-action-pypi-publish@release/v1
python-lap-0.5.12/.github/workflows/test_simple.yaml 0000664 0000000 0000000 00000002277 15051100704 0022504 0 ustar 00root root 0000000 0000000 name: Test Simple
on: [workflow_dispatch]
jobs:
build_and_test:
name: "${{ matrix.os }} + python ${{ matrix.python-version }}"
runs-on: "${{ matrix.os }}"
strategy:
fail-fast: false
matrix:
os: ["windows-latest", "ubuntu-latest", "macos-13"]
python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install requirements
run: |
python -m pip install --upgrade pip
pip install "setuptools>=67.2.0"
pip install wheel build
- name: Test build from source
run: python -m build --wheel
- name: Test direct install from GitHub main
run: pip install git+https://github.com/gatagat/lap.git
- name: 🧪 Run test_simple.py
run: |
cd lap/tests/
python test_simple.py
- name: Archive wheels
uses: actions/upload-artifact@v4
with:
name: wheel-${{ matrix.os }}-${{ matrix.python-version }}
path: dist/*.whl
python-lap-0.5.12/.gitignore 0000664 0000000 0000000 00000000041 15051100704 0015646 0 ustar 00root root 0000000 0000000 dist
build
lap.egg-info
MANIFEST
python-lap-0.5.12/LICENSE 0000664 0000000 0000000 00000002421 15051100704 0014667 0 ustar 00root root 0000000 0000000 Copyright (c) 2012-2024, Tomas Kazmar
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
python-lap-0.5.12/MANIFEST.in 0000664 0000000 0000000 00000000307 15051100704 0015421 0 ustar 00root root 0000000 0000000 include *.txt *.md *.in LICENSE
recursive-include _lapjv_cpp *
recursive-include lap *
recursive-include lap/tests *
global-exclude */__pycache__
global-exclude *.pyc
prune .github
prune .githubtest
python-lap-0.5.12/README.md 0000664 0000000 0000000 00000010711 15051100704 0015142 0 ustar 00root root 0000000 0000000 [](https://github.com/gatagat/lap/actions/workflows/test_simple.yaml)
[](https://github.com/gatagat/lap/actions/workflows/benchmark.yaml)
[](https://github.com/gatagat/lap/actions/workflows/prepublish.yaml)
[](https://github.com/gatagat/lap/actions/workflows/publish.yaml)
# lap: Linear Assignment Problem Solver
[`lap`](https://github.com/gatagat/lap) is a [linear assignment problem](https://en.wikipedia.org/wiki/Assignment_problem) solver using Jonker-Volgenant algorithm for dense LAPJV¹ or sparse LAPMOD² matrices. Both algorithms are implemented from scratch based solely on the papers¹˒² and the public domain Pascal implementation provided by A. Volgenant³. The LAPMOD implementation seems to be faster than the LAPJV implementation for matrices with a side of more than ~5000 and with less than 50% finite coefficients.
¹ R. Jonker and A. Volgenant, "A Shortest Augmenting Path Algorithm for Dense and Sparse Linear Assignment Problems", Computing 38, 325-340 (1987)
² A. Volgenant, "Linear and Semi-Assignment Problems: A Core Oriented Approach", Computer Ops Res. 23, 917-932 (1996)
³ http://www.assignmentproblems.com/LAPJV.htm | [[archive.org](https://web.archive.org/web/20220221010749/http://www.assignmentproblems.com/LAPJV.htm)]
## 💽 Installation
### Install from [PyPI](https://pypi.org/project/lap/):
[](https://badge.fury.io/py/lap)
[](https://pepy.tech/project/lap)
[](https://pepy.tech/project/lap)
```
pip install lap
```
| **Pre-built Wheels** 🛞 | **Windows** ✅ | **Linux** ✅ | **macOS** ✅ |
|:---:|:---:|:---:|:---:|
| Python 3.7 | AMD64 | x86_64/aarch64 | x86_64 |
| Python 3.8 | AMD64 | x86_64/aarch64 | x86_64/arm64 |
| Python 3.9-3.13 ¹ | AMD64/ARM64 ² | x86_64/aarch64 | x86_64/arm64 |
¹ v0.5.10 supports numpy v2.x for Python 3.9-3.13. 🆕
² Windows ARM64 is experimental.
Other options
### Install from GitHub repo (requires C++ compiler):
```
pip install git+https://github.com/gatagat/lap.git
```
### Build and install (requires C++ compiler):
```
git clone https://github.com/gatagat/lap.git
cd lap
pip install "setuptools>=67.8.0"
pip install wheel build
python -m build --wheel
cd dist
```
## 🧪 Usage
```
import lap
import numpy as np
print(lap.lapjv(np.random.rand(4, 5), extend_cost=True))
```
More details
### `cost, x, y = lap.lapjv(C)`
The function `lapjv(C)` returns the assignment cost `cost` and two arrays `x` and `y`. If cost matrix `C` has shape NxM, then `x` is a size-N array specifying to which column each row is assigned, and `y` is a size-M array specifying to which row each column is assigned. For example, an output of `x = [1, 0]` indicates that row 0 is assigned to column 1 and row 1 is assigned to column 0. Similarly, an output of `x = [2, 1, 0]` indicates that row 0 is assigned to column 2, row 1 is assigned to column 1, and row 2 is assigned to column 0.
Note that this function *does not* return the assignment matrix (as done by scipy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html) and lapsolver's [`solve dense`](https://github.com/cheind/py-lapsolver)). The assignment matrix can be constructed from `x` as follows:
```
A = np.zeros((N, M))
for i in range(N):
A[i, x[i]] = 1
```
Equivalently, we could construct the assignment matrix from `y`:
```
A = np.zeros((N, M))
for j in range(M):
A[y[j], j] = 1
```
Finally, note that the outputs are redundant: we can construct `x` from `y`, and vice versa:
```
x = [np.where(y == i)[0][0] for i in range(N)]
y = [np.where(x == j)[0][0] for j in range(M)]
```
# License
Released under the 2-clause BSD license, see [LICENSE](./LICENSE).
Copyright (C) 2012-2024, Tomas Kazmar
Contributors (in alphabetic order):
- Benjamin Eysenbach
- Léo Duret
- Raphael Reme
- Ratha Siv
- Robert Wen
- Steven
- Tom White
- Tomas Kazmar
- Wok
python-lap-0.5.12/_lapjv_cpp/ 0000775 0000000 0000000 00000000000 15051100704 0016000 5 ustar 00root root 0000000 0000000 python-lap-0.5.12/_lapjv_cpp/.gitignore 0000664 0000000 0000000 00000000013 15051100704 0017762 0 ustar 00root root 0000000 0000000 _lapjv.cpp
python-lap-0.5.12/_lapjv_cpp/_lapjv.pyx 0000664 0000000 0000000 00000012273 15051100704 0020022 0 ustar 00root root 0000000 0000000 # Force compiling with Python 3
# cython: language_level=3
import numpy as np
cimport numpy as cnp
cimport cython
from libc.stdlib cimport malloc, free
cdef extern from "lapjv.h" nogil:
ctypedef signed int int_t
ctypedef unsigned int uint_t
cdef int LARGE
cdef enum fp_t:
FP_1
FP_2
FP_DYNAMIC
int lapjv_internal(const uint_t n,
double *cost[],
int_t *x,
int_t *y)
int lapmod_internal(const uint_t n,
double *cc,
uint_t *ii,
uint_t *kk,
int_t *x,
int_t *y,
fp_t fp_version)
LARGE_ = LARGE
FP_1_ = FP_1
FP_2_ = FP_2
FP_DYNAMIC_ = FP_DYNAMIC
@cython.boundscheck(False)
@cython.wraparound(False)
def lapjv(cnp.ndarray cost not None, char extend_cost=False,
          double cost_limit=np.inf, char return_cost=True):
    """Solve linear assignment problem using Jonker-Volgenant algorithm.

    Parameters
    ----------
    cost: (N,N) ndarray
        Cost matrix. Entry `cost[i, j]` is the cost of assigning row `i` to
        column `j`.
    extend_cost: bool, optional
        Whether or not to extend a non-square matrix. Default: False.
    cost_limit: double, optional
        An upper limit for a cost of a single assignment. Default: `np.inf`.
    return_cost: bool, optional
        Whether or not to return the assignment cost.

    Returns
    -------
    opt: double
        Assignment cost. Not returned if `return_cost is False`.
    x: (N,) ndarray
        Assignment. `x[i]` specifies the column to which row `i` is assigned.
    y: (N,) ndarray
        Assignment. `y[j]` specifies the row to which column `j` is assigned.

    Notes
    -----
    For non-square matrices (with `extend_cost is True`) or `cost_limit` set
    low enough, there will be unmatched rows, columns in the solution `x`, `y`.
    All such entries are set to -1.
    """
    if cost.ndim != 2:
        raise ValueError('2-dimensional array expected')
    cdef cnp.ndarray[cnp.double_t, ndim=2, mode='c'] cost_c = \
        np.ascontiguousarray(cost, dtype=np.double)
    cdef cnp.ndarray[cnp.double_t, ndim=2, mode='c'] cost_c_extended
    cdef uint_t n_rows = cost_c.shape[0]
    cdef uint_t n_cols = cost_c.shape[1]
    cdef uint_t n = 0
    if n_rows == n_cols:
        n = n_rows
    else:
        if not extend_cost:
            raise ValueError(
                'Square cost array expected. If cost is intentionally '
                'non-square, pass extend_cost=True.')
    if cost_limit < np.inf:
        # Doubled problem: any row/column may instead match a "dummy" at
        # cost_limit/2, so a real pairing only happens below cost_limit.
        n = n_rows + n_cols
        cost_c_extended = np.empty((n, n), dtype=np.double)
        cost_c_extended[:] = cost_limit / 2.
        cost_c_extended[n_rows:, n_cols:] = 0
        cost_c_extended[:n_rows, :n_cols] = cost_c
        cost_c = cost_c_extended
    elif extend_cost:
        # Pad to square with zero-cost entries.
        n = max(n_rows, n_cols)
        cost_c_extended = np.zeros((n, n), dtype=np.double)
        cost_c_extended[:n_rows, :n_cols] = cost_c
        cost_c = cost_c_extended
    # Row-pointer view of the (possibly extended) cost matrix for the C solver.
    cdef double **cost_ptr
    cost_ptr = <double **> malloc(n * sizeof(double *))
    if cost_ptr is NULL:
        # Guard against dereferencing a failed allocation.
        raise MemoryError('Out of memory.')
    cdef int i
    for i in range(n):
        cost_ptr[i] = &cost_c[i, 0]
    cdef cnp.ndarray[int_t, ndim=1, mode='c'] x_c = \
        np.empty((n,), dtype=np.int32)
    cdef cnp.ndarray[int_t, ndim=1, mode='c'] y_c = \
        np.empty((n,), dtype=np.int32)
    cdef int ret = lapjv_internal(n, cost_ptr, &x_c[0], &y_c[0])
    free(cost_ptr)
    if ret != 0:
        if ret == -1:
            raise MemoryError('Out of memory.')
        raise RuntimeError('Unknown error (lapjv_internal returned %d).' % ret)
    cdef double opt = np.nan
    if cost_limit < np.inf or extend_cost:
        # Map assignments to dummy rows/columns back to "unmatched" (-1) and
        # trim the solution to the original matrix shape.
        x_c[x_c >= n_cols] = -1
        y_c[y_c >= n_rows] = -1
        x_c = x_c[:n_rows]
        y_c = y_c[:n_cols]
        if return_cost:
            opt = cost_c[np.nonzero(x_c != -1)[0], x_c[x_c != -1]].sum()
    elif return_cost:
        opt = cost_c[np.arange(n_rows), x_c].sum()
    if return_cost:
        return opt, x_c, y_c
    else:
        return x_c, y_c
@cython.boundscheck(False)
@cython.wraparound(False)
def _lapmod(const uint_t n,
            cnp.ndarray cc not None,
            cnp.ndarray ii not None,
            cnp.ndarray kk not None,
            fp_t fp_version=FP_DYNAMIC):
    """Internal function called from lapmod(..., fast=True).

    Sparse LAP in compressed-row form: `cc` holds the finite cost values,
    `ii[i]:ii[i+1]` delimits row `i`'s slice of `cc`/`kk`, and `kk` holds the
    column index of each entry.  Returns the assignment arrays `x`
    (row -> column) and `y` (column -> row) as int32 ndarrays.
    """
    # Coerce inputs to C-contiguous buffers with the exact dtypes
    # lapmod_internal expects (double costs, uint32 indices).
    cdef cnp.ndarray[cnp.double_t, ndim=1, mode='c'] cc_c = \
        np.ascontiguousarray(cc, dtype=np.double)
    cdef cnp.ndarray[uint_t, ndim=1, mode='c'] ii_c = \
        np.ascontiguousarray(ii, dtype=np.uint32)
    cdef cnp.ndarray[uint_t, ndim=1, mode='c'] kk_c = \
        np.ascontiguousarray(kk, dtype=np.uint32)
    # Output buffers filled in-place by the C solver.
    cdef cnp.ndarray[int_t, ndim=1, mode='c'] x_c = \
        np.empty((n,), dtype=np.int32)
    cdef cnp.ndarray[int_t, ndim=1, mode='c'] y_c = \
        np.empty((n,), dtype=np.int32)
    cdef int_t ret = lapmod_internal(n, &cc_c[0], &ii_c[0], &kk_c[0],
                                     &x_c[0], &y_c[0], fp_version)
    if ret != 0:
        if ret == -1:
            raise MemoryError('Out of memory.')
        raise RuntimeError('Unknown error (lapmod_internal returned %d).' % ret)
    return x_c, y_c
python-lap-0.5.12/_lapjv_cpp/lapjv.cpp 0000664 0000000 0000000 00000021105 15051100704 0017617 0 ustar 00root root 0000000 0000000 #include
#include <stdlib.h>
#include <string.h>
#include "lapjv.h"
/** Column-reduction and reduction transfer for a dense cost matrix.
 *
 * First phase of the JV algorithm: set each column dual v[j] to the column
 * minimum (recording the minimizing row in y), resolve rows claimed by
 * several columns, then for uniquely-assigned rows transfer slack into
 * their column's dual.
 *
 * @return Number of rows left unassigned; their indices fill `free_rows`.
 */
int_t _ccrrt_dense(const uint_t n, cost_t *cost[],
                   int_t *free_rows, int_t *x, int_t *y, cost_t *v)
{
    int_t n_free_rows;
    boolean *unique;
    // No row assigned yet; duals start at LARGE so any real cost undercuts them.
    for (uint_t i = 0; i < n; i++) {
        x[i] = -1;
        v[i] = LARGE;
        y[i] = 0;
    }
    // Column reduction: v[j] = min_i cost[i][j], y[j] = argmin row.
    for (uint_t i = 0; i < n; i++) {
        for (uint_t j = 0; j < n; j++) {
            const cost_t c = cost[i][j];
            if (c < v[j]) {
                v[j] = c;
                y[j] = i;
            }
            PRINTF("i=%d, j=%d, c[i,j]=%f, v[j]=%f y[j]=%d\n", i, j, c, v[j], y[j]);
        }
    }
    PRINT_COST_ARRAY(v, n);
    PRINT_INDEX_ARRAY(y, n);
    NEW(unique, boolean, n);
    memset(unique, TRUE, n);
    // Walk columns backwards: the first (highest-j) column claiming a row
    // keeps it; later claims mark the row non-unique and unassign the column.
    {
        int_t j = n;
        do {
            j--;
            const int_t i = y[j];
            if (x[i] < 0) {
                x[i] = j;
            } else {
                unique[i] = FALSE;
                y[j] = -1;
            }
        } while (j > 0);
    }
    n_free_rows = 0;
    // Reduction transfer: lower v of a uniquely-assigned row's column by the
    // row's second-smallest reduced cost.
    for (uint_t i = 0; i < n; i++) {
        if (x[i] < 0) {
            free_rows[n_free_rows++] = i;
        } else if (unique[i]) {
            const int_t j = x[i];
            cost_t min = LARGE;
            for (uint_t j2 = 0; j2 < n; j2++) {
                if (j2 == (uint_t)j) {
                    continue;
                }
                const cost_t c = cost[i][j2] - v[j2];
                if (c < min) {
                    min = c;
                }
            }
            PRINTF("v[%d] = %f - %f\n", j, v[j], min);
            v[j] -= min;
        }
    }
    FREE(unique);
    return n_free_rows;
}
/** Augmenting row reduction for a dense cost matrix.
 *
 * For each free row, find its two smallest reduced costs (v1 best at column
 * j1, v2 second best at j2) and try to assign the row to j1, adjusting the
 * dual v[j1] and possibly displacing the row currently holding j1.  The
 * `rr_cnt < current * n` guard bounds the number of re-scans so the loop
 * terminates even when duals stop improving.
 *
 * @return Number of rows still unassigned afterwards (refilled in free_rows).
 */
int_t _carr_dense(const uint_t n, cost_t *cost[], const uint_t n_free_rows,
                  int_t *free_rows, int_t *x, int_t *y, cost_t *v)
{
    uint_t current = 0;
    int_t new_free_rows = 0;
    uint_t rr_cnt = 0;  // total rows processed, including re-queued ones
    PRINT_INDEX_ARRAY(x, n);
    PRINT_INDEX_ARRAY(y, n);
    PRINT_COST_ARRAY(v, n);
    PRINT_INDEX_ARRAY(free_rows, n_free_rows);
    while (current < n_free_rows) {
        int_t i0;
        int_t j1, j2;
        cost_t v1, v2, v1_new;
        boolean v1_lowers;
        rr_cnt++;
        PRINTF("current = %d rr_cnt = %d\n", current, rr_cnt);
        const int_t free_i = free_rows[current++];
        // Scan row free_i for the smallest (j1, v1) and second-smallest
        // (j2, v2) reduced costs.
        j1 = 0;
        v1 = cost[free_i][0] - v[0];
        j2 = -1;
        v2 = LARGE;
        for (uint_t j = 1; j < n; j++) {
            PRINTF("%d = %f %d = %f\n", j1, v1, j2, v2);
            const cost_t c = cost[free_i][j] - v[j];
            if (c < v2) {
                if (c >= v1) {
                    v2 = c;
                    j2 = j;
                } else {
                    v2 = v1;
                    v1 = c;
                    j2 = j1;
                    j1 = j;
                }
            }
        }
        i0 = y[j1];
        v1_new = v[j1] - (v2 - v1);
        v1_lowers = v1_new < v[j1];
        PRINTF("%d %d 1=%d,%f 2=%d,%f v1'=%f(%d,%g) \n",
               free_i, i0, j1, v1, j2, v2, v1_new, v1_lowers, v[j1] - v1_new);
        if (rr_cnt < current * n) {
            if (v1_lowers) {
                // Lower the dual; the displaced row gets retried immediately.
                v[j1] = v1_new;
            } else if (i0 >= 0 && j2 >= 0) {
                // Dual unchanged: take the second-best column instead.
                j1 = j2;
                i0 = y[j2];
            }
            if (i0 >= 0) {
                if (v1_lowers) {
                    free_rows[--current] = i0;
                } else {
                    free_rows[new_free_rows++] = i0;
                }
            }
        } else {
            // Re-scan budget exhausted: defer the displaced row to the next pass.
            PRINTF("rr_cnt=%d >= %d (current=%d * n=%d)\n", rr_cnt, current * n, current, n);
            if (i0 >= 0) {
                free_rows[new_free_rows++] = i0;
            }
        }
        x[free_i] = j1;
        y[j1] = free_i;
    }
    return new_free_rows;
}
/** Find columns with minimum d[j] and put them on the SCAN list.
 *
 * Walks the TODO region cols[lo+1..n) and swaps every column whose distance
 * ties the running minimum into the SCAN region cols[lo..hi); a strictly
 * smaller distance restarts the SCAN region at lo.
 *
 * @return One past the last SCAN index.
 */
uint_t _find_dense(const uint_t n, uint_t lo, cost_t *d, int_t *cols, int_t *y)
{
    cost_t best = d[cols[lo]];
    uint_t scan_end = lo + 1;
    for (uint_t idx = lo + 1; idx < n; idx++) {
        const int_t col = cols[idx];
        if (d[col] > best) {
            continue;
        }
        if (d[col] < best) {
            // Strictly better distance found: drop the current SCAN set.
            best = d[col];
            scan_end = lo;
        }
        // Swap this column into the SCAN region.
        cols[idx] = cols[scan_end];
        cols[scan_end++] = col;
    }
    return scan_end;
}
/**
 * Scan all columns in TODO starting from arbitrary column in SCAN
 * and try to decrease d of the TODO columns using the SCAN column.
 *
 * @return A free column index whose tentative distance equals the current
 *         minimum (shortest-path target found), or -1 if none was reached.
 */
int_t _scan_dense(const uint_t n, cost_t *cost[], uint_t *plo, uint_t*phi,
                  cost_t *d, int_t *cols, int_t *pred, int_t *y, cost_t *v)
{
    uint_t lo = *plo;
    uint_t hi = *phi;
    cost_t h, cred_ij;
    while (lo != hi) {
        int_t j = cols[lo++];
        const int_t i = y[j];  // row currently assigned to the SCAN column
        const cost_t mind = d[j];
        h = cost[i][j] - v[j] - mind;
        PRINTF("i=%d j=%d h=%f\n", i, j, h);
        // For all columns in TODO
        for (uint_t k = hi; k < n; k++) {
            j = cols[k];
            cred_ij = cost[i][j] - v[j] - h;
            if (cred_ij < d[j]) {
                d[j] = cred_ij;
                pred[j] = i;
                if (cred_ij == mind) {
                    // Distance ties the minimum: a free column ends the search,
                    // otherwise move the column onto the SCAN list.
                    if (y[j] < 0) {
                        return j;
                    }
                    cols[k] = cols[hi];
                    cols[hi++] = j;
                }
            }
        }
    }
    *plo = lo;
    *phi = hi;
    return -1;
}
/**
 * Single iteration of modified Dijkstra shortest path algorithm as explained in the JV paper.
 * This is a dense matrix version.
 *
 * Columns in cols[] are partitioned into READY [0, n_ready), SCAN
 * [lo, hi) and TODO [hi, n); duals of READY columns are updated at the end.
 *
 * @return The closest free column index.
 */
int_t find_path_dense(const uint_t n, cost_t *cost[], const int_t start_i,
                      int_t *y, cost_t *v, int_t *pred)
{
    uint_t lo = 0, hi = 0;
    int_t final_j = -1;
    uint_t n_ready = 0;
    int_t *cols;
    cost_t *d;
    NEW(cols, int_t, n);
    NEW(d, cost_t, n);
    // Initial tentative distances: reduced costs of the starting row.
    for (uint_t i = 0; i < n; i++) {
        cols[i] = i;
        pred[i] = start_i;
        d[i] = cost[start_i][i] - v[i];
    }
    PRINT_COST_ARRAY(d, n);
    while (final_j == -1) {
        // No columns left on the SCAN list.
        if (lo == hi) {
            PRINTF("%d..%d -> find\n", lo, hi);
            n_ready = lo;
            hi = _find_dense(n, lo, d, cols, y);
            PRINTF("check %d..%d\n", lo, hi);
            PRINT_INDEX_ARRAY(cols, n);
            // A free column among the new minima terminates the search.
            for (uint_t k = lo; k < hi; k++) {
                const int_t j = cols[k];
                if (y[j] < 0) {
                    final_j = j;
                }
            }
        }
        if (final_j == -1) {
            PRINTF("%d..%d -> scan\n", lo, hi);
            final_j = _scan_dense(
                n, cost, &lo, &hi, d, cols, pred, y, v);
            PRINT_COST_ARRAY(d, n);
            PRINT_INDEX_ARRAY(cols, n);
            PRINT_INDEX_ARRAY(pred, n);
        }
    }
    PRINTF("found final_j=%d\n", final_j);
    PRINT_INDEX_ARRAY(cols, n);
    // Update duals of READY columns by their distance below the minimum.
    {
        const cost_t mind = d[cols[lo]];
        for (uint_t k = 0; k < n_ready; k++) {
            const int_t j = cols[k];
            v[j] += d[j] - mind;
        }
    }
    FREE(cols);
    FREE(d);
    return final_j;
}
/** Augment for a dense cost matrix.
 *
 * For every remaining free row, find a shortest augmenting path to a free
 * column and flip the assignments along it (alternating pred[] / x[] links).
 *
 * @return 0 on success, -1 on allocation failure (via NEW).
 */
int_t _ca_dense(const uint_t n, cost_t *cost[], const uint_t n_free_rows,
                int_t *free_rows, int_t *x, int_t *y, cost_t *v)
{
    int_t *pred;
    NEW(pred, int_t, n);
    for (int_t *pfree_i = free_rows; pfree_i < free_rows + n_free_rows; pfree_i++) {
        int_t i = -1, j;
        uint_t k = 0;  // safety counter: a valid path flips at most n edges
        PRINTF("looking at free_i=%d\n", *pfree_i);
        j = find_path_dense(n, cost, *pfree_i, y, v, pred);
        ASSERT(j >= 0);
        ASSERT(j < n);
        // Walk the path backwards from the free column to the free row,
        // re-pointing y[j] and swapping x[i] as we go.
        while (i != *pfree_i) {
            PRINTF("augment %d\n", j);
            PRINT_INDEX_ARRAY(pred, n);
            i = pred[j];
            PRINTF("y[%d]=%d -> %d\n", j, y[j], i);
            y[j] = i;
            PRINT_INDEX_ARRAY(x, n);
            SWAP_INDICES(j, x[i]);
            k++;
            if (k >= n) {
                ASSERT(FALSE);
            }
        }
    }
    FREE(pred);
    return 0;
}
/** Solve a dense LAP.
 *
 * Pipeline from the JV paper: column reduction + reduction transfer,
 * at most two rounds of augmenting row reduction, then shortest-path
 * augmentation for any rows still free.
 *
 * @return 0 on success, -1 on allocation failure.
 */
int lapjv_internal(const uint_t n, cost_t *cost[], int_t *x, int_t *y)
{
    int ret;
    int_t *free_rows;
    cost_t *v;
    NEW(free_rows, int_t, n);
    NEW(v, cost_t, n);
    ret = _ccrrt_dense(n, cost, free_rows, x, y, v);
    int i = 0;
    while (ret > 0 && i < 2) {
        ret = _carr_dense(n, cost, ret, free_rows, x, y, v);
        i++;
    }
    if (ret > 0) {
        ret = _ca_dense(n, cost, ret, free_rows, x, y, v);
    }
    FREE(v);
    FREE(free_rows);
    return ret;
}
python-lap-0.5.12/_lapjv_cpp/lapjv.h 0000664 0000000 0000000 00000003074 15051100704 0017271 0 ustar 00root root 0000000 0000000 #ifndef LAPJV_H
#define LAPJV_H

// Sentinel "infinite" cost used to initialize minima searches.
#define LARGE 1000000

#if !defined TRUE
#define TRUE 1
#endif
#if !defined FALSE
#define FALSE 0
#endif

// Allocate n elements of type t into x; return -1 from the caller on failure.
#define NEW(x, t, n) if ((x = (t *)malloc(sizeof(t) * (n))) == 0) { return -1; }
#define FREE(x) if (x != 0) { free(x); x = 0; }
#define SWAP_INDICES(a, b) { int_t _temp_index = a; a = b; b = _temp_index; }

// Debug tracing: flip to #if 1 to enable asserts and verbose printing.
#if 0
#include <assert.h>
#define ASSERT(cond) assert(cond)
#define PRINTF(fmt, ...) printf(fmt, ##__VA_ARGS__)
#define PRINT_COST_ARRAY(a, n) \
    while (1) { \
        printf(#a" = ["); \
        if ((n) > 0) { \
            printf("%f", (a)[0]); \
            for (uint_t j = 1; j < n; j++) { \
                printf(", %f", (a)[j]); \
            } \
        } \
        printf("]\n"); \
        break; \
    }
#define PRINT_INDEX_ARRAY(a, n) \
    while (1) { \
        printf(#a" = ["); \
        if ((n) > 0) { \
            printf("%d", (a)[0]); \
            for (uint_t j = 1; j < n; j++) { \
                printf(", %d", (a)[j]); \
            } \
        } \
        printf("]\n"); \
        break; \
    }
#else
#define ASSERT(cond)
#define PRINTF(fmt, ...)
#define PRINT_COST_ARRAY(a, n)
#define PRINT_INDEX_ARRAY(a, n)
#endif

typedef signed int int_t;
typedef unsigned int uint_t;
typedef double cost_t;
typedef char boolean;
// Price-update strategies for the sparse (LAPMOD) solver.
typedef enum fp_t { FP_1 = 1, FP_2 = 2, FP_DYNAMIC = 3 } fp_t;

extern int_t lapjv_internal(
    const uint_t n, cost_t *cost[],
    int_t *x, int_t *y);
extern int_t lapmod_internal(
    const uint_t n, cost_t *cc, uint_t *ii, uint_t *kk,
    int_t *x, int_t *y, fp_t fp_version);

#endif // LAPJV_H
python-lap-0.5.12/_lapjv_cpp/lapmod.cpp 0000664 0000000 0000000 00000043102 15051100704 0017760 0 ustar 00root root 0000000 0000000 #include
#include <stdlib.h>
#include <string.h>
#include "lapjv.h"
/** Column-reduction and reduction transfer for a sparse cost matrix.
 *
 * Sparse analogue of _ccrrt_dense operating on compressed-row storage:
 * cc holds costs, kk their column indices, and ii[i]:ii[i+1] delimits row i.
 *
 * @return Number of rows left unassigned; their indices fill `free_rows`.
 */
int_t _ccrrt_sparse(const uint_t n, cost_t *cc, uint_t *ii, uint_t *kk,
                    int_t *free_rows, int_t *x, int_t *y, cost_t *v)
{
    int_t n_free_rows;
    boolean *unique;
    for (uint_t i = 0; i < n; i++) {
        x[i] = -1;
        v[i] = LARGE;
        y[i] = 0;
    }
    // Column reduction over stored (finite) entries only.
    for (uint_t i = 0; i < n; i++) {
        for (uint_t k = ii[i]; k < ii[i+1]; k++) {
            const int_t j = kk[k];
            const cost_t c = cc[k];
            if (c < v[j]) {
                v[j] = c;
                y[j] = i;
            }
            PRINTF("i=%d, k=%d, j=%d, c[i,j]=%f, v[j]=%f y[j]=%d\n", i, k, j, c, v[j], y[j]);
        }
    }
    PRINT_COST_ARRAY(v, n);
    PRINT_INDEX_ARRAY(y, n);
    NEW(unique, boolean, n);
    memset(unique, TRUE, n);
    // Resolve rows claimed by several columns (highest column index wins).
    {
        int_t j = n;
        do {
            j--;
            const int_t i = y[j];
            if (x[i] < 0) {
                x[i] = j;
            } else {
                unique[i] = FALSE;
                y[j] = -1;
            }
        } while (j > 0);
    }
    n_free_rows = 0;
    // Reduction transfer; rows with a single stored entry have no
    // second-best candidate and are skipped.
    for (uint_t i = 0; i < n; i++) {
        if (x[i] < 0) {
            free_rows[n_free_rows++] = i;
        } else if (unique[i] && (ii[i+1] - ii[i] > 1)) {
            const int_t j = x[i];
            cost_t min = LARGE;
            for (uint_t k = ii[i]; k < ii[i+1]; k++) {
                const int_t j2 = kk[k];
                if (j2 == j) {
                    continue;
                }
                const cost_t c = cc[k] - v[j2];
                if (c < min) {
                    min = c;
                }
            }
            PRINTF("v[%d] = %f - %f\n", j, v[j], min);
            v[j] -= min;
        }
    }
    FREE(unique);
    return n_free_rows;
}
/** Augmenting row reduction for a sparse cost matrix.
 *
 * Sparse analogue of _carr_dense: the two smallest reduced costs of each
 * free row are searched over its stored entries only; an empty row falls
 * back to (j1 = 0, v1 = LARGE).  The `rr_cnt < current * n` guard bounds
 * re-scans so the loop terminates.
 *
 * @return Number of rows still unassigned afterwards (refilled in free_rows).
 */
int_t _carr_sparse(const uint_t n, cost_t *cc, uint_t *ii, uint_t *kk,
                   const uint_t n_free_rows, int_t *free_rows,
                   int_t *x, int_t *y, cost_t *v)
{
    uint_t current = 0;
    int_t new_free_rows = 0;
    uint_t rr_cnt = 0;
    PRINT_INDEX_ARRAY(x, n);
    PRINT_INDEX_ARRAY(y, n);
    PRINT_COST_ARRAY(v, n);
    PRINT_INDEX_ARRAY(free_rows, n_free_rows);
    while (current < n_free_rows) {
        int_t i0;
        int_t j1, j2;
        cost_t v1, v2, v1_new;
        boolean v1_lowers;
        rr_cnt++;
        PRINTF("current = %d rr_cnt = %d\n", current, rr_cnt);
        const int_t free_i = free_rows[current++];
        // Seed the best candidate from the row's first stored entry, if any.
        if (ii[free_i+1] - ii[free_i] > 0) {
            const uint_t k = ii[free_i];
            j1 = kk[k];
            v1 = cc[k] - v[j1];
        } else {
            j1 = 0;
            v1 = LARGE;
        }
        j2 = -1;
        v2 = LARGE;
        // Scan remaining stored entries for best/second-best reduced costs.
        for (uint_t k = ii[free_i]+1; k < ii[free_i+1]; k++) {
            PRINTF("%d = %f %d = %f\n", j1, v1, j2, v2);
            const int_t j = kk[k];
            const cost_t c = cc[k] - v[j];
            if (c < v2) {
                if (c >= v1) {
                    v2 = c;
                    j2 = j;
                } else {
                    v2 = v1;
                    v1 = c;
                    j2 = j1;
                    j1 = j;
                }
            }
        }
        i0 = y[j1];
        v1_new = v[j1] - (v2 - v1);
        v1_lowers = v1_new < v[j1];
        PRINTF("%d %d 1=%d,%f 2=%d,%f v1'=%f(%d,%g) \n",
               free_i, i0, j1, v1, j2, v2, v1_new, v1_lowers, v[j1] - v1_new);
        if (rr_cnt < current * n) {
            if (v1_lowers) {
                v[j1] = v1_new;
            } else if (i0 >= 0 && j2 >= 0) {
                j1 = j2;
                i0 = y[j2];
            }
            if (i0 >= 0) {
                if (v1_lowers) {
                    free_rows[--current] = i0;
                } else {
                    free_rows[new_free_rows++] = i0;
                }
            }
        } else {
            PRINTF("rr_cnt=%d >= %d (current=%d * n=%d)\n", rr_cnt, current * n, current, n);
            if (i0 >= 0) {
                free_rows[new_free_rows++] = i0;
            }
        }
        x[free_i] = j1;
        y[j1] = free_i;
    }
    return new_free_rows;
}
/** Find columns with minimum d[j] and put them on the SCAN list.
 *
 * Walks the TODO region cols[lo+1..n); columns tying the running minimum are
 * swapped into the SCAN region cols[lo..hi), and a strictly smaller distance
 * restarts the SCAN region at lo.
 *
 * @return One past the last SCAN index.
 */
uint_t _find_sparse_1(const uint_t n, uint_t lo, cost_t *d, int_t *cols, int_t *y)
{
    cost_t best = d[cols[lo]];
    uint_t scan_end = lo + 1;
    for (uint_t idx = lo + 1; idx < n; idx++) {
        const int_t col = cols[idx];
        if (d[col] > best) {
            continue;
        }
        if (d[col] < best) {
            // Strictly better distance found: restart the SCAN set.
            best = d[col];
            scan_end = lo;
        }
        cols[idx] = cols[scan_end];
        cols[scan_end++] = col;
    }
    return scan_end;
}
/** Find columns with minimum d[j] and put them on the SCAN list.
 *
 * Variant used by the second price-update strategy: collects into `scan`
 * every not-yet-done column from `todo` whose distance ties the running
 * minimum, resetting the list whenever a strictly smaller distance appears.
 *
 * @return Number of columns placed on `scan`.
 */
int_t _find_sparse_2(cost_t *d, int_t *scan, const uint_t n_todo, int_t *todo, boolean *done)
{
    cost_t best = LARGE;
    int_t count = 0;
    for (uint_t idx = 0; idx < n_todo; idx++) {
        const int_t col = todo[idx];
        if (done[col] || d[col] > best) {
            continue;
        }
        if (d[col] < best) {
            // Strictly better distance: discard the collected ties.
            best = d[col];
            count = 0;
        }
        scan[count++] = col;
    }
    return count;
}
/**
 * Scan all columns in TODO starting from arbitrary column in SCAN and try to
 * decrease d of the TODO columns using the SCAN column.
 *
 * rev_kk maps column index -> position in the current row's slice of kk,
 * rebuilt for each SCAN column (O(n) per column); -1 marks columns the row
 * has no stored entry for.
 *
 * @return A free column whose distance ties the minimum, or -1 if none.
 */
int_t _scan_sparse_1(const uint_t n, cost_t *cc, uint_t *ii, uint_t *kk,
                     uint_t *plo, uint_t *phi, cost_t *d, int_t *cols,
                     int_t *pred, int_t *y, cost_t *v)
{
    uint_t lo = *plo;
    uint_t hi = *phi;
    cost_t h, cred_ij;
    int_t *rev_kk;
    NEW(rev_kk, int_t, n);
    while (lo != hi) {
        int_t kj;
        int_t j = cols[lo++];
        const int_t i = y[j];  // row currently assigned to the SCAN column
        const cost_t mind = d[j];
        // Rebuild the column -> kk-position map for row i.
        for (uint_t k = 0; k < n; k++) {
            rev_kk[k] = -1;
        }
        for (uint_t k = ii[i]; k < ii[i+1]; k++) {
            const int_t j = kk[k];
            rev_kk[j] = k;
        }
        PRINTF("?%d kk[%d:%d]=", j, ii[i], ii[i+1]);
        PRINT_INDEX_ARRAY(kk + ii[i], ii[i+1] - ii[i]);
        kj = rev_kk[j];
        if (kj == -1) {
            // Row i has no stored cost for column j; nothing to propagate.
            continue;
        }
        ASSERT(kk[kj] == j);
        h = cc[kj] - v[j] - mind;
        PRINTF("i=%d j=%d kj=%d h=%f\n", i, j, kj, h);
        // For all columns in TODO
        for (uint_t k = hi; k < n; k++) {
            j = cols[k];
            PRINTF("?%d kk[%d:%d]=", j, ii[i], ii[i+1]);
            PRINT_INDEX_ARRAY(kk + ii[i], ii[i+1] - ii[i]);
            if ((kj = rev_kk[j]) == -1) {
                continue;
            }
            ASSERT(kk[kj] == j);
            cred_ij = cc[kj] - v[j] - h;
            if (cred_ij < d[j]) {
                d[j] = cred_ij;
                pred[j] = i;
                if (cred_ij == mind) {
                    if (y[j] < 0) {
                        FREE(rev_kk);
                        return j;
                    }
                    cols[k] = cols[hi];
                    cols[hi++] = j;
                }
            }
        }
    }
    *plo = lo;
    *phi = hi;
    FREE(rev_kk);
    return -1;
}
/**
 * Scan all columns in TODO starting from arbitrary column in SCAN and try to
 * decrease d of the TODO columns using the SCAN column.
 *
 * Variant that walks only the finite entries of the SCAN column's owner
 * row instead of all TODO columns, with extra bookkeeping: explicit
 * ready/scan/todo lists plus done/added flags per column.
 *
 * Returns an unassigned column reached at or below the current minimum
 * distance, or -1 once the SCAN list is exhausted (outputs *plo, *phi,
 * *pn_ready, *pn_todo are updated only in the -1 case).
 */
int_t _scan_sparse_2(const uint_t n, cost_t *cc, uint_t *ii, uint_t *kk,
                     uint_t *plo, uint_t *phi, cost_t *d, int_t *pred,
                     boolean *done, uint_t *pn_ready, int_t *ready,
                     int_t *scan, uint_t *pn_todo, int_t *todo,
                     boolean *added, int_t *y, cost_t *v)
{
    uint_t lo = *plo;
    uint_t hi = *phi;
    uint_t n_todo = *pn_todo;
    uint_t n_ready = *pn_ready;
    cost_t h, cred_ij;
    int_t *rev_kk;  /* rev_kk[j]: entry index of column j in the current row, or -1 */
    NEW(rev_kk, int_t, n);
    for (uint_t k = 0; k < n; k++) {
        rev_kk[k] = -1;
    }
    while (lo != hi) {
        int_t kj;
        int_t j = scan[lo++];
        const int_t i = y[j];
        ready[n_ready++] = j;  /* move j to the READY list */
        const cost_t mind = d[j];
        /* Fill the reverse index for row i's finite entries only;
         * cleared again at the bottom of the loop. */
        for (uint_t k = ii[i]; k < ii[i+1]; k++) {
            const int_t j = kk[k];
            rev_kk[j] = k;
        }
        PRINTF("?%d kk[%d:%d]=", j, ii[i], ii[i+1]);
        PRINT_INDEX_ARRAY(kk + ii[i], ii[i+1] - ii[i]);
        kj = rev_kk[j];
        ASSERT(kj != -1);
        ASSERT(kk[kj] == j);
        /* h: reduced cost of entry (i, j) relative to the current minimum. */
        h = cc[kj] - v[j] - mind;
        PRINTF("i=%d j=%d kj=%d h=%f\n", i, j, kj, h);
        // For all columns in TODO
        for (uint_t k = 0; k < ii[i+1] - ii[i]; k++) {
            j = kk[ii[i] + k];
            if (done[j]) {
                continue;
            }
            PRINTF("?%d kk[%d:%d]=", j, ii[i], ii[i+1]);
            PRINT_INDEX_ARRAY(kk + ii[i], ii[i+1] - ii[i]);
            cred_ij = cc[ii[i] + k] - v[j] - h;
            if (cred_ij < d[j]) {
                d[j] = cred_ij;
                pred[j] = i;
                /* <= (not ==) also catches values pushed below mind. */
                if (cred_ij <= mind) {
                    if (y[j] < 0) {
                        /* Unassigned column: augmenting path complete. */
                        FREE(rev_kk);
                        return j;
                    }
                    scan[hi++] = j;
                    done[j] = TRUE;
                } else if (!added[j]) {
                    /* First time j is reached: remember it for later
                     * _find_sparse_2 passes. */
                    todo[n_todo++] = j;
                    added[j] = TRUE;
                }
            }
        }
        /* Clear only the reverse-index entries set for row i. */
        for (uint_t k = ii[i]; k < ii[i+1]; k++) {
            const int_t j = kk[k];
            rev_kk[j] = -1;
        }
    }
    *pn_todo = n_todo;
    *pn_ready = n_ready;
    *plo = lo;
    *phi = hi;
    FREE(rev_kk);
    return -1;
}
/**
 * Single iteration of modified Dijkstra shortest path algorithm as explained in the JV paper.
 * This version loops over all column indices (some of which might be inf).
 *
 * Grows a shortest alternating path tree from free row start_i until an
 * unassigned column is reached; before returning, updates the prices v
 * of the READY columns.
 *
 * @return The closest free column index.
 */
int_t find_path_sparse_1(const uint_t n, cost_t *cc, uint_t *ii, uint_t *kk,
                         const int_t start_i, int_t *y, cost_t *v, int_t *pred)
{
    uint_t lo = 0, hi = 0;  /* SCAN region of cols is [lo, hi) */
    int_t final_j = -1;
    uint_t n_ready = 0;     /* READY region of cols is [0, n_ready) */
    int_t *cols;            /* column permutation: READY | SCAN | TODO */
    cost_t *d;              /* tentative shortest-path distances */
    NEW(cols, int_t, n);
    NEW(d, cost_t, n);
    for (uint_t i = 0; i < n; i++) {
        cols[i] = i;
        d[i] = LARGE;
        pred[i] = start_i;
    }
    /* Seed distances with the reduced costs of start_i's finite entries. */
    for (uint_t i = ii[start_i]; i < ii[start_i + 1]; i++) {
        const int_t j = kk[i];
        d[j] = cc[i] - v[j];
    }
    PRINT_COST_ARRAY(d, n);
    while (final_j == -1) {
        // No columns left on the SCAN list.
        if (lo == hi) {
            PRINTF("%d..%d -> find\n", lo, hi);
            n_ready = lo;
            hi = _find_sparse_1(n, lo, d, cols, y);
            PRINTF("check %d..%d\n", lo, hi);
            PRINT_INDEX_ARRAY(cols, n);
            /* Stop if any freshly found minimum column is unassigned. */
            for (uint_t k = lo; k < hi; k++) {
                const int_t j = cols[k];
                if (y[j] < 0) {
                    final_j = j;
                }
            }
        }
        if (final_j == -1) {
            PRINTF("%d..%d -> scan\n", lo, hi);
            final_j = _scan_sparse_1(n, cc, ii, kk, &lo, &hi, d, cols, pred, y, v);
            PRINT_COST_ARRAY(d, n);
            PRINT_INDEX_ARRAY(cols, n);
            PRINT_INDEX_ARRAY(pred, n);
        }
    }
    PRINTF("found final_j=%d\n", final_j);
    PRINT_INDEX_ARRAY(cols, n);
    {
        /* Price update for READY columns (JV dual update). */
        const cost_t mind = d[cols[lo]];
        for (uint_t k = 0; k < n_ready; k++) {
            const int_t j = cols[k];
            v[j] += d[j] - mind;
        }
    }
    FREE(cols);
    FREE(d);
    return final_j;
}
/**
 * Single iteration of modified Dijkstra shortest path algorithm as explained in the JV paper.
 * This version loops over non-inf column indices (which requires some additional bookkeeping).
 *
 * Bookkeeping: todo holds candidate columns (flagged in added), scan is
 * the current SCAN list, ready collects finalised columns, and done
 * marks columns whose distance is final.
 *
 * @return The closest free column index.
 */
int_t find_path_sparse_2(const uint_t n, cost_t *cc, uint_t *ii, uint_t *kk,
                         const int_t start_i, int_t *y, cost_t *v, int_t *pred)
{
    uint_t lo = 0, hi = 0;  /* SCAN region of scan is [lo, hi) */
    int_t final_j = -1;
    uint_t n_ready = 0;
    uint_t n_todo = (ii[start_i + 1] - ii[start_i]);
    boolean *done, *added;
    int_t *ready, *scan, *todo;
    cost_t *d;  /* tentative shortest-path distances */
    NEW(done, boolean, n);
    NEW(added, boolean, n);
    NEW(ready, int_t, n);
    NEW(scan, int_t, n);
    NEW(todo, int_t, n);
    NEW(d, cost_t, n);
    memset(done, FALSE, n);
    memset(added, FALSE, n);
    for (uint_t i = 0; i < n; i++) {
        d[i] = LARGE;
        pred[i] = start_i;
    }
    /* Seed with start_i's finite entries; only these are initially
     * reachable, so only they go on the TODO list. */
    for (uint_t i = ii[start_i]; i < ii[start_i + 1]; i++) {
        const int_t j = kk[i];
        d[j] = cc[i] - v[j];
        todo[i - ii[start_i]] = j;
        added[j] = TRUE;
    }
    PRINT_COST_ARRAY(d, n);
    PRINT_INDEX_ARRAY(pred, n);
    PRINT_INDEX_ARRAY(done, n);
    PRINT_INDEX_ARRAY(ready, n_ready);
    PRINT_INDEX_ARRAY(scan + lo, hi - lo);
    PRINT_INDEX_ARRAY(todo, n_todo);
    PRINT_INDEX_ARRAY(added, n);
    while (final_j == -1) {
        // No columns left on the SCAN list.
        if (lo == hi) {
            PRINTF("%d..%d -> find\n", lo, hi);
            lo = 0;
            hi = _find_sparse_2(d, scan, n_todo, todo, done);
            PRINTF("check %d..%d\n", lo, hi);
            if (!hi) {
                // XXX: the assignment is unsolvable, lets try to return
                // something reasonable nevertheless.
                for (uint_t j = 0; j < n; j++) {
                    if (!done[j] && y[j] < 0) {
                        final_j = j;
                    }
                }
                ASSERT(final_j != -1);
                break;
            }
            ASSERT(hi > lo);
            /* Stop if any freshly found minimum column is unassigned;
             * assigned ones have their distances finalised. */
            for (uint_t k = lo; k < hi; k++) {
                const int_t j = scan[k];
                if (y[j] < 0) {
                    final_j = j;
                } else {
                    done[j] = TRUE;
                }
            }
        }
        if (final_j == -1) {
            PRINTF("%d..%d -> scan\n", lo, hi);
            PRINT_INDEX_ARRAY(done, n);
            PRINT_INDEX_ARRAY(ready, n_ready);
            PRINT_INDEX_ARRAY(scan + lo, hi - lo);
            PRINT_INDEX_ARRAY(todo, n_todo);
            final_j = _scan_sparse_2(n, cc, ii, kk, &lo, &hi, d, pred,
                                     done, &n_ready, ready, scan,
                                     &n_todo, todo, added, y, v);
            PRINT_COST_ARRAY(d, n);
            PRINT_INDEX_ARRAY(pred, n);
            PRINT_INDEX_ARRAY(done, n);
            PRINT_INDEX_ARRAY(ready, n_ready);
            PRINT_INDEX_ARRAY(scan + lo, hi - lo);
            PRINT_INDEX_ARRAY(todo, n_todo);
            PRINT_INDEX_ARRAY(added, n);
        }
    }
    PRINTF("found final_j=%d\n", final_j);
    {
        /* Price update for READY columns (JV dual update). */
        const cost_t mind = d[scan[lo]];
        for (uint_t k = 0; k < n_ready; k++) {
            const int_t j = ready[k];
            v[j] += d[j] - mind;
        }
    }
    FREE(done);
    FREE(added);
    FREE(ready);
    FREE(scan);
    FREE(todo);
    FREE(d);
    return final_j;
}
/** Find path using one of the two find_path variants selected based on
 * the sparsity of the row we augment from: dense rows use variant 1,
 * sparse rows variant 2. */
int_t find_path_sparse_dynamic(const uint_t n, cost_t *cc, uint_t *ii, uint_t *kk,
                               const int_t start_i, int_t *y, cost_t *v, int_t *pred)
{
    /* Number of finite entries in row start_i. */
    const uint_t row_nnz = ii[start_i + 1] - ii[start_i];
    // XXX: wouldnt it be better to decide for the whole matrix?
    return (row_nnz > 0.25 * n)
        ? find_path_sparse_1(n, cc, ii, kk, start_i, y, v, pred)
        : find_path_sparse_2(n, cc, ii, kk, start_i, y, v, pred);
}
typedef int_t (*fp_function_t)(const uint_t, cost_t *, uint_t *, uint_t *,
const int_t, int_t *, cost_t *, int_t *);
/** Pick a find-path variant once for the whole matrix, based on the
 * overall fill ratio (ii[n] is the total number of finite entries). */
fp_function_t get_better_find_path(const uint_t n, uint_t *ii)
{
    const double fill = ii[n] / (double)(n * n);
    if (fill <= 0.25) {
        PRINTF("Using find_path_sparse_2 for sparsity=%f\n", fill);
        return find_path_sparse_2;
    }
    PRINTF("Using find_path_sparse_1 for sparsity=%f\n", fill);
    return find_path_sparse_1;
}
/** Augment for a sparse cost matrix.
 *
 * Runs the selected find-path variant for every remaining free row and
 * flips the assignments (x, y) along each augmenting path.
 *
 * fp_version selects FP_1, FP_2 or FP_DYNAMIC.
 * Returns 0 on success, -2 if fp_version is not a known variant.
 */
int_t _ca_sparse(const uint_t n, cost_t *cc, uint_t *ii, uint_t *kk, const uint_t n_free_rows,
                 int_t *free_rows, int_t *x, int_t *y, cost_t *v, int fp_version)
{
    int_t *pred;
    fp_function_t fp;
    switch (fp_version) {
    case FP_1: fp = find_path_sparse_1; break;
    case FP_2: fp = find_path_sparse_2; break;
    case FP_DYNAMIC: fp = get_better_find_path(n, ii); break;
    default: return -2;
    }
    /* Fix: allocate pred only after a valid variant was chosen; the
     * unknown-variant path previously leaked the buffer because it was
     * allocated before the switch. */
    NEW(pred, int_t, n);
    for (int_t *pfree_i = free_rows; pfree_i < free_rows + n_free_rows; pfree_i++) {
        int_t i = -1, j;
        uint_t k = 0;  /* step counter guarding against a cycle in pred */
        PRINTF("looking at free_i=%d\n", *pfree_i);
        j = fp(n, cc, ii, kk, *pfree_i, y, v, pred);
        ASSERT(j >= 0);
        ASSERT(j < n);
        /* Walk pred back from the free column, flipping assignments. */
        while (i != *pfree_i) {
            PRINTF("augment %d\n", j);
            PRINT_INDEX_ARRAY(pred, n);
            i = pred[j];
            PRINTF("y[%d]=%d -> %d\n", j, y[j], i);
            y[j] = i;
            PRINT_INDEX_ARRAY(x, n);
            SWAP_INDICES(j, x[i]);
            k++;
            if (k >= n) {
                ASSERT(FALSE);
            }
        }
    }
    FREE(pred);
    return 0;
}
/** Solve square sparse LAP.
 *
 * n:  problem size.
 * cc/ii/kk: CSR-style sparse cost matrix (cc: finite costs, ii: row
 *     start offsets with ii[n] = nnz, kk: column indices per entry).
 * x:  output, column assigned to each row.
 * y:  output, row assigned to each column.
 * fp_version: find-path variant for the augmentation stage.
 *
 * Returns 0 on success, or a negative value propagated from the
 * augmentation stage on error.
 */
int lapmod_internal(const uint_t n, cost_t *cc, uint_t *ii, uint_t *kk,
                    int_t *x, int_t *y, fp_t fp_version)
{
    int ret;
    int_t *free_rows;  /* rows left unassigned by the cheap phases */
    cost_t *v;         /* dual variables (column prices) */
    NEW(free_rows, int_t, n);
    NEW(v, cost_t, n);
    /* Phase 1: column reduction and reduction transfer. */
    ret = _ccrrt_sparse(n, cc, ii, kk, free_rows, x, y, v);
    int i = 0;
    /* Phase 2: up to two rounds of augmenting row reduction. */
    while (ret > 0 && i < 2) {
        ret = _carr_sparse(n, cc, ii, kk, ret, free_rows, x, y, v);
        i++;
    }
    /* Phase 3: shortest-path augmentation for the remaining free rows. */
    if (ret > 0) {
        ret = _ca_sparse(n, cc, ii, kk, ret, free_rows, x, y, v, fp_version);
    }
    FREE(v);
    FREE(free_rows);
    return ret;
}
python-lap-0.5.12/benchmark/ 0000775 0000000 0000000 00000000000 15051100704 0015615 5 ustar 00root root 0000000 0000000 python-lap-0.5.12/benchmark/bench.sh 0000775 0000000 0000000 00000001341 15051100704 0017232 0 ustar 00root root 0000000 0000000 #!/bin/sh
export PYTHONPATH=$LAPJV_OLD:$PYTHONPATH
PYTEST_OPTS="-v -s --benchmark-sort=mean --benchmark-columns=mean,min,max,median,rounds,iterations"
# Run each benchmark suite unless its JSON results file already exists.
for bench in matrix_dense_hard matrix_sparse matrix_nnz overview_dense overview_sparse; do
    if [ ! -e $bench.json ]; then
        pytest $PYTEST_OPTS --benchmark-json=$bench.json $bench.py
    fi
done
python-lap-0.5.12/benchmark/benchmark_simple.py 0000664 0000000 0000000 00000004012 15051100704 0021467 0 ustar 00root root 0000000 0000000 import sys
sys.stdout.reconfigure(encoding='utf-8')
import timeit
import lap
import numpy as np
from scipy.optimize import linear_sum_assignment
def do_lap(input):
    """Solve `input` with lap.lapjv and time it.

    Returns (pairs, seconds) where pairs is a (k, 2) array of
    [row, column] assignments (unassigned columns dropped).
    """
    t0 = timeit.default_timer()
    needs_extension = input.shape[0] != input.shape[1]
    _, x, y = lap.lapjv(input, extend_cost=needs_extension)
    pairs = np.array([[y[col], col] for col in x if col >= 0])
    return pairs, timeit.default_timer() - t0
def do_scipy(input):
    """Solve `input` with scipy's linear_sum_assignment and time it.

    Returns (pairs, seconds) where pairs is an (n, 2) array whose rows
    are [row_index, column_index] of the optimal assignment.
    """
    t0 = timeit.default_timer()
    rows, cols = linear_sum_assignment(input)
    pairs = np.array(list(zip(rows, cols)))
    return pairs, timeit.default_timer() - t0
def test(n, m, tries=3):
    """Compare lap.lapjv against scipy on random n x m cost matrices.

    Runs `tries` trials, checks both solvers produce identical
    assignments, then prints average runtimes and the speed ratio.

    NOTE(review): optimal assignments need not be unique, so the
    element-wise equality check can fail on ties even when both solvers
    are correct.
    """
    print("test(" + str(n) + ", " + str(m) + ")")
    print("-----------------------------------------")
    same_result = []
    elapsed_scipy = []
    elapsed_lap = []
    for i in range(tries):
        a = np.random.rand(n, m)
        res_scipy, elapsed = do_scipy(a)
        elapsed_scipy.append(elapsed)
        res_lap, elapsed = do_lap(a)
        elapsed_lap.append(elapsed)
        same_result.append((res_lap == res_scipy).all())
    if all(same_result):
        print(" * ✅ PASS !!!")
        # Average the per-trial timings.
        elapsed_scipy = sum(elapsed_scipy) / tries
        elapsed_lap = sum(elapsed_lap) / tries
        print(f" scipy completed in {elapsed_scipy:.8f}s")
        print(f" lap completed in {elapsed_lap:.8f}s")
        if elapsed_lap <= elapsed_scipy:
            print(f" * 🏆 lap is faster by {round(elapsed_scipy/elapsed_lap, 4)}x time.")
        else:
            print(f" * 🐌 lap is slower by {round(elapsed_lap/elapsed_scipy, 4)}x time.")
    else:
        print(" * ❌ FAIL !!!")
    print("-----------------------------------------")
if __name__ == '__main__':
    # Square and near-square sizes from tiny to large; the rectangular
    # cases exercise lapjv's extend_cost path (see do_lap).
    test(n=4, m=5)
    test(n=5, m=5)
    test(n=5, m=6)
    test(n=45, m=50)
    test(n=50, m=50)
    test(n=50, m=55)
    test(n=450, m=500)
    test(n=500, m=500)
    test(n=500, m=550)
    test(n=2500, m=5000)
    test(n=5000, m=5000)
    test(n=5000, m=7500)
python-lap-0.5.12/benchmark/matrix_dense_hard.py 0000664 0000000 0000000 00000005034 15051100704 0021651 0 ustar 00root root 0000000 0000000 from pytest import mark
from joblib import Memory
from lap import lapjv, lapmod
from lap.lapmod import get_cost
try:
from lap_old import lapjv as lapjv_old
except ImportError:
print(
'''If you get here, you do not have the old lapjv to compare to.
git clone git@github.com:gatagat/lapjv.git lapjv-old
cd lapjv-old
git checkout old
python setup.py build_ext -i
mv lapjv lapjv_old
And run the benchmark:
LAPJV_OLD=lapjv-old bench.sh
''')
lapjv_old = None
from centrosome.lapjv import lapjv as lapjv_centrosome
from lap.tests.test_utils import (
get_dense_int, get_cost_CS, sparse_from_dense_CS, sparse_from_dense)
max_time_per_benchmark = 20
szs = [10, 100, 200, 500, 1000, 2000, 5000]
rngs = [100, 1000, 10000, 100000]
seeds = [1299821, 15485867, 32452867, 49979693]
cachedir = '/tmp/lapjv-cache'
memory = Memory(cachedir=cachedir, verbose=1)
@memory.cache
def get_hard_data(sz, rng, seed):
    """Build a hard dense integer cost matrix and its optimal cost.

    sz: matrix size; rng: cost value range; seed: RNG seed.
    Results are memoised on disk keyed by all three arguments.
    """
    # Bug fix: `rng` was previously ignored (100 hard-coded), so every
    # rng parametrisation benchmarked identical data.
    cost = get_dense_int(sz, rng, hard=True, seed=seed)
    opt = lapjv(cost)[0]
    return cost, opt
# Only benchmark the historical implementation when it is importable.
if lapjv_old is not None:
    @mark.timeout(max_time_per_benchmark)
    @mark.parametrize('sz,rng,seed', [
        (sz, rng, seed) for sz in szs for rng in rngs for seed in seeds])
    def test_JV_old(benchmark, sz, rng, seed):
        # ret[0] of the old solver is the optimal assignment cost.
        cost, opt = get_hard_data(sz, rng, seed)
        ret = benchmark(lapjv_old, cost)
        assert ret[0] == opt
@mark.timeout(max_time_per_benchmark)
@mark.parametrize('sz,rng,seed', [
    (sz, rng, seed) for sz in szs for rng in rngs for seed in seeds])
def test_JV(benchmark, sz, rng, seed):
    # Benchmark dense lapjv; verify the achieved cost is optimal.
    cost, opt = get_hard_data(sz, rng, seed)
    ret = benchmark(lapjv, cost, return_cost=False)
    # With return_cost=False, ret[0] is the column assigned to each row.
    assert cost[range(cost.shape[0]), ret[0]].sum() == opt
@mark.timeout(max_time_per_benchmark)
@mark.parametrize('sz,rng,seed', [
    (sz, rng, seed) for sz in szs for rng in rngs for seed in seeds])
def test_MOD_c(benchmark, sz, rng, seed):
    # Benchmark the C lapmod on a sparse view of the same dense data.
    cost, opt = get_hard_data(sz, rng, seed)
    _, cc, ii, kk = sparse_from_dense(cost)
    ret = benchmark(lapmod, sz, cc, ii, kk, fast=True, return_cost=False)
    assert get_cost(sz, cc, ii, kk, ret[0]) == opt
@mark.timeout(max_time_per_benchmark)
@mark.parametrize('sz,rng,seed', [
    (sz, rng, seed) for sz in szs for rng in rngs for seed in seeds])
def test_CSCY(benchmark, sz, rng, seed):
    # Benchmark centrosome's lapjv on a COO-style sparse encoding.
    cost, opt = get_hard_data(sz, rng, seed)
    i, j, cc = sparse_from_dense_CS(cost)
    ret = benchmark(lapjv_centrosome, i, j, cc)
    assert get_cost_CS(cost, ret[0]) == opt
python-lap-0.5.12/benchmark/matrix_nnz.py 0000664 0000000 0000000 00000002730 15051100704 0020362 0 ustar 00root root 0000000 0000000 from pytest import mark
from joblib import Memory
from lap import lapmod, FP_1, FP_2, FP_DYNAMIC
from lap.tests.test_utils import get_nnz_int
max_time_per_benchmark = 20
szs = [5000]
nnzs = [10, 100, 500, 1000, 1500, 2000, 3000, 4000]
seeds = [1299821, 15485867, 32452867, 49979693]
cachedir = '/tmp/lapjv-cache'
memory = Memory(cachedir=cachedir, verbose=1)
@memory.cache
def get_data(sz, nnz, seed, rng=100):
    # Disk-cached sparse (cc, ii, kk) problem from get_nnz_int,
    # memoised on all arguments.
    return get_nnz_int(sz, nnz, rng=rng, seed=seed)
@mark.timeout(max_time_per_benchmark)
@mark.parametrize('sz,nnz,seed', [
    (sz, nnz, seed) for sz in szs for nnz in nnzs for seed in seeds])
def test_MOD_c_3(benchmark, sz, nnz, seed):
    # Benchmark lapmod with the dynamically selected find-path variant.
    cc, ii, kk = get_data(sz, nnz, seed)
    benchmark(
        lapmod, sz, cc, ii, kk,
        fast=True, return_cost=False, fp_version=FP_DYNAMIC)
@mark.timeout(max_time_per_benchmark)
@mark.parametrize('sz,nnz,seed', [
    (sz, nnz, seed) for sz in szs for nnz in nnzs for seed in seeds])
def test_MOD_c_1(benchmark, sz, nnz, seed):
    # Benchmark lapmod forced to find-path variant 1.
    cc, ii, kk = get_data(sz, nnz, seed)
    benchmark(
        lapmod, sz, cc, ii, kk,
        fast=True, return_cost=False, fp_version=FP_1)
@mark.timeout(max_time_per_benchmark)
@mark.parametrize('sz,nnz,seed', [
    (sz, nnz, seed) for sz in szs for nnz in nnzs for seed in seeds])
def test_MOD_c_2(benchmark, sz, nnz, seed):
    # Benchmark lapmod forced to find-path variant 2.
    cc, ii, kk = get_data(sz, nnz, seed)
    benchmark(
        lapmod, sz, cc, ii, kk,
        fast=True, return_cost=False, fp_version=FP_2)
python-lap-0.5.12/benchmark/matrix_sparse.py 0000664 0000000 0000000 00000005167 15051100704 0021061 0 ustar 00root root 0000000 0000000 from __future__ import print_function
from pytest import mark
from joblib import Memory
from lap import lapjv, lapmod
try:
from lap_old import lapjv as lapjv_old
except ImportError:
print(
'''If you get here, you do not have the old lapjv to compare to.
git clone git@github.com:gatagat/lapjv.git lapjv-old
cd lapjv-old
git checkout old
python setup.py build_ext -i
mv lapjv lapjv_old
And run the benchmark:
LAPJV_OLD=lapjv-old bench.sh
''')
lapjv_old = None
from centrosome.lapjv import lapjv as lapjv_centrosome
from lap.tests.test_utils import (
sparse_from_masked,
sparse_from_masked_CS,
get_sparse_int,
get_platform_maxint)
max_time_per_benchmark = 15
szs = [1000, 5000, 10000]
sparsities = [0.5, 0.05, 0.005, 0.0005, 0.00005]
seeds = [1299821, 15485867, 32452867, 49979693]
cachedir = '/tmp/lapjv-cache'
memory = Memory(cachedir=cachedir, verbose=1)
@memory.cache
def get_data(sz, sparsity, seed, rng=100):
    # Disk-cached dense matrix plus validity mask; masked-out entries
    # are set to the platform max int so dense solvers avoid them.
    cost, mask = get_sparse_int(sz, rng, sparsity, hard=False, seed=seed)
    print('Requested sparsity %f generated %f' % (sparsity, mask.mean()))
    cost[~mask] = get_platform_maxint()
    return cost, mask
# Only benchmark the historical implementation when it is importable.
if lapjv_old is not None:
    @mark.timeout(max_time_per_benchmark)
    @mark.parametrize('sz,sparsity,seed', [
        (sz, sparsity, seed)
        for sz in szs for sparsity in sparsities for seed in seeds])
    def test_JV_old(benchmark, sz, sparsity, seed):
        # Benchmark the pre-rewrite lapjv on masked dense data.
        cost, mask = get_data(sz, sparsity, seed)
        benchmark(lapjv_old, cost)
@mark.timeout(max_time_per_benchmark)
@mark.parametrize('sz,sparsity,seed', [
    (sz, sparsity, seed)
    for sz in szs for sparsity in sparsities for seed in seeds])
def test_JV(benchmark, sz, sparsity, seed):
    # Benchmark dense lapjv on the masked dense representation.
    cost, mask = get_data(sz, sparsity, seed)
    benchmark(lapjv, cost, return_cost=False)
@mark.timeout(max_time_per_benchmark)
@mark.parametrize('sz,sparsity,seed', [
    (sz, sparsity, seed)
    for sz in szs for sparsity in sparsities for seed in seeds])
def test_MOD_c(benchmark, sz, sparsity, seed):
    # Benchmark the C lapmod on the truly sparse representation.
    cost, mask = get_data(sz, sparsity, seed)
    _, cc, ii, kk = sparse_from_masked(cost, mask)
    benchmark(lapmod, sz, cc, ii, kk, fast=True, return_cost=False)
@mark.timeout(max_time_per_benchmark)
@mark.parametrize('sz,sparsity,seed', [
    (sz, sparsity, seed)
    for sz in szs for sparsity in sparsities for seed in seeds])
def test_CSCY(benchmark, sz, sparsity, seed):
    # Benchmark centrosome's lapjv on a COO-style sparse encoding.
    cost, mask = get_data(sz, sparsity, seed)
    i, j, cc = sparse_from_masked_CS(cost, mask)
    benchmark(lapjv_centrosome, i, j, cc)
python-lap-0.5.12/benchmark/overview_dense.py 0000664 0000000 0000000 00000004523 15051100704 0021217 0 ustar 00root root 0000000 0000000 from pytest import mark
from joblib import Memory
from lap import lapjv, lapmod
from lap.lapmod import get_cost
try:
from lap_old import lapjv as lapjv_old
except ImportError:
print(
'''If you get here, you do not have the old lapjv to compare to.
git clone git@github.com:gatagat/lapjv.git lapjv-old
cd lapjv-old
git checkout old
python setup.py build_ext -i
mv lapjv lapjv_old
And run the benchmark:
LAPJV_OLD=lapjv-old bench.sh
''')
lapjv_old = None
from pymatgen.optimization.linear_assignment import LinearAssignment
from centrosome.lapjv import lapjv as lapjv_centrosome
from lap.tests.test_utils import (
sparse_from_dense,
sparse_from_dense_CS, get_cost_CS,
get_dense_int
)
cachedir = '/tmp/lapjv-cache'
memory = Memory(cachedir=cachedir, verbose=1)
@memory.cache
def get_data(seed):
    # Disk-cached hard dense matrix (size 100, value range 1000, per
    # get_dense_int's (sz, rng) convention) and its optimal cost.
    cost = get_dense_int(100, 1000, hard=True, seed=seed)
    opt = lapjv(cost)[0]
    return cost, opt
seeds = [1299821, 15485867, 32452867, 49979693]
# Only benchmark the historical implementation when it is importable.
if lapjv_old is not None:
    @mark.parametrize('seed', seeds)
    def test_JV_old(benchmark, seed):
        # ret[0] of the old solver is the optimal assignment cost.
        cost, opt = get_data(seed)
        ret = benchmark(lapjv_old, cost)
        assert ret[0] == opt
@mark.parametrize('seed', seeds)
def test_JV(benchmark, seed):
    # Benchmark dense lapjv; ret[0] is the assignment cost here.
    cost, opt = get_data(seed)
    ret = benchmark(lapjv, cost)
    assert ret[0] == opt
@mark.parametrize('seed', seeds)
def test_MODPY(benchmark, seed):
    # Benchmark the pure-Python lapmod path (fast=False).
    cost, opt = get_data(seed)
    n, cc, ii, kk = sparse_from_dense(cost)
    ret = benchmark(lapmod, n, cc, ii, kk, fast=False, return_cost=False)
    assert get_cost(n, cc, ii, kk, ret[0]) == opt
@mark.parametrize('seed', seeds)
def test_MOD_c(benchmark, seed):
    # Benchmark the C lapmod path (fast=True).
    cost, opt = get_data(seed)
    n, cc, ii, kk = sparse_from_dense(cost)
    ret = benchmark(lapmod, n, cc, ii, kk, fast=True, return_cost=False)
    assert get_cost(n, cc, ii, kk, ret[0]) == opt
@mark.parametrize('seed', seeds)
def test_PMG(benchmark, seed):
    # Benchmark pymatgen's LinearAssignment on the same data.
    cost, opt = get_data(seed)
    ret = benchmark(LinearAssignment, cost)
    assert ret.min_cost == opt
@mark.parametrize('seed', seeds)
def test_CSCY(benchmark, seed):
    # Benchmark centrosome's lapjv on a COO-style sparse encoding.
    cost, opt = get_data(seed)
    i, j, cc = sparse_from_dense_CS(cost)
    ret = benchmark(lapjv_centrosome, i, j, cc)
    assert get_cost_CS(cost, ret[0]) == opt
python-lap-0.5.12/benchmark/overview_sparse.py 0000664 0000000 0000000 00000004233 15051100704 0021414 0 ustar 00root root 0000000 0000000 from pytest import mark
from joblib import Memory
import numpy as np
from lap import lapjv, lapmod
from lap.lapmod import get_cost
try:
from lap_old import lapjv as lapjv_old
except ImportError:
print(
'''If you get here, you do not have the old lapjv to compare to.
git clone git@github.com:gatagat/lapjv.git lapjv-old
cd lapjv-old
git checkout old
python setup.py build_ext -i
mv lapjv lapjv_old
And run the benchmark:
LAPJV_OLD=lapjv-old bench.sh
''')
lapjv_old = None
from centrosome.lapjv import lapjv as lapjv_centrosome
from lap.tests.test_utils import (
sparse_from_masked,
sparse_from_masked_CS,
get_sparse_int,
get_platform_maxint
)
cachedir = '/tmp/lapjv-cache'
memory = Memory(cachedir=cachedir, verbose=1)
@memory.cache
def get_data(seed):
    # Disk-cached sparse problem (size 5000, ~1% requested density).
    # The optimum is computed on a copy with forbidden entries set to
    # the platform max int, so `cost` itself stays unmodified.
    cost, mask = get_sparse_int(5000, 1000, 0.01, hard=False, seed=seed)
    cost_ = cost.copy()
    cost_[~mask] = get_platform_maxint()
    opt = lapjv(cost_)[0]
    return cost, mask, opt
seeds = [1299821, 15485867, 32452867, 49979693]
def _get_cost_CS(cost, x):
return cost[np.arange(cost.shape[0]), x].sum()
@mark.parametrize('seed', seeds)
def test_CSCY(benchmark, seed):
    # Benchmark centrosome's lapjv on a COO-style sparse encoding.
    cost, mask, opt = get_data(seed)
    i, j, cc = sparse_from_masked_CS(cost, mask)
    ret = benchmark(lapjv_centrosome, i, j, cc)
    assert _get_cost_CS(cost, ret[0]) == opt
# Only benchmark the historical implementation when it is importable.
if lapjv_old is not None:
    @mark.parametrize('seed', seeds)
    def test_JV_old(benchmark, seed):
        # Mask out forbidden entries before handing to the dense solver.
        cost, mask, opt = get_data(seed)
        cost[~mask] = get_platform_maxint()
        ret = benchmark(lapjv_old, cost)
        assert ret[0] == opt
@mark.parametrize('seed', seeds)
def test_JV(benchmark, seed):
    # Mask out forbidden entries before handing to the dense solver.
    cost, mask, opt = get_data(seed)
    cost[~mask] = get_platform_maxint()
    ret = benchmark(lapjv, cost)
    assert ret[0] == opt
@mark.parametrize('seed', seeds)
def test_MOD_c(benchmark, seed):
    # Benchmark the C lapmod on the truly sparse representation.
    cost, mask, opt = get_data(seed)
    n, cc, ii, kk = sparse_from_masked(cost, mask)
    ret = benchmark(lapmod, n, cc, ii, kk, fast=True, return_cost=False)
    assert get_cost(n, cc, ii, kk, ret[0]) == opt
python-lap-0.5.12/lap/ 0000775 0000000 0000000 00000000000 15051100704 0014437 5 ustar 00root root 0000000 0000000 python-lap-0.5.12/lap/__init__.py 0000664 0000000 0000000 00000001054 15051100704 0016550 0 ustar 00root root 0000000 0000000 """LAP
``lap`` is a linear assignment problem solver using Jonker-Volgenant
algorithm for dense (LAPJV) or sparse (LAPMOD) matrices.
Functions
---------
lapjv
Find optimal (minimum-cost) assignment for a dense cost matrix.
lapmod
Find optimal (minimum-cost) assignment for a sparse cost matrix.
"""
__version__ = '0.5.12'
from ._lapjv import (
lapjv,
LARGE_ as LARGE,
FP_1_ as FP_1,
FP_2_ as FP_2,
FP_DYNAMIC_ as FP_DYNAMIC
)
from .lapmod import lapmod
__all__ = ['lapjv', 'lapmod', 'FP_1', 'FP_2', 'FP_DYNAMIC', 'LARGE']
python-lap-0.5.12/lap/lapmod.py 0000664 0000000 0000000 00000026075 15051100704 0016277 0 ustar 00root root 0000000 0000000 import numpy as np
from bisect import bisect_left
# import logging
from ._lapjv import _lapmod, FP_DYNAMIC_ as FP_DYNAMIC, LARGE_ as LARGE
def _pycrrt(n, cc, ii, kk, free_rows, x, y, v):
    """Column reduction and reduction transfer (pure-Python reference).

    Initialises x (column per row), y (row per column) and the dual
    vector v from the sparse cost matrix (cc, ii, kk), recording rows
    that remain unassigned in free_rows.

    Returns the number of free rows written to free_rows.
    """
    # log = logging.getLogger('do_column_reduction_and_reduction_transfer')
    x[:] = -1
    y[:] = -1
    v[:] = LARGE
    # Column reduction: v[j] becomes the column minimum, y[j] its row.
    for i in range(n):
        ks = slice(ii[i], ii[i+1])
        js = kk[ks]
        ccs = cc[ks]
        mask = ccs < v[js]
        js = js[mask]
        v[js] = ccs[mask]
        y[js] = i
    # log.debug('v = %s', v)
    # for j in range(cost.shape[1]):
    unique = np.empty((n,), dtype=bool)
    unique[:] = True
    # Rows that minimise several columns keep only one of them; the
    # others are released and the row is marked non-unique.
    for j in range(n-1, -1, -1):
        i = y[j]
        # If row is not taken yet, initialize it with the minimum stored in y.
        if x[i] < 0:
            x[i] = j
        else:
            unique[i] = False
            y[j] = -1
        # log.debug('bw %s %s %s %s', i, j, x, y)
    # log.debug('unique %s', unique)
    n_free_rows = 0
    for i in range(n):
        # Store unassigned row i.
        if x[i] < 0:
            free_rows[n_free_rows] = i
            n_free_rows += 1
        elif unique[i] and ii[i+1] - ii[i] > 1:
            # >1 check prevents choking on rows with a single entry
            # Transfer from an assigned row.
            j = x[i]
            # Find the current 2nd minimum of the reduced column costs:
            # (cost[i,j] - v[j]) for some j.
            ks = slice(ii[i], ii[i+1])
            js = kk[ks]
            minv = np.min(cc[ks][js != j] - v[js][js != j])
            # log.debug("v[%d] = %f - %f", j, v[j], minv)
            v[j] -= minv
    # log.debug('free: %s', free_rows[:n_free_rows])
    # log.debug('%s %s', x, v)
    return n_free_rows
def find_minima(indices, values):
    """Return (j1, v1, j2, v2): index/value of the smallest and
    second-smallest entries of `values` (indexed via `indices`).

    Falls back to (0, LARGE) / (-1, LARGE) when fewer than one / two
    entries are available.
    """
    if len(indices) > 0:
        best_j, best_v = indices[0], values[0]
    else:
        best_j, best_v = 0, LARGE
    second_j, second_v = -1, LARGE
    for j, h in zip(indices[1:], values[1:]):
        if h >= second_v:
            continue
        if h < best_v:
            # New overall minimum: previous best becomes second-best.
            second_j, second_v = best_j, best_v
            best_j, best_v = j, h
        else:
            second_j, second_v = j, h
    return best_j, best_v, second_j, second_v
def _pyarr(n, cc, ii, kk, n_free_rows, free_rows, x, y, v):
    """Augmenting row reduction (pure-Python reference).

    Tries to assign each free row to its best column, possibly bumping
    a previously assigned row back onto the free list. Returns the
    number of rows still free, written to the front of free_rows.
    """
    # log = logging.getLogger('do_augmenting_row_reduction')
    # log.debug('%s %s %s', x, y, v)
    current = 0
    # log.debug('free: %s', free_rows[:n_free_rows])
    new_free_rows = 0
    while current < n_free_rows:
        free_i = free_rows[current]
        # log.debug('current = %d', current)
        current += 1
        ks = slice(ii[free_i], ii[free_i+1])
        js = kk[ks]
        # j1/v1: best reduced cost for this row; j2/v2: second best.
        j1, v1, j2, v2 = find_minima(js, cc[ks] - v[js])
        i0 = y[j1]
        v1_new = v[j1] - (v2 - v1)
        v1_lowers = v1_new < v[j1]
        # log.debug(
        #     '%d %d 1=%s,%f 2=%s,%f %f %s',
        #     free_i, i0, j1, v1, j2, v2, v1_new, v1_lowers)
        if v1_lowers:
            # Lower the price of j1 and take it.
            v[j1] = v1_new
        elif i0 >= 0 and j2 != -1:  # i0 is assigned, try j2
            j1 = j2
            i0 = y[j2]
        x[free_i] = j1
        y[j1] = free_i
        if i0 >= 0:
            if v1_lowers:
                # Bumped row continues the augmenting chain immediately.
                current -= 1
                # log.debug('continue augmenting path from current %s %s %s')
                free_rows[current] = i0
            else:
                # log.debug('stop the augmenting path and keep for later')
                free_rows[new_free_rows] = i0
                new_free_rows += 1
    # log.debug('free: %s', free_rows[:new_free_rows])
    return new_free_rows
def binary_search(data, key):
    """Return the index of `key` in sorted `data`, or None if absent."""
    pos = bisect_left(data, key)
    if pos == len(data) or data[pos] != key:
        return None
    return pos
def _find(hi, d, cols, y):
lo, hi = hi, hi + 1
minv = d[cols[lo]]
# XXX: anytime this happens to be NaN, i'm screwed...
# assert not np.isnan(minv)
for k in range(hi, len(cols)):
j = cols[k]
if d[j] <= minv:
# New minimum found, trash the new SCAN columns found so far.
if d[j] < minv:
hi = lo
minv = d[j]
cols[k], cols[hi] = cols[hi], j
hi += 1
return minv, hi, cols
def _scan(n, cc, ii, kk, minv, lo, hi, d, cols, pred, y, v):
    """Relax TODO columns from each SCAN column (pure-Python reference).

    Mirrors the C _scan_sparse_1: for every SCAN column j, the owner
    row i = y[j] is used to try to decrease d of the TODO columns.
    Returns (final_j, lo, hi, d, cols, pred); final_j >= 0 as soon as an
    unassigned column is reached at the minimum distance, else -1.
    """
    # log = logging.getLogger('_scan')
    # Scan all TODO columns.
    while lo != hi:
        j = cols[lo]
        lo += 1
        i = y[j]
        # log.debug('?%d kk[%d:%d]=%s', j, ii[i], ii[i+1], kk[ii[i]:ii[i+1]])
        kj = binary_search(kk[ii[i]:ii[i+1]], j)
        if kj is None:
            continue
        kj = ii[i] + kj
        # h: reduced cost of entry (i, j) relative to the current minimum.
        h = cc[kj] - v[j] - minv
        # log.debug('i=%d j=%d kj=%s h=%f', i, j, kj, h)
        for k in range(hi, n):
            j = cols[k]
            kj = binary_search(kk[ii[i]:ii[i+1]], j)
            if kj is None:
                continue
            kj = ii[i] + kj
            cred_ij = cc[kj] - v[j] - h
            if cred_ij < d[j]:
                d[j] = cred_ij
                pred[j] = i
                if cred_ij == minv:
                    if y[j] < 0:
                        # Unassigned column at minimum distance: done.
                        return j, None, None, d, cols, pred
                    # Promote column j from TODO to SCAN.
                    cols[k] = cols[hi]
                    cols[hi] = j
                    hi += 1
    return -1, lo, hi, d, cols, pred
def find_path(n, cc, ii, kk, start_i, y, v):
    """Shortest augmenting path from free row start_i (pure Python).

    Modified Dijkstra as in the JV paper. Returns (final_j, pred) where
    final_j is the unassigned column ending the path and pred maps each
    column to its predecessor row. Updates prices v of READY columns
    before returning.
    """
    # log = logging.getLogger('find_path')
    cols = np.arange(n, dtype=int)
    pred = np.empty((n,), dtype=int)
    pred[:] = start_i
    d = np.empty((n,), dtype=float)
    d[:] = LARGE
    # Seed distances with start_i's reduced costs.
    ks = slice(ii[start_i], ii[start_i+1])
    js = kk[ks]
    d[js] = cc[ks] - v[js]
    # log.debug('d = %s', d)
    minv = LARGE
    lo, hi = 0, 0
    n_ready = 0
    final_j = -1
    while final_j == -1:
        # No SCAN columns, find new ones.
        if lo == hi:
            # log.debug('%d..%d -> find', lo, hi)
            # log.debug('cols = %s', cols)
            n_ready = lo
            minv, hi, cols = _find(hi, d, cols, y)
            # log.debug('%d..%d -> check', lo, hi)
            # log.debug('cols = %s', cols)
            # log.debug('y = %s', y)
            for h in range(lo, hi):
                # If any of the new SCAN columns is unassigned, use it.
                if y[cols[h]] < 0:
                    final_j = cols[h]
        if final_j == -1:
            # log.debug('%d..%d -> scan', lo, hi)
            final_j, lo, hi, d, cols, pred = _scan(
                n, cc, ii, kk, minv, lo, hi, d, cols, pred, y, v)
            # log.debug('d = %s', d)
            # log.debug('cols = %s', cols)
            # log.debug('pred = %s', pred)
    # Update prices for READY columns.
    for k in range(n_ready):
        j0 = cols[k]
        v[j0] += d[j0] - minv
    assert final_j >= 0
    assert final_j < n
    return final_j, pred
def _pya(n, cc, ii, kk, n_free_rows, free_rows, x, y, v):
    """Augment every remaining free row (pure-Python reference).

    For each free row, finds a shortest augmenting path and flips the
    assignments along it, updating x and y in place.
    """
    # log = logging.getLogger('augment')
    for free_i in free_rows[:n_free_rows]:
        # log.debug('looking at free_i=%s', free_i)
        j, pred = find_path(n, cc, ii, kk, free_i, y, v)
        # Augment the path starting from column j and backtracking to free_i.
        i = -1
        while i != free_i:
            # log.debug('augment %s', j)
            # log.debug('pred = %s', pred)
            i = pred[j]
            assert i >= 0
            assert i < n
            # log.debug('y[%d]=%d -> %d', j, y[j], i)
            y[j] = i
            # Row i takes column j; continue with its previous column.
            j, x[i] = x[i], j
def check_cost(n, cc, ii, kk):
    """Validate a sparse cost description; raise ValueError on problems.

    Requires at least one row, at least one entry, and all costs to be
    non-negative and strictly below LARGE.
    """
    if n == 0:
        raise ValueError('Cost matrix has zero rows.')
    if len(kk) == 0:
        raise ValueError('Cost matrix has zero columns.')
    smallest, largest = cc.min(), cc.max()
    if smallest < 0:
        raise ValueError('Cost matrix values must be non-negative.')
    if largest >= LARGE:
        raise ValueError(
            'Cost matrix values must be less than %s' % LARGE)
def get_cost(n, cc, ii, kk, x0):
    """Total cost of assignment x0 (column per row).

    Returns np.inf when any (row, column) pair is not a finite entry
    of the sparse matrix.
    """
    total = 0
    for i, j in enumerate(x0):
        pos = binary_search(kk[ii[i]:ii[i+1]], j)
        if pos is None:
            # Row i has no finite entry for column j.
            return np.inf
        total += cc[ii[i] + pos]
    return total
def lapmod(n, cc, ii, kk, fast=True, return_cost=True, fp_version=FP_DYNAMIC):
    """Solve sparse linear assignment problem using Jonker-Volgenant algorithm.

    n: number of rows of the assignment cost matrix
    cc: 1D array of all finite elements of the assignment cost matrix
    ii: 1D array of indices of the row starts in cc. The following must hold:
        ii[0] = 0 and ii[n+1] = len(cc).
    kk: 1D array of the column indices so that:
        cost[i, kk[ii[i] + k]] == cc[ii[i] + k].
        Indices within one row must be sorted.
    fast: when True, delegate to the compiled _lapmod solver; otherwise run
        the pure-Python column reduction / augmenting row reduction /
        augmentation path below [default: True]
    return_cost: whether or not to return the assignment cost
    fp_version: find-path variant forwarded to the compiled solver
        [default: FP_DYNAMIC]; not used when fast is False

    Returns (opt, x, y) where:
      opt: cost of the assignment
      x: vector of columns assigned to rows
      y: vector of rows assigned to columns
    or (x, y) if return_cost is not True.
    """
    # log = logging.getLogger('lapmod')
    check_cost(n, cc, ii, kk)
    if fast is True:
        # log.debug('[----CR & RT & ARR & augmentation ----]')
        x, y = _lapmod(n, cc, ii, kk, fp_version=fp_version)
    else:
        # Pure-Python fallback: normalize dtypes/layout, then run the three
        # classic JV phases, returning early whenever no free rows remain.
        cc = np.ascontiguousarray(cc, dtype=np.float64)
        ii = np.ascontiguousarray(ii, dtype=np.int32)
        kk = np.ascontiguousarray(kk, dtype=np.int32)
        x = np.empty((n,), dtype=np.int32)
        y = np.empty((n,), dtype=np.int32)
        v = np.empty((n,), dtype=np.float64)
        free_rows = np.empty((n,), dtype=np.int32)
        # log.debug('[----Column reduction & reduction transfer----]')
        n_free_rows = _pycrrt(n, cc, ii, kk, free_rows, x, y, v)
        # log.debug(
        #     'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)
        if n_free_rows == 0:
            # log.info('Reduction solved it.')
            if return_cost is True:
                return get_cost(n, cc, ii, kk, x), x, y
            else:
                return x, y
        for it in range(2):
            # log.debug('[---Augmenting row reduction (iteration: %d)---]', it)
            n_free_rows = _pyarr(
                n, cc, ii, kk, n_free_rows, free_rows, x, y, v)
            # log.debug(
            #     'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)
            if n_free_rows == 0:
                # log.info('Augmenting row reduction solved it.')
                if return_cost is True:
                    return get_cost(n, cc, ii, kk, x), x, y
                else:
                    return x, y
        # log.info('[----Augmentation----]')
        _pya(n, cc, ii, kk, n_free_rows, free_rows, x, y, v)
        # log.debug('x, y, v: %s %s %s', x, y, v)
    if return_cost is True:
        return get_cost(n, cc, ii, kk, x), x, y
    else:
        return x, y
python-lap-0.5.12/lap/tests/ 0000775 0000000 0000000 00000000000 15051100704 0015601 5 ustar 00root root 0000000 0000000 python-lap-0.5.12/lap/tests/__init__.py 0000664 0000000 0000000 00000000000 15051100704 0017700 0 ustar 00root root 0000000 0000000 python-lap-0.5.12/lap/tests/cost_eps.csv.gz 0000664 0000000 0000000 00005314216 15051100704 0020570 0 ustar 00root root 0000000 0000000 {Utest_cost_eps.csv T[4;n]K
!RdYv{D ֿ{.%LZ=џc[^k/~{odUw*vmramS~h,=ug>]Cݠ;?R(GptK8kEWr];+,k֣zO9G}itx2wo'Ԫnj=4~hЙX#z'Zu!}rԼ+VәSo)!S{6;?ï5dy?g8{k SǴ*P\myKNV$elksZS;ƣ/O)ݤO~Iݻcۢ/*4L1Qws+j%slB[nxՙXsIM~{W[J/|v69zuuu?hBߺ_WYS ӛfMFQ\M1^}WY(E[{jzw?!"6}[\EZ]]s9턇nVNau=\S허e 4u3c3tm3x0lEEn_ݬ3]='J{WoV_T{KGbNEr
q(U-/DuEb^i=Z丝AI;߹6[c-JZSȮ
چ.JmԽ#h-KWK2߾Z|VNE"
()ҕMN+b%^a2Ρg>G"ݾ:~*ԱoEk|R wZrCwTapo}z~I29ڛzDH&*\Zݽ,f:\F'؊
~cZ}>ZfUR^#Y{A~H;țQdIb-pX#Vrh%Z`%~-xOT5$,\EFZAE_+i}C{:d&$=%Ddz|%=ƅ3:Iu#̈_7~]mEgZp%GK:tGNmu,:k{++}ou1+T#}8,Jq{2y