==== breezy-3.2.1+bzr7585/.coveragerc ====

[run]
branch = True
source = breezy

[report]
exclude_lines =
    raise NotImplementedError

==== breezy-3.2.1+bzr7585/.github/ (directory) ====

==== breezy-3.2.1+bzr7585/.gitignore ====

__pycache__
*.pyc
build/
*_pyx.so
*_pyx.c
*_pyx.h
*_pyx_api.h
*_pyx.cpython-*.so
*_c.cpython-*.so
*_c.so
*_pyx.cpython-*.c
*~

==== breezy-3.2.1+bzr7585/.mailmap ====

Jelmer Vernooij
Jelmer Vernooij
Jelmer Vernooij
INADA Naoki
Martin Packman

==== breezy-3.2.1+bzr7585/.rsyncexclude ====

*.pyc
*.pyo
*~
# arch can bite me
{arch}
.arch-ids
,,*
++*
/doc/*.html
*.tmp
bzr-test.log
[#]*#
.#*
testrev.*
/tmp
# do want this after all
+ CHANGELOG
/build
test*.tmp
.*.swp
*.orig
.*.orig
.bzr-shelf*

==== breezy-3.2.1+bzr7585/.testr.conf ====

[DEFAULT]
test_command=BRZ_PLUGIN_PATH=-site:-user python3 ./brz selftest --subunit2 $IDOPTION $LISTOPT
test_id_option=--load-list $IDFILE
test_list_option=--list

==== breezy-3.2.1+bzr7585/BRANCH.TODO ====

# This file is for listing TODOs for branches that are being worked on.
# It should ALWAYS be empty in the mainline or in integration branches.
#
#

==== breezy-3.2.1+bzr7585/CODE_OF_CONDUCT.md ====

# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at core@breezy-vcs.org. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq

==== breezy-3.2.1+bzr7585/COPYING.txt ====

GNU GENERAL PUBLIC LICENSE
Version 2, June 1991

Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

Preamble

The licenses for most software are designed to take away your freedom to share and change it.
By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. 
You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License.

==== breezy-3.2.1+bzr7585/INSTALL ====

Breezy install instructions
***************************

Dependencies
------------

Breezy requires Python 3.5 or newer. It also requires the `configobj` and `patiencediff` Python modules to be installed. These can be installed either from your operating system's package manager, with pip, or by downloading them from:

  configobj: https://github.com/DiffSK/configobj
  patiencediff: https://github.com/breezy-team/patiencediff

Optional dependencies
~~~~~~~~~~~~~~~~~~~~~

If you wish to access branches over sftp, you will need paramiko and pycrypto:

  http://www.lag.net/paramiko/

To PGP-sign commits and verify PGP signatures on commits, install python-gpgme.

For Git support, install Dulwich:

  https://www.dulwich.io/

For fastimport support, install python-fastimport:

  https://github.com/jelmer/python-fastimport

brz can optionally use compiled versions of some parts of the code for increased speed. When installing brz you need the ability to build C extensions. Some GNU/Linux distributions package the necessary headers separately from the main Python package; this package is probably named something like python-dev or python-devel. FreeBSD, Windows, source-based GNU/Linux distributions, and possibly other operating systems, have the required files installed by default.

If you are installing brz from a brz branch rather than a release tarball, then you should also have the Cython package installed. This is not necessary for release tarballs, as they already contain the C files that Cython is needed to create.

  http://www.cython.org/

Installation
------------

When upgrading using setup.py, it is recommended that you first delete the bzrlib directory from the install target.

To install brz as a user, run

  python setup.py install --home ~

To install system-wide, run (as root)

  python setup.py install

For more information on installation, see the Bazaar installation FAQ (which also applies to Breezy), write to bazaar@lists.canonical.com mentioning that you use Breezy, or ask a question through the project's web support interface.
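As a convenience, the optional dependencies above are also declared as setuptools extras in setup.py (``cext``, ``fastimport``, ``git``, ``launchpad``, ``workspace`` and ``doc``), so pip can pull a selection of them in alongside Breezy itself. A sketch, assuming a recent pip; this mirrors the ``pip3 install -e '.[...]'`` pattern the project's own byov.conf CI configuration uses:

  pip3 install -e '.[fastimport,launchpad,workspace,git,cext,doc]'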
==== breezy-3.2.1+bzr7585/MANIFEST.in ====

include brz README.rst setup.py COPYING.txt
# FIXME: Not needed, remove after 2.7.0 -- vila 2016-02-07
include BRANCH.TODO INSTALL Makefile MANIFEST.in NEWS profile_imports.py README_BDIST_RPM .rsyncexclude .testr.conf TODO
include tools/brz_epydoc tools/packaging/lp-upload-release tools/subunit-sum
include breezy/plugins/news_merge/README breezy/plugins/po_merge/README
include breezy/tests/ssl_certs/ca.key breezy/tests/ssl_certs/server.csr
# bzr export still creates some empty dirs that need to be removed
# breezy/plugins/weave_fmt/tests/ breezy/store/revision/ doc/ja/_templates/ man1/
man1
recursive-include tools/win32 *
recursive-include breezy *.py *.pyx *.pxd *.txt *.c *.h
recursive-include tools *.py *.sh
recursive-include apport *
recursive-include contrib *
recursive-include doc *
recursive-include po *

==== breezy-3.2.1+bzr7585/Makefile ====

# Copyright (C) 2005-2012, 2016, 2017 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

# A relatively simple Makefile to assist in building parts of brz. Mostly for
# building documentation, etc.

### Core Stuff ###

SHELL=bash
PYTHON?=python3
PYTHON3?=python3
BRZ_TARGET=release
PLUGIN_TARGET=plugin-release
PYTHON_BUILDFLAGS=
BRZ_PLUGIN_PATH=-site:-user

# Shorter replacement for $(sort $(wildcard )) as $(call sw,)
sw = $(sort $(wildcard $(1)))

.PHONY: all clean realclean extensions flake8 api-docs check-nodocs check

all: extensions

extensions:
	@echo "building extension modules."
	$(PYTHON) setup.py build_ext -i $(PYTHON_BUILDFLAGS)

check: docs check-nodocs

check-nodocs: check-nodocs2 check-nodocs3

check-nodocs3:
	# Generate a stream for PQM to watch.
	-$(RM) -f selftest.log
	echo `date` ": selftest starts" 1>&2
	set -o pipefail; BRZ_PLUGIN_PATH=$(BRZ_PLUGIN_PATH) $(PYTHON3) -Werror -Wignore::ImportWarning -Wignore::PendingDeprecationWarning -Wignore::DeprecationWarning -O \
	./brz selftest -Oselftest.timeout=120 --strict \
	--subunit2 $(tests) | tee selftest.log | subunit-2to1
	echo `date` ": selftest ends" 1>&2
	# An empty log file should catch errors in the $(PYTHON3)
	# command above (the '|' swallows any errors since 'make'
	# sees the 'tee' exit code for the whole line).
	if [ ! -s selftest.log ] ; then exit 1 ; fi
	# Check that there were no errors reported.
	subunit-stats < selftest.log

check-nodocs2: extensions
	# Generate a stream for PQM to watch.
	-$(RM) -f selftest.log
	echo `date` ": selftest starts" 1>&2
	set -o pipefail; BRZ_PLUGIN_PATH=$(BRZ_PLUGIN_PATH) $(PYTHON) -Werror -Wignore::ImportWarning -Wignore::DeprecationWarning -O \
	./brz selftest -Oselftest.timeout=120 \
	--subunit2 $(tests) | tee selftest.log | subunit-2to1
	echo `date` ": selftest ends" 1>&2
	# An empty log file should catch errors in the $(PYTHON)
	# command above (the '|' swallows any errors since 'make'
	# sees the 'tee' exit code for the whole line).
	if [ ! -s selftest.log ] ; then exit 1 ; fi
	# Check that there were no errors reported.
	subunit-stats < selftest.log

check-ci: docs extensions
	# FIXME: Remove -Wignore::FutureWarning once
	# https://github.com/paramiko/paramiko/issues/713 is not a concern
	# anymore -- vila 2017-05-24
	set -o pipefail; \
	BRZ_PLUGIN_PATH=$(BRZ_PLUGIN_PATH) $(PYTHON3) -Werror -Wignore::FutureWarning -Wignore::DeprecationWarning -Wignore::PendingDeprecationWarning -Wignore::ImportWarning -Wignore::ResourceWarning -O \
	./brz selftest -v --parallel=fork -Oselftest.timeout=120 --subunit2 \
	| subunit-filter -s --passthrough --rename "^" "python3."

# Run Python style checker (apt-get install flake8)
#
# Note that at present this gives many false warnings, because it doesn't
# know about identifiers loaded through lazy_import.
flake8:
	flake8

clean:
	$(PYTHON) setup.py clean
	-find . -name "*.pyc" -o -name "*.pyo" -o -name "*.so" | xargs rm -f

realclean: clean
	# Remove files which are autogenerated but included by the tarball.
	rm -f breezy/*_pyx.c breezy/bzr/*_pyx.c
	rm -f breezy/_simple_set_pyx.h breezy/_simple_set_pyx_api.h

# build tags for emacs and vim
TAGS:
	ctags -R -e breezy

tags:
	ctags -R breezy

# these are treated as phony so they'll always be rebuilt - it's pretty quick
.PHONY: TAGS tags

### Documentation ###

docs: docs-sphinx

clean-docs: clean-sphinx

html-docs: html-sphinx

### Man-page Documentation ###

MAN_DEPENDENCIES = breezy/builtins.py \
	$(call sw,breezy/*.py) \
	$(call sw,breezy/*/*.py) \
	tools/generate_docs.py \
	$(call sw,$(addsuffix /*.txt, breezy/help_topics/en))

MAN_PAGES = man1/brz.1

man1/brz.1: $(MAN_DEPENDENCIES)
	mkdir -p $(dir $@)
	$(PYTHON) tools/generate_docs.py -o $@ man

### Sphinx-style Documentation ###

# Build the documentation. To keep the dependencies down to a minimum
# for distro packagers, we only build the html documentation by default.
# Sphinx 0.6 or later is preferred for the best rendering, though
# Sphinx 0.4 or later should work. See http://sphinx.pocoo.org/index.html
# for installation instructions.
docs-sphinx: html-sphinx

# Clean out generated documentation
clean-sphinx:
	$(MAKE) -C doc/en clean
	$(MAKE) -C doc/developers clean

SPHINX_DEPENDENCIES = \
	doc/en/release-notes/index.txt \
	doc/en/user-reference/index.txt \
	doc/developers/Makefile \
	doc/developers/make.bat

NEWS_FILES = $(call sw,doc/en/release-notes/brz-*.txt)

doc/en/user-reference/index.txt: $(MAN_DEPENDENCIES)
	LANGUAGE=C $(PYTHON) tools/generate_docs.py -o $@ rstx

doc/en/release-notes/index.txt: $(NEWS_FILES) tools/generate_release_notes.py
	$(PYTHON) tools/generate_release_notes.py $@ $(NEWS_FILES)

doc/%/Makefile: doc/en/Makefile
	$(PYTHON) -c "import shutil; shutil.copyfile('$<', '$@')"

doc/%/make.bat: doc/en/make.bat
	$(PYTHON) -c "import shutil; shutil.copyfile('$<', '$@')"

# Build the html docs using Sphinx.
html-sphinx: $(SPHINX_DEPENDENCIES)
	$(MAKE) -C doc/en html
	$(MAKE) -C doc/developers api html

# Build the PDF docs using Sphinx. This requires numerous LaTeX
# packages. See http://sphinx.pocoo.org/builders.html for details.
# Note: We don't currently build PDFs for the Russian docs because
# they require additional packages to be installed (to handle
# Russian hyphenation rules, etc.)
pdf-sphinx: $(SPHINX_DEPENDENCIES)
	$(MAKE) -C doc/en latex
	$(MAKE) -C doc/developers latex
	$(MAKE) -C doc/en/_build/latex all-pdf
	$(MAKE) -C doc/developers/_build/latex all-pdf

# Build the CHM (Windows Help) docs using Sphinx.
# Note: HtmlHelp Workshop needs to be used on the generated hhp files
# to generate the final chm files.
chm-sphinx: $(SPHINX_DEPENDENCIES)
	$(MAKE) -C doc/en htmlhelp
	$(MAKE) -C doc/developers htmlhelp

# Build the texinfo files using Sphinx.
texinfo-sphinx: $(SPHINX_DEPENDENCIES)
	$(MAKE) -C doc/en texinfo
	$(MAKE) -C doc/developers texinfo

### Documentation Website ###

# Where to build the website
DOC_WEBSITE_BUILD = build_doc_website

# Build and package docs into a website, complete with downloads.
doc-website: html-sphinx pdf-sphinx
	$(PYTHON) tools/package_docs.py doc/en $(DOC_WEBSITE_BUILD)
	$(PYTHON) tools/package_docs.py doc/developers $(DOC_WEBSITE_BUILD)

### Miscellaneous Documentation Targets ###

# build a png of our performance task list
# this is no longer built by default; you can build it if you want to look at it
doc/developers/performance.png: doc/developers/performance.dot
	@echo Generating $@
	@dot -Tpng $< -o$@ || echo "Dot not installed; skipping generation of $@"

### Windows Support ###

# make all the installers completely from scratch, using zc.buildout
# to fetch the dependencies

# These are files that need to be copied into the build location to bootstrap
# the build process.
# Note that the path is relative to tools/win32
BUILDOUT_FILES = buildout.cfg \
	buildout-templates/bin/build-installer.bat.in \
	ostools.py bootstrap.py

installer-all:
	@echo Make all the installers from scratch
	@# Build everything in a separate directory, to avoid cluttering the WT
	$(PYTHON) tools/win32/ostools.py makedir build-win32
	@# cd to tools/win32 so that the relative paths are copied correctly
	cd tools/win32 && $(PYTHON) ostools.py copytree $(BUILDOUT_FILES) ../../build-win32
	@# There seems to be a bug in gf.release.brz. It doesn't correctly update
	@# existing release directories, so delete them manually before building.
	@# It means things may be rebuilt that don't need to be, but at least
	@# it will be correct when they do.
	cd build-win32 && $(PYTHON) ostools.py remove release */release
	cd build-win32 && $(PYTHON) bootstrap.py
	cd build-win32 && bin/buildout
	cd build-win32 && bin/build-installer.bat $(BRZ_TARGET) $(PLUGIN_TARGET)

clean-installer-all:
	$(PYTHON) tools/win32/ostools.py remove build-win32

# make brz.exe for win32 with py2exe
exe:
	@echo *** Make brz.exe
	$(PYTHON) tools/win32/ostools.py remove breezy/*.pyd
	$(PYTHON) setup.py build_ext -i -f $(PYTHON_BUILDFLAGS)
	$(PYTHON) setup.py py2exe > py2exe.log
	$(PYTHON) tools/win32/ostools.py copytodir tools/win32/start_brz.bat win32_brz.exe
	$(PYTHON) tools/win32/ostools.py copytodir tools/win32/breezy.url win32_brz.exe

# win32 installer for brz.exe
installer: exe copy-docs
	@echo *** Make Windows installer
	$(PYTHON) tools/win32/run_script.py cog.py -d -o tools/win32/brz.iss tools/win32/brz.iss.cog
	iscc /Q tools/win32/brz.iss

py-inst-37: docs
	$(PYTHON37) setup.py bdist_wininst --install-script="brz-win32-bdist-postinstall.py" -d .
python-installer: py-inst-37

copy-docs: docs
	$(PYTHON) tools/win32/ostools.py copytodir README win32_brz.exe/doc
	$(PYTHON) tools/win32/ostools.py copydir doc/en/_build/html win32_brz.exe/doc
	$(PYTHON) tools/win32/ostools.py copydir doc/developers/_build/html win32_brz.exe/doc/developers

# clean on win32 all installer-related files and directories
clean-win32: clean-docs
	$(PYTHON) tools/win32/ostools.py remove build
	$(PYTHON) tools/win32/ostools.py remove win32_brz.exe
	$(PYTHON) tools/win32/ostools.py remove py2exe.log
	$(PYTHON) tools/win32/ostools.py remove tools/win32/brz.iss
	$(PYTHON) tools/win32/ostools.py remove brz-setup*.exe
	$(PYTHON) tools/win32/ostools.py remove brz-*win32.exe
	$(PYTHON) tools/win32/ostools.py remove dist

# i18n targets

.PHONY: update-pot po/brz.pot
update-pot: po/brz.pot

TRANSLATABLE_PYFILES:=$(shell find breezy -name '*.py' \
	| grep -v 'breezy/tests/' \
	| grep -v 'breezy/doc' \
	)

po/brz.pot: $(PYFILES) $(DOCFILES)
	$(PYTHON) ./brz export-pot --include-duplicates > po/brz.pot
	echo $(TRANSLATABLE_PYFILES) | xargs \
	xgettext --package-name "brz" \
	--msgid-bugs-address "" \
	--copyright-holder "Canonical" \
	--from-code ISO-8859-1 --join --sort-by-file --add-comments=i18n: \
	-d bzr -p po -o brz.pot

### Packaging Targets ###

.PHONY: dist check-dist-tarball

# build a distribution source tarball
dist:
	version=`./brz version --short` && \
	echo Building distribution of brz $$version && \
	expbasedir=`mktemp -t -d tmp_brz_dist.XXXXXXXXXX` && \
	expdir=$$expbasedir/brz-$$version && \
	tarball=$$PWD/../breezy-$$version.tar.gz && \
	$(MAKE) clean && \
	$(MAKE) && \
	$(PYTHON) setup.py sdist -d $$PWD/.. && \
	gpg --detach-sign --armor $$tarball && \
	rm -rf $$expbasedir

# run all tests in a previously built tarball
check-dist-tarball:
	tmpdir=`mktemp -t -d tmp_brz_check_dist.XXXXXXXXXX` && \
	version=`./brz version --short` && \
	tarball=$$PWD/../breezy-$$version.tar.gz && \
	tar Cxz $$tmpdir -f $$tarball && \
	$(MAKE) -C $$tmpdir/breezy-$$version check && \
	rm -rf $$tmpdir
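The targets above cover the routine developer workflow end to end. A typical local sequence looks like this (a sketch; it assumes GNU make and that the subunit and Sphinx tools the recipes call are installed):

	make extensions        # build the C/Cython extensions in place
	make check-nodocs3     # run the Python 3 selftest through subunit
	make docs              # build the HTML documentation with Sphinx
	make dist              # roll and GPG-sign a source tarball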
==== breezy-3.2.1+bzr7585/NEWS ====

The NEWS file has been moved and split into multiple files (one per release series). The NEWS files are now found in doc/en/release-notes/.

==== breezy-3.2.1+bzr7585/README.rst ====

Breezy (``brz``) is a decentralized revision control system, designed to be easy for developers and end users alike.

By default, Breezy provides support for both the Bazaar and Git file formats.

You can install from source by following the instructions in the INSTALL file. At the time of writing there are no binary packages available.

To learn how to use Breezy, see the official documentation in the `doc` directory, or refer to the Bazaar documentation.

Breezy is Free Software, and is released under the GNU General Public License, version 2 or later.

Breezy is a friendly fork of the Bazaar (``bzr``) project, hosted on http://bazaar.canonical.com/. It is backwards compatible with Bazaar's disk format and protocols. One of the key differences with Bazaar is that Breezy runs on Python 3.5 and later, rather than on Python 2.

Breezy highlights
=================

Breezy directly supports both central version control (like cvs/svn) and distributed version control (like git/hg). Developers can organize their workspace in whichever way they want on a per-project basis, including:

* checkouts (like svn)
* feature branches (like hg)
* shared working trees (like git)

It also directly supports and encourages a large number of development best practices, like refactoring and pre-commit regression testing. Users can choose between our command line tool and our cross-platform GUI application. For further details, see our website.

Feedback
========

If you encounter any problems with Breezy, need help understanding it, or would like to offer suggestions or feedback, please get in touch with us:

* Ask a question through our web support interface, at https://answers.launchpad.net/brz/

* Report bugs at https://bugs.launchpad.net/brz/+filebug

* Write to the mailing list at bazaar@lists.canonical.com. You don't need to subscribe to post, but your first post will be held briefly for manual moderation. Please mention that you are using Breezy rather than Bazaar.

* Talk to us in irc://irc.freenode.net/bzr. Please mention that you are using Breezy rather than Bazaar.

Our mission is to make a version control tool that developers LOVE to use and that casual contributors feel confident with. Please let us know how we're doing.

The Breezy Team

==== breezy-3.2.1+bzr7585/SECURITY.md ====

# Security Policy

## Supported Versions

| Version | Supported          |
| ------- | ------------------ |
| 3.1.x   | :white_check_mark: |
| 3.0.x   | :x:                |

## Reporting a Vulnerability

Please report security issues by e-mail to breezy-core@googlegroups.com.

==== breezy-3.2.1+bzr7585/TODO ====

For things to do in Breezy development, see https://bugs.launchpad.net/brz/

==== breezy-3.2.1+bzr7585/apport/ (directory) ====

==== breezy-3.2.1+bzr7585/breezy/ (directory) ====

==== breezy-3.2.1+bzr7585/brz ====

#! /usr/bin/env python3

# Copyright (C) 2005-2013, 2016, 2017 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Breezy -- a free distributed version-control tool"""

import os
import sys
import warnings

# update this on each release
_script_version = (3, 2, 1)

profiling = False
if '--profile-imports' in sys.argv:
    import profile_imports
    profile_imports.install()
    profiling = True

if os.name == "posix":
    import locale
    try:
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error as e:
        sys.stderr.write(
            'brz: warning: %s\n'
            '  bzr could not set the application locale.\n'
            '  Although this should be no problem for bzr itself, it might\n'
            '  cause problems with some plugins. To investigate the issue,\n'
            '  look at the output of the locale(1p) tool.\n' % e)
    # Use better default than ascii with posix filesystems that deal in bytes
    # natively even when the C locale or no locale at all is given. Note that
    # we need an immortal string for the hack, hence the lack of a hyphen.
    sys._brz_default_fs_enc = "utf8"

try:
    import breezy
except ImportError as e:
    sys.stderr.write(
        "brz: ERROR: "
        "Couldn't import breezy and dependencies.\n"
        "Please check the directory containing breezy is on your PYTHONPATH.\n"
        "\n")
    raise

if breezy.version_info[:3] != _script_version:
    sys.stderr.write(
        "brz: WARNING: breezy version doesn't match the brz program.\n"
        "This may indicate an installation problem.\n"
        "breezy is version %s from %s\n"
        "brz is version %s from %s\n" % (
            breezy._format_version_tuple(breezy.version_info),
            breezy.__path__[0],
            breezy._format_version_tuple(_script_version),
            __file__))

if __name__ == '__main__':
    from breezy.__main__ import main
    main()
else:
    raise ImportError("The brz script cannot be imported.")

==== breezy-3.2.1+bzr7585/build.cmd ====

@echo off
:: To build extensions for 64 bit Python 3, we need to configure environment
:: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of:
:: MS Windows SDK for Windows 7 and .NET Framework 4
::
:: More details at:
:: https://github.com/cython/cython/wiki/CythonExtensionsOnWindows

IF "%DISTUTILS_USE_SDK%"=="1" (
    ECHO Configuring environment to build with MSVC on a 64bit architecture
    ECHO Using Windows SDK 7.1
    "C:\Program Files\Microsoft SDKs\Windows\v7.1\Setup\WindowsSdkVer.exe" -q -version:v7.1
    CALL "C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin\SetEnv.cmd" /x64 /release
    SET MSSdk=1
    REM Need the following to allow tox to see the SDK compiler
    SET TOX_TESTENV_PASSENV=DISTUTILS_USE_SDK MSSdk INCLUDE LIB
) ELSE (
    ECHO Using default MSVC build environment
)

CALL %*

==== breezy-3.2.1+bzr7585/byov.conf ====

vm.class = lxd
# Start with an up to date system by default
vm.update = True
# External sources dependencies, packages are not recent enough
subunit.clone = (git clone https://github.com/testing-cabal/subunit.git ../subunit)
sphinx_epytext.install = (pip3 install sphinx_epytext)
flake8.install3 = (pip3 install flake8)
brz.extras = fastimport,launchpad,workspace,git,cext,doc

[brz]
# because paramiko 2.0.0 is broken:
# breezy.tests.test_transport.TestSSHConnections.test_bzr_connect_to_bzr_ssh
# runs into a deprecation issue which is only fixed in 2.3.0 which is
# available nowhere in ubuntu/debian /o\
# https://github.com/paramiko/paramiko/issues/1030
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=882322 -- vila 2018-05-16
# FIXME: Arguably this should be vm.build_deps=brz but it requires either an
# available package or at least a debian/ dir ? -- vila 2018-02-23
brz.build_deps = gcc, debhelper, python3, python3-all-dev, python3-configobj, python3-docutils, python3-paramiko, python3-subunit, python3-testtools, subunit, python3-pip, python3-setuptools, python3-flake8, python3-sphinx, python3-launchpadlib, python3-pyinotify
subunit.build_deps = python3-testscenarios, python3-testtools, cython, cython3, quilt
vm.packages = {brz.build_deps}, {subunit.build_deps}, bzr, git, python-junitxml

[brz-xenial]
vm.release = bionic
byoci.setup.command = (pip3 install --upgrade pip && pip3 install --upgrade cython sphinx paramiko && {subunit.clone} && {flake8.install3} && PATH="$HOME/.local/bin:$PATH" python3 setup.py develop --user && pip3 install -e '.[{brz.extras}]')
# FIXME: bzr log -l2 should be by default -- vila 2018-03-09
byoci.tests.command = bash -o pipefail -c "bzr log -l2 && PYTHONPATH=tools:$PYTHONPATH python3 -m flake8 && PYTHONPATH=../subunit/python:$PYTHONPATH PATH=../subunit/filters:$HOME/.local/bin:$PATH make check-ci | subunit2junitxml -o ../results.xml -f | subunit2pyunit"

[brz-cosmic]
vm.release = cosmic

[brz-disco]
vm.release = disco
vm.packages = {brz.build_deps}, {subunit.build_deps}, bzr, git, python3-dulwich, python-junitxml
byoci.setup.command = ({subunit.clone} && {sphinx_epytext.install} )
byoci.tests.command = bash -o pipefail -c "bzr log -l2 && PYTHONPATH=tools:$PYTHONPATH python3 -m flake8 && (PYTHONPATH=../subunit/python:$PYTHONPATH PATH=../subunit/filters:$HOME/.local/bin:$PATH make check-ci | subunit2junitxml -o ../results.xml -f | subunit2pyunit)"

[brz-eoan]
vm.release = eoan
lxd.image = ubuntu-daily:{vm.release}/{vm.architecture}

[brz-buster]
vm.distribution = debian
vm.release = buster
vm.packages = {brz.build_deps}, {subunit.build_deps}, bzr, git, python3-dulwich, python-junitxml
byoci.setup.command = ({subunit.clone} && {sphinx_epytext.install} )
byoci.tests.command = bash -o pipefail -c "bzr log -l2 && PYTHONPATH=tools:$PYTHONPATH python3 -m flake8 && (PYTHONPATH=../subunit/python:$PYTHONPATH PATH=../subunit/filters:$HOME/.local/bin:$PATH make check-ci | subunit2junitxml -o ../results.xml -f | subunit2pyunit)"

[brz-sid]
vm.distribution = debian
vm.release = sid

==== breezy-3.2.1+bzr7585/contrib/ (directory) ====

==== breezy-3.2.1+bzr7585/doc/ (directory) ====

==== breezy-3.2.1+bzr7585/man1/ (directory) ====

==== breezy-3.2.1+bzr7585/po/ (directory) ====

==== breezy-3.2.1+bzr7585/profile_imports.py ====

# Copyright (C) 2006, 2008, 2009, 2010 by Canonical Ltd
# Written by John Arbash Meinel
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""A custom importer and regex compiler which logs time spent."""

import re
import sys
import time


_parent_stack = []
_total_stack = {}
_info = {}
_cur_id = 0
_timer = time.time
if sys.platform == 'win32':
    _timer = time.clock


def stack_add(name, frame_name, frame_lineno, scope_name=None):
    """Start a new record on the stack"""
    global _cur_id
    _cur_id += 1
    this_stack = (_cur_id, name)

    if _parent_stack:
        _total_stack[_parent_stack[-1]].append(this_stack)
    _total_stack[this_stack] = []
    _parent_stack.append(this_stack)
    _info[this_stack] = [len(_parent_stack) - 1, frame_name, frame_lineno,
                         scope_name]

    return this_stack


def stack_finish(this, cost):
    """Finish a given entry, and record its cost in time"""
    global _parent_stack

    assert _parent_stack[-1] == this, \
        'import stack does not end with this %s: %s' % (this, _parent_stack)
    _parent_stack.pop()
    _info[this].append(cost)


def log_stack_info(out_file, sorted=True, hide_fast=True):
    # Find all of the roots with import = 0
    out_file.write(
        '%5s %5s %-40s @ %s:%s\n'
        % ('cum', 'local', 'name', 'file', 'line'))
    todo = [(value[-1], key) for key, value in _info.items() if value[0] == 0]

    if sorted:
        todo.sort()

    while todo:
        cum_time, cur = todo.pop()
        children = _total_stack[cur]

        c_times = []

        info = _info[cur]
        if hide_fast and info[-1] < 0.0001:
            continue

        # Compute the module time by removing the children times
        mod_time = info[-1]
        for child in children:
            c_info = _info[child]
            mod_time -= c_info[-1]
            c_times.append((c_info[-1], child))

        # indent, cum_time, mod_time, name,
        # scope_name, frame_name, frame_lineno
        out_file.write(
            '%5.1f %5.1f %-40s @ %s:%d\n'
            % (info[-1] * 1000., mod_time * 1000.,
               ('+' * info[0] + cur[1]), info[1], info[2]))

        if sorted:
            c_times.sort()
        else:
            c_times.reverse()
        todo.extend(c_times)


_real_import = __import__


def timed_import(name, globals=None, locals=None, fromlist=None, level=0):
    """Wrap around standard importer to log import time"""
    # normally there are 4, but if this is called as __import__ eg by
    # /usr/lib/python2.6/email/__init__.py then there may be only one
    # parameter
    # level has different default between Python 2 and 3, but codebase
    if globals is None:
        # can't determine the scope name afaics; we could peek up the stack to
        # see where this is being called from, but it should be a rare case.
        scope_name = None
    else:
        scope_name = globals.get('__name__', None)
        if scope_name is None:
            scope_name = globals.get('__file__', None)
        if scope_name is None:
            scope_name = globals.keys()
        else:
            # Trim out paths before breezy
            loc = scope_name.find('breezy')
            if loc != -1:
                scope_name = scope_name[loc:]

    # Figure out the frame that is doing the importing
    frame = sys._getframe(1)
    frame_name = frame.f_globals.get('__name__', '')
    extra = ''
    if frame_name.endswith('demandload'):
        # If this was demandloaded, we have 3 frames to ignore
        extra = '(demandload) '
        frame = sys._getframe(4)
        frame_name = frame.f_globals.get('__name__', '')
    elif frame_name.endswith('lazy_import'):
        # If this was lazily imported, we have 3 frames to ignore
        extra = '[l] '
        frame = sys._getframe(4)
        frame_name = frame.f_globals.get('__name__', '')
    if fromlist:
        extra += ' [%s]' % (', '.join(map(str, fromlist)),)
    frame_lineno = frame.f_lineno

    this = stack_add(extra + name, frame_name, frame_lineno, scope_name)

    tstart = _timer()
    try:
        # Do the import
        return _real_import(name, globals, locals, fromlist, level=level)
    finally:
        tload = _timer() - tstart
        stack_finish(this, tload)


def _repr_regexp(pattern, max_len=30):
    """Present regexp pattern for logging, truncating if over max_len."""
    if len(pattern) > max_len:
        return repr(pattern[:max_len - 3]) + "..."
    return repr(pattern)


_real_compile = re._compile


def timed_compile(*args, **kwargs):
    """Log how long it takes to compile a regex"""

    # And who is requesting this?
    frame = sys._getframe(2)
    frame_name = frame.f_globals.get('__name__', '')

    extra = ''
    if frame_name.endswith('lazy_regex'):
        # If this was lazily compiled, we have 3 more frames to ignore
        extra = '[l] '
        frame = sys._getframe(5)
        frame_name = frame.f_globals.get('__name__', '')
    frame_lineno = frame.f_lineno
    this = stack_add(extra + _repr_regexp(args[0]), frame_name, frame_lineno)

    tstart = _timer()
    try:
        # Measure the compile time
        comp = _real_compile(*args, **kwargs)
    finally:
        tcompile = _timer() - tstart
        stack_finish(this, tcompile)

    return comp


def install():
    """Install the hooks for measuring import and regex compile time."""
    __builtins__['__import__'] = timed_import
    re._compile = timed_compile


def uninstall():
    """Remove the import and regex compile timing hooks."""
    __builtins__['__import__'] = _real_import
    re._compile = _real_compile
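The brz launcher script earlier in this tree activates this module when invoked with ``--profile-imports``. Outside that entry point, a minimal standalone driver looks like the following sketch (assuming profile_imports.py is importable from the working directory; only imports made after install() are recorded):

    # Sketch: measure what importing breezy costs, then report to stderr.
    import sys
    import profile_imports

    profile_imports.install()      # swap in the timed __import__/re._compile hooks
    import breezy                  # imports and regex compiles are now being timed
    profile_imports.uninstall()    # restore the real hooks before reporting
    profile_imports.log_stack_info(sys.stderr)   # cum/local ms per import, roots first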
==== breezy-3.2.1+bzr7585/setup.cfg ====

[flake8]
# Ignore E402 ("module level import not at top of file"),
# because even with the lazy import plugin it still triggers
# for lazy_import statements before other imports.
ignore = D
    I
    E123
    E261
    E265
    E266
    E301
    E302
    E303
    E305
    E306
    E401
    E402
    E501
    E502
    E702
    E704
    E722
    E731
    E741
    F401
    F402
    F403
    F405
    F811
    F812
    F821
    F841
    W503
    W504
    W605
filename = *.py,brz

[flake8:local-plugins]
extension =
    MC1 = flake8_lazy_import:LazyImport
paths = ./tools/

==== breezy-3.2.1+bzr7585/setup.py ====

#! /usr/bin/env python3

"""Installation script for brz.
Run it with
 './setup.py install', or
 './setup.py --help' for more options
"""

import os
import os.path
import sys
import copy
import glob

try:
    import setuptools
except ImportError as e:
    sys.stderr.write("[ERROR] Please install setuptools (%s)\n" % e)
    sys.exit(1)


# NOTE: The directory containing setup.py, whether run by 'python setup.py' or
# './setup.py' or the equivalent with another path, should always be at the
# start of the path, so this should find the right one...
import breezy


def get_long_description():
    dirname = os.path.dirname(__file__)
    readme = os.path.join(dirname, 'README.rst')
    with open(readme, 'r') as f:
        return f.read()


##
# META INFORMATION FOR SETUP
# see http://docs.python.org/dist/meta-data.html
META_INFO = {
    'name': 'breezy',
    'version': breezy.__version__,
    'maintainer': 'Breezy Developers',
    'maintainer_email': 'team@breezy-vcs.org',
    'url': 'https://www.breezy-vcs.org/',
    'description': 'Friendly distributed version control system',
    'license': 'GNU GPL v2',
    'download_url': 'https://launchpad.net/brz/+download',
    'long_description': get_long_description(),
    'classifiers': [
        'Development Status :: 6 - Mature',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: OS Independent',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: C',
        'Topic :: Software Development :: Version Control',
    ],
    'install_requires': [
        'configobj',
        'fastbencode',
        'patiencediff',
        # Technically, Breezy works without these two dependencies too. But there's
        # no way to enable them by default and let users opt out.
        'dulwich>=0.20.23',
    ],
    'extras_require': {
        'cext': ['cython>=0.29'],
        'fastimport': ['fastimport<0.9.8;python_version<"3.0"',
                       'fastimport;python_version>="3.5"'],
        'git': ['dulwich>=0.20.23'],
        'launchpad': ['launchpadlib>=1.6.3'],
        'workspace': ['pyinotify'],
        'doc': ['setuptools<45;python_version<"3.0"',
                'sphinx==1.8.5;python_version<"3.0"',
                'sphinx_epytext'],
    },
    'tests_require': [
        'testtools',
        'testtools<=2.4.0;python_version<"3.0"',
        'python-subunit',
        'dulwich>=0.20.29',
    ],
    'python_requires': '>=3.5',
}

# The list of packages is automatically generated later. Add other things
# that are part of BREEZY here.
BREEZY = {}

PKG_DATA = {
    # install files from selftest suite
    'package_data': {'breezy': ['doc/api/*.txt',
                                'tests/test_patches_data/*',
                                'help_topics/en/*.txt',
                                'tests/ssl_certs/ca.crt',
                                'tests/ssl_certs/server_without_pass.key',
                                'tests/ssl_certs/server_with_pass.key',
                                'tests/ssl_certs/server.crt',
                                ]},
}

I18N_FILES = []
for filepath in glob.glob("breezy/locale/*/LC_MESSAGES/*.mo"):
    langfile = filepath[len("breezy/locale/"):]
    targetpath = os.path.dirname(os.path.join("share/locale", langfile))
    I18N_FILES.append((targetpath, [filepath]))


def get_breezy_packages():
    """Recurse through the breezy directory, and extract the package names"""
    packages = []
    base_path = os.path.dirname(os.path.abspath(breezy.__file__))
    for root, dirs, files in os.walk(base_path):
        if '__init__.py' in files:
            assert root.startswith(base_path)
            # Get just the path below breezy
            package_path = root[len(base_path):]
            # Remove leading and trailing slashes
            package_path = package_path.strip('\\/')
            if not package_path:
                package_name = 'breezy'
            else:
                package_name = (
                    'breezy.'
                    + package_path.replace('/', '.').replace('\\', '.'))
+ package_path.replace('/', '.').replace('\\', '.')) packages.append(package_name) return sorted(packages) BREEZY['packages'] = get_breezy_packages() from setuptools import setup from distutils.version import LooseVersion from distutils.command.install_scripts import install_scripts from distutils.command.install_data import install_data from distutils.command.build import build ############################### # Overridden distutils actions ############################### class my_install_scripts(install_scripts): """ Customized install_scripts distutils action. Create brz.bat for win32. """ def run(self): install_scripts.run(self) # standard action if sys.platform == "win32": try: scripts_dir = os.path.join(sys.prefix, 'Scripts') script_path = self._quoted_path(os.path.join(scripts_dir, "brz")) python_exe = self._quoted_path(sys.executable) batch_str = "@%s %s %%*" % (python_exe, script_path) batch_path = os.path.join(self.install_dir, "brz.bat") with open(batch_path, "w") as f: f.write(batch_str) print(("Created: %s" % batch_path)) except Exception: e = sys.exc_info()[1] print(("ERROR: Unable to create %s: %s" % (batch_path, e))) def _quoted_path(self, path): if ' ' in path: return '"' + path + '"' else: return path #/class my_install_scripts class bzr_build(build): """Customized build distutils action. Generate brz.1. """ sub_commands = build.sub_commands + [ ('build_mo', lambda _: True), ] def run(self): build.run(self) from tools import generate_docs generate_docs.main(argv=["brz", "man"]) ######################## ## Setup ######################## from breezy.bzr_distutils import build_mo command_classes = {'install_scripts': my_install_scripts, 'build': bzr_build, 'build_mo': build_mo, } from distutils import log from distutils.errors import CCompilerError, DistutilsPlatformError from distutils.extension import Extension ext_modules = [] try: from Cython.Distutils import build_ext from Cython.Compiler.Version import version as cython_version except ImportError: have_cython = False # try to build the extension from the prior generated source. print("") print("The python package 'Cython' is not available." " If the .c files are available,") print("they will be built," " but modifying the .pyx files will not rebuild them.") print("") from distutils.command.build_ext import build_ext else: minimum_cython_version = '0.29' cython_version_info = LooseVersion(cython_version) if cython_version_info < LooseVersion(minimum_cython_version): print("Version of Cython is too old. " "Current is %s, need at least %s." 
% (cython_version, minimum_cython_version)) print("If the .c files are available, they will be built," " but modifying the .pyx files will not rebuild them.") have_cython = False else: have_cython = True class build_ext_if_possible(build_ext): user_options = build_ext.user_options + [ ('allow-python-fallback', None, "When an extension cannot be built, allow falling" " back to the pure-python implementation.") ] def initialize_options(self): build_ext.initialize_options(self) self.allow_python_fallback = False def run(self): try: build_ext.run(self) except DistutilsPlatformError: e = sys.exc_info()[1] if not self.allow_python_fallback: log.warn('\n Cannot build extensions.\n' ' Use "build_ext --allow-python-fallback" to use' ' slower python implementations instead.\n') raise log.warn(str(e)) log.warn('\n Extensions cannot be built.\n' ' Using the slower Python implementations instead.\n') def build_extension(self, ext): try: build_ext.build_extension(self, ext) except CCompilerError: if not self.allow_python_fallback: log.warn('\n Cannot build extension "%s".\n' ' Use "build_ext --allow-python-fallback" to use' ' slower python implementations instead.\n' % (ext.name,)) raise log.warn('\n Building of "%s" extension failed.\n' ' Using the slower Python implementation instead.' % (ext.name,)) # Override the default build_ext with our fallback-aware version command_classes['build_ext'] = build_ext_if_possible unavailable_files = [] def add_cython_extension(module_name, libraries=None, extra_source=[]): """Add a cython module to build. This will use Cython to auto-generate the .c file if Cython is available; otherwise it falls back on the pre-generated .c file. If the .c file is not available either, it will warn and not add anything. Extra options can be passed to Extension through the 'libraries' and 'extra_source' parameters; one example is libraries=['Ws2_32']. :param module_name: The python path to the module. This will be used to determine the .pyx and .c files to use. """ path = module_name.replace('.', '/') cython_name = path + '.pyx' c_name = path + '.c' define_macros = [] if sys.platform == 'win32': # cython uses the macro WIN32 to detect the platform, even though it # should be using something like _WIN32 or MS_WINDOWS; oh well, we can # give it the right value.
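# (Illustrative note, not in the original source: with ('WIN32', None) in # define_macros, distutils emits a bare macro definition on the compiler # command line, roughly '-DWIN32' for gcc/mingw or '/DWIN32' for MSVC.)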
define_macros.append(('WIN32', None)) if have_cython: source = [cython_name] else: if not os.path.isfile(c_name): unavailable_files.append(c_name) return else: source = [c_name] source.extend(extra_source) include_dirs = ['breezy'] ext_modules.append( Extension( module_name, source, define_macros=define_macros, libraries=libraries, include_dirs=include_dirs)) add_cython_extension('breezy._simple_set_pyx') ext_modules.append(Extension('breezy._static_tuple_c', ['breezy/_static_tuple_c.c'])) add_cython_extension('breezy._annotator_pyx') add_cython_extension('breezy._chunks_to_lines_pyx') add_cython_extension('breezy.bzr._groupcompress_pyx', extra_source=['breezy/bzr/diff-delta.c']) add_cython_extension('breezy.bzr._knit_load_data_pyx') add_cython_extension('breezy._known_graph_pyx') add_cython_extension('breezy._rio_pyx') if sys.platform == 'win32': add_cython_extension('breezy.bzr._dirstate_helpers_pyx', libraries=['Ws2_32']) add_cython_extension('breezy._walkdirs_win32') else: add_cython_extension('breezy.bzr._dirstate_helpers_pyx') add_cython_extension('breezy._readdir_pyx') add_cython_extension('breezy.bzr._chk_map_pyx') add_cython_extension('breezy.bzr._btree_serializer_pyx') if unavailable_files: print('C extension(s) not found:') print((' %s' % ('\n '.join(unavailable_files),))) print('The python versions will be used instead.') print("") def get_tbzr_py2exe_info(includes, excludes, packages, console_targets, gui_targets, data_files): packages.append('tbzrcommands') # ModuleFinder can't handle runtime changes to __path__, but # win32com uses them. Hook this in so win32com.shell is found. import modulefinder import win32com import cPickle as pickle for p in win32com.__path__[1:]: modulefinder.AddPackagePath("win32com", p) for extra in ["win32com.shell"]: __import__(extra) m = sys.modules[extra] for p in m.__path__[1:]: modulefinder.AddPackagePath(extra, p) # TBZR points to the TBZR directory tbzr_root = os.environ["TBZR"] # Ensure tbreezy itself is on sys.path sys.path.append(tbzr_root) packages.append("tbreezy") # collect up our icons. cwd = os.getcwd() ico_root = os.path.join(tbzr_root, 'tbreezy', 'resources') icos = [] # list of (path_root, relative_ico_path) # First always brz's icon, and it's in the root of the brz tree. # FIXME: There's no such thing as brz.ico #icos.append(('', 'brz.ico')) for root, dirs, files in os.walk(ico_root): icos.extend([(ico_root, os.path.join(root, f)[len(ico_root) + 1:]) for f in files if f.endswith('.ico')]) # allocate an icon ID for each file and the full path to the ico icon_resources = [(rid, os.path.join(ico_dir, ico_name)) for rid, (ico_dir, ico_name) in enumerate(icos)] # create a string resource with the mapping. Might as well save the # runtime some effort and write a pickle. # Runtime expects unicode objects with forward-slash seps. fse = sys.getfilesystemencoding() map_items = [(f.replace('\\', '/').decode(fse), rid) for rid, (_, f) in enumerate(icos)] ico_map = dict(map_items) # Create a new resource type of 'ICON_MAP', and use ID=1 other_resources = [("ICON_MAP", 1, pickle.dumps(ico_map))] excludes.extend("""pywin pywin.dialogs pywin.dialogs.list win32ui crawler.Crawler""".split()) # tbzrcache executables - a "console" version for debugging and a # GUI version that is generally used. tbzrcache = dict( script=os.path.join(tbzr_root, "scripts", "tbzrcache.py"), icon_resources=icon_resources, other_resources=other_resources, ) console_targets.append(tbzrcache) # Make a windows version which is the same except for the base name.
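# (Descriptive note added here, relying on py2exe's documented 'dest_base' # convention: dest_base names the output executable, so the copied target # below is emitted as tbzrcachew.exe alongside the console tbzrcache.exe.)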
tbzrcachew = tbzrcache.copy() tbzrcachew["dest_base"] = "tbzrcachew" gui_targets.append(tbzrcachew) # ditto for the tbzrcommand tool tbzrcommand = dict( script=os.path.join(tbzr_root, "scripts", "tbzrcommand.py"), icon_resources=icon_resources, other_resources=other_resources, ) console_targets.append(tbzrcommand) tbzrcommandw = tbzrcommand.copy() tbzrcommandw["dest_base"] = "tbzrcommandw" gui_targets.append(tbzrcommandw) # A utility to see python output from both C++ and Python based shell # extensions tracer = dict(script=os.path.join(tbzr_root, "scripts", "tbzrtrace.py")) console_targets.append(tracer) # The C++ implemented shell extensions. dist_dir = os.path.join(tbzr_root, "shellext", "build") data_files.append(('', [os.path.join(dist_dir, 'tbzrshellext_x86.dll')])) data_files.append(('', [os.path.join(dist_dir, 'tbzrshellext_x64.dll')])) def get_qbzr_py2exe_info(includes, excludes, packages, data_files): # PyQt4 itself still escapes the plugin detection code for some reason... includes.append('PyQt4.QtCore') includes.append('PyQt4.QtGui') includes.append('PyQt4.QtTest') includes.append('sip') # extension module required for Qt. packages.append('pygments') # colorizer for qbzr packages.append('docutils') # html formatting includes.append('win32event') # for qsubprocess stuff # the qt binaries might not be on PATH... # They seem to install to a place like C:\Python25\PyQt4\* # Which is not the same as C:\Python25\Lib\site-packages\PyQt4 pyqt_dir = os.path.join(sys.prefix, "PyQt4") pyqt_bin_dir = os.path.join(pyqt_dir, "bin") if os.path.isdir(pyqt_bin_dir): path = os.environ.get("PATH", "") if pyqt_bin_dir.lower() not in [p.lower() for p in path.split(os.pathsep)]: os.environ["PATH"] = path + os.pathsep + pyqt_bin_dir # also add all imageformat plugins to distribution # We will look in 2 places, dirname(PyQt4.__file__) and pyqt_dir base_dirs_to_check = [] if os.path.isdir(pyqt_dir): base_dirs_to_check.append(pyqt_dir) try: import PyQt4 except ImportError: pass else: pyqt4_base_dir = os.path.dirname(PyQt4.__file__) if pyqt4_base_dir != pyqt_dir: base_dirs_to_check.append(pyqt4_base_dir) if not base_dirs_to_check: log.warn("Can't find PyQt4 installation -> not including imageformat" " plugins") else: files = [] for base_dir in base_dirs_to_check: plug_dir = os.path.join(base_dir, 'plugins', 'imageformats') if os.path.isdir(plug_dir): for fname in os.listdir(plug_dir): # Include plugin dlls, but not debugging dlls fullpath = os.path.join(plug_dir, fname) if fname.endswith('.dll') and not fname.endswith('d4.dll'): files.append(fullpath) if files: data_files.append(('imageformats', files)) else: log.warn('PyQt4 was found, but we could not find any imageformat' ' plugins. Are you sure your configuration is correct?') def get_svn_py2exe_info(includes, excludes, packages): packages.append('subvertpy') packages.append('sqlite3') def get_git_py2exe_info(includes, excludes, packages): packages.append('dulwich') def get_fastimport_py2exe_info(includes, excludes, packages): # This is the python-fastimport package, not to be confused with the # brz-fastimport plugin. 
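# (Clarifying note, an addition: the package bundled below is the one reached # via 'import fastimport'; the plugin itself lives under # breezy.plugins.fastimport and is shipped as plain files, not through this # packages list.)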
packages.append('fastimport') if 'bdist_wininst' in sys.argv: def find_docs(): docs = [] for root, dirs, files in os.walk('doc'): r = [] for f in files: if (os.path.splitext(f)[1] in ('.html', '.css', '.png', '.pdf') or f == 'quick-start-summary.svg'): r.append(os.path.join(root, f)) if r: relative = root[4:] if relative: target = os.path.join('Doc\\Breezy', relative) else: target = 'Doc\\Breezy' docs.append((target, r)) return docs # python's distutils-based win32 installer ARGS = {'scripts': ['brz', 'tools/win32/brz-win32-bdist-postinstall.py'], 'ext_modules': ext_modules, # help pages 'data_files': find_docs(), # for building cython extensions 'cmdclass': command_classes, } ARGS.update(META_INFO) ARGS.update(BREEZY) PKG_DATA['package_data']['breezy'].append('locale/*/LC_MESSAGES/*.mo') ARGS.update(PKG_DATA) setup(**ARGS) elif 'py2exe' in sys.argv: # py2exe setup from py2exe import distutils_buildexe as py2exe # pick real brz version import breezy version_number = [] for i in breezy.version_info[:4]: try: i = int(i) except ValueError: i = 0 version_number.append(str(i)) version_str = '.'.join(version_number) # An override to install_data used only by py2exe builds, which arranges # to byte-compile any .py files in data_files (eg, our plugins) # Necessary as we can't rely on the user having the relevant permissions # to the "Program Files" directory to generate them on the fly. class install_data_with_bytecompile(install_data): def run(self): from distutils.util import byte_compile install_data.run(self) py2exe = self.distribution.get_command_obj('py2exe', False) # GZ 2010-04-19: Setup has py2exe.optimize as 2, but give plugins # time before living with docstring stripping optimize = 1 compile_names = [f for f in self.outfiles if f.endswith('.py')] # Round mtime to nearest even second so that installing on a FAT # filesystem bytecode internal and script timestamps will match for f in compile_names: mtime = os.stat(f).st_mtime remainder = mtime % 2 if remainder: mtime -= remainder os.utime(f, (mtime, mtime)) byte_compile(compile_names, optimize=optimize, force=self.force, prefix=self.install_dir, dry_run=self.dry_run) self.outfiles.extend([f + 'o' for f in compile_names]) # end of class install_data_with_bytecompile target = py2exe.runtime.Target( script="brz", dest_base="brz", # FIXME: There's no such thing as brz.ico #icon_resources=[(0, 'brz.ico')], name=META_INFO['name'], version=version_str, description=META_INFO['description'], maintainer=META_INFO['maintainer'], copyright=( "Copyright 2005-2012 Canonical Ltd.\n" "Copyright 2017-2021 Breezy developers"), company_name="Canonical Ltd.", comments=META_INFO['description'], ) gui_target = copy.copy(target) gui_target.dest_base = "bzrw" packages = BREEZY['packages'] packages.remove('breezy') packages = [i for i in packages if not i.startswith('breezy.plugins')] includes = [] for i in glob.glob('breezy\\*.py'): module = i[:-3].replace('\\', '.') if module.endswith('__init__'): module = module[:-len('__init__')] includes.append(module) additional_packages = set() # Although we currently can't enforce it, we consider it an error for # py2exe to report any files are "missing". Such modules we know aren't # used should be listed here. 
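# (Hypothetical example for illustration: if a py2exe run reported # "missing module: frobnicate" and we know brz never imports it at runtime, # 'frobnicate' would simply be added to the excludes string below.)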
excludes = """Tkinter psyco ElementPath r_hmac ImaginaryModule cElementTree elementtree.ElementTree Crypto.PublicKey._fastmath tools resource validate""".split() dll_excludes = [] # The email package from the standard library uses lazy imports, # so we need to explicitly add the whole package additional_packages.add('email') # And it uses funky mappings to convert 'Oldname' to 'newname'. As # a result, packages like 'email.Parser' show as missing. Tell py2exe # to exclude them. import email for oldname in getattr(email, '_LOWERNAMES', []): excludes.append("email." + oldname) for oldname in getattr(email, '_MIMENAMES', []): excludes.append("email.MIME" + oldname) # text files for help topics text_topics = glob.glob('breezy/help_topics/en/*.txt') topics_files = [('lib/help_topics/en', text_topics)] # built-in plugins plugins_files = [] # XXX - should we consider having the concept of an 'official' build, # which hard-codes the list of plugins, gets more upset if modules are # missing, etc? plugins = None # will be a set after plugin sniffing... for root, dirs, files in os.walk('breezy/plugins'): if root == 'breezy/plugins': plugins = set(dirs) # We ship plugins as normal files on the file-system - however, # the build process can cause *some* of these plugin files to end # up in library.zip. Thus, we saw (eg) "plugins/svn/test" in # library.zip, and then saw import errors related to that as the # rest of the svn plugin wasn't included. So we tell py2exe to leave the # plugins out of the .zip file excludes.extend(["breezy.plugins." + d for d in dirs]) # svn plugin requires subvertpy, # and pip cannot install it on Windows. # When subvertpy is not available, remove svn from plugins if "svn" in dirs: try: import subvertpy except ImportError: dirs.remove("svn") x = [] for i in files: # Throw away files we don't want packaged. Note that plugins may # have data files with all sorts of extensions so we need to # be conservative here about what we ditch. ext = os.path.splitext(i)[1] if ext.endswith('~') or ext in [".pyc", ".swp"]: continue if i == '__init__.py' and root == 'breezy/plugins': continue x.append(os.path.join(root, i)) if x: target_dir = root[len('breezy/'):] # install to 'plugins/...' plugins_files.append((target_dir, x)) # find modules required by built-in plugins import tools.package_mf mf = tools.package_mf.CustomModuleFinder('.') mf.load_package_recursive('breezy.plugins') (packs, mods) = mf.get_result() # Don't add the plugins packages and modules, # as they are listed in excludes additional_packages.update( pack for pack in packs if not (pack.startswith('breezy.plugins.') or pack in excludes)) includes.extend( mod for mod in mods if not (mod.startswith('breezy.plugins.') or mod in excludes)) console_targets = [target, 'tools/win32/brz_postinstall.py', ] gui_targets = [gui_target] data_files = topics_files + plugins_files + I18N_FILES if 'qbzr' in plugins: get_qbzr_py2exe_info(includes, excludes, packages, data_files) if 'svn' in plugins: get_svn_py2exe_info(includes, excludes, packages) if 'git' in plugins: get_git_py2exe_info(includes, excludes, packages) if 'fastimport' in plugins: get_fastimport_py2exe_info(includes, excludes, packages) if "TBZR" in os.environ: # TORTOISE_OVERLAYS_MSI_WIN32 must be set to the location of the # TortoiseOverlays MSI installer file.
It is in the TSVN svn repo and # can be downloaded from (username=guest, blank password): # http://tortoisesvn.tigris.org/svn/tortoisesvn/TortoiseOverlays # look for: version-1.0.4/bin/TortoiseOverlays-1.0.4.11886-win32.msi # Ditto for TORTOISE_OVERLAYS_MSI_X64, pointing at *-x64.msi. for needed in ('TORTOISE_OVERLAYS_MSI_WIN32', 'TORTOISE_OVERLAYS_MSI_X64'): url = ('http://guest:@tortoisesvn.tigris.org/svn/tortoisesvn' '/TortoiseOverlays') if not os.path.isfile(os.environ.get(needed, '')): raise RuntimeError( "\nPlease set %s to the location of the relevant" "\nTortoiseOverlays .msi installer file." " The installers can be found at" "\n %s" "\ncheck in the version-X.Y.Z/bin/ subdir" % (needed, url)) get_tbzr_py2exe_info(includes, excludes, packages, console_targets, gui_targets, data_files) else: # print this warning to stderr as output is redirected, so it is seen # at build time. Also to stdout so it appears in the log for f in (sys.stderr, sys.stdout): f.write("Skipping TBZR binaries - " "please set TBZR to a directory to enable\n") # MSWSOCK.dll is a system-specific library, which py2exe accidentally pulls # in on Vista. dll_excludes.extend(["MSWSOCK.dll", "MSVCP60.dll", "MSVCP90.dll", "powrprof.dll", "SHFOLDER.dll"]) options_list = {"py2exe": {"packages": packages + list(additional_packages), "includes": includes, "excludes": excludes, "dll_excludes": dll_excludes, "dist_dir": "win32_brz.exe", "optimize": 2, "custom_boot_script": "tools/win32/py2exe_boot_common.py", }, } # We want the library.zip to have optimize = 2, but the exe to have # optimize = 1, so that .py files that get compiled at run time # (e.g. user-installed plugins) don't have their doc strings removed. class py2exe_no_oo_exe(py2exe.py2exe): def run(self, *args, **kwargs): self.optimize = 1 super(py2exe_no_oo_exe, self).run(*args, **kwargs) self.optimize = 2 if __name__ == '__main__': command_classes['install_data'] = install_data_with_bytecompile command_classes['py2exe'] = py2exe_no_oo_exe setup(options=options_list, console=console_targets, windows=gui_targets, zipfile='lib/library.zip', data_files=data_files, cmdclass=command_classes, ) else: # ad-hoc for easy_install DATA_FILES = [] if 'bdist_egg' not in sys.argv: # generate and install brz.1 only with plain install, not the # easy_install one DATA_FILES = [('man/man1', ['brz.1', 'breezy/git/git-remote-bzr.1'])] DATA_FILES = DATA_FILES + I18N_FILES # std setup ARGS = {'scripts': ['brz', # TODO(jelmer): Only install the git scripts if # Dulwich was found.
'breezy/git/git-remote-bzr', 'breezy/git/bzr-receive-pack', 'breezy/git/bzr-upload-pack'], 'data_files': DATA_FILES, 'cmdclass': command_classes, 'ext_modules': ext_modules, } ARGS.update(META_INFO) ARGS.update(BREEZY) ARGS.update(PKG_DATA) if __name__ == '__main__': setup(**ARGS) ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/tools/0000755000000000000000000000000000000000000012166 5ustar00././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/.github/workflows/0000755000000000000000000000000000000000000014423 5ustar00././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/.github/workflows/pythonpackage.yml0000644000000000000000000000324600000000000020010 0ustar00name: Python package on: [push, pull_request] jobs: build: continue-on-error: ${{ matrix.experimental }} runs-on: ${{ matrix.os }} strategy: matrix: os: [ubuntu-latest] python-version: [3.6, 3.7, 3.8] experimental: [false] # See https://github.com/actions/toolkit/issues/399 # include: # - os: ubuntu-latest # python-version: pypy3 # experimental: true fail-fast: false steps: - uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: Install dependencies (apt) run: | sudo apt install quilt if: "matrix.os == 'ubuntu-latest'" - name: Install dependencies run: | python -m pip install --upgrade pip pip install -U pip setuptools pip install -U pip coverage codecov fastbencode flake8 testtools paramiko fastimport configobj cython testscenarios six docutils $TEST_REQUIRE sphinx sphinx_epytext launchpadlib patiencediff pyinotify git+https://github.com/dulwich/dulwich - name: Build docs run: | make docs PYTHON=python - name: Build extensions run: | make extensions PYTHON=python if: "matrix.python-version != 'pypy3'" - name: Test suite run run: | python -Werror -Wignore::ImportWarning -Wignore::PendingDeprecationWarning -Wignore::DeprecationWarning -Wignore::ResourceWarning -Wignore::UserWarning ./brz selftest env: PYTHONHASHSEED: random BRZ_PLUGIN_PATH: -site:-user ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/apport/README0000644000000000000000000000061700000000000013217 0ustar00Bazaar supports semi-automatic bug reporting through Apport . If apport is not installed, an exception is printed to stderr in the usual way. For this to work properly it's suggested that two files be installed when a package of brz is installed: ``brz.conf`` into ``/etc/apport/crashdb.conf.d`` ``source_brz.py`` into ``/usr/share/apport/package-hooks`` ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/apport/brz-crashdb.conf0000644000000000000000000000025600000000000015406 0ustar00brz = { # most brz bugs are upstream bugs; file them there 'impl': 'launchpad', 'project': 'brz', 'bug_pattern_base': 'http://people.canonical.com/~pitti/bugpatterns', } ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/apport/source_brz.py0000644000000000000000000000263500000000000015070 0ustar00'''apport package hook for Breezy''' # Copyright (c) 2009, 2010 Canonical Ltd. 
# Author: Matt Zimmerman # and others from apport.hookutils import * import os brz_log = os.path.expanduser('~/.brz.log') dot_brz = os.path.expanduser('~/.config/breezy') def _add_log_tail(report): # may have already been added in-process if 'BrzLogTail' in report: return brz_log_lines = open(brz_log).readlines() brz_log_lines.reverse() brz_log_tail = [] blanks = 0 for line in brz_log_lines: if line == '\n': blanks += 1 brz_log_tail.append(line) if blanks >= 2: break brz_log_tail.reverse() report['BrzLogTail'] = ''.join(brz_log_tail) def add_info(report): _add_log_tail(report) if 'BrzPlugins' not in report: # may already be present in-process report['BrzPlugins'] = command_output(['brz', 'plugins', '-v']) # by default assume brz crashes are upstream bugs; this relies on # having a brz entry under /etc/apport/crashdb.conf.d/ report['CrashDB'] = 'brz' # these may contain some sensitive info (smtp_passwords) # TODO: strip that out and attach the rest #attach_file_if_exists(report, # os.path.join(dot_brz, 'breezy.conf'), 'BrzConfig') #attach_file_if_exists(report, # os.path.join(dot_brz, 'locations.conf'), 'BrzLocations') # vim: expandtab shiftwidth=4 ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/__init__.py0000644000000000000000000002114400000000000014441 0ustar00# Copyright (C) 2005-2013, 2016, 2017 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """All of bzr. Developer documentation is available at https://www.breezy-vcs.org/developers/. Some particularly interesting things in breezy are: * breezy.initialize -- setup the library for use * breezy.plugin.load_plugins -- load all installed plugins * breezy.branch.Branch.open -- open a branch * breezy.workingtree.WorkingTree.open -- open a working tree We hope you enjoy this library. """ import time # Keep track of when breezy was first imported, so that we can give rough # timestamps relative to program start in the log file kept by breezy.trace. _start_time = time.time() import codecs import sys __copyright__ = ( "Copyright 2005-2012 Canonical Ltd.\n" "Copyright 2017-2020 Breezy developers" ) # same format as sys.version_info: "A tuple containing the five components of # the version number: major, minor, micro, releaselevel, and serial. All # values except releaselevel are integers; the release level is 'alpha', # 'beta', 'candidate', or 'final'. The version_info value corresponding to the # Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a # releaselevel of 'dev' for unreleased under-development code. version_info = (3, 2, 1, 'final', 0) def _format_version_tuple(version_info): """Turn a version number 2, 3 or 5-tuple into a short string. This format matches <http://docs.python.org/dist/meta-data.html> and the typical presentation used in Python output.
This also checks that the version is reasonable: the sub-release must be zero for final releases. >>> print(_format_version_tuple((1, 0, 0, 'final', 0))) 1.0.0 >>> print(_format_version_tuple((1, 2, 0, 'dev', 0))) 1.2.0.dev >>> print(_format_version_tuple((1, 2, 0, 'dev', 1))) 1.2.0.dev1 >>> print(_format_version_tuple((1, 1, 1, 'candidate', 2))) 1.1.1.rc2 >>> print(_format_version_tuple((2, 1, 0, 'beta', 1))) 2.1.b1 >>> print(_format_version_tuple((1, 4, 0))) 1.4.0 >>> print(_format_version_tuple((1, 4))) 1.4 >>> print(_format_version_tuple((2, 1, 0, 'final', 42))) 2.1.0.42 >>> print(_format_version_tuple((1, 4, 0, 'wibble', 0))) 1.4.0.wibble.0 """ if len(version_info) == 2: main_version = '%d.%d' % version_info[:2] else: main_version = '%d.%d.%d' % version_info[:3] if len(version_info) <= 3: return main_version release_type = version_info[3] sub = version_info[4] if release_type == 'final' and sub == 0: sub_string = '' elif release_type == 'final': sub_string = '.' + str(sub) elif release_type == 'dev' and sub == 0: sub_string = '.dev' elif release_type == 'dev': sub_string = '.dev' + str(sub) elif release_type in ('alpha', 'beta'): if version_info[2] == 0: main_version = '%d.%d' % version_info[:2] sub_string = '.' + release_type[0] + str(sub) elif release_type == 'candidate': sub_string = '.rc' + str(sub) else: return '.'.join(map(str, version_info)) return main_version + sub_string __version__ = _format_version_tuple(version_info) version_string = __version__ def _patch_filesystem_default_encoding(new_enc): """Change the Python process global encoding for filesystem names. The effect is to change how open() and other builtin functions handle unicode filenames on posix systems. This should only be done near startup. The new encoding string passed to this function must survive until process termination, otherwise the interpreter may access uninitialized memory. The use of intern() may defer breakage but is not enough; the string object must remain valid across module reloading and during teardown. """ try: import ctypes pythonapi = getattr(ctypes, 'pythonapi', None) if pythonapi is not None: old_ptr = ctypes.c_void_p.in_dll(pythonapi, "Py_FileSystemDefaultEncoding") has_enc = ctypes.c_int.in_dll(pythonapi, "Py_HasFileSystemDefaultEncoding") as_utf8 = ctypes.PYFUNCTYPE( ctypes.POINTER(ctypes.c_char), ctypes.py_object)( ("PyUnicode_AsUTF8", pythonapi)) except (ImportError, ValueError): return # No ctypes or not CPython implementation, do nothing new_enc = sys.intern(new_enc) enc_ptr = as_utf8(new_enc) has_enc.value = 1 old_ptr.value = ctypes.cast(enc_ptr, ctypes.c_void_p).value if sys.getfilesystemencoding() != new_enc: raise RuntimeError("Failed to change the filesystem default encoding") return new_enc # When running under the brz script, override bad filesystem default encoding. # This is not safe to do for all users of breezy; other scripts should instead # just ensure a usable locale is set via the $LANG variable on posix systems. _fs_enc = sys.getfilesystemencoding() if getattr(sys, "_brz_default_fs_enc", None) is not None: if (_fs_enc is None or codecs.lookup(_fs_enc).name == "ascii"): _fs_enc = _patch_filesystem_default_encoding(sys._brz_default_fs_enc) if _fs_enc is None: _fs_enc = "ascii" else: _fs_enc = codecs.lookup(_fs_enc).name # brz has various bits of global state that are slowly being eliminated.
# This variable is intended to permit any new state-like things to be attached # to a library_state.BzrLibraryState object rather than getting new global # variables that need to be hunted down. Accessing the current BzrLibraryState # through this variable is not encouraged: it is better to pass it around as # part of the context of an operation than to look it up directly, but when # that is too hard, it is better to use this variable than to make a brand new # global variable. # If using this variable by looking it up (because it can't be easily obtained) # it is important to store the reference you get, rather than looking it up # repeatedly; that way your code will behave properly in the breezy test suite # and from programs that do use multiple library contexts. _global_state = None def initialize(setup_ui=True, stdin=None, stdout=None, stderr=None): """Set up everything needed for normal use of breezy. Most applications that embed breezy, including brz itself, should call this function to initialize various subsystems. More options may be added in the future, so callers should use named arguments. The object returned by this function can be used as a context manager through the 'with' statement to automatically shut down when the process is finished with breezy. However it's not necessary to separately enter the context as well as starting brz: breezy is ready to go when this function returns. :param setup_ui: If true (default) use a terminal UI; otherwise some other ui_factory must be assigned to `breezy.ui.ui_factory` by the caller. :param stdin, stdout, stderr: If provided, use these for terminal IO; otherwise use the files in `sys`. :return: A context manager for the use of breezy. The __exit__ should be called by the caller before exiting their process or otherwise stopping use of breezy. Advanced callers can use BzrLibraryState directly. """ from breezy import library_state, trace if setup_ui: import breezy.ui stdin = stdin or sys.stdin stdout = stdout or sys.stdout stderr = stderr or sys.stderr ui_factory = breezy.ui.make_ui_for_terminal(stdin, stdout, stderr) else: ui_factory = None tracer = trace.DefaultConfig() state = library_state.BzrLibraryState(ui=ui_factory, trace=tracer) # Start automatically in case people don't realize this returns a context. state._start() return state def get_global_state(): if _global_state is None: return initialize() return _global_state def test_suite(): import tests return tests.test_suite() ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/__main__.py0000644000000000000000000000535300000000000014426 0ustar00# Copyright (C) 2005-2013, 2016, 2017 Canonical Ltd # Copyright (C) 2018-2020 Breezy Developers # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details.
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA from __future__ import absolute_import """Breezy -- a free distributed version-control tool""" import os import sys import warnings profiling = False if '--profile-imports' in sys.argv: import profile_imports profile_imports.install() profiling = True if os.name == "posix": import locale try: locale.setlocale(locale.LC_ALL, '') except locale.Error as e: sys.stderr.write( 'brz: warning: %s\n' ' bzr could not set the application locale.\n' ' Although this should be no problem for bzr itself, it might\n' ' cause problems with some plugins. To investigate the issue,\n' ' look at the output of the locale(1p) tool.\n' % e) # Use better default than ascii with posix filesystems that deal in bytes # natively even when the C locale or no locale at all is given. Note that # we need an immortal string for the hack, hence the lack of a hyphen. sys._brz_default_fs_enc = "utf8" def main(): import breezy.breakin breezy.breakin.hook_debugger_to_signal() import breezy.commands import breezy.trace with breezy.initialize(): exit_val = breezy.commands.main() if profiling: profile_imports.log_stack_info(sys.stderr) # By this point we really have completed everything we want to do, and # there's no point doing any additional cleanup. Abruptly exiting here # stops any background threads getting into trouble as code is unloaded, # and it may also be slightly faster, through avoiding gc of objects that # are just about to be discarded anyhow. This does mean that atexit hooks # won't run but we don't use them. Also file buffers won't be flushed, # but our policy is to always close files from a finally block. -- mbp 20070215 exitfunc = getattr(sys, "exitfunc", None) if exitfunc is not None: exitfunc() os._exit(exit_val) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_annotator_py.py0000644000000000000000000003236000000000000015560 0ustar00# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Functionality for doing annotations in the 'optimal' way""" from .lazy_import import lazy_import lazy_import(globals(), """ import patiencediff from breezy import ( annotate, # Must be lazy to avoid circular importing graph as _mod_graph, ) """) from . 
import ( errors, osutils, ui, ) class Annotator(object): """Class that drives performing annotations.""" def __init__(self, vf): """Create a new Annotator from a VersionedFile.""" self._vf = vf self._parent_map = {} self._text_cache = {} # Map from key => number of texts that will be built from this key self._num_needed_children = {} self._annotations_cache = {} self._heads_provider = None self._ann_tuple_cache = {} def _update_needed_children(self, key, parent_keys): for parent_key in parent_keys: if parent_key in self._num_needed_children: self._num_needed_children[parent_key] += 1 else: self._num_needed_children[parent_key] = 1 def _get_needed_keys(self, key): """Determine the texts we need to get from the backing vf. :return: (vf_keys_needed, ann_keys_needed) vf_keys_needed These are keys that we need to get from the vf ann_keys_needed Texts which we have in self._text_cache but we don't have annotations for. We need to yield these in the proper order so that we can get proper annotations. """ parent_map = self._parent_map # We need 1 extra copy of the node we will be looking at when we are # done self._num_needed_children[key] = 1 vf_keys_needed = set() ann_keys_needed = set() needed_keys = {key} while needed_keys: parent_lookup = [] next_parent_map = {} for key in needed_keys: if key in self._parent_map: # We don't need to look up this key in the vf if key not in self._text_cache: # Extract this text from the vf vf_keys_needed.add(key) elif key not in self._annotations_cache: # We do need to annotate ann_keys_needed.add(key) next_parent_map[key] = self._parent_map[key] else: parent_lookup.append(key) vf_keys_needed.add(key) needed_keys = set() next_parent_map.update(self._vf.get_parent_map(parent_lookup)) for key, parent_keys in next_parent_map.items(): if parent_keys is None: # No graph versionedfile parent_keys = () next_parent_map[key] = () self._update_needed_children(key, parent_keys) needed_keys.update([key for key in parent_keys if key not in parent_map]) parent_map.update(next_parent_map) # _heads_provider does some graph caching, so it is only valid # while self._parent_map hasn't changed self._heads_provider = None return vf_keys_needed, ann_keys_needed def _get_needed_texts(self, key, pb=None): """Get the texts we need to properly annotate key. :param key: A Key that is present in self._vf :return: Yield (this_key, text, num_lines) 'text' is an opaque object that just has to work with whatever matcher object we are using. Currently it is always 'lines' but future improvements may change this to a simple text string. """ keys, ann_keys = self._get_needed_keys(key) if pb is not None: pb.update('getting stream', 0, len(keys)) stream = self._vf.get_record_stream(keys, 'topological', True) for idx, record in enumerate(stream): if pb is not None: pb.update('extracting', 0, len(keys)) if record.storage_kind == 'absent': raise errors.RevisionNotPresent(record.key, self._vf) this_key = record.key lines = record.get_bytes_as('lines') num_lines = len(lines) self._text_cache[this_key] = lines yield this_key, lines, num_lines for key in ann_keys: lines = self._text_cache[key] num_lines = len(lines) yield key, lines, num_lines def _get_parent_annotations_and_matches(self, key, text, parent_key): """Get the list of annotations for the parent, and the matching lines.
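(Worked example, added for illustration: with parent lines [b'a\n', b'b\n'] and child text [b'a\n', b'x\n', b'b\n'], patiencediff yields matching_blocks of [(0, 0, 1), (1, 2, 1), (2, 3, 0)]: (parent_idx, text_idx, length) triples ending with a zero-length terminator, as with difflib.)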
:param text: The opaque value given by _get_needed_texts :param parent_key: The key for the parent text :return: (parent_annotations, matching_blocks) parent_annotations is a list as long as the number of lines in parent matching_blocks is a list of (parent_idx, text_idx, len) tuples indicating which lines match between the two texts """ parent_lines = self._text_cache[parent_key] parent_annotations = self._annotations_cache[parent_key] # PatienceSequenceMatcher should probably be part of Policy matcher = patiencediff.PatienceSequenceMatcher( None, parent_lines, text) matching_blocks = matcher.get_matching_blocks() return parent_annotations, matching_blocks def _update_from_first_parent(self, key, annotations, lines, parent_key): """Reannotate this text relative to its first parent.""" (parent_annotations, matching_blocks) = self._get_parent_annotations_and_matches( key, lines, parent_key) for parent_idx, lines_idx, match_len in matching_blocks: # For all matching regions we copy across the parent annotations annotations[lines_idx:lines_idx + match_len] = \ parent_annotations[parent_idx:parent_idx + match_len] def _update_from_other_parents(self, key, annotations, lines, this_annotation, parent_key): """Reannotate this text relative to a second (or more) parent.""" (parent_annotations, matching_blocks) = self._get_parent_annotations_and_matches( key, lines, parent_key) last_ann = None last_parent = None last_res = None # TODO: consider making all annotations unique and then using 'is' # everywhere. Current results claim that isn't any faster, # because of the time spent deduping # deduping also saves a bit of memory. For NEWS it saves ~1MB, # but that is out of 200-300MB for extracting everything, so a # fairly trivial amount for parent_idx, lines_idx, match_len in matching_blocks: # For lines which match this parent, we will now resolve whether # this parent wins over the current annotation ann_sub = annotations[lines_idx:lines_idx + match_len] par_sub = parent_annotations[parent_idx:parent_idx + match_len] if ann_sub == par_sub: continue for idx in range(match_len): ann = ann_sub[idx] par_ann = par_sub[idx] ann_idx = lines_idx + idx if ann == par_ann: # Nothing to change continue if ann == this_annotation: # Originally claimed 'this', but it was really in this # parent annotations[ann_idx] = par_ann continue # Resolve the fact that both sides have a different value for # last modified if ann == last_ann and par_ann == last_parent: annotations[ann_idx] = last_res else: new_ann = set(ann) new_ann.update(par_ann) new_ann = tuple(sorted(new_ann)) annotations[ann_idx] = new_ann last_ann = ann last_parent = par_ann last_res = new_ann def _record_annotation(self, key, parent_keys, annotations): self._annotations_cache[key] = annotations for parent_key in parent_keys: num = self._num_needed_children[parent_key] num -= 1 if num == 0: del self._text_cache[parent_key] del self._annotations_cache[parent_key] # Do we want to clean up _num_needed_children at this point as # well? 
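# (Illustrative note, not in the original: if revision A is a parent of # both B and C, _num_needed_children[A] starts at 2; annotating B drops it # to 1, and annotating C drops it to 0, evicting A's cached text and # annotations above.)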
self._num_needed_children[parent_key] = num def _annotate_one(self, key, text, num_lines): this_annotation = (key,) # Note: annotations will be mutated by calls to _update_from* annotations = [this_annotation] * num_lines parent_keys = self._parent_map[key] if parent_keys: self._update_from_first_parent(key, annotations, text, parent_keys[0]) for parent in parent_keys[1:]: self._update_from_other_parents(key, annotations, text, this_annotation, parent) self._record_annotation(key, parent_keys, annotations) def add_special_text(self, key, parent_keys, text): """Add a specific text to the graph. This is used to add a text which is not otherwise present in the versioned file. (eg. a WorkingTree injecting 'current:' into the graph to annotate the edited content.) :param key: The key to use to request this text be annotated :param parent_keys: The parents of this text :param text: A string containing the content of the text """ self._parent_map[key] = parent_keys self._text_cache[key] = osutils.split_lines(text) self._heads_provider = None def annotate(self, key): """Return annotated fulltext for the given key. :param key: A tuple defining the text to annotate :return: ([annotations], [lines]) annotations is a list of tuples of keys, one for each line in lines each key is a possible source for the given line. lines the text of "key" as a list of lines """ with ui.ui_factory.nested_progress_bar() as pb: for text_key, text, num_lines in self._get_needed_texts( key, pb=pb): self._annotate_one(text_key, text, num_lines) try: annotations = self._annotations_cache[key] except KeyError: raise errors.RevisionNotPresent(key, self._vf) return annotations, self._text_cache[key] def _get_heads_provider(self): if self._heads_provider is None: self._heads_provider = _mod_graph.KnownGraph(self._parent_map) return self._heads_provider def _resolve_annotation_tie(self, the_heads, line, tiebreaker): if tiebreaker is None: head = sorted(the_heads)[0] else: # Backwards compatibility, break up the heads into pairs and # resolve the result next_head = iter(the_heads) head = next(next_head) for possible_head in next_head: annotated_lines = ((head, line), (possible_head, line)) head = tiebreaker(annotated_lines)[0] return head def annotate_flat(self, key): """Determine the single-best-revision to source for each line. This is meant as a compatibility thunk to how annotate() used to work. :return: [(ann_key, line)] A list of tuples with a single annotation key for each line. """ custom_tiebreaker = annotate._break_annotation_tie annotations, lines = self.annotate(key) out = [] heads = self._get_heads_provider().heads append = out.append for annotation, line in zip(annotations, lines): if len(annotation) == 1: head = annotation[0] else: the_heads = heads(annotation) if len(the_heads) == 1: for head in the_heads: break # get the item out of the set else: head = self._resolve_annotation_tie(the_heads, line, custom_tiebreaker) append((head, line)) return out ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_annotator_pyx.pyx0000644000000000000000000002752400000000000016146 0ustar00# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # cython: language_level=3 """Functionality for doing annotations in the 'optimal' way""" cdef extern from "python-compat.h": pass from cpython.dict cimport ( PyDict_GetItem, PyDict_SetItem, ) from cpython.list cimport ( PyList_Append, PyList_CheckExact, PyList_GET_ITEM, PyList_GET_SIZE, PyList_SetItem, PyList_Sort, ) from cpython.object cimport ( Py_EQ, Py_LT, PyObject, PyObject_RichCompareBool, ) from cpython.ref cimport ( Py_INCREF, ) from cpython.tuple cimport ( PyTuple_CheckExact, PyTuple_GET_ITEM, PyTuple_GET_SIZE, PyTuple_New, PyTuple_SET_ITEM, ) cdef extern from "Python.h": ctypedef struct PyListObject: PyObject **ob_item void PyTuple_SET_ITEM_ptr "PyTuple_SET_ITEM" (object, Py_ssize_t, PyObject *) void Py_INCREF_ptr "Py_INCREF" (PyObject *) void Py_DECREF_ptr "Py_DECREF" (PyObject *) int PyObject_RichCompareBool_ptr "PyObject_RichCompareBool" ( PyObject *, PyObject *, int opid) from . import _annotator_py cdef int _check_annotations_are_lists(annotations, parent_annotations) except -1: if not PyList_CheckExact(annotations): raise TypeError('annotations must be a list') if not PyList_CheckExact(parent_annotations): raise TypeError('parent_annotations must be a list') return 0 cdef int _check_match_ranges(parent_annotations, annotations, Py_ssize_t parent_idx, Py_ssize_t lines_idx, Py_ssize_t match_len) except -1: if parent_idx + match_len > PyList_GET_SIZE(parent_annotations): raise ValueError('Match length exceeds len of' ' parent_annotations %s > %s' % (parent_idx + match_len, PyList_GET_SIZE(parent_annotations))) if lines_idx + match_len > PyList_GET_SIZE(annotations): raise ValueError('Match length exceeds len of' ' annotations %s > %s' % (lines_idx + match_len, PyList_GET_SIZE(annotations))) return 0 cdef PyObject *_next_tuple_entry(object tpl, Py_ssize_t *pos): # cannot_raise """Return the next entry from this tuple. :param tpl: The tuple we are investigating, *must* be a PyTuple :param pos: The last item we found. Will be updated to the new position. This cannot raise an exception, as it does no error checking. """ pos[0] = pos[0] + 1 if pos[0] >= PyTuple_GET_SIZE(tpl): return NULL return PyTuple_GET_ITEM(tpl, pos[0]) cdef object _combine_annotations(ann_one, ann_two, cache): """Combine the annotations from both sides.""" cdef Py_ssize_t pos_one, pos_two, len_one, len_two cdef Py_ssize_t out_pos cdef PyObject *temp cdef PyObject *left cdef PyObject *right if (PyObject_RichCompareBool(ann_one, ann_two, Py_LT)): cache_key = (ann_one, ann_two) else: cache_key = (ann_two, ann_one) temp = PyDict_GetItem(cache, cache_key) if temp != NULL: return <object>temp if not PyTuple_CheckExact(ann_one) or not PyTuple_CheckExact(ann_two): raise TypeError('annotations must be tuples') # We know that annotations are tuples, and that both sides are already # sorted, so we can just walk and update a new list.
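# (Worked example added for clarity: combining ann_one = (b'rev-a', b'rev-c') # with ann_two = (b'rev-b', b'rev-c') walks both sorted tuples once and # yields their sorted union (b'rev-a', b'rev-b', b'rev-c'), which is then # memoized in 'cache' under the ordered key pair.)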
pos_one = -1 pos_two = -1 out_pos = 0 left = _next_tuple_entry(ann_one, &pos_one) right = _next_tuple_entry(ann_two, &pos_two) new_ann = PyTuple_New(PyTuple_GET_SIZE(ann_one) + PyTuple_GET_SIZE(ann_two)) while left != NULL and right != NULL: # left == right would also be caught by PyObject_RichCompareBool_ptr, but # checking it first avoids a function call for a very common case. Drops # 'time bzr annotate NEWS' from 7.25s to 7.16s, so it *is* a visible impact. if (left == right or PyObject_RichCompareBool_ptr(left, right, Py_EQ)): # Identical values, step both Py_INCREF_ptr(left) PyTuple_SET_ITEM_ptr(new_ann, out_pos, left) left = _next_tuple_entry(ann_one, &pos_one) right = _next_tuple_entry(ann_two, &pos_two) elif (PyObject_RichCompareBool_ptr(left, right, Py_LT)): # left < right or right == NULL Py_INCREF_ptr(left) PyTuple_SET_ITEM_ptr(new_ann, out_pos, left) left = _next_tuple_entry(ann_one, &pos_one) else: # right < left or left == NULL Py_INCREF_ptr(right) PyTuple_SET_ITEM_ptr(new_ann, out_pos, right) right = _next_tuple_entry(ann_two, &pos_two) out_pos = out_pos + 1 while left != NULL: Py_INCREF_ptr(left) PyTuple_SET_ITEM_ptr(new_ann, out_pos, left) left = _next_tuple_entry(ann_one, &pos_one) out_pos = out_pos + 1 while right != NULL: Py_INCREF_ptr(right) PyTuple_SET_ITEM_ptr(new_ann, out_pos, right) right = _next_tuple_entry(ann_two, &pos_two) out_pos = out_pos + 1 if out_pos != PyTuple_GET_SIZE(new_ann): # Timing _PyTuple_Resize was not significantly faster than slicing # PyTuple_Resize(<PyObject **>(new_ann), out_pos) new_ann = new_ann[0:out_pos] PyDict_SetItem(cache, cache_key, new_ann) return new_ann cdef int _apply_parent_annotations(annotations, parent_annotations, matching_blocks) except -1: """Apply the annotations from parent_annotations into annotations. matching_blocks defines the ranges that match. """ cdef Py_ssize_t parent_idx, lines_idx, match_len, idx cdef PyListObject *par_list cdef PyListObject *ann_list cdef PyObject **par_temp cdef PyObject **ann_temp _check_annotations_are_lists(annotations, parent_annotations) par_list = <PyListObject *>parent_annotations ann_list = <PyListObject *>annotations # For NEWS and breezy/builtins.py, over 99% of the lines are simply copied # across from the parent entry. So this routine is heavily optimized for # that.
It would be interesting if we could use memcpy(), but we have to incref # and decref for parent_idx, lines_idx, match_len in matching_blocks: _check_match_ranges(parent_annotations, annotations, parent_idx, lines_idx, match_len) par_temp = par_list.ob_item + parent_idx ann_temp = ann_list.ob_item + lines_idx for idx from 0 <= idx < match_len: Py_INCREF_ptr(par_temp[idx]) Py_DECREF_ptr(ann_temp[idx]) ann_temp[idx] = par_temp[idx] return 0 cdef int _merge_annotations(this_annotation, annotations, parent_annotations, matching_blocks, ann_cache) except -1: cdef Py_ssize_t parent_idx, ann_idx, lines_idx, match_len, idx cdef Py_ssize_t pos cdef PyObject *ann_temp cdef PyObject *par_temp _check_annotations_are_lists(annotations, parent_annotations) last_ann = None last_parent = None last_res = None for parent_idx, lines_idx, match_len in matching_blocks: _check_match_ranges(parent_annotations, annotations, parent_idx, lines_idx, match_len) # For lines which match this parent, we will now resolve whether # this parent wins over the current annotation for idx from 0 <= idx < match_len: ann_idx = lines_idx + idx ann_temp = PyList_GET_ITEM(annotations, ann_idx) par_temp = PyList_GET_ITEM(parent_annotations, parent_idx + idx) if (ann_temp == par_temp): # This is parent, do nothing # Pointer comparison is fine here. Value comparison would # be ok, but it will be handled in the final if clause by # merging the two tuples into the same tuple # Avoiding the Py_INCREF and function call to # PyObject_RichCompareBool using pointer comparison drops # timing from 215ms => 125ms continue par_ann = <object>par_temp ann = <object>ann_temp if (ann is this_annotation): # Originally claimed 'this', but it was really in this # parent Py_INCREF(par_ann) PyList_SetItem(annotations, ann_idx, par_ann) continue # Resolve the fact that both sides have a different value for # last modified if (ann is last_ann and par_ann is last_parent): Py_INCREF(last_res) PyList_SetItem(annotations, ann_idx, last_res) else: new_ann = _combine_annotations(ann, par_ann, ann_cache) Py_INCREF(new_ann) PyList_SetItem(annotations, ann_idx, new_ann) last_ann = ann last_parent = par_ann last_res = new_ann return 0 class Annotator(_annotator_py.Annotator): """Class that drives performing annotations.""" def _update_from_first_parent(self, key, annotations, lines, parent_key): """Reannotate this text relative to its first parent.""" (parent_annotations, matching_blocks) = self._get_parent_annotations_and_matches( key, lines, parent_key) _apply_parent_annotations(annotations, parent_annotations, matching_blocks) def _update_from_other_parents(self, key, annotations, lines, this_annotation, parent_key): """Reannotate this text relative to a second (or more) parent.""" (parent_annotations, matching_blocks) = self._get_parent_annotations_and_matches( key, lines, parent_key) _merge_annotations(this_annotation, annotations, parent_annotations, matching_blocks, self._ann_tuple_cache) def annotate_flat(self, key): """Determine the single-best-revision to source for each line. This is meant as a compatibility thunk to how annotate() used to work. """ cdef Py_ssize_t pos, num_lines from .
import annotate custom_tiebreaker = annotate._break_annotation_tie annotations, lines = self.annotate(key) num_lines = len(lines) out = [] heads = self._get_heads_provider().heads for pos from 0 <= pos < num_lines: annotation = annotations[pos] line = lines[pos] if len(annotation) == 1: head = annotation[0] else: the_heads = heads(annotation) if len(the_heads) == 1: for head in the_heads: break # get the item out of the set else: # We need to resolve the ambiguity, for now just pick the # sorted smallest head = self._resolve_annotation_tie(the_heads, line, custom_tiebreaker) PyList_Append(out, (head, line)) return out ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_chunks_to_lines_py.py0000644000000000000000000000445700000000000016750 0ustar00# Copyright (C) 2008 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """The python implementation of chunks_to_lines""" def chunks_to_lines(chunks): """Re-split chunks into simple lines. Each entry in the result should contain a single newline at the end, except for the last entry, which may not have a final newline. If chunks is already a simple list of lines, we return it directly. :param chunks: A list/tuple of strings. If chunks is already a list of lines, then we will return it as-is. :return: A list of strings. """ # Optimize for a very common case when chunks are already lines last_no_newline = False for chunk in chunks: if last_no_newline: # Only the last chunk is allowed to not have a trailing newline # Getting here means the last chunk didn't have a newline, and we # have a chunk following it break if not chunk: # Empty strings are never valid lines break elif b'\n' in chunk[:-1]: # This chunk has an extra '\n', so we will have to split it break elif chunk[-1:] != b'\n': # This chunk does not have a trailing newline last_no_newline = True else: # All of the lines (except possibly the last) have a single newline at the # end of the string. # For the last one, we allow it to not have a trailing newline, but it # is not allowed to be an empty string. return chunks # These aren't simple lines, just join and split again. from breezy import osutils return osutils._split_lines(b''.join(chunks)) ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_chunks_to_lines_pyx.pyx0000644000000000000000000001064700000000000017326 0ustar00# Copyright (C) 2008 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version.
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # cython: language_level=3 """Pyrex extensions for converting chunks to lines.""" cdef extern from "python-compat.h": pass from cpython.bytes cimport ( PyBytes_CheckExact, PyBytes_FromStringAndSize, PyBytes_AS_STRING, PyBytes_GET_SIZE, ) from cpython.list cimport ( PyList_Append, ) from libc.string cimport memchr def chunks_to_lines(chunks): """Re-split chunks into simple lines. Each entry in the result should contain a single newline at the end. Except for the last entry which may not have a final newline. If chunks is already a simple list of lines, we return it directly. :param chunks: An list/tuple of strings. If chunks is already a list of lines, then we will return it as-is. :return: A list of strings. """ cdef char *c_str cdef char *newline cdef char *c_last cdef Py_ssize_t the_len cdef int last_no_newline # Check to see if the chunks are already lines last_no_newline = 0 for chunk in chunks: if last_no_newline: # We have a chunk which followed a chunk without a newline, so this # is not a simple list of lines. break # Switching from PyBytes_AsStringAndSize to PyBytes_CheckExact and # then the macros GET_SIZE and AS_STRING saved us 40us / 470us. # It seems PyBytes_AsStringAndSize can actually trigger a conversion, # which we don't want anyway. if not PyBytes_CheckExact(chunk): raise TypeError('chunk is not a string') the_len = PyBytes_GET_SIZE(chunk) if the_len == 0: # An empty string is never a valid line break c_str = PyBytes_AS_STRING(chunk) c_last = c_str + the_len - 1 newline = memchr(c_str, c'\n', the_len) if newline != c_last: if newline == NULL: # Missing a newline. 
Only valid as the last line last_no_newline = 1 else: # There is a newline in the middle, we must resplit break else: # Everything was already a list of lines return chunks # We know we need to create a new list of lines lines = [] tail = None # Any remainder from the previous chunk for chunk in chunks: if tail is not None: chunk = tail + chunk tail = None if not PyBytes_CheckExact(chunk): raise TypeError('chunk is not a string') the_len = PyBytes_GET_SIZE(chunk) if the_len == 0: # An empty string is never a valid line, and we don't need to # append anything continue c_str = PyBytes_AS_STRING(chunk) c_last = c_str + the_len - 1 newline = memchr(c_str, c'\n', the_len) if newline == c_last: # A simple line PyList_Append(lines, chunk) elif newline == NULL: # A chunk without a newline, if this is the last entry, then we # allow it tail = chunk else: # We have a newline in the middle, loop until we've consumed all # lines while newline != NULL: line = PyBytes_FromStringAndSize(c_str, newline - c_str + 1) PyList_Append(lines, line) c_str = newline + 1 if c_str > c_last: # We are done break the_len = c_last - c_str + 1 newline = memchr(c_str, c'\n', the_len) if newline == NULL: tail = PyBytes_FromStringAndSize(c_str, the_len) break if tail is not None: PyList_Append(lines, tail) return lines ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_export_c_api.h0000644000000000000000000000645000000000000015317 0ustar00/* Copyright (C) 2009 Canonical Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* This file contains helper functions for exporting a C API for a CPython * extension module. */ #ifndef _EXPORT_C_API_H_ #define _EXPORT_C_API_H_ static const char *_C_API_NAME = "_C_API"; /** * Add a C function to the modules _C_API * This wraps the function in a PyCObject, and inserts that into a dict. * The key of the dict is the function name, and the description is the * signature of the function. * This is generally called during a modules init_MODULE function. * * @param module A Python module (the one being initialized) * @param funcname The name of the function being exported * @param func A pointer to the function * @param signature The C signature of the function * @return 0 if everything is successful, -1 if there is a problem. 
An * exception should also be set */ static int _export_function(PyObject *module, char *funcname, void *func, char *signature) { PyObject *d = NULL; PyObject *capsule = NULL; d = PyObject_GetAttrString(module, _C_API_NAME); if (!d) { PyErr_Clear(); d = PyDict_New(); if (!d) goto bad; Py_INCREF(d); if (PyModule_AddObject(module, _C_API_NAME, d) < 0) goto bad; } capsule = PyCapsule_New(func, signature, 0); if (!capsule) goto bad; if (PyDict_SetItemString(d, funcname, capsule) < 0) goto bad; Py_DECREF(d); return 0; bad: Py_XDECREF(capsule); Py_XDECREF(d); return -1; } /* Note: * It feels like more could be done here. Specifically, if you look at * _static_tuple_c.h you can see some boilerplate where we have: * #ifdef STATIC_TUPLE_MODULE // are we exporting or importing * static RETVAL FUNCNAME PROTO; * #else * static RETVAL (*FUNCNAME) PROTO; * #endif * * And then in _static_tuple_c.c we have * int setup_c_api() * { * _export_function(module, #FUNCNAME, FUNCNAME, #PROTO); * } * * And then in _static_tuple_c.h import_##MODULE * struct function_definition functions[] = { * {#FUNCNAME, (void **)&FUNCNAME, #RETVAL #PROTO}, * ... * {NULL}}; * * And some similar stuff for types. However, this would mean that we would * need a way for the C preprocessor to build up a list of definitions to be * generated, and then expand that list at the appropriate time. * I would guess there would be a way to do this, but probably not without a * lot of magic, and the end result probably wouldn't be very pretty to * maintain. Perhaps python's dynamic nature has left me jaded about writing * boilerplate.... */ #endif // _EXPORT_C_API_H_ ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_import_c_api.h0000644000000000000000000001320000000000000015277 0ustar00/* Copyright (C) 2009 Canonical Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _IMPORT_C_API_H_ #define _IMPORT_C_API_H_ /** * Helper functions to eliminate some of the boilerplate when importing a C API * from a CPython extension module. * * For more information see _export_c_api.h */ static const char *_C_API_NAME = "_C_API"; /** * Import a function from the _C_API_NAME dict that is part of module. * * @param module The Python module we are importing from * the attribute _C_API_NAME will be used as a dictionary * containing the function pointer we are looking for. * @param funcname Name of the function we want to import * @param func A pointer to the function handle where we will store the * function. * @param signature The C signature of the function. This is validated * against the signature stored in the C api, to make sure * there is no versioning skew. 
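 *
 * Illustrative use (a sketch, not from the original header; "do_thing" and
 * its "int (int)" signature are placeholder names):
 *
 *   static int (*do_thing)(int);
 *   ...
 *   if (_import_function(module, "do_thing", (void **)&do_thing,
 *                        "int (int)") < 0)
 *       return -1;   // an exception has already been set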
*/ static int _import_function(PyObject *module, const char *funcname, void **func, const char *signature) { PyObject *d = NULL; PyObject *capsule = NULL; void *pointer; d = PyObject_GetAttrString(module, _C_API_NAME); if (!d) { // PyObject_GetAttrString sets an appropriate exception goto bad; } capsule = PyDict_GetItemString(d, funcname); if (!capsule) { // PyDict_GetItemString does not set an exception PyErr_Format(PyExc_AttributeError, "Module %s did not export a function named %s\n", PyModule_GetName(module), funcname); goto bad; } pointer = PyCapsule_GetPointer(capsule, signature); if (!pointer) { // PyCapsule_GetPointer sets an error with a little context goto bad; } *func = pointer; Py_DECREF(d); return 0; bad: Py_XDECREF(d); return -1; } /** * Get a pointer to an exported PyTypeObject. * * @param module The Python module we are importing from * @param class_name Attribute of the module that should reference the * Type object. Note that a PyTypeObject is the python * description of the type, not the raw C structure. * @return A Pointer to the requested type object. On error NULL will be * returned and an exception will be set. */ static PyTypeObject * _import_type(PyObject *module, const char *class_name) { PyObject *type = NULL; type = PyObject_GetAttrString(module, (char *)class_name); if (!type) { goto bad; } if (!PyType_Check(type)) { PyErr_Format(PyExc_TypeError, "%s.%s is not a type object", PyModule_GetName(module), class_name); goto bad; } return (PyTypeObject *)type; bad: Py_XDECREF(type); return NULL; } struct function_description { const char *name; void **pointer; const char *signature; }; struct type_description { const char *name; PyTypeObject **pointer; }; /** * Helper for importing several functions and types in a data-driven manner. * * @param module The name of the module we will be importing * @param functions A list of function_description objects, describing the * functions being imported. * The list should be terminated with {NULL} to indicate * there are no more functions to import. * @param types A list of type_description objects describing type * objects that we want to import. The list should be * terminated with {NULL} to indicate there are no more * types to import. * @return 0 on success, -1 on error and an exception should be set. 
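 *
 * Illustrative use (a sketch, not from the original header; the module and
 * function names are placeholders):
 *
 *   static int (*do_thing)(int);
 *   static struct function_description functions[] = {
 *       {"do_thing", (void **)&do_thing, "int (int)"},
 *       {NULL, NULL, NULL}};
 *   if (_import_extension_module("breezy._some_module", functions, NULL) < 0)
 *       return -1;   // an exception has already been set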
*/ static int _import_extension_module(const char *module_name, struct function_description *functions, struct type_description *types) { PyObject *module = NULL; struct function_description *cur_func; struct type_description *cur_type; int ret_code; module = PyImport_ImportModule((char *)module_name); if (!module) goto bad; if (functions != NULL) { cur_func = functions; while (cur_func->name != NULL) { ret_code = _import_function(module, cur_func->name, cur_func->pointer, cur_func->signature); if (ret_code < 0) goto bad; cur_func++; } } if (types != NULL) { PyTypeObject *type_p = NULL; cur_type = types; while (cur_type->name != NULL) { type_p = _import_type(module, cur_type->name); if (type_p == NULL) goto bad; *(cur_type->pointer) = type_p; cur_type++; } } Py_XDECREF(module); return 0; bad: Py_XDECREF(module); return -1; } #endif // _IMPORT_C_API_H_ ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_known_graph_py.py0000644000000000000000000003436500000000000016077 0ustar00# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Implementation of Graph algorithms when we have already loaded everything. """ try: from collections.abc import deque except ImportError: # python < 3.7 from collections import deque from . import ( errors, revision, ) class _KnownGraphNode(object): """Represents a single object in the known graph.""" __slots__ = ('key', 'parent_keys', 'child_keys', 'gdfo') def __init__(self, key, parent_keys): self.key = key self.parent_keys = parent_keys self.child_keys = [] # Greatest distance from origin self.gdfo = None def __repr__(self): return '%s(%s gdfo:%s par:%s child:%s)' % ( self.__class__.__name__, self.key, self.gdfo, self.parent_keys, self.child_keys) class _MergeSortNode(object): """Information about a specific node in the merge graph.""" __slots__ = ('key', 'merge_depth', 'revno', 'end_of_merge') def __init__(self, key, merge_depth, revno, end_of_merge): self.key = key self.merge_depth = merge_depth self.revno = revno self.end_of_merge = end_of_merge class KnownGraph(object): """This is a class which assumes we already know the full graph.""" def __init__(self, parent_map, do_cache=True): """Create a new KnownGraph instance. :param parent_map: A dictionary mapping key => parent_keys """ self._nodes = {} # Maps {frozenset(revision_id, revision_id): heads} self._known_heads = {} self.do_cache = do_cache self._initialize_nodes(parent_map) self._find_gdfo() def _initialize_nodes(self, parent_map): """Populate self._nodes. After this has finished: - self._nodes will have an entry for every entry in parent_map. 
- ghosts will have a parent_keys = None, - all nodes found will also have .child_keys populated with all known child_keys, """ nodes = self._nodes for key, parent_keys in parent_map.items(): if key in nodes: node = nodes[key] node.parent_keys = parent_keys else: node = _KnownGraphNode(key, parent_keys) nodes[key] = node for parent_key in parent_keys: try: parent_node = nodes[parent_key] except KeyError: parent_node = _KnownGraphNode(parent_key, None) nodes[parent_key] = parent_node parent_node.child_keys.append(key) def _find_tails(self): return [node for node in self._nodes.values() if not node.parent_keys] def _find_tips(self): return [node for node in self._nodes.values() if not node.child_keys] def _find_gdfo(self): nodes = self._nodes known_parent_gdfos = {} pending = [] for node in self._find_tails(): node.gdfo = 1 pending.append(node) while pending: node = pending.pop() for child_key in node.child_keys: child = nodes[child_key] if child_key in known_parent_gdfos: known_gdfo = known_parent_gdfos[child_key] + 1 present = True else: known_gdfo = 1 present = False if child.gdfo is None or node.gdfo + 1 > child.gdfo: child.gdfo = node.gdfo + 1 if known_gdfo == len(child.parent_keys): # We are the last parent updating that node, we can # continue from there pending.append(child) if present: del known_parent_gdfos[child_key] else: # Update known_parent_gdfos for a key we couldn't process known_parent_gdfos[child_key] = known_gdfo def add_node(self, key, parent_keys): """Add a new node to the graph. If this fills in a ghost, then the gdfos of all children will be updated accordingly. :param key: The node being added. If this is a duplicate, this is a no-op. :param parent_keys: The parents of the given node. :return: None (should we return if this was a ghost, etc?) """ nodes = self._nodes if key in nodes: node = nodes[key] if node.parent_keys is None: node.parent_keys = parent_keys # A ghost is being added, we can no-longer trust the heads # cache, so clear it self._known_heads.clear() else: # Make sure we compare a list to a list, as tuple != list. parent_keys = list(parent_keys) existing_parent_keys = list(node.parent_keys) if parent_keys == existing_parent_keys: return # Identical content else: raise ValueError( 'Parent key mismatch, existing node %s' ' has parents of %s not %s' % (key, existing_parent_keys, parent_keys)) else: node = _KnownGraphNode(key, parent_keys) nodes[key] = node parent_gdfo = 0 for parent_key in parent_keys: try: parent_node = nodes[parent_key] except KeyError: parent_node = _KnownGraphNode(parent_key, None) # Ghosts and roots have gdfo 1 parent_node.gdfo = 1 nodes[parent_key] = parent_node if parent_gdfo < parent_node.gdfo: parent_gdfo = parent_node.gdfo parent_node.child_keys.append(key) node.gdfo = parent_gdfo + 1 # Now fill the gdfo to all children # Note that this loop is slightly inefficient, in that we may visit the # same child (and its decendents) more than once, however, it is # 'efficient' in that we only walk to nodes that would be updated, # rather than all nodes # We use a deque rather than a simple list stack, to go for BFD rather # than DFD. 
So that if a longer path is possible, we walk it before we # get to the final child pending = deque([node]) while pending: node = pending.popleft() next_gdfo = node.gdfo + 1 for child_key in node.child_keys: child = nodes[child_key] if child.gdfo < next_gdfo: # This child is being updated, we need to check its # children child.gdfo = next_gdfo pending.append(child) def heads(self, keys): """Return the heads from amongst keys. This is done by searching the ancestries of each key. Any key that is reachable from another key is not returned; all the others are. This operation scales with the relative depth between any two keys. It uses gdfo to avoid walking all ancestry. :param keys: An iterable of keys. :return: A set of the heads. Note that as a set there is no ordering information. Callers will need to filter their input to create order if they need it. """ candidate_nodes = dict((key, self._nodes[key]) for key in keys) if revision.NULL_REVISION in candidate_nodes: # NULL_REVISION is only a head if it is the only entry candidate_nodes.pop(revision.NULL_REVISION) if not candidate_nodes: return frozenset([revision.NULL_REVISION]) if len(candidate_nodes) < 2: # No or only one candidate return frozenset(candidate_nodes) heads_key = frozenset(candidate_nodes) # Do we have a cached result ? try: heads = self._known_heads[heads_key] return heads except KeyError: pass # Let's compute the heads seen = set() pending = [] min_gdfo = None for node in candidate_nodes.values(): if node.parent_keys: pending.extend(node.parent_keys) if min_gdfo is None or node.gdfo < min_gdfo: min_gdfo = node.gdfo nodes = self._nodes while pending: node_key = pending.pop() if node_key in seen: # node already appears in some ancestry continue seen.add(node_key) node = nodes[node_key] if node.gdfo <= min_gdfo: continue if node.parent_keys: pending.extend(node.parent_keys) heads = heads_key.difference(seen) if self.do_cache: self._known_heads[heads_key] = heads return heads def topo_sort(self): """Return the nodes in topological order. All parents must occur before all children. """ for node in self._nodes.values(): if node.gdfo is None: raise errors.GraphCycleError(self._nodes) pending = self._find_tails() pending_pop = pending.pop pending_append = pending.append topo_order = [] topo_order_append = topo_order.append num_seen_parents = dict.fromkeys(self._nodes, 0) while pending: node = pending_pop() if node.parent_keys is not None: # We don't include ghost parents topo_order_append(node.key) for child_key in node.child_keys: child_node = self._nodes[child_key] seen_parents = num_seen_parents[child_key] + 1 if seen_parents == len(child_node.parent_keys): # All parents have been processed, enqueue this child pending_append(child_node) # This has been queued up, stop tracking it del num_seen_parents[child_key] else: num_seen_parents[child_key] = seen_parents # We started from the parents, so we don't need to do anymore work return topo_order def gc_sort(self): """Return a reverse topological ordering which is 'stable'. There are a few constraints: 1) Reverse topological (all children before all parents) 2) Grouped by prefix 3) 'stable' sorting, so that we get the same result, independent of machine, or extra data. To do this, we use the same basic algorithm as topo_sort, but when we aren't sure what node to access next, we sort them lexicographically. 
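        Illustrative example (not from the original docstring; plain string
        keys all land in the same '' prefix group)::

            kg = KnownGraph({'B': ('A',), 'C': ('A',), 'A': ()})
            kg.gc_sort()   # -> ['B', 'C', 'A']: all children before parents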
""" tips = self._find_tips() # Split the tips based on prefix prefix_tips = {} for node in tips: if node.key.__class__ is str or len(node.key) == 1: prefix = '' else: prefix = node.key[0] prefix_tips.setdefault(prefix, []).append(node) num_seen_children = dict.fromkeys(self._nodes, 0) result = [] for prefix in sorted(prefix_tips): pending = sorted(prefix_tips[prefix], key=lambda n: n.key, reverse=True) while pending: node = pending.pop() if node.parent_keys is None: # Ghost node, skip it continue result.append(node.key) for parent_key in sorted(node.parent_keys, reverse=True): parent_node = self._nodes[parent_key] seen_children = num_seen_children[parent_key] + 1 if seen_children == len(parent_node.child_keys): # All children have been processed, enqueue this parent pending.append(parent_node) # This has been queued up, stop tracking it del num_seen_children[parent_key] else: num_seen_children[parent_key] = seen_children return result def merge_sort(self, tip_key): """Compute the merge sorted graph output.""" from breezy import tsort as_parent_map = dict((node.key, node.parent_keys) for node in self._nodes.values() if node.parent_keys is not None) # We intentionally always generate revnos and never force the # mainline_revisions # Strip the sequence_number that merge_sort generates return [_MergeSortNode(key, merge_depth, revno, end_of_merge) for _, key, merge_depth, revno, end_of_merge in tsort.merge_sort(as_parent_map, tip_key, mainline_revisions=None, generate_revno=True)] def get_parent_keys(self, key): """Get the parents for a key Returns a list containg the parents keys. If the key is a ghost, None is returned. A KeyError will be raised if the key is not in the graph. :param keys: Key to check (eg revision_id) :return: A list of parents """ return self._nodes[key].parent_keys def get_child_keys(self, key): """Get the children for a key Returns a list containg the children keys. A KeyError will be raised if the key is not in the graph. :param keys: Key to check (eg revision_id) :return: A list of children """ return self._nodes[key].child_keys ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_known_graph_pyx.pyx0000644000000000000000000011166300000000000016454 0ustar00# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # cython: language_level=3 """Implementation of Graph algorithms when we have already loaded everything. 
""" cdef extern from "python-compat.h": pass from cpython.bytes cimport ( PyBytes_CheckExact, ) from cpython.dict cimport ( PyDict_CheckExact, PyDict_DelItem, PyDict_GetItem, PyDict_Next, PyDict_SetItem, PyDict_Size, ) from cpython.list cimport ( PyList_Append, PyList_CheckExact, PyList_GET_SIZE, PyList_GET_ITEM, PyList_SetItem, ) from cpython.object cimport ( Py_LT, PyObject, PyObject_RichCompareBool, ) from cpython.ref cimport ( Py_INCREF, ) from cpython.tuple cimport ( PyTuple_CheckExact, PyTuple_GET_SIZE, PyTuple_GET_ITEM, PyTuple_New, PyTuple_SET_ITEM, ) import collections import gc from . import errors, revision cdef object NULL_REVISION NULL_REVISION = revision.NULL_REVISION cdef class _KnownGraphNode: """Represents a single object in the known graph.""" cdef object key cdef object parents cdef object children cdef public long gdfo cdef int seen cdef object extra def __init__(self, key): self.key = key self.parents = None self.children = [] # Greatest distance from origin self.gdfo = -1 self.seen = 0 self.extra = None property child_keys: def __get__(self): cdef _KnownGraphNode child keys = [] for child in self.children: PyList_Append(keys, child.key) return keys property parent_keys: def __get__(self): if self.parents is None: return None cdef _KnownGraphNode parent keys = [] for parent in self.parents: PyList_Append(keys, parent.key) return keys cdef clear_references(self): self.parents = None self.children = None def __repr__(self): cdef _KnownGraphNode node parent_keys = [] if self.parents is not None: for node in self.parents: parent_keys.append(node.key) child_keys = [] if self.children is not None: for node in self.children: child_keys.append(node.key) return '%s(%s gdfo:%s par:%s child:%s)' % ( self.__class__.__name__, self.key, self.gdfo, parent_keys, child_keys) cdef _KnownGraphNode _get_list_node(lst, Py_ssize_t pos): cdef PyObject *temp_node temp_node = PyList_GET_ITEM(lst, pos) return <_KnownGraphNode>temp_node cdef _KnownGraphNode _get_tuple_node(tpl, Py_ssize_t pos): cdef PyObject *temp_node temp_node = PyTuple_GET_ITEM(tpl, pos) return <_KnownGraphNode>temp_node def get_key(node): cdef _KnownGraphNode real_node real_node = node return real_node.key cdef object _sort_list_nodes(object lst_or_tpl, int reverse): """Sort a list of _KnownGraphNode objects. If lst_or_tpl is a list, it is allowed to mutate in place. It may also just return the input list if everything is already sorted. 
""" cdef _KnownGraphNode node1, node2 cdef int do_swap, is_tuple cdef Py_ssize_t length is_tuple = PyTuple_CheckExact(lst_or_tpl) if not (is_tuple or PyList_CheckExact(lst_or_tpl)): raise TypeError('lst_or_tpl must be a list or tuple.') length = len(lst_or_tpl) if length == 0 or length == 1: return lst_or_tpl if length == 2: if is_tuple: node1 = _get_tuple_node(lst_or_tpl, 0) node2 = _get_tuple_node(lst_or_tpl, 1) else: node1 = _get_list_node(lst_or_tpl, 0) node2 = _get_list_node(lst_or_tpl, 1) if reverse: do_swap = PyObject_RichCompareBool(node1.key, node2.key, Py_LT) else: do_swap = PyObject_RichCompareBool(node2.key, node1.key, Py_LT) if not do_swap: return lst_or_tpl if is_tuple: return (node2, node1) else: # Swap 'in-place', since lists are mutable Py_INCREF(node1) PyList_SetItem(lst_or_tpl, 1, node1) Py_INCREF(node2) PyList_SetItem(lst_or_tpl, 0, node2) return lst_or_tpl # For all other sizes, we just use 'sorted()' if is_tuple: # Note that sorted() is just list(iterable).sort() lst_or_tpl = list(lst_or_tpl) lst_or_tpl.sort(key=get_key, reverse=reverse) return lst_or_tpl cdef class _MergeSorter cdef class KnownGraph: """This is a class which assumes we already know the full graph.""" cdef public object _nodes cdef public object _known_heads cdef public int do_cache def __init__(self, parent_map, do_cache=True): """Create a new KnownGraph instance. :param parent_map: A dictionary mapping key => parent_keys """ # tests at pre-allocating the node dict actually slowed things down self._nodes = {} # Maps {sorted(revision_id, revision_id): heads} self._known_heads = {} self.do_cache = int(do_cache) # TODO: consider disabling gc since we are allocating a lot of nodes # that won't be collectable anyway. real world testing has not # shown a specific impact, yet. self._initialize_nodes(parent_map) self._find_gdfo() def __dealloc__(self): cdef _KnownGraphNode child cdef Py_ssize_t pos cdef PyObject *temp_node while PyDict_Next(self._nodes, &pos, NULL, &temp_node): child = <_KnownGraphNode>temp_node child.clear_references() cdef _KnownGraphNode _get_or_create_node(self, key): cdef PyObject *temp_node cdef _KnownGraphNode node temp_node = PyDict_GetItem(self._nodes, key) if temp_node == NULL: node = _KnownGraphNode(key) PyDict_SetItem(self._nodes, key, node) else: node = <_KnownGraphNode>temp_node return node cdef _populate_parents(self, _KnownGraphNode node, parent_keys): cdef Py_ssize_t num_parent_keys, pos cdef _KnownGraphNode parent_node num_parent_keys = len(parent_keys) # We know how many parents, so we pre allocate the tuple parent_nodes = PyTuple_New(num_parent_keys) for pos from 0 <= pos < num_parent_keys: # Note: it costs us 10ms out of 40ms to lookup all of these # parents, it doesn't seem to be an allocation overhead, # but rather a lookup overhead. There doesn't seem to be # a way around it, and that is one reason why # KnownGraphNode maintains a direct pointer to the parent # node. # We use [] because parent_keys may be a tuple or list parent_node = self._get_or_create_node(parent_keys[pos]) # PyTuple_SET_ITEM will steal a reference, so INCREF first Py_INCREF(parent_node) PyTuple_SET_ITEM(parent_nodes, pos, parent_node) PyList_Append(parent_node.children, node) node.parents = parent_nodes def _initialize_nodes(self, parent_map): """Populate self._nodes. After this has finished: - self._nodes will have an entry for every entry in parent_map. 
- ghosts will have a parent_keys = None, - all nodes found will also have child_keys populated with all known child keys, """ cdef PyObject *temp_key cdef PyObject *temp_parent_keys cdef PyObject *temp_node cdef Py_ssize_t pos cdef _KnownGraphNode node cdef _KnownGraphNode parent_node if not PyDict_CheckExact(parent_map): raise TypeError('parent_map should be a dict of {key:parent_keys}') # for key, parent_keys in parent_map.iteritems(): pos = 0 while PyDict_Next(parent_map, &pos, &temp_key, &temp_parent_keys): key = temp_key parent_keys = temp_parent_keys node = self._get_or_create_node(key) self._populate_parents(node, parent_keys) def _find_tails(self): cdef PyObject *temp_node cdef _KnownGraphNode node cdef Py_ssize_t pos tails = [] pos = 0 while PyDict_Next(self._nodes, &pos, NULL, &temp_node): node = <_KnownGraphNode>temp_node if node.parents is None or PyTuple_GET_SIZE(node.parents) == 0: node.gdfo = 1 PyList_Append(tails, node) return tails def _find_tips(self): cdef PyObject *temp_node cdef _KnownGraphNode node cdef Py_ssize_t pos tips = [] pos = 0 while PyDict_Next(self._nodes, &pos, NULL, &temp_node): node = <_KnownGraphNode>temp_node if PyList_GET_SIZE(node.children) == 0: PyList_Append(tips, node) return tips def _find_gdfo(self): cdef _KnownGraphNode node cdef _KnownGraphNode child cdef PyObject *temp cdef Py_ssize_t pos cdef int replace cdef Py_ssize_t last_item cdef long next_gdfo pending = self._find_tails() last_item = PyList_GET_SIZE(pending) - 1 while last_item >= 0: # Avoid pop followed by push, instead, peek, and replace # timing shows this is 930ms => 770ms for OOo node = _get_list_node(pending, last_item) last_item = last_item - 1 next_gdfo = node.gdfo + 1 for pos from 0 <= pos < PyList_GET_SIZE(node.children): child = _get_list_node(node.children, pos) if next_gdfo > child.gdfo: child.gdfo = next_gdfo child.seen = child.seen + 1 if child.seen == PyTuple_GET_SIZE(child.parents): # This child is populated, queue it to be walked last_item = last_item + 1 if last_item < PyList_GET_SIZE(pending): Py_INCREF(child) # SetItem steals a ref PyList_SetItem(pending, last_item, child) else: PyList_Append(pending, child) # We have queued this node, we don't need to track it # anymore child.seen = 0 def add_node(self, key, parent_keys): """Add a new node to the graph. If this fills in a ghost, then the gdfos of all children will be updated accordingly. :param key: The node being added. If this is a duplicate, this is a no-op. :param parent_keys: The parents of the given node. :return: None (should we return if this was a ghost, etc?) 
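        Illustrative example (not from the original docstring)::

            kg = KnownGraph({b'A': ()})
            kg.add_node(b'B', [b'A'])   # B becomes a child of A, gdfo 2
            kg.add_node(b'B', [b'A'])   # identical parents again: a no-op
            kg.add_node(b'B', [b'X'])   # conflicting parents: ValueError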
""" cdef PyObject *maybe_node cdef _KnownGraphNode node, parent_node, child_node cdef long parent_gdfo, next_gdfo maybe_node = PyDict_GetItem(self._nodes, key) if maybe_node != NULL: node = <_KnownGraphNode>maybe_node if node.parents is None: # We are filling in a ghost self._populate_parents(node, parent_keys) # We can't trust cached heads anymore self._known_heads.clear() else: # Ensure that the parent_key list matches existing_parent_keys = [] for parent_node in node.parents: existing_parent_keys.append(parent_node.key) # Make sure we use a list for the comparison, in case it was a # tuple, etc parent_keys = list(parent_keys) if existing_parent_keys == parent_keys: # Exact match, nothing more to do return else: raise ValueError('Parent key mismatch, existing node %s' ' has parents of %s not %s' % (key, existing_parent_keys, parent_keys)) else: node = _KnownGraphNode(key) PyDict_SetItem(self._nodes, key, node) self._populate_parents(node, parent_keys) parent_gdfo = 0 for parent_node in node.parents: if parent_node.gdfo == -1: # This is a newly introduced ghost, so it gets gdfo of 1 parent_node.gdfo = 1 if parent_gdfo < parent_node.gdfo: parent_gdfo = parent_node.gdfo node.gdfo = parent_gdfo + 1 # Now fill the gdfo to all children # Note that this loop is slightly inefficient, in that we may visit the # same child (and its decendents) more than once, however, it is # 'efficient' in that we only walk to nodes that would be updated, # rather than all nodes # We use a deque rather than a simple list stack, to go for BFD rather # than DFD. So that if a longer path is possible, we walk it before we # get to the final child pending = collections.deque([node]) pending_popleft = pending.popleft pending_append = pending.append while pending: node = pending_popleft() next_gdfo = node.gdfo + 1 for child_node in node.children: if child_node.gdfo < next_gdfo: # This child is being updated, we need to check its # children child_node.gdfo = next_gdfo pending_append(child_node) def heads(self, keys): """Return the heads from amongst keys. This is done by searching the ancestries of each key. Any key that is reachable from another key is not returned; all the others are. This operation scales with the relative depth between any two keys. It uses gdfo to avoid walking all ancestry. :param keys: An iterable of keys. :return: A set of the heads. Note that as a set there is no ordering information. Callers will need to filter their input to create order if they need it. 
""" cdef PyObject *maybe_node cdef PyObject *maybe_heads cdef PyObject *temp_node cdef _KnownGraphNode node cdef Py_ssize_t pos, last_item cdef long min_gdfo heads_key = frozenset(keys) maybe_heads = PyDict_GetItem(self._known_heads, heads_key) if maybe_heads != NULL: return maybe_heads # Not cached, compute it ourselves candidate_nodes = {} for key in keys: maybe_node = PyDict_GetItem(self._nodes, key) if maybe_node == NULL: raise KeyError('key %s not in nodes' % (key,)) PyDict_SetItem(candidate_nodes, key, maybe_node) maybe_node = PyDict_GetItem(candidate_nodes, NULL_REVISION) if maybe_node != NULL: # NULL_REVISION is only a head if it is the only entry candidate_nodes.pop(NULL_REVISION) if not candidate_nodes: return frozenset([NULL_REVISION]) # The keys changed, so recalculate heads_key heads_key = frozenset(candidate_nodes) if PyDict_Size(candidate_nodes) < 2: return heads_key cleanup = [] pending = [] # we know a gdfo cannot be longer than a linear chain of all nodes min_gdfo = PyDict_Size(self._nodes) + 1 # Build up nodes that need to be walked, note that starting nodes are # not added to seen() pos = 0 while PyDict_Next(candidate_nodes, &pos, NULL, &temp_node): node = <_KnownGraphNode>temp_node if node.parents is not None: pending.extend(node.parents) if node.gdfo < min_gdfo: min_gdfo = node.gdfo # Now do all the real work last_item = PyList_GET_SIZE(pending) - 1 while last_item >= 0: node = _get_list_node(pending, last_item) last_item = last_item - 1 if node.seen: # node already appears in some ancestry continue PyList_Append(cleanup, node) node.seen = 1 if node.gdfo <= min_gdfo: continue if node.parents is not None and PyTuple_GET_SIZE(node.parents) > 0: for pos from 0 <= pos < PyTuple_GET_SIZE(node.parents): parent_node = _get_tuple_node(node.parents, pos) last_item = last_item + 1 if last_item < PyList_GET_SIZE(pending): Py_INCREF(parent_node) # SetItem steals a ref PyList_SetItem(pending, last_item, parent_node) else: PyList_Append(pending, parent_node) heads = [] pos = 0 while PyDict_Next(candidate_nodes, &pos, NULL, &temp_node): node = <_KnownGraphNode>temp_node if not node.seen: PyList_Append(heads, node.key) heads = frozenset(heads) for pos from 0 <= pos < PyList_GET_SIZE(cleanup): node = _get_list_node(cleanup, pos) node.seen = 0 if self.do_cache: PyDict_SetItem(self._known_heads, heads_key, heads) return heads def topo_sort(self): """Return the nodes in topological order. All parents must occur before all children. """ # This is, for the most part, the same iteration order that we used for # _find_gdfo, consider finding a way to remove the duplication # In general, we find the 'tails' (nodes with no parents), and then # walk to the children. For children that have all of their parents # yielded, we queue up the child to be yielded as well. 
cdef _KnownGraphNode node cdef _KnownGraphNode child cdef PyObject *temp cdef Py_ssize_t pos cdef int replace cdef Py_ssize_t last_item pending = self._find_tails() if PyList_GET_SIZE(pending) == 0 and len(self._nodes) > 0: raise errors.GraphCycleError(self._nodes) topo_order = [] last_item = PyList_GET_SIZE(pending) - 1 while last_item >= 0: # Avoid pop followed by push, instead, peek, and replace # timing shows this is 930ms => 770ms for OOo node = _get_list_node(pending, last_item) last_item = last_item - 1 if node.parents is not None: # We don't include ghost parents PyList_Append(topo_order, node.key) for pos from 0 <= pos < PyList_GET_SIZE(node.children): child = _get_list_node(node.children, pos) if child.gdfo == -1: # We know we have a graph cycle because a node has a parent # which we couldn't find raise errors.GraphCycleError(self._nodes) child.seen = child.seen + 1 if child.seen == PyTuple_GET_SIZE(child.parents): # All parents of this child have been yielded, queue this # one to be yielded as well last_item = last_item + 1 if last_item < PyList_GET_SIZE(pending): Py_INCREF(child) # SetItem steals a ref PyList_SetItem(pending, last_item, child) else: PyList_Append(pending, child) # We have queued this node, we don't need to track it # anymore child.seen = 0 # We started from the parents, so we don't need to do anymore work return topo_order def gc_sort(self): """Return a reverse topological ordering which is 'stable'. There are a few constraints: 1) Reverse topological (all children before all parents) 2) Grouped by prefix 3) 'stable' sorting, so that we get the same result, independent of machine, or extra data. To do this, we use the same basic algorithm as topo_sort, but when we aren't sure what node to access next, we sort them lexicographically. """ cdef PyObject *temp cdef Py_ssize_t pos, last_item cdef _KnownGraphNode node, node2, parent_node tips = self._find_tips() # Split the tips based on prefix prefix_tips = {} for pos from 0 <= pos < PyList_GET_SIZE(tips): node = _get_list_node(tips, pos) if PyBytes_CheckExact(node.key) or len(node.key) == 1: prefix = '' else: prefix = node.key[0] temp = PyDict_GetItem(prefix_tips, prefix) if temp == NULL: prefix_tips[prefix] = [node] else: tip_nodes = temp PyList_Append(tip_nodes, node) result = [] for prefix in sorted(prefix_tips): temp = PyDict_GetItem(prefix_tips, prefix) assert temp != NULL tip_nodes = temp pending = _sort_list_nodes(tip_nodes, 1) last_item = PyList_GET_SIZE(pending) - 1 while last_item >= 0: node = _get_list_node(pending, last_item) last_item = last_item - 1 if node.parents is None: # Ghost continue PyList_Append(result, node.key) # Sorting the parent keys isn't strictly necessary for stable # sorting of a given graph. 
But it does help minimize the # differences between graphs # For bzr.dev ancestry: # 4.73ms no sort # 7.73ms RichCompareBool sort parents = _sort_list_nodes(node.parents, 1) for pos from 0 <= pos < len(parents): if PyTuple_CheckExact(parents): parent_node = _get_tuple_node(parents, pos) else: parent_node = _get_list_node(parents, pos) # TODO: GraphCycle detection parent_node.seen = parent_node.seen + 1 if (parent_node.seen == PyList_GET_SIZE(parent_node.children)): # All children have been processed, queue up this # parent last_item = last_item + 1 if last_item < PyList_GET_SIZE(pending): Py_INCREF(parent_node) # SetItem steals a ref PyList_SetItem(pending, last_item, parent_node) else: PyList_Append(pending, parent_node) parent_node.seen = 0 return result def merge_sort(self, tip_key): """Compute the merge sorted graph output.""" cdef _MergeSorter sorter # TODO: consider disabling gc since we are allocating a lot of nodes # that won't be collectable anyway. real world testing has not # shown a specific impact, yet. sorter = _MergeSorter(self, tip_key) return sorter.topo_order() def get_parent_keys(self, key): """Get the parents for a key Returns a list containing the parents keys. If the key is a ghost, None is returned. A KeyError will be raised if the key is not in the graph. :param keys: Key to check (eg revision_id) :return: A list of parents """ return self._nodes[key].parent_keys def get_child_keys(self, key): """Get the children for a key Returns a list containing the children keys. A KeyError will be raised if the key is not in the graph. :param keys: Key to check (eg revision_id) :return: A list of children """ return self._nodes[key].child_keys cdef class _MergeSortNode: """Tracks information about a node during the merge_sort operation.""" # Public api cdef public object key cdef public long merge_depth cdef public object end_of_merge # True/False Is this the end of the current merge # Private api, used while computing the information cdef _KnownGraphNode left_parent cdef _KnownGraphNode left_pending_parent cdef object pending_parents # list of _KnownGraphNode for non-left parents cdef long _revno_first cdef long _revno_second cdef long _revno_last # TODO: turn these into flag/bit fields rather than individual members cdef int is_first_child # Is this the first child? cdef int seen_by_child # A child node has seen this parent cdef int completed # Fully Processed def __init__(self, key): self.key = key self.merge_depth = -1 self.left_parent = None self.left_pending_parent = None self.pending_parents = None self._revno_first = -1 self._revno_second = -1 self._revno_last = -1 self.is_first_child = 0 self.seen_by_child = 0 self.completed = 0 def __repr__(self): return '%s(%s depth:%s rev:%s,%s,%s first:%s seen:%s)' % ( self.__class__.__name__, self.key, self.merge_depth, self._revno_first, self._revno_second, self._revno_last, self.is_first_child, self.seen_by_child) cdef int has_pending_parents(self): # cannot_raise if self.left_pending_parent is not None or self.pending_parents: return 1 return 0 cdef object _revno(self): if self._revno_first == -1: if self._revno_second != -1: raise RuntimeError('Something wrong with: %s' % (self,)) return (self._revno_last,) else: return (self._revno_first, self._revno_second, self._revno_last) property revno: def __get__(self): return self._revno() cdef class _MergeSorter: """This class does the work of computing the merge_sort ordering. 
We have some small advantages, in that we get all the extra information that KnownGraph knows, like knowing the child lists, etc. """ # Current performance numbers for merge_sort(bzr_dev_parent_map): # 302ms tsort.merge_sort() # 91ms graph.KnownGraph().merge_sort() # 40ms kg.merge_sort() cdef KnownGraph graph cdef object _depth_first_stack # list cdef Py_ssize_t _last_stack_item # offset to last item on stack # cdef object _ms_nodes # dict of key => _MergeSortNode cdef object _revno_to_branch_count # {revno => num child branches} cdef object _scheduled_nodes # List of nodes ready to be yielded def __init__(self, known_graph, tip_key): cdef _KnownGraphNode node self.graph = known_graph # self._ms_nodes = {} self._revno_to_branch_count = {} self._depth_first_stack = [] self._last_stack_item = -1 self._scheduled_nodes = [] if (tip_key is not None and tip_key != NULL_REVISION and tip_key != (NULL_REVISION,)): node = self.graph._nodes[tip_key] self._push_node(node, 0) cdef _MergeSortNode _get_ms_node(self, _KnownGraphNode node): cdef PyObject *temp_node cdef _MergeSortNode ms_node if node.extra is None: ms_node = _MergeSortNode(node.key) node.extra = ms_node else: ms_node = <_MergeSortNode>node.extra return ms_node cdef _push_node(self, _KnownGraphNode node, long merge_depth): cdef _KnownGraphNode parent_node cdef _MergeSortNode ms_node, ms_parent_node cdef Py_ssize_t pos ms_node = self._get_ms_node(node) ms_node.merge_depth = merge_depth if node.parents is None: raise RuntimeError('ghost nodes should not be pushed' ' onto the stack: %s' % (node,)) if PyTuple_GET_SIZE(node.parents) > 0: parent_node = _get_tuple_node(node.parents, 0) ms_node.left_parent = parent_node if parent_node.parents is None: # left-hand ghost ms_node.left_pending_parent = None ms_node.left_parent = None else: ms_node.left_pending_parent = parent_node if PyTuple_GET_SIZE(node.parents) > 1: ms_node.pending_parents = [] for pos from 1 <= pos < PyTuple_GET_SIZE(node.parents): parent_node = _get_tuple_node(node.parents, pos) if parent_node.parents is None: # ghost continue PyList_Append(ms_node.pending_parents, parent_node) ms_node.is_first_child = 1 if ms_node.left_parent is not None: ms_parent_node = self._get_ms_node(ms_node.left_parent) if ms_parent_node.seen_by_child: ms_node.is_first_child = 0 ms_parent_node.seen_by_child = 1 self._last_stack_item = self._last_stack_item + 1 if self._last_stack_item < PyList_GET_SIZE(self._depth_first_stack): Py_INCREF(node) # SetItem steals a ref PyList_SetItem(self._depth_first_stack, self._last_stack_item, node) else: PyList_Append(self._depth_first_stack, node) cdef _pop_node(self): cdef PyObject *temp cdef _MergeSortNode ms_node, ms_parent_node, ms_prev_node cdef _KnownGraphNode node, parent_node, prev_node node = _get_list_node(self._depth_first_stack, self._last_stack_item) ms_node = <_MergeSortNode>node.extra self._last_stack_item = self._last_stack_item - 1 if ms_node.left_parent is not None: # Assign the revision number from the left-hand parent ms_parent_node = <_MergeSortNode>ms_node.left_parent.extra if ms_node.is_first_child: # First child just increments the final digit ms_node._revno_first = ms_parent_node._revno_first ms_node._revno_second = ms_parent_node._revno_second ms_node._revno_last = ms_parent_node._revno_last + 1 else: # Not the first child, make a new branch # (mainline_revno, branch_count, 1) if ms_parent_node._revno_first == -1: # Mainline ancestor, the increment is on the last digit base_revno = ms_parent_node._revno_last else: base_revno = 
ms_parent_node._revno_first temp = PyDict_GetItem(self._revno_to_branch_count, base_revno) if temp == NULL: branch_count = 1 else: branch_count = (temp) + 1 PyDict_SetItem(self._revno_to_branch_count, base_revno, branch_count) ms_node._revno_first = base_revno ms_node._revno_second = branch_count ms_node._revno_last = 1 else: temp = PyDict_GetItem(self._revno_to_branch_count, 0) if temp == NULL: # The first root node doesn't have a 3-digit revno root_count = 0 ms_node._revno_first = -1 ms_node._revno_second = -1 ms_node._revno_last = 1 else: root_count = (temp) + 1 ms_node._revno_first = 0 ms_node._revno_second = root_count ms_node._revno_last = 1 PyDict_SetItem(self._revno_to_branch_count, 0, root_count) ms_node.completed = 1 if PyList_GET_SIZE(self._scheduled_nodes) == 0: # The first scheduled node is always the end of merge ms_node.end_of_merge = True else: prev_node = _get_list_node(self._scheduled_nodes, PyList_GET_SIZE(self._scheduled_nodes) - 1) ms_prev_node = <_MergeSortNode>prev_node.extra if ms_prev_node.merge_depth < ms_node.merge_depth: # The previously pushed node is to our left, so this is the end # of this right-hand chain ms_node.end_of_merge = True elif (ms_prev_node.merge_depth == ms_node.merge_depth and prev_node not in node.parents): # The next node is not a direct parent of this node ms_node.end_of_merge = True else: ms_node.end_of_merge = False PyList_Append(self._scheduled_nodes, node) cdef _schedule_stack(self): cdef _KnownGraphNode last_node, next_node cdef _MergeSortNode ms_node, ms_last_node, ms_next_node cdef long next_merge_depth ordered = [] while self._last_stack_item >= 0: # Peek at the last item on the stack last_node = _get_list_node(self._depth_first_stack, self._last_stack_item) if last_node.gdfo == -1: # if _find_gdfo skipped a node, that means there is a graph # cycle, error out now raise errors.GraphCycleError(self.graph._nodes) ms_last_node = <_MergeSortNode>last_node.extra if not ms_last_node.has_pending_parents(): # Processed all parents, pop this node self._pop_node() continue while ms_last_node.has_pending_parents(): if ms_last_node.left_pending_parent is not None: # recurse depth first into the primary parent next_node = ms_last_node.left_pending_parent ms_last_node.left_pending_parent = None else: # place any merges in right-to-left order for scheduling # which gives us left-to-right order after we reverse # the scheduled queue. # Note: This has the effect of allocating common-new # revisions to the right-most subtree rather than the # left most, which will display nicely (you get # smaller trees at the top of the combined merge). next_node = ms_last_node.pending_parents.pop() ms_next_node = self._get_ms_node(next_node) if ms_next_node.completed: # this parent was completed by a child on the # call stack. skip it. continue # otherwise transfer it from the source graph into the # top of the current depth first search stack. if next_node is ms_last_node.left_parent: next_merge_depth = ms_last_node.merge_depth else: next_merge_depth = ms_last_node.merge_depth + 1 self._push_node(next_node, next_merge_depth) # and do not continue processing parents until this 'call' # has recursed. break cdef topo_order(self): cdef _MergeSortNode ms_node cdef _KnownGraphNode node cdef Py_ssize_t pos cdef PyObject *temp_key cdef PyObject *temp_node # Note: allocating a _MergeSortNode and deallocating it for all nodes # costs approx 8.52ms (21%) of the total runtime # We might consider moving the attributes into the base # KnownGraph object. 
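        # Illustrative example (added note, not in the original source):
        #   KnownGraph({b'A': (), b'B': (b'A',)}).merge_sort(b'B') yields
        #   _MergeSortNode entries for b'B' (revno (2,)) then b'A'
        #   (revno (1,)), both at merge_depth 0, with end_of_merge True
        #   only for b'A'.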
self._schedule_stack() # We've set up the basic schedule, now we can continue processing the # output. # Note: This final loop costs us 40.0ms => 28.8ms (11ms, 25%) on # bzr.dev, to convert the internal Object representation into a # Tuple representation... # 2ms is walking the data and computing revno tuples # 7ms is computing the return tuple # 4ms is PyList_Append() ordered = [] # output the result in reverse order, and separate the generated info for pos from PyList_GET_SIZE(self._scheduled_nodes) > pos >= 0: node = _get_list_node(self._scheduled_nodes, pos) ms_node = <_MergeSortNode>node.extra PyList_Append(ordered, ms_node) node.extra = None # Clear out the scheduled nodes now that we're done self._scheduled_nodes = [] return ordered ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_readdir_py.py0000644000000000000000000000274700000000000015173 0ustar00# Copyright (C) 2006, 2008 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Python implementation of readdir interface.""" import stat _directory = 'directory' _chardev = 'chardev' _block = 'block' _file = 'file' _fifo = 'fifo' _symlink = 'symlink' _socket = 'socket' _unknown = 'unknown' _formats = { stat.S_IFDIR: 'directory', stat.S_IFCHR: 'chardev', stat.S_IFBLK: 'block', stat.S_IFREG: 'file', stat.S_IFIFO: 'fifo', stat.S_IFLNK: 'symlink', stat.S_IFSOCK: 'socket', } def _kind_from_mode(stat_mode, _formats=_formats, _unknown='unknown'): """Generate a file kind from a stat mode. This is used in walkdirs. It's performance is critical: Do not mutate without careful benchmarking. """ try: return _formats[stat_mode & 0o170000] except KeyError: return _unknown ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_readdir_pyx.pyx0000644000000000000000000003005300000000000015542 0ustar00# Copyright (C) 2006, 2008, 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
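# Illustrative sketch (not part of the original tree): exercising the
# pure-Python _kind_from_mode defined above, assuming the breezy package
# is importable. The mode bits of any directory map to 'directory'.
import os
from breezy._readdir_py import _kind_from_mode

assert _kind_from_mode(os.lstat('.').st_mode) == 'directory'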
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # cython: language_level=3 """Wrapper for readdir which returns files ordered by inode.""" import os import sys cdef extern from "python-compat.h": pass cdef extern from 'errno.h': int ENOENT int ENOTDIR int EAGAIN int EINTR char *strerror(int errno) # not necessarily a real variable, but this should be close enough int errno cdef extern from 'unistd.h': int chdir(char *path) int close(int fd) int fchdir(int fd) char *getcwd(char *, int size) cdef extern from 'stdlib.h': void *malloc(int) void free(void *) cdef extern from 'sys/types.h': ctypedef long ssize_t ctypedef unsigned long size_t ctypedef long time_t ctypedef unsigned long ino_t ctypedef unsigned long long off_t ctypedef int mode_t cdef extern from 'sys/stat.h': cdef struct stat: int st_mode off_t st_size int st_dev ino_t st_ino int st_mtime int st_ctime int lstat(char *path, stat *buf) int S_ISDIR(int mode) int S_ISCHR(int mode) int S_ISBLK(int mode) int S_ISREG(int mode) int S_ISFIFO(int mode) int S_ISLNK(int mode) int S_ISSOCK(int mode) cdef extern from 'fcntl.h': int O_RDONLY int open(char *pathname, int flags, mode_t mode) cdef extern from 'Python.h': int PyErr_CheckSignals() except -1 char * PyBytes_AS_STRING(object) ctypedef struct PyObject: pass Py_ssize_t PyBytes_Size(object s) object PyList_GetItem(object lst, Py_ssize_t index) void *PyList_GetItem_object_void "PyList_GET_ITEM" (object lst, int index) int PyList_Append(object lst, object item) except -1 void *PyTuple_GetItem_void_void "PyTuple_GET_ITEM" (void* tpl, int index) int PyTuple_SetItem(void *, Py_ssize_t pos, object item) except -1 int PyTuple_SetItem_obj "PyTuple_SetItem" (void *, Py_ssize_t pos, PyObject * item) except -1 void Py_INCREF(object o) void Py_DECREF(object o) void PyBytes_Concat(PyObject **string, object newpart) cdef extern from 'dirent.h': ctypedef struct dirent: char d_name[256] ino_t d_ino # the opaque C library DIR type. ctypedef struct DIR # should be DIR *, pyrex barfs. DIR * opendir(char * name) int closedir(DIR * dir) dirent *readdir(DIR *dir) cdef object _directory _directory = 'directory' cdef object _chardev _chardev = 'chardev' cdef object _block _block = 'block' cdef object _file _file = 'file' cdef object _fifo _fifo = 'fifo' cdef object _symlink _symlink = 'symlink' cdef object _socket _socket = 'socket' cdef object _unknown _unknown = 'unknown' # add a typedef struct dirent dirent to workaround pyrex cdef extern from 'readdir.h': pass cdef class _Stat: """Represent a 'stat' result.""" cdef stat _st property st_dev: def __get__(self): return self._st.st_dev property st_ino: def __get__(self): return self._st.st_ino property st_mode: def __get__(self): return self._st.st_mode property st_ctime: def __get__(self): return self._st.st_ctime property st_mtime: def __get__(self): return self._st.st_mtime property st_size: def __get__(self): return self._st.st_size def __repr__(self): """Repr is the same as a Stat object. (mode, ino, dev, nlink, uid, gid, size, None(atime), mtime, ctime) """ return repr((self.st_mode, 0, 0, 0, 0, 0, self.st_size, None, self.st_mtime, self.st_ctime)) from . 
import osutils cdef object _safe_utf8 _safe_utf8 = osutils.safe_utf8 cdef class UTF8DirReader: """A dir reader for utf8 file systems.""" def kind_from_mode(self, int mode): """Get the kind of a path from a mode status.""" return self._kind_from_mode(mode) cdef _kind_from_mode(self, int mode): # Files and directories are the most common - check them first. if S_ISREG(mode): return _file if S_ISDIR(mode): return _directory if S_ISCHR(mode): return _chardev if S_ISBLK(mode): return _block if S_ISLNK(mode): return _symlink if S_ISFIFO(mode): return _fifo if S_ISSOCK(mode): return _socket return _unknown def top_prefix_to_starting_dir(self, top, prefix=""): """See DirReader.top_prefix_to_starting_dir.""" return (_safe_utf8(prefix), None, None, None, _safe_utf8(top)) def read_dir(self, prefix, top): """Read a single directory from a utf8 file system. All paths in and out are utf8. This sub-function is called when we know the filesystem is already in utf8 encoding. So we don't need to transcode filenames. See DirReader.read_dir for details. """ #cdef char *_prefix = prefix #cdef char *_top = top # Use C accelerated directory listing. cdef object newval cdef int index cdef int length cdef void * atuple cdef object name cdef PyObject * new_val_obj if PyBytes_Size(prefix): relprefix = prefix + b'/' else: relprefix = b'' top_slash = top + b'/' # read_dir supplies in should-stat order. # for _, name in sorted(_listdir(top)): result = _read_dir(top) length = len(result) # result.sort() for index from 0 <= index < length: atuple = PyList_GetItem_object_void(result, index) name = PyTuple_GetItem_void_void(atuple, 1) # We have a tuple with (inode, name, None, statvalue, None) # Now edit it: # inode -> path_from_top # direct concat - faster than operator +. new_val_obj = relprefix Py_INCREF(relprefix) PyBytes_Concat(&new_val_obj, name) if NULL == new_val_obj: # PyBytes_Concat will have setup an exception, but how to get # at it? raise Exception("failed to strcat") PyTuple_SetItem_obj(atuple, 0, new_val_obj) # 1st None -> kind newval = self._kind_from_mode( (<_Stat>PyTuple_GetItem_void_void(atuple, 3)).st_mode) Py_INCREF(newval) PyTuple_SetItem(atuple, 2, newval) # 2nd None -> abspath # for all - the caller may need to stat files # etc. # direct concat - faster than operator +. new_val_obj = top_slash Py_INCREF(top_slash) PyBytes_Concat(&new_val_obj, name) if NULL == new_val_obj: # PyBytes_Concat will have setup an exception, but how to get # at it? raise Exception("failed to strcat") PyTuple_SetItem_obj(atuple, 4, new_val_obj) return result cdef raise_os_error(int errnum, char *msg_prefix, path): if errnum == EINTR: PyErr_CheckSignals() raise OSError(errnum, msg_prefix + strerror(errnum), path) cdef _read_dir(path): """Like os.listdir, this reads the contents of a directory. :param path: the directory to list. :return: a list of single-owner (the list) tuples ready for editing into the result tuples walkdirs needs to yield. They contain (inode, name, None, statvalue, None). """ cdef DIR *the_dir # currently this needs a fixup - the C code says 'dirent' but should say # 'struct dirent' cdef dirent * entry cdef dirent sentinel cdef char *name cdef int stat_result cdef _Stat statvalue global errno cdef int orig_dir_fd # Avoid chdir('') because it causes problems on Sun OS, and avoid this if # staying in . 
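# A minimal sketch of the save/enter/restore pattern used below, written
# with the Python-level equivalents of the C calls (os.open, os.chdir,
# os.fchdir, os.close); illustration only, not code that runs here:
#     fd = os.open(".", os.O_RDONLY)
#     try:
#         os.chdir(path)
#         ...  # lstat() entries by bare name, cheaper than full paths
#     finally:
#         os.fchdir(fd)  # return to wherever we started
#         os.close(fd)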
if path != b"" and path != b'.': # we change into the requested directory before reading, and back at the # end, because that turns out to make the stat calls measurably faster than # passing full paths every time. orig_dir_fd = open(".", O_RDONLY, 0) if orig_dir_fd == -1: raise_os_error(errno, "open: ", ".") if -1 == chdir(path): # Ignore the return value, because we are already raising an # exception close(orig_dir_fd) raise_os_error(errno, "chdir: ", path) else: orig_dir_fd = -1 try: the_dir = opendir(b".") if NULL == the_dir: raise_os_error(errno, "opendir: ", path) try: result = [] entry = &sentinel while entry != NULL: # Unlike most libc functions, readdir needs errno set to 0 # beforehand so that eof can be distinguished from errors. See # while True: errno = 0 entry = readdir(the_dir) if entry == NULL and (errno == EAGAIN or errno == EINTR): if errno == EINTR: PyErr_CheckSignals() # try again continue else: break if entry == NULL: if errno == ENOTDIR or errno == 0: # We see ENOTDIR at the end of a normal directory. # As ENOTDIR for read_dir(file) is triggered on opendir, # we consider ENOTDIR to be 'no error'. continue else: raise_os_error(errno, "readdir: ", path) name = entry.d_name if not (name[0] == c"." and ( (name[1] == 0) or (name[1] == c"." and name[2] == 0)) ): statvalue = _Stat() stat_result = lstat(entry.d_name, &statvalue._st) if stat_result != 0: if errno != ENOENT: raise_os_error(errno, "lstat: ", path + b"/" + entry.d_name) else: # the file seems to have disappeared after being # seen by readdir - perhaps a transient temporary # file. there's no point returning it. continue # We append a 5-tuple that can be modified in-place by the C # api: # inode to sort on (to replace with top_path) # name (to keep) # kind (None, to set) # statvalue (to keep) # abspath (None, to set) PyList_Append(result, (entry.d_ino, entry.d_name, None, statvalue, None)) finally: if -1 == closedir(the_dir): raise_os_error(errno, "closedir: ", path) finally: if -1 != orig_dir_fd: failed = False if -1 == fchdir(orig_dir_fd): # try to close the original directory anyhow failed = True if -1 == close(orig_dir_fd) or failed: raise_os_error(errno, "return to orig_dir: ", "") return result # vim: tw=79 ai expandtab sw=4 sts=4 ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_rio_py.py0000644000000000000000000000474600000000000014353 0ustar00# Copyright (C) 2009 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Python implementation of _read_stanza_*.""" import re from .rio import ( Stanza, ) _tag_re = re.compile(r'^[-a-zA-Z0-9_]+$') def _valid_tag(tag): if not isinstance(tag, str): raise TypeError(tag) return bool(_tag_re.match(tag)) def _read_stanza_utf8(line_iter): stanza = Stanza() tag = None accum_value = None # TODO: jam 20060922 This code should raise real errors rather than # using 'assert' to process user input, or raising ValueError # rather than a more specific error. for bline in line_iter: if not isinstance(bline, bytes): raise TypeError(bline) line = bline.decode('utf-8', 'surrogateescape') if line is None or line == u'': break # end of file if line == u'\n': break # end of stanza real_l = line if line[0] == u'\t': # continues previous value if tag is None: raise ValueError('invalid continuation line %r' % real_l) accum_value.append(u'\n' + line[1:-1]) else: # new tag:value line if tag is not None: stanza.add(tag, u''.join(accum_value)) try: colon_index = line.index(u': ') except ValueError: raise ValueError('tag/value separator not found in line %r' % real_l) tag = str(line[:colon_index]) if not _valid_tag(tag): raise ValueError("invalid rio tag %r" % (tag,)) accum_value = [line[colon_index + 2:-1]] if tag is not None: # add last tag-value stanza.add(tag, u''.join(accum_value)) return stanza else: # didn't see any content return None ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_rio_pyx.pyx0000644000000000000000000001433200000000000014723 0ustar00# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # cython: language_level=3 """Pyrex implementation of _read_stanza_*.""" cdef extern from "python-compat.h": pass from cpython.bytes cimport ( PyBytes_CheckExact, PyBytes_FromStringAndSize, PyBytes_AS_STRING, PyBytes_GET_SIZE, ) from cpython.unicode cimport ( PyUnicode_CheckExact, PyUnicode_DecodeUTF8, # Deprecated after PEP 393 changes PyUnicode_AS_UNICODE, PyUnicode_FromUnicode, PyUnicode_GET_SIZE, ) from cpython.list cimport ( PyList_Append, ) from cpython.mem cimport ( PyMem_Free, PyMem_Malloc, PyMem_Realloc, ) from cpython.version cimport ( PY_MAJOR_VERSION, ) cdef extern from "Python.h": ctypedef int Py_UNICODE object PyUnicode_EncodeASCII(Py_UNICODE *, int, char *) int Py_UNICODE_ISLINEBREAK(Py_UNICODE) # GZ 2017-09-11: Not sure why cython unicode module lacks this? 
object PyUnicode_FromStringAndSize(const char *u, Py_ssize_t size) # Python 3.3 or later unicode handling char* PyUnicode_AsUTF8AndSize(object unicode, Py_ssize_t *size) from libc.string cimport ( memcpy, ) from .rio import Stanza cdef int _valid_tag_char(char c): # cannot_raise return (c == c'_' or c == c'-' or (c >= c'a' and c <= c'z') or (c >= c'A' and c <= c'Z') or (c >= c'0' and c <= c'9')) def _valid_tag(tag): cdef char *c_tag cdef Py_ssize_t c_len cdef int i # GZ 2017-09-11: Encapsulate native string as ascii tag somewhere neater if PY_MAJOR_VERSION >= 3: if not PyUnicode_CheckExact(tag): raise TypeError(tag) c_tag = PyUnicode_AsUTF8AndSize(tag, &c_len) else: if not PyBytes_CheckExact(tag): raise TypeError(tag) c_tag = PyBytes_AS_STRING(tag) c_len = PyBytes_GET_SIZE(tag) if c_len < 1: return False for i from 0 <= i < c_len: if not _valid_tag_char(c_tag[i]): return False return True cdef object _split_first_line_utf8(char *line, int len, char *value, Py_ssize_t *value_len): cdef int i for i from 0 <= i < len: if line[i] == c':': if line[i+1] != c' ': raise ValueError("invalid tag in line %r" % line) memcpy(value, line+i+2, len-i-2) value_len[0] = len-i-2 if PY_MAJOR_VERSION >= 3: return PyUnicode_FromStringAndSize(line, i) return PyBytes_FromStringAndSize(line, i) raise ValueError('tag/value separator not found in line %r' % line) cdef object _split_first_line_unicode(Py_UNICODE *line, int len, Py_UNICODE *value, Py_ssize_t *value_len): cdef int i for i from 0 <= i < len: if line[i] == c':': if line[i+1] != c' ': raise ValueError("invalid tag in line %r" % PyUnicode_FromUnicode(line, len)) memcpy(value, &line[i+2], (len-i-2) * sizeof(Py_UNICODE)) value_len[0] = len-i-2 if PY_MAJOR_VERSION >= 3: return PyUnicode_FromUnicode(line, i) return PyUnicode_EncodeASCII(line, i, "strict") raise ValueError("tag/value separator not found in line %r" % PyUnicode_FromUnicode(line, len)) def _read_stanza_utf8(line_iter): cdef char *c_line cdef Py_ssize_t c_len cdef char *accum_value cdef char *new_accum_value cdef Py_ssize_t accum_len, accum_size pairs = [] tag = None accum_len = 0 accum_size = 4096 accum_value = PyMem_Malloc(accum_size) if accum_value == NULL: raise MemoryError try: for line in line_iter: if line is None: break # end of file if not PyBytes_CheckExact(line): raise TypeError("%r is not a plain string" % line) c_line = PyBytes_AS_STRING(line) c_len = PyBytes_GET_SIZE(line) if c_len < 1: break # end of file if c_len == 1 and c_line[0] == c"\n": break # end of stanza if accum_len + c_len > accum_size: accum_size = (accum_len + c_len) new_accum_value = PyMem_Realloc(accum_value, accum_size) if new_accum_value == NULL: raise MemoryError else: accum_value = new_accum_value if c_line[0] == c'\t': # continues previous value if tag is None: raise ValueError('invalid continuation line %r' % line) memcpy(accum_value+accum_len, c_line+1, c_len-1) accum_len = accum_len + c_len-1 else: # new tag:value line if tag is not None: PyList_Append(pairs, (tag, PyUnicode_DecodeUTF8(accum_value, accum_len-1, "strict"))) tag = _split_first_line_utf8(c_line, c_len, accum_value, &accum_len) if not _valid_tag(tag): raise ValueError("invalid rio tag %r" % (tag,)) if tag is not None: # add last tag-value PyList_Append(pairs, (tag, PyUnicode_DecodeUTF8(accum_value, accum_len-1, "surrogateescape"))) return Stanza.from_pairs(pairs) else: # didn't see any content return None finally: PyMem_Free(accum_value) ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 
breezy-3.2.1+bzr7585/breezy/_simple_set_pyx.pxd0000644000000000000000000001046700000000000016256 0ustar00# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # cython: language_level=3 """Interface definition of a class like PySet but without caching the hash. This is generally useful when you want to 'intern' objects, etc. Note that this differs from Set in that we: 1) Don't have all of the .intersection, .difference, etc functions 2) Do return the object from the set via queries eg. SimpleSet.add(key) => saved_key and SimpleSet[key] => saved_key """ from cpython.object cimport PyObject cdef public api class SimpleSet [object SimpleSetObject, type SimpleSet_Type]: """A class similar to PySet, but with simpler implementation. The main advantage is that this class uses only 2N memory to store N objects rather than 4N memory. The main trade-off is that we do not cache the hash value of saved objects. As such, it is assumed that computing the hash will be cheap (such as strings or tuples of strings, etc.) This also differs in that you can get back the objects that are stored (like a dict), but we also don't implement the complete list of 'set' operations (difference, intersection, etc). """ # Data structure definition: # This is a basic hash table using open addressing. # http://en.wikipedia.org/wiki/Open_addressing # Basically that means we keep an array of pointers to Python objects # (called a table). Each location in the array is called a 'slot'. # # An empty slot holds a NULL pointer, a slot where there was an item # which was then deleted will hold a pointer to _dummy, and a filled slot # points at the actual object which fills that slot. # # The table is always a power of two, and the default location where an # object is inserted is at hash(object) & (table_size - 1) # # If there is a collision, then we search for another location. The # specific algorithm is in _lookup. We search until we: # find the object # find an equivalent object (by tp_richcompare(obj1, obj2, Py_EQ)) # find a NULL slot # # When an object is deleted, we set its slot to _dummy. this way we don't # have to track whether there was a collision, and find the corresponding # keys. (The collision resolution algorithm makes that nearly impossible # anyway, because it depends on the upper bits of the hash.) # The main effect of this, is that if we find _dummy, then we can insert # an object there, but we have to keep searching until we find NULL to # know that the object is not present elsewhere. cdef Py_ssize_t _used # active cdef Py_ssize_t _fill # active + dummy cdef Py_ssize_t _mask # Table contains (mask+1) slots, a power of 2 cdef PyObject **_table # Pyrex/Cython doesn't support arrays to 'object' # so we manage it manually cdef PyObject *_get(self, object key) except? 
NULL cpdef object add(self, key) cpdef bint discard(self, key) except -1 cdef int _insert_clean(self, PyObject *key) except -1 cdef Py_ssize_t _resize(self, Py_ssize_t min_unused) except -1 # TODO: might want to export the C api here, though it is all available from # the class object... cdef api SimpleSet SimpleSet_New() cdef api object SimpleSet_Add(object self, object key) cdef api int SimpleSet_Contains(object self, object key) except -1 cdef api int SimpleSet_Discard(object self, object key) except -1 cdef api PyObject *SimpleSet_Get(SimpleSet self, object key) except? NULL cdef api Py_ssize_t SimpleSet_Size(object self) except -1 cdef api int SimpleSet_Next(object self, Py_ssize_t *pos, PyObject **key) except -1 ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_simple_set_pyx.pyx0000644000000000000000000004765500000000000016314 0ustar00# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # cython: language_level=3 """Definition of a class that is similar to Set with some small changes.""" from cpython.object cimport ( hashfunc, Py_EQ, PyObject_Hash, PyTypeObject, Py_TYPE, richcmpfunc, traverseproc, visitproc, ) from cpython.mem cimport ( PyMem_Malloc, PyMem_Free, ) from cpython.ref cimport ( Py_INCREF, Py_DECREF, ) from libc.string cimport memset # Dummy is an object used to mark nodes that have been deleted. Since # collisions require us to move a node to an alternative location, if we just # set an entry to NULL on delete, we won't find any relocated nodes. # We have to use _dummy_obj because we need to keep a refcount to it, but we # also use _dummy as a pointer, because it avoids having to put all # over the code base. cdef object _dummy_obj cdef PyObject *_dummy _dummy_obj = object() _dummy = _dummy_obj cdef object _NotImplemented _NotImplemented = NotImplemented cdef int _is_equal(object this, long this_hash, object other) except -1: cdef long other_hash other_hash = PyObject_Hash(other) if other_hash != this_hash: return 0 # This implements a subset of the PyObject_RichCompareBool functionality. # Namely it: # 1) Doesn't try to do anything with old-style classes # 2) Assumes that both objects have a tp_richcompare implementation, and # that if that is not enough to compare equal, then they are not # equal. (It doesn't try to cast them both to some intermediate form # that would compare equal.) res = Py_TYPE(this).tp_richcompare(this, other, Py_EQ) if res is _NotImplemented: res = Py_TYPE(other).tp_richcompare(other, this, Py_EQ) if res is _NotImplemented: return 0 if res: return 1 return 0 cdef public api class SimpleSet [object SimpleSetObject, type SimpleSet_Type]: """This class can be used to track canonical forms for objects. It is similar in function to the interned dictionary that is used by strings. 
However: 1) It assumes that hash(obj) is cheap, so does not need to inline a copy of it 2) It only stores one reference to the object, rather than 2 (key vs key:value) As such, it uses 1/3rd the amount of memory to store a pointer to the interned object. """ # Attributes are defined in the .pxd file DEF DEFAULT_SIZE=1024 def __init__(self): cdef Py_ssize_t size, n_bytes size = DEFAULT_SIZE self._mask = size - 1 self._used = 0 self._fill = 0 n_bytes = sizeof(PyObject*) * size; self._table = PyMem_Malloc(n_bytes) if self._table == NULL: raise MemoryError() memset(self._table, 0, n_bytes) def __sizeof__(self): # Note: Pyrex doesn't allow sizeof(class) so we re-implement it here. # Bits are: # 1: PyObject # 2: vtable * # 3: 3 Py_ssize_t # 4: PyObject** # Note that we might get alignment, etc, wrong, but at least this is # better than no estimate at all # return sizeof(SimpleSet) + (self._mask + 1) * (sizeof(PyObject*)) return (sizeof(PyObject) + sizeof(void*) + 3*sizeof(Py_ssize_t) + sizeof(PyObject**) + (self._mask + 1) * sizeof(PyObject*)) def __dealloc__(self): if self._table != NULL: PyMem_Free(self._table) self._table = NULL property used: def __get__(self): return self._used property fill: def __get__(self): return self._fill property mask: def __get__(self): return self._mask def _memory_size(self): """Return the number of bytes of memory consumed by this class.""" return sizeof(self) + (sizeof(PyObject*)*(self._mask + 1)) def __len__(self): return self._used def _test_lookup(self, key): cdef PyObject **slot slot = _lookup(self, key) if slot[0] == NULL: res = '' elif slot[0] == _dummy: res = '' else: res = slot[0] return (slot - self._table), res def __contains__(self, key): """Is key present in this SimpleSet.""" cdef PyObject **slot slot = _lookup(self, key) if slot[0] == NULL or slot[0] == _dummy: return False return True cdef PyObject *_get(self, object key) except? NULL: """Return the object (or nothing) define at the given location.""" cdef PyObject **slot slot = _lookup(self, key) if slot[0] == NULL or slot[0] == _dummy: return NULL return slot[0] def __getitem__(self, key): """Return a stored item that is equivalent to key.""" cdef PyObject *py_val py_val = self._get(key) if py_val == NULL: raise KeyError("Key %s is not present" % key) val = (py_val) return val cdef int _insert_clean(self, PyObject *key) except -1: """Insert a key into self.table. This is only meant to be used during times like '_resize', as it makes a lot of assuptions about keys not already being present, and there being no dummy entries. """ cdef size_t i, n_lookup cdef long the_hash cdef PyObject **table cdef PyObject **slot cdef Py_ssize_t mask mask = self._mask table = self._table the_hash = PyObject_Hash(key) i = the_hash for n_lookup from 0 <= n_lookup <= mask: # Don't loop forever slot = &table[i & mask] if slot[0] == NULL: slot[0] = key self._fill = self._fill + 1 self._used = self._used + 1 return 1 i = i + 1 + n_lookup raise RuntimeError('ran out of slots.') def _py_resize(self, min_used): """Do not use this directly, it is only exposed for testing.""" return self._resize(min_used) cdef Py_ssize_t _resize(self, Py_ssize_t min_used) except -1: """Resize the internal table. The final table will be big enough to hold at least min_used entries. We will copy the data from the existing table over, leaving out dummy entries. 
:return: The new size of the internal table """ cdef Py_ssize_t new_size, n_bytes, remaining cdef PyObject **new_table cdef PyObject **old_table cdef PyObject **slot new_size = DEFAULT_SIZE while new_size <= min_used and new_size > 0: new_size = new_size << 1 # We rolled over our signed size field if new_size <= 0: raise MemoryError() # Even if min_used == self._mask + 1, and we aren't changing the actual # size, we will still run the algorithm so that dummy entries are # removed # TODO: Test this # if new_size < self._used: # raise RuntimeError('cannot shrink SimpleSet to something' # ' smaller than the number of used slots.') n_bytes = sizeof(PyObject*) * new_size; new_table = PyMem_Malloc(n_bytes) if new_table == NULL: raise MemoryError() old_table = self._table self._table = new_table memset(self._table, 0, n_bytes) self._mask = new_size - 1 self._used = 0 remaining = self._fill self._fill = 0 # Moving everything to the other table is refcount neutral, so we don't # worry about it. slot = old_table while remaining > 0: if slot[0] == NULL: # unused slot pass elif slot[0] == _dummy: # dummy slot remaining = remaining - 1 else: # active slot remaining = remaining - 1 self._insert_clean(slot[0]) slot = slot + 1 PyMem_Free(old_table) return new_size cpdef object add(self, key): """Similar to set.add(), start tracking this key. There is one small difference, which is that we return the object that is stored at the given location. (which is closer to the dict.setdefault() functionality.) """ cdef PyObject **slot cdef bint added if (Py_TYPE(key).tp_richcompare == NULL or Py_TYPE(key).tp_hash == NULL): raise TypeError('Types added to SimpleSet must implement' ' both tp_richcompare and tp_hash') added = 0 # We need at least one empty slot assert self._used < self._mask slot = _lookup(self, key) if (slot[0] == NULL): Py_INCREF(key) self._fill = self._fill + 1 self._used = self._used + 1 slot[0] = key added = 1 elif (slot[0] == _dummy): Py_INCREF(key) self._used = self._used + 1 slot[0] = key added = 1 # No else: clause. If _lookup returns a pointer to # a live object, then we already have a value at this location. retval = (slot[0]) # PySet and PyDict use a 2-3rds full algorithm, we'll follow suit if added and (self._fill * 3) >= ((self._mask + 1) * 2): # However, we always work for a load factor of 2:1 self._resize(self._used * 2) # Even if we resized and ended up moving retval into a different slot, # it is still the value that is held at the slot equivalent to 'key', # so we can still return it return retval cpdef bint discard(self, key) except -1: """Remove key from the set, whether it exists or not. :return: False if the item did not exist, True if it did """ cdef PyObject **slot slot = _lookup(self, key) if slot[0] == NULL or slot[0] == _dummy: return 0 self._used = self._used - 1 Py_DECREF(slot[0]) slot[0] = _dummy # PySet uses the heuristic: If more than 1/5 are dummies, then resize # them away # if ((so->_fill - so->_used) * 5 < so->mask) # However, we are planning on using this as an interning structure, in # which we will be putting a lot of objects. And we expect that large # groups of them are going to have the same lifetime. 
# Dummy entries hurt a little bit because they cause the lookup to keep # searching, but resizing is also rather expensive # For now, we'll just use their algorithm, but we may want to revisit # it if ((self._fill - self._used) * 5 > self._mask): self._resize(self._used * 2) return 1 def __iter__(self): return _SimpleSet_iterator(self) cdef class _SimpleSet_iterator: """Iterator over the SimpleSet structure.""" cdef Py_ssize_t pos cdef SimpleSet set cdef Py_ssize_t _used # track if things have been mutated while iterating cdef Py_ssize_t len # number of entries left def __init__(self, obj): self.set = obj self.pos = 0 self._used = self.set._used self.len = self.set._used def __iter__(self): return self def __next__(self): cdef Py_ssize_t mask, i cdef PyObject *key if self.set is None: raise StopIteration if self.set._used != self._used: # Force this exception to continue to be raised self._used = -1 raise RuntimeError("Set size changed during iteration") if not SimpleSet_Next(self.set, &self.pos, &key): self.set = None raise StopIteration # we found something the_key = key # INCREF self.len = self.len - 1 return the_key def __length_hint__(self): if self.set is not None and self._used == self.set._used: return self.len return 0 cdef api SimpleSet SimpleSet_New(): """Create a new SimpleSet object.""" return SimpleSet() cdef SimpleSet _check_self(object self): """Check that the parameter is not None. Pyrex/Cython will do type checking, but only to ensure that an object is either the right type or None. You can say "object foo not None" for pure python functions, but not for C functions. So this is just a helper for all the apis that need to do the check. """ cdef SimpleSet true_self if self is None: raise TypeError('self must not be None') true_self = self return true_self cdef PyObject **_lookup(SimpleSet self, object key) except NULL: """Find the slot where 'key' would fit. This is the same as a dicts 'lookup' function. :param key: An object we are looking up :param hash: The hash for key :return: The location in self.table where key should be put. location == NULL is an exception, but (*location) == NULL just indicates the slot is empty and can be used. """ # This uses Quadratic Probing: # http://en.wikipedia.org/wiki/Quadratic_probing # with c1 = c2 = 1/2 # This leads to probe locations at: # h0 = hash(k1) # h1 = h0 + 1 # h2 = h0 + 3 = h1 + 1 + 1 # h3 = h0 + 6 = h2 + 1 + 2 # h4 = h0 + 10 = h2 + 1 + 3 # Note that all of these are '& mask', but that is computed *after* the # offset. # This differs from the algorithm used by Set and Dict. Which, effectively, # use double-hashing, and a step size that starts large, but dwindles to # stepping one-by-one. # This gives more 'locality' in that if you have a collision at offset X, # the first fallback is X+1, which is fast to check. However, that means # that an object w/ hash X+1 will also check there, and then X+2 next. # However, for objects with differing hashes, their chains are different. # The former checks X, X+1, X+3, ... the latter checks X+1, X+2, X+4, ... # So different hashes diverge quickly. # A bigger problem is that we *only* ever use the lowest bits of the hash # So all integers (x + SIZE*N) will resolve into the same bucket, and all # use the same collision resolution. We may want to try to find a way to # incorporate the upper bits of the hash with quadratic probing. (For # example, X, X+1, X+3+some_upper_bits, X+6+more_upper_bits, etc.) 
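# A minimal sketch of the probe sequence the loop below generates, in plain
# Python for illustration only (h is the key's hash, mask the table mask):
#     def probe_slots(h, mask):
#         i = h
#         for n_lookup in range(mask + 1):
#             yield i & mask
#             i = i + 1 + n_lookup
# i.e. offsets h, h+1, h+3, h+6, h+10, ... (triangular numbers), each
# taken modulo the table size.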
cdef size_t i, n_lookup cdef Py_ssize_t mask cdef long key_hash cdef PyObject **table cdef PyObject **slot cdef PyObject *cur cdef PyObject **free_slot key_hash = PyObject_Hash(key) i = key_hash mask = self._mask table = self._table free_slot = NULL for n_lookup from 0 <= n_lookup <= mask: # Don't loop forever slot = &table[i & mask] cur = slot[0] if cur == NULL: # Found a blank spot if free_slot != NULL: # Did we find an earlier _dummy entry? return free_slot else: return slot if cur == key: # Found an exact pointer to the key return slot if cur == _dummy: if free_slot == NULL: free_slot = slot elif _is_equal(key, key_hash, cur): # Both py_key and cur belong in this slot, return it return slot i = i + 1 + n_lookup raise AssertionError('should never get here') cdef api PyObject **_SimpleSet_Lookup(object self, object key) except NULL: """Find the slot where 'key' would fit. This is the same as a dict's 'lookup' function. This is a private api because mutating what you get without maintaining the other invariants is a 'bad thing'. :param key: An object we are looking up :param hash: The hash for key :return: The location in self._table where key should be put. This should never be NULL, but may reference a NULL (PyObject*) """ return _lookup(_check_self(self), key) cdef api object SimpleSet_Add(object self, object key): """Add a key to the SimpleSet (set). :param self: The SimpleSet to add the key to. :param key: The key to be added. If the key is already present, self will not be modified :return: The current key stored at the location defined by 'key'. This may be the same object, or it may be an equivalent object. (consider dict.setdefault(key, key)) """ return _check_self(self).add(key) cdef api int SimpleSet_Contains(object self, object key) except -1: """Is key present in self?""" return (key in _check_self(self)) cdef api int SimpleSet_Discard(object self, object key) except -1: """Remove the object referenced at location 'key'. :param self: The SimpleSet being modified :param key: The key we are checking on :return: 1 if there was an object present, 0 if there was not, and -1 on error. """ return _check_self(self).discard(key) cdef api PyObject *SimpleSet_Get(SimpleSet self, object key) except? NULL: """Get a pointer to the object present at location 'key'. This returns an object which is equal to key which was previously added to self. This returns a borrowed reference, as it may also return NULL if no value is present at that location. :param key: The value we are looking for :return: The object present at that location """ return _check_self(self)._get(key) cdef api Py_ssize_t SimpleSet_Size(object self) except -1: """Get the number of active entries in 'self'""" return _check_self(self)._used cdef api int SimpleSet_Next(object self, Py_ssize_t *pos, PyObject **key) except -1: """Walk over items in a SimpleSet. 
:param pos: should be initialized to 0 by the caller, and will be updated by this function :param key: Will return a borrowed reference to key :return: 0 if nothing left, 1 if we are returning a new value """ cdef Py_ssize_t i, mask cdef SimpleSet true_self cdef PyObject **table true_self = _check_self(self) i = pos[0] if (i < 0): return 0 mask = true_self._mask table= true_self._table while (i <= mask and (table[i] == NULL or table[i] == _dummy)): i = i + 1 pos[0] = i + 1 if (i > mask): return 0 # All done if (key != NULL): key[0] = table[i] return 1 cdef int SimpleSet_traverse(SimpleSet self, visitproc visit, void *arg) except -1: """This is an implementation of 'tp_traverse' that hits the whole table. Cython/Pyrex don't seem to let you define a tp_traverse, and they only define one for you if you have an 'object' attribute. Since they don't support C arrays of objects, we access the PyObject * directly. """ cdef Py_ssize_t pos cdef PyObject *next_key cdef int ret pos = 0 while SimpleSet_Next(self, &pos, &next_key): ret = visit(next_key, arg) if ret: return ret return 0 # It is a little bit ugly to do this, but it works, and means that Meliae can # dump the total memory consumed by all child objects. (SimpleSet).tp_traverse = SimpleSet_traverse ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_static_tuple_c.c0000644000000000000000000007701100000000000015641 0ustar00/* Copyright (C) 2009, 2010 Canonical Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* Must be defined before importing _static_tuple_c.h so that we get the right * linkage. */ #define STATIC_TUPLE_MODULE #include #include "python-compat.h" #include "_static_tuple_c.h" #include "_export_c_api.h" #include "_simple_set_pyx_api.h" #if defined(__GNUC__) # define inline __inline__ #elif defined(_MSC_VER) # define inline __inline #else # define inline #endif /* The one and only StaticTuple with no values */ static StaticTuple *_empty_tuple = NULL; static PyObject *_interned_tuples = NULL; static inline int _StaticTuple_is_interned(StaticTuple *self) { return self->flags & STATIC_TUPLE_INTERNED_FLAG; } static PyObject * StaticTuple_as_tuple(StaticTuple *self) { PyObject *tpl = NULL, *obj = NULL; int i, len; len = self->size; tpl = PyTuple_New(len); if (!tpl) { /* Malloc failure */ return NULL; } for (i = 0; i < len; ++i) { obj = (PyObject *)self->items[i]; Py_INCREF(obj); PyTuple_SET_ITEM(tpl, i, obj); } return tpl; } static char StaticTuple_as_tuple_doc[] = "as_tuple() => tuple"; static StaticTuple * StaticTuple_Intern(StaticTuple *self) { PyObject *canonical_tuple = NULL; if (_interned_tuples == NULL || _StaticTuple_is_interned(self)) { Py_INCREF(self); return self; } /* SimpleSet_Add returns whatever object is present at self * or the new object if it needs to add it. 
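 * In effect this behaves like dict.setdefault(key, key): the caller always
 * gets the canonical object back, whether or not a new entry was created.
 * A hedged sketch of the calling pattern (mirroring the code below):
 *     canonical = SimpleSet_Add(set, obj);
 *     if (canonical == NULL)  -> an error occurred, propagate it
 *     if (canonical != obj)   -> an equal tuple was already interned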
*/ canonical_tuple = SimpleSet_Add(_interned_tuples, (PyObject *)self); if (!canonical_tuple) { // Some sort of exception, propagate it. return NULL; } if (canonical_tuple != (PyObject *)self) { // There was already a tuple with that value return (StaticTuple *)canonical_tuple; } self->flags |= STATIC_TUPLE_INTERNED_FLAG; // The two references in the dict do not count, so that the StaticTuple // object does not become immortal just because it was interned. Py_SET_REFCNT(self, Py_REFCNT(self) - 1); return self; } static char StaticTuple_Intern_doc[] = "intern() => unique StaticTuple\n" "Return a 'canonical' StaticTuple object.\n" "Similar to intern() for strings, this makes sure there\n" "is only one StaticTuple object for a given value.\n" "Common usage is:\n" " key = StaticTuple('foo', 'bar').intern()\n"; static void StaticTuple_dealloc(StaticTuple *self) { int i, len; if (_StaticTuple_is_interned(self)) { /* revive dead object temporarily for Discard */ Py_SET_REFCNT(self, 2); if (SimpleSet_Discard(_interned_tuples, (PyObject*)self) != 1) Py_FatalError("deletion of interned StaticTuple failed"); self->flags &= ~STATIC_TUPLE_INTERNED_FLAG; } len = self->size; for (i = 0; i < len; ++i) { Py_XDECREF(self->items[i]); } Py_TYPE(self)->tp_free((PyObject *)self); } /* Similar to PyTuple_New() */ static StaticTuple * StaticTuple_New(Py_ssize_t size) { StaticTuple *stuple; if (size < 0 || size > 255) { /* Too big or too small */ PyErr_SetString(PyExc_ValueError, "StaticTuple(...)" " takes from 0 to 255 items"); return NULL; } if (size == 0 && _empty_tuple != NULL) { Py_INCREF(_empty_tuple); return _empty_tuple; } /* Note that we use PyObject_NewVar because we want to allocate a variable * width entry. However we *aren't* truly a PyVarObject because we don't * use a long for ob_size. Instead we use a plain 'size' that is an int, * and will be overloaded with flags in the future. * As such we do the alloc, and then have to clean up anything it does * incorrectly. 
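 * (Concretely: we never read back the ob_size that PyObject_NewVar fills
 * in; the unsigned char 'size' member assigned just below is the
 * authoritative length, and 'flags' and the _unused fields are likewise
 * zeroed by hand.)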
*/ stuple = PyObject_NewVar(StaticTuple, &StaticTuple_Type, size); if (stuple == NULL) { return NULL; } stuple->size = size; stuple->flags = 0; stuple->_unused0 = 0; stuple->_unused1 = 0; if (size > 0) { memset(stuple->items, 0, sizeof(PyObject *) * size); } #if STATIC_TUPLE_HAS_HASH stuple->hash = -1; #endif return stuple; } static StaticTuple * StaticTuple_FromSequence(PyObject *sequence) { StaticTuple *new = NULL; PyObject *as_tuple = NULL; PyObject *item; Py_ssize_t i, size; if (StaticTuple_CheckExact(sequence)) { Py_INCREF(sequence); return (StaticTuple *)sequence; } if (!PySequence_Check(sequence)) { as_tuple = PySequence_Tuple(sequence); if (as_tuple == NULL) goto done; sequence = as_tuple; } size = PySequence_Size(sequence); if (size == -1) { goto done; } new = StaticTuple_New(size); if (new == NULL) { goto done; } for (i = 0; i < size; ++i) { // This returns a new reference, which we then 'steal' with // StaticTuple_SET_ITEM item = PySequence_GetItem(sequence, i); if (item == NULL) { Py_DECREF(new); new = NULL; goto done; } StaticTuple_SET_ITEM(new, i, item); } done: Py_XDECREF(as_tuple); return (StaticTuple *)new; } static StaticTuple * StaticTuple_from_sequence(PyObject *self, PyObject *args, PyObject *kwargs) { PyObject *sequence; if (!PyArg_ParseTuple(args, "O", &sequence)) return NULL; return StaticTuple_FromSequence(sequence); } /* Check that all items we point to are 'valid' */ static int StaticTuple_check_items(StaticTuple *self) { int i; PyObject *obj; for (i = 0; i < self->size; ++i) { obj = self->items[i]; if (obj == NULL) { PyErr_SetString(PyExc_RuntimeError, "StaticTuple(...)" " should not have a NULL entry."); return 0; } if (PyBytes_CheckExact(obj) || StaticTuple_CheckExact(obj) || obj == Py_None || PyBool_Check(obj) #if PY_MAJOR_VERSION >= 3 #else || PyInt_CheckExact(obj) #endif || PyLong_CheckExact(obj) || PyFloat_CheckExact(obj) || PyUnicode_CheckExact(obj) ) continue; PyErr_Format(PyExc_TypeError, "StaticTuple(...)" " requires that all items are one of" " str, StaticTuple, None, bool, int, long, float, or unicode" " not %s.", Py_TYPE(obj)->tp_name); return 0; } return 1; } static PyObject * StaticTuple_new_constructor(PyTypeObject *type, PyObject *args, PyObject *kwds) { StaticTuple *self; PyObject *obj = NULL; Py_ssize_t i, len = 0; if (type != &StaticTuple_Type) { PyErr_SetString(PyExc_TypeError, "we only support creating StaticTuple"); return NULL; } if (!PyTuple_CheckExact(args)) { PyErr_SetString(PyExc_TypeError, "args must be a tuple"); return NULL; } len = PyTuple_GET_SIZE(args); if (len < 0 || len > 255) { /* Check the length here so we can raise a TypeError instead of * StaticTuple_New's ValueError. 
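 * (For example, a hypothetical StaticTuple(*([None] * 300)) call should
 * fail here with TypeError rather than with ValueError from the
 * allocator.)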
*/ PyErr_SetString(PyExc_TypeError, "StaticTuple(...)" " takes from 0 to 255 items"); return NULL; } self = (StaticTuple *)StaticTuple_New(len); if (self == NULL) { return NULL; } for (i = 0; i < len; ++i) { obj = PyTuple_GET_ITEM(args, i); Py_INCREF(obj); self->items[i] = obj; } if (!StaticTuple_check_items(self)) { type->tp_dealloc((PyObject *)self); return NULL; } return (PyObject *)self; } static PyObject * StaticTuple_repr(StaticTuple *self) { PyObject *as_tuple, *tuple_repr, *result; as_tuple = StaticTuple_as_tuple(self); if (as_tuple == NULL) { return NULL; } tuple_repr = PyObject_Repr(as_tuple); Py_DECREF(as_tuple); if (tuple_repr == NULL) { return NULL; } #if PY_MAJOR_VERSION >= 3 result = PyUnicode_FromFormat("StaticTuple%U", tuple_repr); #else result = PyString_FromFormat("StaticTuple%s", PyString_AsString(tuple_repr)); #endif return result; } /* adapted from tuplehash(), is the specific hash value considered * 'stable'? */ #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 8) /* Hash for tuples. This is a slightly simplified version of the xxHash non-cryptographic hash: - we do not use any parallellism, there is only 1 accumulator. - we drop the final mixing since this is just a permutation of the output space: it does not help against collisions. - at the end, we mangle the length with a single constant. For the xxHash specification, see https://github.com/Cyan4973/xxHash/blob/master/doc/xxhash_spec.md Below are the official constants from the xxHash specification. Optimizing compilers should emit a single "rotate" instruction for the _PyHASH_XXROTATE() expansion. If that doesn't happen for some important platform, the macro could be changed to expand to a platform-specific rotate spelling instead. */ #if SIZEOF_PY_UHASH_T > 4 #define _PyHASH_XXPRIME_1 ((Py_uhash_t)11400714785074694791ULL) #define _PyHASH_XXPRIME_2 ((Py_uhash_t)14029467366897019727ULL) #define _PyHASH_XXPRIME_5 ((Py_uhash_t)2870177450012600261ULL) #define _PyHASH_XXROTATE(x) ((x << 31) | (x >> 33)) /* Rotate left 31 bits */ #else #define _PyHASH_XXPRIME_1 ((Py_uhash_t)2654435761UL) #define _PyHASH_XXPRIME_2 ((Py_uhash_t)2246822519UL) #define _PyHASH_XXPRIME_5 ((Py_uhash_t)374761393UL) #define _PyHASH_XXROTATE(x) ((x << 13) | (x >> 19)) /* Rotate left 13 bits */ #endif /* Tests have shown that it's not worth to cache the hash value, see https://bugs.python.org/issue9685 */ static Py_hash_t StaticTuple_hash(StaticTuple *self) { Py_ssize_t i, len = self->size; PyObject **item = self->items; #if STATIC_TUPLE_HAS_HASH if (self->hash != -1) { return self->hash; } #endif Py_uhash_t acc = _PyHASH_XXPRIME_5; for (i = 0; i < len; i++) { Py_uhash_t lane = PyObject_Hash(item[i]); if (lane == (Py_uhash_t)-1) { return -1; } acc += lane * _PyHASH_XXPRIME_2; acc = _PyHASH_XXROTATE(acc); acc *= _PyHASH_XXPRIME_1; } /* Add input length, mangled to keep the historical value of hash(()). */ acc += len ^ (_PyHASH_XXPRIME_5 ^ 3527539UL); if (acc == (Py_uhash_t)-1) { acc = 1546275796; } #if STATIC_TUPLE_HAS_HASH self->hash = acc; #endif return acc; } #else static long StaticTuple_hash(StaticTuple *self) { /* adapted from tuplehash(), is the specific hash value considered * 'stable'? */ register long x, y; Py_ssize_t len = self->size; PyObject **p; long mult = 1000003L; #if STATIC_TUPLE_HAS_HASH if (self->hash != -1) { return self->hash; } #endif x = 0x345678L; p = self->items; // TODO: We could set specific flags if we know that, for example, all the // items are strings. 
I haven't seen a real-world benefit to that // yet, though. while (--len >= 0) { y = PyObject_Hash(*p++); if (y == -1) /* failure */ return -1; x = (x ^ y) * mult; /* the cast might truncate len; that doesn't change hash stability */ mult += (long)(82520L + len + len); } x += 97531L; if (x == -1) x = -2; #if STATIC_TUPLE_HAS_HASH self->hash = x; #endif return x; } #endif static PyObject * StaticTuple_richcompare_to_tuple(StaticTuple *v, PyObject *wt, int op) { PyObject *vt; PyObject *result = NULL; vt = StaticTuple_as_tuple((StaticTuple *)v); if (vt == NULL) { goto done; } if (!PyTuple_Check(wt)) { PyErr_BadInternalCall(); goto done; } /* Now we have 2 tuples to compare, do it */ result = PyTuple_Type.tp_richcompare(vt, wt, op); done: Py_XDECREF(vt); return result; } /** Compare two objects to determine if they are equivalent. * The basic flow is as follows * 1) First make sure that both objects are StaticTuple instances. If they * aren't then cast self to a tuple, and have the tuple do the comparison. * 2) Special case comparison to Py_None, because it happens to occur fairly * often in the test suite. * 3) Special case when v and w are the same pointer. As we know the answer to * all queries without walking individual items. * 4) For all operations, we then walk the items to find the first paired * items that are not equal. * 5) If all items found are equal, we then check the length of self and * other to determine equality. * 6) If an item differs, then we apply "op" to those last two items. (eg. * StaticTuple(A, B) > StaticTuple(A, C) iff B > C) */ static PyObject * StaticTuple_richcompare(PyObject *v, PyObject *w, int op) { StaticTuple *v_st, *w_st; Py_ssize_t vlen, wlen, min_len, i; PyObject *v_obj, *w_obj; richcmpfunc string_richcompare; if (!StaticTuple_CheckExact(v)) { /* This has never triggered, according to python-dev it seems this * might trigger if '__op__' is defined but '__rop__' is not, sort of * case. Such as "None == StaticTuple()" */ fprintf(stderr, "self is not StaticTuple\n"); Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } v_st = (StaticTuple *)v; if (StaticTuple_CheckExact(w)) { /* The most common case */ w_st = (StaticTuple*)w; } else if (PyTuple_Check(w)) { /* One of v or w is a tuple, so we go the 'slow' route and cast up to * tuples to compare. */ /* TODO: This seems to be triggering more than I thought it would... * We probably want to optimize comparing self to other when * other is a tuple. */ return StaticTuple_richcompare_to_tuple(v_st, w, op); } else if (w == Py_None) { // None is always less than the object switch (op) { case Py_NE: #if PY_MAJOR_VERSION >= 3 #else case Py_GT:case Py_GE: #endif Py_INCREF(Py_True); return Py_True; case Py_EQ: #if PY_MAJOR_VERSION >= 3 #else case Py_LT:case Py_LE: #endif Py_INCREF(Py_False); return Py_False; default: // Should only happen on Python 3 return Py_NotImplemented; } } else { /* We don't special case this comparison, we just let python handle * it. */ Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } /* Now we know that we have 2 StaticTuple objects, so let's compare them. * This code is inspired from tuplerichcompare, except we know our * objects are limited in scope, so we can inline some comparisons. */ if (v == w) { /* Identical pointers, we can shortcut this easily. 
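 * When v and w are the same object, the reflexive operators (EQ, LE, GE)
 * are trivially true and the strict ones (NE, LT, GT) trivially false,
 * which is exactly what the switch below encodes.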
*/ switch (op) { case Py_EQ:case Py_LE:case Py_GE: Py_INCREF(Py_True); return Py_True; case Py_NE:case Py_LT:case Py_GT: Py_INCREF(Py_False); return Py_False; } } if (op == Py_EQ && _StaticTuple_is_interned(v_st) && _StaticTuple_is_interned(w_st)) { /* If both objects are interned, we know they are different if the * pointer is not the same, which would have been handled by the * previous if. No need to compare the entries. */ Py_INCREF(Py_False); return Py_False; } /* The only time we are likely to compare items of different lengths is in * something like the interned_keys set. However, the hash is good enough * that it is rare. Note that 'tuple_richcompare' also does not compare * lengths here. */ vlen = v_st->size; wlen = w_st->size; min_len = (vlen < wlen) ? vlen : wlen; string_richcompare = PyBytes_Type.tp_richcompare; for (i = 0; i < min_len; i++) { PyObject *result = NULL; v_obj = StaticTuple_GET_ITEM(v_st, i); w_obj = StaticTuple_GET_ITEM(w_st, i); if (v_obj == w_obj) { /* Shortcut case, these must be identical */ continue; } if (PyBytes_CheckExact(v_obj) && PyBytes_CheckExact(w_obj)) { result = string_richcompare(v_obj, w_obj, Py_EQ); } else if (StaticTuple_CheckExact(v_obj) && StaticTuple_CheckExact(w_obj)) { /* Both are StaticTuple types, so recurse */ result = StaticTuple_richcompare(v_obj, w_obj, Py_EQ); } else { /* Fall back to generic richcompare */ result = PyObject_RichCompare(v_obj, w_obj, Py_EQ); } if (result == NULL) { return NULL; /* There seems to be an error */ } if (result == Py_False) { // This entry is not identical, Shortcut for Py_EQ if (op == Py_EQ) { return result; } Py_DECREF(result); break; } if (result != Py_True) { /* We don't know *what* richcompare is returning, but it * isn't something we recognize */ PyErr_BadInternalCall(); Py_DECREF(result); return NULL; } Py_DECREF(result); } if (i >= min_len) { /* We walked off one of the lists, but everything compared equal so * far. Just compare the size. */ int cmp; PyObject *res; switch (op) { case Py_LT: cmp = vlen < wlen; break; case Py_LE: cmp = vlen <= wlen; break; case Py_EQ: cmp = vlen == wlen; break; case Py_NE: cmp = vlen != wlen; break; case Py_GT: cmp = vlen > wlen; break; case Py_GE: cmp = vlen >= wlen; break; default: return NULL; /* cannot happen */ } if (cmp) res = Py_True; else res = Py_False; Py_INCREF(res); return res; } /* The last item differs, shortcut the Py_NE case */ if (op == Py_NE) { Py_INCREF(Py_True); return Py_True; } /* It is some other comparison, go ahead and do the real check. 
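 * For example, comparing StaticTuple(b'a', b'b') < StaticTuple(b'a', b'c')
 * arrives here with v_obj == b'b' and w_obj == b'c', and the Py_LT is
 * delegated to the per-item comparison below (bytes, nested StaticTuple,
 * or the generic rich compare).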
*/ if (PyBytes_CheckExact(v_obj) && PyBytes_CheckExact(w_obj)) { return string_richcompare(v_obj, w_obj, op); } else if (StaticTuple_CheckExact(v_obj) && StaticTuple_CheckExact(w_obj)) { /* Both are StaticTuple types, so recurse */ return StaticTuple_richcompare(v_obj, w_obj, op); } else { return PyObject_RichCompare(v_obj, w_obj, op); } } static Py_ssize_t StaticTuple_length(StaticTuple *self) { return self->size; } static PyObject * StaticTuple__is_interned(StaticTuple *self) { if (_StaticTuple_is_interned(self)) { Py_INCREF(Py_True); return Py_True; } Py_INCREF(Py_False); return Py_False; } static char StaticTuple__is_interned_doc[] = "_is_interned() => True/False\n" "Check to see if this tuple has been interned.\n"; static PyObject * StaticTuple_reduce(StaticTuple *self) { PyObject *result = NULL, *as_tuple = NULL; result = PyTuple_New(2); if (!result) { return NULL; } as_tuple = StaticTuple_as_tuple(self); if (as_tuple == NULL) { Py_DECREF(result); return NULL; } Py_INCREF(&StaticTuple_Type); PyTuple_SET_ITEM(result, 0, (PyObject *)&StaticTuple_Type); PyTuple_SET_ITEM(result, 1, as_tuple); return result; } static char StaticTuple_reduce_doc[] = "__reduce__() => tuple\n"; static PyObject * StaticTuple_add(PyObject *v, PyObject *w) { Py_ssize_t i, len_v, len_w; PyObject *item; StaticTuple *result; /* StaticTuples and plain tuples may be added (concatenated) to * StaticTuples. */ if (StaticTuple_CheckExact(v)) { len_v = ((StaticTuple*)v)->size; } else if (PyTuple_Check(v)) { len_v = PyTuple_GET_SIZE(v); } else { Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } if (StaticTuple_CheckExact(w)) { len_w = ((StaticTuple*)w)->size; } else if (PyTuple_Check(w)) { len_w = PyTuple_GET_SIZE(w); } else { Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } result = StaticTuple_New(len_v + len_w); if (result == NULL) return NULL; for (i = 0; i < len_v; ++i) { // This returns a new reference, which we then 'steal' with // StaticTuple_SET_ITEM item = PySequence_GetItem(v, i); if (item == NULL) { Py_DECREF(result); return NULL; } StaticTuple_SET_ITEM(result, i, item); } for (i = 0; i < len_w; ++i) { item = PySequence_GetItem(w, i); if (item == NULL) { Py_DECREF(result); return NULL; } StaticTuple_SET_ITEM(result, i+len_v, item); } if (!StaticTuple_check_items(result)) { Py_DECREF(result); return NULL; } return (PyObject *)result; } static PyObject * StaticTuple_item(StaticTuple *self, Py_ssize_t offset) { PyObject *obj; /* We cast to (int) to avoid worrying about whether Py_ssize_t is a * long long, etc. offsets should never be >2**31 anyway. 
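 * XXX: the (offset < 0) branch below sets IndexError but appears to fall
 * through; it looks like it needs its own 'return NULL;' so that we never
 * read items[offset] with a negative index after raising.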
*/ if (offset < 0) { PyErr_Format(PyExc_IndexError, "StaticTuple_item does not support" " negative indices: %d\n", (int)offset); } else if (offset >= self->size) { PyErr_Format(PyExc_IndexError, "StaticTuple index out of range" " %d >= %d", (int)offset, (int)self->size); return NULL; } obj = (PyObject *)self->items[offset]; Py_INCREF(obj); return obj; } #if PY_MAJOR_VERSION >= 3 #else static PyObject * StaticTuple_slice(StaticTuple *self, Py_ssize_t ilow, Py_ssize_t ihigh) { PyObject *as_tuple, *result; as_tuple = StaticTuple_as_tuple(self); if (as_tuple == NULL) { return NULL; } result = PyTuple_Type.tp_as_sequence->sq_slice(as_tuple, ilow, ihigh); Py_DECREF(as_tuple); return result; } #endif static PyObject * StaticTuple_subscript(StaticTuple *self, PyObject *key) { PyObject *as_tuple, *result; as_tuple = StaticTuple_as_tuple(self); if (as_tuple == NULL) { return NULL; } result = PyTuple_Type.tp_as_mapping->mp_subscript(as_tuple, key); Py_DECREF(as_tuple); return result; } static int StaticTuple_traverse(StaticTuple *self, visitproc visit, void *arg) { Py_ssize_t i; for (i = self->size; --i >= 0;) { Py_VISIT(self->items[i]); } return 0; } static PyObject * StaticTuple_sizeof(StaticTuple *self) { Py_ssize_t res; res = _PyObject_SIZE(&StaticTuple_Type) + (int)self->size * sizeof(void*); return PyInt_FromSsize_t(res); } static char StaticTuple_doc[] = "C implementation of a StaticTuple structure." "\n This is used as StaticTuple(item1, item2, item3)" "\n This is similar to tuple, less flexible in what it" "\n supports, but also lighter memory consumption." "\n Note that the constructor mimics the () form of tuples" "\n Rather than the 'tuple()' constructor." "\n eg. StaticTuple(a, b) == (a, b) == tuple((a, b))"; static PyMethodDef StaticTuple_methods[] = { {"as_tuple", (PyCFunction)StaticTuple_as_tuple, METH_NOARGS, StaticTuple_as_tuple_doc}, {"intern", (PyCFunction)StaticTuple_Intern, METH_NOARGS, StaticTuple_Intern_doc}, {"_is_interned", (PyCFunction)StaticTuple__is_interned, METH_NOARGS, StaticTuple__is_interned_doc}, {"from_sequence", (PyCFunction)StaticTuple_from_sequence, METH_STATIC | METH_VARARGS, "Create a StaticTuple from a given sequence. 
This functions" " the same as the tuple() constructor."}, {"__reduce__", (PyCFunction)StaticTuple_reduce, METH_NOARGS, StaticTuple_reduce_doc}, {"__sizeof__", (PyCFunction)StaticTuple_sizeof, METH_NOARGS}, {NULL, NULL} /* sentinel */ }; static PyNumberMethods StaticTuple_as_number = { (binaryfunc) StaticTuple_add, /* nb_add */ 0, /* nb_subtract */ 0, /* nb_multiply */ 0, /* nb_divide */ 0, /* nb_remainder */ 0, /* nb_divmod */ 0, /* nb_power */ 0, /* nb_negative */ 0, /* nb_positive */ 0, /* nb_absolute */ 0, /* nb_nonzero */ 0, /* nb_invert */ 0, /* nb_lshift */ 0, /* nb_rshift */ 0, /* nb_and */ 0, /* nb_xor */ 0, /* nb_or */ 0, /* nb_coerce */ }; static PySequenceMethods StaticTuple_as_sequence = { (lenfunc)StaticTuple_length, /* sq_length */ 0, /* sq_concat */ 0, /* sq_repeat */ (ssizeargfunc)StaticTuple_item, /* sq_item */ #if PY_MAJOR_VERSION >= 3 #else (ssizessizeargfunc)StaticTuple_slice, /* sq_slice */ #endif 0, /* sq_ass_item */ 0, /* sq_ass_slice */ 0, /* sq_contains */ #if PY_MAJOR_VERSION >= 3 0, /* sq_inplace_concat */ 0, /* sq_inplace_repeat */ #endif }; static PyMappingMethods StaticTuple_as_mapping = { (lenfunc)StaticTuple_length, /* mp_length */ (binaryfunc)StaticTuple_subscript, /* mp_subscript */ 0, /* mp_ass_subscript */ }; PyTypeObject StaticTuple_Type = { PyVarObject_HEAD_INIT(NULL, 0) "breezy._static_tuple_c.StaticTuple", /* tp_name */ sizeof(StaticTuple), /* tp_basicsize */ sizeof(PyObject *), /* tp_itemsize */ (destructor)StaticTuple_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ (reprfunc)StaticTuple_repr, /* tp_repr */ &StaticTuple_as_number, /* tp_as_number */ &StaticTuple_as_sequence, /* tp_as_sequence */ &StaticTuple_as_mapping, /* tp_as_mapping */ (hashfunc)StaticTuple_hash, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ /* Py_TPFLAGS_CHECKTYPES tells the number operations that they shouldn't * try to 'coerce' but instead stuff like 'add' will check it arguments. */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES, /* tp_flags*/ StaticTuple_doc, /* tp_doc */ /* gc.get_referents checks the IS_GC flag before it calls tp_traverse * And we don't include this object in the garbage collector because we * know it doesn't create cycles. However, 'meliae' will follow * tp_traverse, even if the object isn't GC, and we want that. */ (traverseproc)StaticTuple_traverse, /* tp_traverse */ 0, /* tp_clear */ StaticTuple_richcompare, /* tp_richcompare */ 0, /* tp_weaklistoffset */ // without implementing tp_iter, Python will fall back to PySequence* // which seems to work ok, we may need something faster/lighter in the // future. 
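    /* (With tp_iter left NULL, iter(st) falls back to CPython's generic
     * sequence iterator built on sq_item, i.e. StaticTuple_item above.) */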
    0,                                           /* tp_iter */
    0,                                           /* tp_iternext */
    StaticTuple_methods,                         /* tp_methods */
    0,                                           /* tp_members */
    0,                                           /* tp_getset */
    0,                                           /* tp_base */
    0,                                           /* tp_dict */
    0,                                           /* tp_descr_get */
    0,                                           /* tp_descr_set */
    0,                                           /* tp_dictoffset */
    0,                                           /* tp_init */
    0,                                           /* tp_alloc */
    StaticTuple_new_constructor,                 /* tp_new */
};


static PyMethodDef static_tuple_c_methods[] = {
    {NULL, NULL}
};


static void
setup_interned_tuples(PyObject *m)
{
    _interned_tuples = (PyObject *)SimpleSet_New();
    if (_interned_tuples != NULL) {
        Py_INCREF(_interned_tuples);
        PyModule_AddObject(m, "_interned_tuples", _interned_tuples);
    }
}


static void
setup_empty_tuple(PyObject *m)
{
    StaticTuple *stuple;
    if (_interned_tuples == NULL) {
        fprintf(stderr, "You need to call setup_interned_tuples() before"
                " setup_empty_tuple, because we intern it.\n");
    }
    // We need to create the empty tuple
    stuple = (StaticTuple *)StaticTuple_New(0);
    _empty_tuple = StaticTuple_Intern(stuple);
    assert(_empty_tuple == stuple);
    // At this point, refcnt is 2: 1 from New(), and 1 from the return from
    // intern(). We will keep 1 for the _empty_tuple global, and use the
    // other for the module reference.
    PyModule_AddObject(m, "_empty_tuple", (PyObject *)_empty_tuple);
}


static int
_StaticTuple_CheckExact(PyObject *obj)
{
    return StaticTuple_CheckExact(obj);
}


static void
setup_c_api(PyObject *m)
{
    _export_function(m, "StaticTuple_New", StaticTuple_New,
                     "StaticTuple *(Py_ssize_t)");
    _export_function(m, "StaticTuple_Intern", StaticTuple_Intern,
                     "StaticTuple *(StaticTuple *)");
    _export_function(m, "StaticTuple_FromSequence", StaticTuple_FromSequence,
                     "StaticTuple *(PyObject *)");
    _export_function(m, "_StaticTuple_CheckExact", _StaticTuple_CheckExact,
                     "int(PyObject *)");
}


PYMOD_INIT_FUNC(_static_tuple_c)
{
    PyObject* m;

    StaticTuple_Type.tp_getattro = PyObject_GenericGetAttr;
    if (PyType_Ready(&StaticTuple_Type) < 0) {
        return PYMOD_ERROR;
    }

    PYMOD_CREATE(m, "_static_tuple_c",
                 "C implementation of a StaticTuple structure",
                 static_tuple_c_methods);
    if (m == NULL) {
        return PYMOD_ERROR;
    }

    Py_INCREF(&StaticTuple_Type);
    PyModule_AddObject(m, "StaticTuple", (PyObject *)&StaticTuple_Type);
    if (import_breezy___simple_set_pyx() == -1) {
        return PYMOD_ERROR;
    }
    setup_interned_tuples(m);
    setup_empty_tuple(m);
    setup_c_api(m);
    return PYMOD_SUCCESS(m);
}

// vim: tabstop=4 sw=4 expandtab
././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_static_tuple_c.h0000644000000000000000000001015600000000000015643 0ustar00/* Copyright (C) 2009, 2010 Canonical Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _STATIC_TUPLE_H_
#define _STATIC_TUPLE_H_

#include <Python.h>
#include <string.h>

#define STATIC_TUPLE_HAS_HASH 0
/* Caching the hash adds memory, but allows us to save a little time during
 * lookups.
TIMEIT hash(key) shows it as * 0.108usec w/ hash * 0.160usec w/o hash * Note that the entries themselves are strings, which already cache their * hashes. So while there is a 1.5:1 difference in the time for hash(), it is * already a function which is quite fast. Probably the only reason we might * want to do so, is if we customized SimpleSet to the point that the item * pointers were exactly certain types, and then accessed table[i]->hash * directly. So far StaticTuple_hash() is fast enough to not warrant the memory * difference. */ /* This defines a single variable-width key. * It is basically the same as a tuple, but * 1) Lighter weight in memory * 2) Only supports strings or other static types (that don't reference other * objects.) */ #define STATIC_TUPLE_INTERNED_FLAG 0x01 typedef struct { PyObject_HEAD // We could go with unsigned short here, and support 64k width tuples // without any memory impact, might be worthwhile unsigned char size; unsigned char flags; unsigned char _unused0; unsigned char _unused1; // Note that on 64-bit, we actually have 4-more unused bytes // because items will always be aligned to a 64-bit boundary #if STATIC_TUPLE_HAS_HASH long hash; #endif PyObject *items[0]; } StaticTuple; extern PyTypeObject StaticTuple_Type; typedef struct { PyObject_VAR_HEAD PyObject *table[0]; } KeyIntern; #define StaticTuple_SET_ITEM(key, offset, val) \ ((((StaticTuple*)(key))->items[(offset)]) = ((PyObject *)(val))) #define StaticTuple_GET_ITEM(key, offset) (((StaticTuple*)key)->items[offset]) #define StaticTuple_GET_SIZE(key) (((StaticTuple*)key)->size) #ifdef STATIC_TUPLE_MODULE /* Used when compiling _static_tuple_c.c */ static StaticTuple * StaticTuple_New(Py_ssize_t); static StaticTuple * StaticTuple_Intern(StaticTuple *self); static StaticTuple * StaticTuple_FromSequence(PyObject *); #define StaticTuple_CheckExact(op) (Py_TYPE(op) == &StaticTuple_Type) #else /* Used as the foreign api */ #include "_import_c_api.h" static StaticTuple *(*StaticTuple_New)(Py_ssize_t); static StaticTuple *(*StaticTuple_Intern)(StaticTuple *); static StaticTuple *(*StaticTuple_FromSequence)(PyObject *); static PyTypeObject *_p_StaticTuple_Type; #define StaticTuple_CheckExact(op) (Py_TYPE(op) == _p_StaticTuple_Type) static int (*_StaticTuple_CheckExact)(PyObject *); /* Return -1 and set exception on error, 0 on success */ static int import_static_tuple_c(void) { struct function_description functions[] = { {"StaticTuple_New", (void **)&StaticTuple_New, "StaticTuple *(Py_ssize_t)"}, {"StaticTuple_Intern", (void **)&StaticTuple_Intern, "StaticTuple *(StaticTuple *)"}, {"StaticTuple_FromSequence", (void **)&StaticTuple_FromSequence, "StaticTuple *(PyObject *)"}, {"_StaticTuple_CheckExact", (void **)&_StaticTuple_CheckExact, "int(PyObject *)"}, {NULL}}; struct type_description types[] = { {"StaticTuple", &_p_StaticTuple_Type}, {NULL}}; return _import_extension_module("breezy._static_tuple_c", functions, types); } #endif // !STATIC_TUPLE_MODULE #endif // !_STATIC_TUPLE_H_ ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_static_tuple_c.pxd0000644000000000000000000000372000000000000016206 0ustar00# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # cython: language_level=3 """The interface definition file for the StaticTuple class.""" cdef extern from "Python.h": ctypedef struct PyObject: pass cdef extern from "_static_tuple_c.h": ctypedef class breezy._static_tuple_c.StaticTuple [object StaticTuple]: cdef unsigned char size cdef unsigned char flags cdef PyObject *items[0] # Must be called before using any of the C api, as it sets the function # pointers in memory. int import_static_tuple_c() except -1 StaticTuple StaticTuple_New(Py_ssize_t) StaticTuple StaticTuple_Intern(StaticTuple) StaticTuple StaticTuple_FromSequence(object) # Steals a reference and val must be a valid type, no checking is done void StaticTuple_SET_ITEM(StaticTuple key, Py_ssize_t offset, object val) # We would normally use PyObject * here. However it seems that cython/pyrex # treat the PyObject defined in this header as something different than one # defined in a .pyx file. And since we don't INCREF, we need a raw pointer, # not an 'object' return value. void *StaticTuple_GET_ITEM(StaticTuple key, Py_ssize_t offset) int StaticTuple_CheckExact(object) Py_ssize_t StaticTuple_GET_SIZE(StaticTuple key) ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_static_tuple_py.py0000644000000000000000000000536600000000000016261 0ustar00# Copyright (C) 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """The pure-python implementation of the StaticTuple type. Note that it is generally just implemented as using tuples of tuples of strings. """ import sys class StaticTuple(tuple): """A static type, similar to a tuple of strings.""" __slots__ = () def __new__(cls, *args): # Make the empty StaticTuple a singleton if not args and _empty_tuple is not None: return _empty_tuple return tuple.__new__(cls, args) def __init__(self, *args): """Create a new 'StaticTuple'""" num_keys = len(args) if num_keys < 0 or num_keys > 255: raise TypeError('StaticTuple(...) takes from 0 to 255 items') for bit in args: if type(bit) not in _valid_types: raise TypeError('StaticTuple can only point to' ' StaticTuple, str, unicode, int, float, bool, or' ' None not %s' % (type(bit),)) # We don't need to pass args to tuple.__init__, because that was # already handled in __new__. 
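        # (Illustrative semantics, not executed here: StaticTuple('a', 'b')
        # behaves like the tuple ('a', 'b'); nesting such as
        # StaticTuple(StaticTuple('a',), 'b') is fine, while a mutable
        # element like a list would have raised TypeError in the loop above.)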
tuple.__init__(self) def __repr__(self): return '%s%s' % (self.__class__.__name__, tuple.__repr__(self)) def __reduce__(self): return (StaticTuple, tuple(self)) def __add__(self, other): """Concatenate self with other""" return StaticTuple.from_sequence(tuple.__add__(self, other)) def as_tuple(self): return tuple(self) def intern(self): return _interned_tuples.setdefault(self, self) @staticmethod def from_sequence(seq): """Convert a sequence object into a StaticTuple instance.""" if isinstance(seq, StaticTuple): # it already is return seq return StaticTuple(*seq) _valid_types = (bytes, str, StaticTuple, int, float, None.__class__, bool) # Have to set it to None first, so that __new__ can determine whether # the _empty_tuple singleton has been created yet or not. _empty_tuple = None _empty_tuple = StaticTuple() _interned_tuples = {} ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_termcolor.py0000644000000000000000000000367600000000000015061 0ustar00# Copyright (C) 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA import os import sys class FG(object): """Unix terminal foreground color codes (16-color).""" RED = '\033[31m' GREEN = '\033[32m' YELLOW = '\033[33m' BLUE = '\033[34m' MAGENTA = '\033[35m' CYAN = '\033[36m' WHITE = '\033[37m' # Bold Foreground BOLD_RED = '\033[1;31m' BOLD_GREEN = '\033[1;32m' BOLD_YELLOW = '\033[1;33m' BOLD_BLUE = '\033[1;34m' BOLD_MAGENTA = '\033[1;35m' BOLD_CYAN = '\033[1;36m' BOLD_WHITE = '\033[1;37m' NONE = '\033[0m' class BG(object): """Unix terminal background color codes (16-color).""" BLACK = '\033[40m' RED = '\033[41m' GREEN = '\033[42m' YELLOW = '\033[43m' BLUE = '\033[44m' MAGENTA = '\033[45m' CYAN = '\033[46m' WHITE = '\033[47m' NONE = '\033[0m' def color_string(s, fg, bg=''): return fg + bg + s + FG.NONE def re_color_string(compiled_pattern, s, fg): return compiled_pattern.sub(fg + r'\1' + FG.NONE, s) def allow_color(): if os.name != 'posix': return False if not sys.stdout.isatty(): return False try: import curses curses.setupterm() return curses.tigetnum('colors') > 2 except curses.error: return False ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/_walkdirs_win32.pyx0000644000000000000000000002363700000000000016104 0ustar00# Copyright (C) 2008-2012 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # cython: language_level=3 """Helper functions for Walkdirs on win32.""" cdef extern from "python-compat.h": struct _HANDLE: pass ctypedef _HANDLE *HANDLE ctypedef unsigned long DWORD ctypedef long long __int64 ctypedef unsigned short WCHAR struct _FILETIME: DWORD dwHighDateTime DWORD dwLowDateTime ctypedef _FILETIME FILETIME struct _WIN32_FIND_DATAW: DWORD dwFileAttributes FILETIME ftCreationTime FILETIME ftLastAccessTime FILETIME ftLastWriteTime DWORD nFileSizeHigh DWORD nFileSizeLow # Some reserved stuff here WCHAR cFileName[260] # MAX_PATH WCHAR cAlternateFilename[14] # We have to use the typedef trick, otherwise pyrex uses: # struct WIN32_FIND_DATAW # which fails due to 'incomplete type' ctypedef _WIN32_FIND_DATAW WIN32_FIND_DATAW HANDLE INVALID_HANDLE_VALUE HANDLE FindFirstFileW(WCHAR *path, WIN32_FIND_DATAW *data) int FindNextFileW(HANDLE search, WIN32_FIND_DATAW *data) int FindClose(HANDLE search) DWORD FILE_ATTRIBUTE_READONLY DWORD FILE_ATTRIBUTE_DIRECTORY int ERROR_NO_MORE_FILES int GetLastError() # Wide character functions DWORD wcslen(WCHAR *) cdef extern from "Python.h": WCHAR *PyUnicode_AS_UNICODE(object) Py_ssize_t PyUnicode_GET_SIZE(object) object PyUnicode_FromUnicode(WCHAR *, Py_ssize_t) int PyList_Append(object, object) except -1 object PyUnicode_AsUTF8String(object) import operator import os import stat from . import _readdir_py cdef object osutils osutils = None cdef class _Win32Stat: """Represent a 'stat' result generated from WIN32_FIND_DATA""" cdef readonly int st_mode cdef readonly double st_ctime cdef readonly double st_mtime cdef readonly double st_atime # We can't just declare this as 'readonly' because python2.4 doesn't define # T_LONGLONG as a structure member. So instead we just use a property that # will convert it correctly anyway. cdef __int64 _st_size property st_size: def __get__(self): return self._st_size # os.stat always returns 0, so we hard code it here property st_dev: def __get__(self): return 0 property st_ino: def __get__(self): return 0 # st_uid and st_gid required for some external tools like bzr-git & dulwich property st_uid: def __get__(self): return 0 property st_gid: def __get__(self): return 0 def __repr__(self): """Repr is the same as a Stat object. 
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) """ return repr((self.st_mode, 0, 0, 0, 0, 0, self.st_size, self.st_atime, self.st_mtime, self.st_ctime)) cdef object _get_name(WIN32_FIND_DATAW *data): """Extract the Unicode name for this file/dir.""" return PyUnicode_FromUnicode(data.cFileName, wcslen(data.cFileName)) cdef int _get_mode_bits(WIN32_FIND_DATAW *data): # cannot_raise cdef int mode_bits mode_bits = 0100666 # writeable file, the most common if data.dwFileAttributes & FILE_ATTRIBUTE_READONLY == FILE_ATTRIBUTE_READONLY: mode_bits = mode_bits ^ 0222 # remove the write bits if data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY == FILE_ATTRIBUTE_DIRECTORY: # Remove the FILE bit, set the DIR bit, and set the EXEC bits mode_bits = mode_bits ^ 0140111 return mode_bits cdef __int64 _get_size(WIN32_FIND_DATAW *data): # cannot_raise # Pyrex casts a DWORD into a PyLong anyway, so it is safe to do << 32 # on a DWORD return ((<__int64>data.nFileSizeHigh) << 32) + data.nFileSizeLow cdef double _ftime_to_timestamp(FILETIME *ft): # cannot_raise """Convert from a FILETIME struct into a floating point timestamp. The fields of a FILETIME structure are the hi and lo part of a 64-bit value expressed in 100 nanosecond units. 1e7 is one second in such units; 1e-7 the inverse. 429.4967296 is 2**32 / 1e7 or 2**32 * 1e-7. It also uses the epoch 1601-01-01 rather than 1970-01-01 (taken from posixmodule.c) """ cdef __int64 val # NB: This gives slightly different results versus casting to a 64-bit # integer and doing integer math before casting into a floating # point number. But the difference is in the sub millisecond range, # which doesn't seem critical here. # secs between epochs: 11,644,473,600 val = ((<__int64>ft.dwHighDateTime) << 32) + ft.dwLowDateTime return (val * 1.0e-7) - 11644473600.0 cdef int _should_skip(WIN32_FIND_DATAW *data): # cannot_raise """Is this '.' or '..' so we should skip it?""" if (data.cFileName[0] != c'.'): return 0 if data.cFileName[1] == c'\0': return 1 if data.cFileName[1] == c'.' and data.cFileName[2] == c'\0': return 1 return 0 cdef class Win32ReadDir: """Read directories on win32.""" cdef object _directory_kind cdef object _file_kind def __init__(self): self._directory_kind = _readdir_py._directory self._file_kind = _readdir_py._file def top_prefix_to_starting_dir(self, top, prefix=""): """See DirReader.top_prefix_to_starting_dir.""" global osutils if osutils is None: from . import osutils return (osutils.safe_utf8(prefix), None, None, None, osutils.safe_unicode(top)) cdef object _get_kind(self, WIN32_FIND_DATAW *data): if data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY: return self._directory_kind return self._file_kind cdef _Win32Stat _get_stat_value(self, WIN32_FIND_DATAW *data): """Get the filename and the stat information.""" cdef _Win32Stat statvalue statvalue = _Win32Stat() statvalue.st_mode = _get_mode_bits(data) statvalue.st_ctime = _ftime_to_timestamp(&data.ftCreationTime) statvalue.st_mtime = _ftime_to_timestamp(&data.ftLastWriteTime) statvalue.st_atime = _ftime_to_timestamp(&data.ftLastAccessTime) statvalue._st_size = _get_size(data) return statvalue def read_dir(self, prefix, top): """Win32 implementation of DirReader.read_dir. 
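        Each entry appended to the returned dirblock is a 5-tuple of
        (relpath, name_utf8, kind, _Win32Stat, abspath); with purely
        illustrative values that might look like (b'lib/foo.py', b'foo.py',
        'file', <_Win32Stat>, u'C:/src/lib/foo.py'). The block is sorted by
        the utf-8 encoded name before it is returned.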
:seealso: DirReader.read_dir """ cdef WIN32_FIND_DATAW search_data cdef HANDLE hFindFile cdef int last_err cdef WCHAR *query cdef int result if prefix: relprefix = prefix + '/' else: relprefix = '' top_slash = top + '/' top_star = top_slash + '*' dirblock = [] query = PyUnicode_AS_UNICODE(top_star) hFindFile = FindFirstFileW(query, &search_data) if hFindFile == INVALID_HANDLE_VALUE: # Raise an exception? This path doesn't seem to exist raise WindowsError(GetLastError(), top_star) try: result = 1 while result: # Skip '.' and '..' if _should_skip(&search_data): result = FindNextFileW(hFindFile, &search_data) continue name_unicode = _get_name(&search_data) name_utf8 = PyUnicode_AsUTF8String(name_unicode) PyList_Append(dirblock, (relprefix + name_utf8, name_utf8, self._get_kind(&search_data), self._get_stat_value(&search_data), top_slash + name_unicode)) result = FindNextFileW(hFindFile, &search_data) # FindNextFileW sets GetLastError() == ERROR_NO_MORE_FILES when it # actually finishes. If we have anything else, then we have a # genuine problem last_err = GetLastError() if last_err != ERROR_NO_MORE_FILES: raise WindowsError(last_err) finally: result = FindClose(hFindFile) if result == 0: last_err = GetLastError() # TODO: We should probably raise an exception if FindClose # returns an error, however, I don't want to supress an # earlier Exception, so for now, I'm ignoring this dirblock.sort(key=operator.itemgetter(1)) return dirblock def lstat(path): """Equivalent to os.lstat, except match Win32ReadDir._get_stat_value. """ return wrap_stat(os.lstat(path)) def fstat(fd): """Like os.fstat, except match Win32ReadDir._get_stat_value :seealso: wrap_stat """ return wrap_stat(os.fstat(fd)) def wrap_stat(st): """Return a _Win32Stat object, based on the given stat result. On Windows, os.fstat(open(fname).fileno()) != os.lstat(fname). This is generally because os.lstat and os.fstat differ in what they put into st_ino and st_dev. What gets set where seems to also be dependent on the python version. So we always set it to 0 to avoid worrying about it. """ cdef _Win32Stat statvalue statvalue = _Win32Stat() statvalue.st_mode = st.st_mode statvalue.st_ctime = st.st_ctime statvalue.st_mtime = st.st_mtime statvalue.st_atime = st.st_atime statvalue._st_size = st.st_size return statvalue ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/add.py0000644000000000000000000001227300000000000013435 0ustar00# Copyright (C) 2005-2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Helper functions for adding files to working trees.""" import sys import os from . 
import ( errors, osutils, ui, ) from .i18n import gettext class AddAction(object): """A class which defines what action to take when adding a file.""" def __init__(self, to_file=None, should_print=None): """Initialize an action which prints added files to an output stream. :param to_file: The stream to write into. This is expected to take Unicode paths. If not supplied, it will default to ``sys.stdout``. :param should_print: If False, printing will be suppressed. """ self._to_file = to_file if to_file is None: self._to_file = sys.stdout self.should_print = False if should_print is not None: self.should_print = should_print def __call__(self, inv, parent_ie, path, kind, _quote=osutils.quotefn): """Add path to inventory. The default action does nothing. :param inv: The inventory we are working with. :param path: The FastPath being added :param kind: The kind of the object being added. """ if self.should_print: self._to_file.write('adding %s\n' % _quote(path)) return None def skip_file(self, tree, path, kind, stat_value=None): """Test whether the given file should be skipped or not. The default action never skips. Note this is only called during recursive adds :param tree: The tree we are working in :param path: The path being added :param kind: The kind of object being added. :param stat: Stat result for this file, if available already :return bool. True if the file should be skipped (not added) """ return False class AddWithSkipLargeAction(AddAction): """A class that can decide to skip a file if it's considered too large""" _maxSize = None def skip_file(self, tree, path, kind, stat_value=None): if kind != 'file': return False opt_name = 'add.maximum_file_size' if self._maxSize is None: config = tree.get_config_stack() self._maxSize = config.get(opt_name) if stat_value is None: file_size = os.path.getsize(path) else: file_size = stat_value.st_size if self._maxSize > 0 and file_size > self._maxSize: ui.ui_factory.show_warning(gettext( "skipping {0} (larger than {1} of {2} bytes)").format( path, opt_name, self._maxSize)) return True return False class AddFromBaseAction(AddAction): """This class will try to extract file ids from another tree.""" def __init__(self, base_tree, base_path, to_file=None, should_print=None): super(AddFromBaseAction, self).__init__(to_file=to_file, should_print=should_print) self.base_tree = base_tree self.base_path = base_path def __call__(self, inv, parent_ie, path, kind): # Place the parent call # Now check to see if we can extract an id for this file file_id, base_path = self._get_base_file_id(path, parent_ie) if file_id is not None: if self.should_print: self._to_file.write('adding %s w/ file id from %s\n' % (path, base_path)) else: # we aren't doing anything special, so let the default # reporter happen file_id = super(AddFromBaseAction, self).__call__( inv, parent_ie, path, kind) return file_id def _get_base_file_id(self, path, parent_ie): """Look for a file id in the base branch. First, if the base tree has the parent directory, we look for a file with the same name in that directory. Else, we look for an entry in the base tree with the same path. 
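        For example (paths purely illustrative): when adding 'src/foo.c',
        if the base tree has the parent directory, we first try 'foo.c'
        inside that directory; failing that, we fall back to looking up
        self.base_path joined with 'src/foo.c'.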
""" try: parent_path = self.base_tree.id2path(parent_ie.file_id) except errors.NoSuchId: pass else: base_path = osutils.pathjoin(parent_path, osutils.basename(path)) base_id = self.base_tree.path2id(base_path) if base_id is not None: return (base_id, base_path) full_base_path = osutils.pathjoin(self.base_path, path) # This may return None, but it is our last attempt return self.base_tree.path2id(full_base_path), full_base_path ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/annotate.py0000644000000000000000000004464400000000000014525 0ustar00# Copyright (C) 2005-2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """File annotate based on weave storage""" # TODO: Choice of more or less verbose formats: # # interposed: show more details between blocks of modified lines # TODO: Show which revision caused a line to merge into the parent # TODO: perhaps abbreviate timescales depending on how recent they are # e.g. "3:12 Tue", "13 Oct", "Oct 2005", etc. import sys import time from .lazy_import import lazy_import lazy_import(globals(), """ import patiencediff from breezy import ( tsort, ) """) from . import ( config, errors, osutils, ) from .repository import _strip_NULL_ghosts from .revision import ( CURRENT_REVISION, Revision, ) def annotate_file_tree(tree, path, to_file, verbose=False, full=False, show_ids=False, branch=None): """Annotate path in a tree. The tree should already be read_locked() when annotate_file_tree is called. :param tree: The tree to look for revision numbers and history from. :param path: The path to annotate :param to_file: The file to output the annotation to. :param verbose: Show all details rather than truncating to ensure reasonable text width. :param full: XXXX Not sure what this does. :param show_ids: Show revision ids in the annotation output. :param branch: Branch to use for revision revno lookups """ if branch is None: branch = tree.branch if to_file is None: to_file = sys.stdout encoding = osutils.get_terminal_encoding() # Handle the show_ids case annotations = list(tree.annotate_iter(path)) if show_ids: return _show_id_annotations(annotations, to_file, full, encoding) if not getattr(tree, "get_revision_id", False): # Create a virtual revision to represent the current tree state. # Should get some more pending commit attributes, like pending tags, # bugfixes etc. current_rev = Revision(CURRENT_REVISION) current_rev.parent_ids = tree.get_parent_ids() try: current_rev.committer = branch.get_config_stack().get('email') except errors.NoWhoami: current_rev.committer = 'local user' current_rev.message = "?" 
current_rev.timestamp = round(time.time(), 3) current_rev.timezone = osutils.local_time_offset() else: current_rev = None annotation = list(_expand_annotations( annotations, branch, current_rev)) _print_annotations(annotation, verbose, to_file, full, encoding) def _print_annotations(annotation, verbose, to_file, full, encoding): """Print annotations to to_file. :param to_file: The file to output the annotation to. :param verbose: Show all details rather than truncating to ensure reasonable text width. :param full: XXXX Not sure what this does. """ if len(annotation) == 0: max_origin_len = max_revno_len = 0 else: max_origin_len = max(len(x[1]) for x in annotation) max_revno_len = max(len(x[0]) for x in annotation) if not verbose: max_revno_len = min(max_revno_len, 12) max_revno_len = max(max_revno_len, 3) # Output the annotations prevanno = '' for (revno_str, author, date_str, line_rev_id, text) in annotation: if verbose: anno = '%-*s %-*s %8s ' % (max_revno_len, revno_str, max_origin_len, author, date_str) else: if len(revno_str) > max_revno_len: revno_str = revno_str[:max_revno_len - 1] + '>' anno = "%-*s %-7s " % (max_revno_len, revno_str, author[:7]) if anno.lstrip() == "" and full: anno = prevanno # GZ 2017-05-21: Writing both unicode annotation and bytes from file # which the given to_file must cope with. to_file.write(anno) to_file.write('| %s\n' % (text.decode(encoding),)) prevanno = anno def _show_id_annotations(annotations, to_file, full, encoding): if not annotations: return last_rev_id = None max_origin_len = max(len(origin) for origin, text in annotations) for origin, text in annotations: if full or last_rev_id != origin: this = origin else: this = b'' to_file.write('%*s | %s' % ( max_origin_len, this.decode('utf-8'), text.decode(encoding))) last_rev_id = origin return def _expand_annotations(annotations, branch, current_rev=None): """Expand a file's annotations into command line UI ready tuples. Each tuple includes detailed information, such as the author name, and date string for the commit, rather than just the revision id. :param annotations: The annotations to expand. :param revision_id_to_revno: A map from id to revision numbers. :param branch: A locked branch to query for revision details. """ repository = branch.repository revision_ids = set(o for o, t in annotations) if current_rev is not None: # This can probably become a function on MutableTree, get_revno_map # there, or something. last_revision = current_rev.revision_id # XXX: Partially Cloned from branch, uses the old_get_graph, eep. # XXX: The main difficulty is that we need to inject a single new node # (current_rev) into the graph before it gets numbered, etc. # Once KnownGraph gets an 'add_node()' function, we can use # VF.get_known_graph_ancestry(). graph = repository.get_graph() revision_graph = { key: value for key, value in graph.iter_ancestry(current_rev.parent_ids) if value is not None} revision_graph = _strip_NULL_ghosts(revision_graph) revision_graph[last_revision] = current_rev.parent_ids merge_sorted_revisions = tsort.merge_sort( revision_graph, last_revision, None, generate_revno=True) revision_id_to_revno = { rev_id: revno for seq_num, rev_id, depth, revno, end_of_merge in merge_sorted_revisions} else: # TODO(jelmer): Only look up the revision ids that we need (i.e. those # in revision_ids). Possibly add a HPSS call that can look those up # in bulk over HPSS. 
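        # (Shape reminder: the map is {revision_id: dotted_revno_tuple},
        # for example {b'rev-id': (3, 1, 2)} for a hypothetical merged
        # revision; revno_str below joins such a tuple with '.' into '3.1.2'.)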
revision_id_to_revno = branch.get_revision_id_to_revno_map() last_origin = None revisions = {} if CURRENT_REVISION in revision_ids: revision_id_to_revno[CURRENT_REVISION] = ( "%d?" % (branch.revno() + 1),) revisions[CURRENT_REVISION] = current_rev revisions.update( entry for entry in repository.iter_revisions(revision_ids) if entry[1] is not None) for origin, text in annotations: text = text.rstrip(b'\r\n') if origin == last_origin: (revno_str, author, date_str) = ('', '', '') else: last_origin = origin if origin not in revisions: (revno_str, author, date_str) = ('?', '?', '?') else: revno_str = '.'.join( str(i) for i in revision_id_to_revno[origin]) rev = revisions[origin] tz = rev.timezone or 0 date_str = time.strftime('%Y%m%d', time.gmtime(rev.timestamp + tz)) # a lazy way to get something like the email address # TODO: Get real email address author = rev.get_apparent_authors()[0] _, email = config.parse_username(author) if email: author = email yield (revno_str, author, date_str, origin, text) def reannotate(parents_lines, new_lines, new_revision_id, _left_matching_blocks=None, heads_provider=None): """Create a new annotated version from new lines and parent annotations. :param parents_lines: List of annotated lines for all parents :param new_lines: The un-annotated new lines :param new_revision_id: The revision-id to associate with new lines (will often be CURRENT_REVISION) :param left_matching_blocks: a hint about which areas are common between the text and its left-hand-parent. The format is the SequenceMatcher.get_matching_blocks format (start_left, start_right, length_of_match). :param heads_provider: An object which provides a .heads() call to resolve if any revision ids are children of others. If None, then any ancestry disputes will be resolved with new_revision_id """ if len(parents_lines) == 0: lines = [(new_revision_id, line) for line in new_lines] elif len(parents_lines) == 1: lines = _reannotate(parents_lines[0], new_lines, new_revision_id, _left_matching_blocks) elif len(parents_lines) == 2: left = _reannotate(parents_lines[0], new_lines, new_revision_id, _left_matching_blocks) lines = _reannotate_annotated(parents_lines[1], new_lines, new_revision_id, left, heads_provider) else: reannotations = [_reannotate(parents_lines[0], new_lines, new_revision_id, _left_matching_blocks)] reannotations.extend(_reannotate(p, new_lines, new_revision_id) for p in parents_lines[1:]) lines = [] for annos in zip(*reannotations): origins = set(a for a, l in annos) if len(origins) == 1: # All the parents agree, so just return the first one lines.append(annos[0]) else: line = annos[0][1] if len(origins) == 2 and new_revision_id in origins: origins.remove(new_revision_id) if len(origins) == 1: lines.append((origins.pop(), line)) else: lines.append((new_revision_id, line)) return lines def _reannotate(parent_lines, new_lines, new_revision_id, matching_blocks=None): new_cur = 0 if matching_blocks is None: plain_parent_lines = [l for r, l in parent_lines] matcher = patiencediff.PatienceSequenceMatcher( None, plain_parent_lines, new_lines) matching_blocks = matcher.get_matching_blocks() lines = [] for i, j, n in matching_blocks: for line in new_lines[new_cur:j]: lines.append((new_revision_id, line)) lines.extend(parent_lines[i:i + n]) new_cur = j + n return lines def _get_matching_blocks(old, new): matcher = patiencediff.PatienceSequenceMatcher(None, old, new) return matcher.get_matching_blocks() _break_annotation_tie = None def _old_break_annotation_tie(annotated_lines): """Chose an attribution 
    between several possible ones.

    :param annotated_lines: A list of tuples ((file_id, rev_id), line) where
        the lines are identical but the revids differ, while no parent
        relation exists between them
    :return: The "winning" line. This must be one with a revid that
        guarantees that further criss-cross merges will converge. Failing to
        do so has performance implications.
    """
    # Sort lexicographically so that we always get a stable result.

    # TODO: while 'sort' is the easiest (and nearly the only possible
    # solution) with the current implementation, choosing the oldest
    # revision is known to provide better results (as in matching user
    # expectations). The most common use case is a manual cherry-pick from
    # an already existing revision.
    return sorted(annotated_lines)[0]


def _find_matching_unannotated_lines(output_lines,
                                     plain_child_lines, child_lines,
                                     start_child, end_child,
                                     right_lines, start_right, end_right,
                                     heads_provider, revision_id):
    """Find lines in plain_right_lines that match the existing lines.

    :param output_lines: Append final annotated lines to this list
    :param plain_child_lines: The unannotated new lines for the child text
    :param child_lines: Lines for the child text which have been annotated
        for the left parent
    :param start_child: Position in plain_child_lines and child_lines to
        start the match searching
    :param end_child: Last position in plain_child_lines and child_lines to
        search for a match
    :param right_lines: The annotated lines for the whole text for the right
        parent
    :param start_right: Position in right_lines to start the match
    :param end_right: Last position in right_lines to search for a match
    :param heads_provider: When parents disagree on the lineage of a line, we
        need to check if one side supersedes the other
    :param revision_id: The label to give if a line should be labeled 'tip'
    """
    output_extend = output_lines.extend
    output_append = output_lines.append
    # We need to see if any of the unannotated lines match
    plain_right_subset = [l for a, l in right_lines[start_right:end_right]]
    plain_child_subset = plain_child_lines[start_child:end_child]
    match_blocks = _get_matching_blocks(plain_right_subset,
                                        plain_child_subset)
    last_child_idx = 0
    for right_idx, child_idx, match_len in match_blocks:
        # All the lines that don't match are just passed along
        if child_idx > last_child_idx:
            output_extend(child_lines[start_child + last_child_idx:
                                      start_child + child_idx])
        for offset in range(match_len):
            left = child_lines[start_child + child_idx + offset]
            right = right_lines[start_right + right_idx + offset]
            if left[0] == right[0]:
                # The annotations match, just return the left one
                output_append(left)
            elif left[0] == revision_id:
                # The left parent marked this as unmatched, so let the
                # right parent claim it
                output_append(right)
            else:
                # Left and Right both claim this line
                if heads_provider is None:
                    output_append((revision_id, left[1]))
                else:
                    heads = heads_provider.heads((left[0], right[0]))
                    if len(heads) == 1:
                        output_append((next(iter(heads)), left[1]))
                    else:
                        # Both claim different origins; get a stable result.
                        # If the result is not stable, there is a risk of
                        # performance degradation as criss-cross merges will
                        # flip-flop the attribution.
                        if _break_annotation_tie is None:
                            output_append(
                                _old_break_annotation_tie([left, right]))
                        else:
                            output_append(
                                _break_annotation_tie([left, right]))
        last_child_idx = child_idx + match_len


def _reannotate_annotated(right_parent_lines, new_lines, new_revision_id,
                          annotated_lines, heads_provider):
    """Update the annotations for a node based on another parent.
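    Annotated lines here are (revision_id, line_bytes) pairs, so a two-line
    text might look like [(b'rev-a', b'x\n'), (b'rev-b', b'y\n')] (the ids
    are purely illustrative).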
:param right_parent_lines: A list of annotated lines for the right-hand parent. :param new_lines: The unannotated new lines. :param new_revision_id: The revision_id to attribute to lines which are not present in either parent. :param annotated_lines: A list of annotated lines. This should be the annotation of new_lines based on parents seen so far. :param heads_provider: When parents disagree on the lineage of a line, we need to check if one side supersedes the other. """ if len(new_lines) != len(annotated_lines): raise AssertionError("mismatched new_lines and annotated_lines") # First compare the newly annotated lines with the right annotated lines. # Lines which were not changed in left or right should match. This tends to # be the bulk of the lines, and they will need no further processing. lines = [] lines_extend = lines.extend # The line just after the last match from the right side last_right_idx = 0 last_left_idx = 0 matching_left_and_right = _get_matching_blocks(right_parent_lines, annotated_lines) for right_idx, left_idx, match_len in matching_left_and_right: # annotated lines from last_left_idx to left_idx did not match the # lines from last_right_idx to right_idx, the raw lines should be # compared to determine what annotations need to be updated if last_right_idx == right_idx or last_left_idx == left_idx: # One of the sides is empty, so this is a pure insertion lines_extend(annotated_lines[last_left_idx:left_idx]) else: # We need to see if any of the unannotated lines match _find_matching_unannotated_lines(lines, new_lines, annotated_lines, last_left_idx, left_idx, right_parent_lines, last_right_idx, right_idx, heads_provider, new_revision_id) last_right_idx = right_idx + match_len last_left_idx = left_idx + match_len # If left and right agree on a range, just push that into the output lines_extend(annotated_lines[left_idx:left_idx + match_len]) return lines try: from breezy._annotator_pyx import Annotator except ImportError as e: osutils.failed_to_load_extension(e) from breezy._annotator_py import Annotator # noqa: F401 ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/archive/0000755000000000000000000000000000000000000013747 5ustar00././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/atomicfile.py0000644000000000000000000000764400000000000015027 0ustar00# Copyright (C) 2005, 2006, 2008, 2009, 2010 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA import os import stat from breezy import ( errors, osutils, ) # not forksafe - but we dont fork. 
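# (Both values feed the temporary file name built in AtomicFile.__init__,
# '<name>.<pid>.<hostname>.<random>.tmp', so concurrent writers, even on
# different hosts sharing a network filesystem, get distinct temp paths.)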
_pid = os.getpid() _hostname = None class AtomicFileAlreadyClosed(errors.PathError): _fmt = ('"%(function)s" called on an AtomicFile after it was closed:' ' "%(path)s"') def __init__(self, path, function): errors.PathError.__init__(self, path=path, extra=None) self.function = function class AtomicFile(object): """A file that does an atomic-rename to move into place. This also causes hardlinks to break when it's written out. Open this as for a regular file, then use commit() to move into place or abort() to cancel. """ __slots__ = ['tmpfilename', 'realfilename', '_fd'] def __init__(self, filename, mode='wb', new_mode=None): global _hostname self._fd = None if _hostname is None: _hostname = osutils.get_host_name() self.tmpfilename = '%s.%d.%s.%s.tmp' % (filename, _pid, _hostname, osutils.rand_chars(10)) self.realfilename = filename flags = os.O_EXCL | os.O_CREAT | os.O_WRONLY | osutils.O_NOINHERIT if mode == 'wb': flags |= osutils.O_BINARY elif mode != 'wt': raise ValueError("invalid AtomicFile mode %r" % mode) if new_mode is not None: local_mode = new_mode else: local_mode = 0o666 # Use a low level fd operation to avoid chmodding later. # This may not succeed, but it should help most of the time self._fd = os.open(self.tmpfilename, flags, local_mode) if new_mode is not None: # Because of umask issues, we may need to chmod anyway # the common case is that we won't, though. st = os.fstat(self._fd) if stat.S_IMODE(st.st_mode) != new_mode: osutils.chmod_if_possible(self.tmpfilename, new_mode) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self.realfilename) def write(self, data): """Write some data to the file. Like file.write()""" os.write(self._fd, data) def _close_tmpfile(self, func_name): """Close the local temp file in preparation for commit or abort""" if self._fd is None: raise AtomicFileAlreadyClosed(path=self.realfilename, function=func_name) fd = self._fd self._fd = None os.close(fd) def commit(self): """Close the file and move to final name.""" self._close_tmpfile('commit') osutils.rename(self.tmpfilename, self.realfilename) def abort(self): """Discard temporary file without committing changes.""" self._close_tmpfile('abort') os.remove(self.tmpfilename) def close(self): """Discard the file unless already committed.""" if self._fd is not None: self.abort() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type: self.abort() return False self.commit() ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/bedding.py0000644000000000000000000002261700000000000014304 0ustar00# Copyright (C) 2005-2014, 2016 Canonical Ltd # Copyright (C) 2019 Breezy developers # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """Functions for deriving user configuration from system environment.""" import os import sys from .lazy_import import lazy_import lazy_import(globals(), """ from breezy import ( osutils, trace, win32utils, ) """) from . import ( errors, ) def ensure_config_dir_exists(path=None): """Make sure a configuration directory exists. This makes sure that the directory exists. On windows, since configuration directories are 2 levels deep, it makes sure both the directory and the parent directory exists. """ if path is None: path = config_dir() if not os.path.isdir(path): parent_dir = os.path.dirname(path) if not os.path.isdir(parent_dir): trace.mutter( 'creating config parent directory: %r', parent_dir) os.mkdir(parent_dir) osutils.copy_ownership_from_path(parent_dir) trace.mutter('creating config directory: %r', path) os.mkdir(path) osutils.copy_ownership_from_path(path) def bazaar_config_dir(): """Return per-user configuration directory as unicode string By default this is %APPDATA%/bazaar/2.0 on Windows, ~/.bazaar on Mac OS X and Linux. On Mac OS X and Linux, if there is a $XDG_CONFIG_HOME/bazaar directory, that will be used instead TODO: Global option --config-dir to override this. """ base = os.environ.get('BZR_HOME') if sys.platform == 'win32': if base is None: base = win32utils.get_appdata_location() if base is None: base = win32utils.get_home_location() return osutils.pathjoin(base, 'bazaar', '2.0') if base is None: xdg_dir = os.environ.get('XDG_CONFIG_HOME') if xdg_dir is None: xdg_dir = osutils.pathjoin(osutils._get_home_dir(), ".config") xdg_dir = osutils.pathjoin(xdg_dir, 'bazaar') if osutils.isdir(xdg_dir): trace.mutter( "Using configuration in XDG directory %s." % xdg_dir) return xdg_dir base = osutils._get_home_dir() return osutils.pathjoin(base, ".bazaar") def _config_dir(): """Return per-user configuration directory as unicode string By default this is %APPDATA%/breezy on Windows, $XDG_CONFIG_HOME/breezy on Mac OS X and Linux. If the breezy config directory doesn't exist but the bazaar one (see bazaar_config_dir()) does, use that instead. """ # TODO: Global option --config-dir to override this. base = os.environ.get('BRZ_HOME') if sys.platform == 'win32': if base is None: base = win32utils.get_appdata_location() if base is None: # Assume that AppData location is ALWAYS DEFINED, # and don't look for %HOME%, as we aren't sure about # where the files should be stored in %HOME%: # on other platforms the directory is ~/.config/, # but that would be incompatible with older Bazaar versions. raise RuntimeError('Unable to determine AppData location') if base is None: base = os.environ.get('XDG_CONFIG_HOME') if base is None: base = osutils.pathjoin(osutils._get_home_dir(), ".config") breezy_dir = osutils.pathjoin(base, 'breezy') if osutils.isdir(breezy_dir): return (breezy_dir, 'breezy') # If the breezy directory doesn't exist, but the bazaar one does, use that: bazaar_dir = bazaar_config_dir() if osutils.isdir(bazaar_dir): trace.mutter( "Using Bazaar configuration directory (%s)", bazaar_dir) return (bazaar_dir, 'bazaar') return (breezy_dir, 'breezy') def config_dir(): """Return per-user configuration directory as unicode string By default this is %APPDATA%/breezy on Windows, $XDG_CONFIG_HOME/breezy on Mac OS X and Linux. 
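    (In practice this usually resolves to something like
    C:/Users/<user>/AppData/Roaming/breezy or ~/.config/breezy.)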
If the breezy config directory doesn't exist but the bazaar one (see bazaar_config_dir()) does, use that instead. """ return _config_dir()[0] def config_path(): """Return per-user configuration ini file filename.""" path, kind = _config_dir() if kind == 'bazaar': return osutils.pathjoin(path, 'bazaar.conf') else: return osutils.pathjoin(path, 'breezy.conf') def locations_config_path(): """Return per-user configuration ini file filename.""" return osutils.pathjoin(config_dir(), 'locations.conf') def authentication_config_path(): """Return per-user authentication ini file filename.""" return osutils.pathjoin(config_dir(), 'authentication.conf') def user_ignore_config_path(): """Return per-user authentication ini file filename.""" return osutils.pathjoin(config_dir(), 'ignore') def crash_dir(): """Return the directory name to store crash files. This doesn't implicitly create it. On Windows it's in the config directory; elsewhere it's /var/crash which may be monitored by apport. It can be overridden by $APPORT_CRASH_DIR. """ if sys.platform == 'win32': return osutils.pathjoin(config_dir(), 'Crash') else: # XXX: hardcoded in apport_python_hook.py; therefore here too -- mbp # 2010-01-31 return os.environ.get('APPORT_CRASH_DIR', '/var/crash') def cache_dir(): """Return the cache directory to use.""" base = os.environ.get('BRZ_HOME') if sys.platform in "win32": if base is None: base = win32utils.get_local_appdata_location() if base is None: base = win32utils.get_home_location() else: base = os.environ.get('XDG_CACHE_HOME') if base is None: base = osutils.pathjoin(osutils._get_home_dir(), ".cache") cache_dir = osutils.pathjoin(base, "breezy") # GZ 2019-06-15: Move responsibility for ensuring dir exists elsewhere? if not os.path.exists(cache_dir): os.makedirs(cache_dir) return cache_dir def _get_default_mail_domain(mailname_file='/etc/mailname'): """If possible, return the assumed default email domain. :returns: string mail domain, or None. """ if sys.platform == 'win32': # No implementation yet; patches welcome return None try: f = open(mailname_file) except (IOError, OSError): return None try: domain = f.readline().strip() return domain finally: f.close() def default_email(): v = os.environ.get('BRZ_EMAIL') if v: return v v = os.environ.get('EMAIL') if v: return v name, email = _auto_user_id() if name and email: return u'%s <%s>' % (name, email) elif email: return email raise errors.NoWhoami() def _auto_user_id(): """Calculate automatic user identification. :returns: (realname, email), either of which may be None if they can't be determined. Only used when none is set in the environment or the id file. This only returns an email address if we can be fairly sure the address is reasonable, ie if /etc/mailname is set on unix. This doesn't use the FQDN as the default domain because that may be slow, and it doesn't use the hostname alone because that's not normally a reasonable address. """ if sys.platform == 'win32': # No implementation to reliably determine Windows default mail # address; please add one. return None, None default_mail_domain = _get_default_mail_domain() if not default_mail_domain: return None, None import pwd uid = os.getuid() try: w = pwd.getpwuid(uid) except KeyError: trace.mutter('no passwd entry for uid %d?' % uid) return None, None # we try utf-8 first, because on many variants (like Linux), # /etc/passwd "should" be in utf-8, and because it's unlikely to give # false positives. (many users will have their user encoding set to # latin-1, which cannot raise UnicodeError.) 
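    # (With a purely illustrative passwd entry whose pw_gecos is
    # 'Jane Doe,Room 101,,', the code below yields realname 'Jane Doe',
    # i.e. everything before the first comma, and the address
    # '<pw_name>@<mailname domain>'.)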
gecos = w.pw_gecos if isinstance(gecos, bytes): try: gecos = gecos.decode('utf-8') encoding = 'utf-8' except UnicodeError: try: encoding = osutils.get_user_encoding() gecos = gecos.decode(encoding) except UnicodeError: trace.mutter("cannot decode passwd entry %s" % w) return None, None username = w.pw_name if isinstance(username, bytes): try: username = username.decode(encoding) except UnicodeError: trace.mutter("cannot decode passwd entry %s" % w) return None, None comma = gecos.find(',') if comma == -1: realname = gecos else: realname = gecos[:comma] return realname, (username + '@' + default_mail_domain) ././@PaxHeader0000000000000000000000000000002700000000000010214 xustar0023 mtime=1643136023.06 breezy-3.2.1+bzr7585/breezy/bisect.py0000644000000000000000000004031400000000000014153 0ustar00# Copyright (C) 2006-2011 Canonical Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """bisect command implementations.""" import sys from .controldir import ControlDir from . import revision as _mod_revision from .commands import Command from .errors import CommandError from .option import Option from .trace import note BISECT_INFO_PATH = "bisect" BISECT_REV_PATH = "bisect_revid" class BisectCurrent(object): """Bisect class for managing the current revision.""" def __init__(self, controldir, filename=BISECT_REV_PATH): self._filename = filename self._controldir = controldir self._branch = self._controldir.open_branch() if self._controldir.control_transport.has(filename): self._revid = self._controldir.control_transport.get_bytes( filename).strip() else: self._revid = self._branch.last_revision() def _save(self): """Save the current revision.""" self._controldir.control_transport.put_bytes( self._filename, self._revid + b"\n") def get_current_revid(self): """Return the current revision id.""" return self._revid def get_current_revno(self): """Return the current revision number as a tuple.""" return self._branch.revision_id_to_dotted_revno(self._revid) def get_parent_revids(self): """Return the IDs of the current revision's predecessors.""" repo = self._branch.repository with repo.lock_read(): retval = repo.get_parent_map([self._revid]).get(self._revid, None) return retval def is_merge_point(self): """Is the current revision a merge point?""" return len(self.get_parent_revids()) > 1 def show_rev_log(self, outf): """Write the current revision's log entry to a file.""" rev = self._branch.repository.get_revision(self._revid) revno = ".".join([str(x) for x in self.get_current_revno()]) outf.write("On revision %s (%s):\n%s\n" % (revno, rev.revision_id, rev.message)) def switch(self, revid): """Switch the current revision to the given revid.""" working = self._controldir.open_workingtree() if isinstance(revid, int): revid = self._branch.get_rev_id(revid) elif isinstance(revid, list): revid = revid[0].in_history(working.branch).rev_id working.revert(None, 
class BisectLog(object):
    """Bisect log file handler."""

    def __init__(self, controldir, filename=BISECT_INFO_PATH):
        self._items = []
        self._current = BisectCurrent(controldir)
        self._controldir = controldir
        self._branch = None
        self._high_revid = None
        self._low_revid = None
        self._middle_revid = None
        self._filename = filename
        self.load()

    def _open_for_read(self):
        """Open log file for reading."""
        if self._filename:
            return self._controldir.control_transport.get(self._filename)
        else:
            return sys.stdin

    def _load_tree(self):
        """Load bzr information."""
        if not self._branch:
            self._branch = self._controldir.open_branch()

    def _find_range_and_middle(self, branch_last_rev=None):
        """Find the current revision range, and the midpoint."""
        self._load_tree()
        self._middle_revid = None

        if not branch_last_rev:
            last_revid = self._branch.last_revision()
        else:
            last_revid = branch_last_rev

        repo = self._branch.repository
        with repo.lock_read():
            graph = repo.get_graph()
            rev_sequence = graph.iter_lefthand_ancestry(
                last_revid, (_mod_revision.NULL_REVISION,))
            high_revid = None
            low_revid = None
            between_revs = []
            for revision in rev_sequence:
                between_revs.insert(0, revision)
                matches = [x[1] for x in self._items
                           if x[0] == revision and x[1] in ('yes', 'no')]
                if not matches:
                    continue
                if len(matches) > 1:
                    raise RuntimeError("revision %s duplicated" % revision)
                if matches[0] == "yes":
                    high_revid = revision
                    between_revs = []
                elif matches[0] == "no":
                    low_revid = revision
                    del between_revs[0]
                    break

            if not high_revid:
                high_revid = last_revid
            if not low_revid:
                low_revid = self._branch.get_rev_id(1)

        # The spread must include the high revision, to bias
        # odd numbers of intervening revisions towards the high
        # side.
        spread = len(between_revs) + 1
        if spread < 2:
            middle_index = 0
        else:
            middle_index = (spread // 2) - 1

        if len(between_revs) > 0:
            self._middle_revid = between_revs[middle_index]
        else:
            self._middle_revid = high_revid

        self._high_revid = high_revid
        self._low_revid = low_revid
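    # Worked example (editor's illustration): with the candidates strictly
    # between the last "no" and the first "yes" ordered oldest first, say
    # [r2, r3, r4, r5], we get spread = 4 + 1 = 5 and
    # middle_index = (5 // 2) - 1 = 1, so r3 is tested next; the larger
    # remaining half (r4, r5) stays on the high side, as the comment in
    # _find_range_and_middle above intends.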
    def _switch_wc_to_revno(self, revno, outf):
        """Move the working tree to the given revno."""
        self._current.switch(revno)
        self._current.show_rev_log(outf=outf)

    def _set_status(self, revid, status):
        """Set the bisect status for the given revid."""
        if not self.is_done():
            if status != "done" and revid in [x[0] for x in self._items
                                              if x[1] in ['yes', 'no']]:
                raise RuntimeError("attempting to add revid %s twice"
                                   % revid)
            self._items.append((revid, status))

    def change_file_name(self, filename):
        """Switch log files."""
        self._filename = filename

    def load(self):
        """Load the bisection log."""
        self._items = []
        if self._controldir.control_transport.has(self._filename):
            revlog = self._open_for_read()
            for line in revlog:
                (revid, status) = line.split()
                self._items.append((revid, status.decode('ascii')))

    def save(self):
        """Save the bisection log."""
        contents = b''.join(
            (b"%s %s\n" % (revid, status.encode('ascii')))
            for (revid, status) in self._items)
        if self._filename:
            self._controldir.control_transport.put_bytes(
                self._filename, contents)
        else:
            sys.stdout.write(contents)

    def is_done(self):
        """Report whether we've found the right revision."""
        return len(self._items) > 0 and self._items[-1][1] == "done"

    def set_status_from_revspec(self, revspec, status):
        """Set the bisection status for the revision in revspec."""
        self._load_tree()
        revid = revspec[0].in_history(self._branch).rev_id
        self._set_status(revid, status)

    def set_current(self, status):
        """Set the current revision to the given bisection status."""
        self._set_status(self._current.get_current_revid(), status)

    def is_merge_point(self, revid):
        """Is the given revision a merge point (more than one parent)?"""
        return len(self.get_parent_revids(revid)) > 1

    def get_parent_revids(self, revid):
        """Return the parent revision ids of revid, or None if unknown."""
        repo = self._branch.repository
        with repo.lock_read():
            retval = repo.get_parent_map([revid]).get(revid, None)
        return retval

    def bisect(self, outf):
        """Using the current revision's status, do a bisection."""
        self._find_range_and_middle()
        # If we've found the "final" revision, check for a
        # merge point.
        while ((self._middle_revid == self._high_revid or
                self._middle_revid == self._low_revid) and
               self.is_merge_point(self._middle_revid)):
            for parent in self.get_parent_revids(self._middle_revid):
                if parent == self._low_revid:
                    continue
                else:
                    self._find_range_and_middle(parent)
                    break
        self._switch_wc_to_revno(self._middle_revid, outf)
        if self._middle_revid == self._high_revid or \
                self._middle_revid == self._low_revid:
            self.set_current("done")
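# Illustrative note (editor's sketch; the revision ids shown are made up):
# the log written by BisectLog.save() is one "<revid> <status>" pair per
# line, stored in the control transport under "bisect", e.g.:
#
#     user@example.com-20211204-0123456789abcdef yes
#     user@example.com-20211108-fedcba9876543210 no
#
# load() parses this back, keeping revision ids as bytes and decoding only
# the status as ASCII.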
class cmd_bisect(Command):
    """Find an interesting commit using a binary search.

    Bisecting, in a nutshell, is a way to find the commit at which some
    testable change was made, such as the introduction of a bug or feature.
    By identifying a version which did not have the interesting change and
    a later version which did, a developer can test for the presence of
    the change at various points in the history, eventually ending up at
    the precise commit when the change was first introduced.

    This command uses subcommands to implement the search, each of which
    changes the state of the bisection.  The subcommands are:

    brz bisect start
        Start a bisect, possibly clearing out a previous bisect.

    brz bisect yes [-r rev]
        The specified revision (or the current revision, if not given)
        has the characteristic we're looking for.

    brz bisect no [-r rev]
        The specified revision (or the current revision, if not given)
        does not have the characteristic we're looking for.

    brz bisect move -r rev
        Switch to a different revision manually.  Use if the bisect
        algorithm chooses a revision that is not suitable.  Try to move
        as little as possible.

    brz bisect reset
        Clear out a bisection in progress.

    brz bisect log [-o file]
        Output a log of the current bisection to standard output, or to
        the specified file.

    brz bisect replay
        Replay a previously-saved bisect log, forgetting any bisection
        that might be in progress.

    brz bisect run