diff --git a/.travis.yml b/.travis.yml index 57a12dca..e8214ae8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,36 +1,38 @@ language: python sudo: false +cache: pip + +python: + - 2.7 + - 3.3 + - 3.4 + - 3.5 + - 3.5-dev + - 3.6 + - 3.6-dev + - 3.7-dev + - pypy3.3-5.2-alpha1 + env: - global: TEST_RUNNER=unittest PYTHONHASHSEED=random + - PYTHONHASHSEED=random + TEST_REQUIRE="gevent greenlet geventhttpclient fastimport" + matrix: include: - - python: "2.7" - env: TEST_REQUIRE="gevent greenlet geventhttpclient fastimport" - - python: "pypy" - env: TEST_REQUIRE="fastimport" - - python: "3.4" - env: TEST_REQUIRE="gevent greenlet geventhttpclient fastimport" - - python: "3.5" - env: TEST_REQUIRE="gevent greenlet geventhttpclient fastimport" - - python: "3.5-dev" - env: TEST_REQUIRE="gevent greenlet geventhttpclient fastimport" - - python: "3.6" - env: TEST_REQUIRE="gevent greenlet geventhttpclient fastimport" - - python: "3.6-dev" - env: TEST_REQUIRE="gevent greenlet geventhttpclient fastimport" - - python: "3.7-dev" - env: TEST_REQUIRE="gevent greenlet geventhttpclient fastimport" -cache: - directories: - - $HOME/.cache/pip -script: - - pip install pip --upgrade - - pip install $TEST_REQUIRE + - python: pypy + env: TEST_REQUIRE=fastimport + +install: + - travis_retry pip install -U pip coverage codecov $TEST_REQUIRE +script: # Test without c extensions - - python -m $TEST_RUNNER dulwich.tests.test_suite + - python -m coverage run -p --source=dulwich -m unittest dulwich.tests.test_suite # Test with c extensions - python setup.py build_ext -i - - python -m $TEST_RUNNER dulwich.tests.test_suite + - python -m coverage run -p --source=dulwich -m unittest dulwich.tests.test_suite +after_success: + - python -m coverage combine + - codecov diff --git a/NEWS b/NEWS index 794ddb52..3d416eb8 100644 --- a/NEWS +++ b/NEWS @@ -1,1503 +1,1524 @@ +0.17.3 2017-03-20 + + PLATFORM SUPPORT + + * List Python 3.3 as supported. (Jelmer Vernooij, #513) + + BUG FIXES + + * Fix compatibility with pypy 3. (Jelmer Vernooij) + +0.17.2 2017-03-19 + + BUG FIXES + + * Add workaround for + https://bitbucket.org/pypy/pypy/issues/2499/cpyext-pystring_asstring-doesnt-work, + fixing Dulwich when used with C extensions on pypy < 5.6. (Victor Stinner) + + * Properly quote config values with a '#' character in them. + (Jelmer Vernooij, #511) + 0.17.1 2017-03-01 IMPROVEMENTS * Add basic 'dulwich pull' command. (Jelmer Vernooij) BUG FIXES * Cope with existing submodules during pull. (Jelmer Vernooij, #505) 0.17.0 2017-03-01 TEST FIXES * Skip test that requires sync to synchronize filesystems if os.sync is not available. (Koen Martens) IMPROVEMENTS * Implement MemoryRepo.{set_description,get_description}. (Jelmer Vernooij) * Raise exception in Repo.stage() when absolute paths are passed in. Allow passing in relative paths to porcelain.add(). (Jelmer Vernooij) BUG FIXES * Handle multi-line quoted values in config files. (Jelmer Vernooij, #495) * Allow porcelain.clone of repository without HEAD. (Jelmer Vernooij, #501) * Support passing tag ids to Walker()'s include argument. (Jelmer Vernooij) * Don't strip trailing newlines from extra headers. (Nicolas Dandrimont) * Set bufsize=0 for subprocess interaction with SSH client. Fixes hangs on Python 3. (René Stern, #434) * Don't drop first slash for SSH paths, except for those starting with "~". (Jelmer Vernooij, René Stern, #463) * Properly log off after retrieving just refs. (Jelmer Vernooij) 0.16.3 2017-01-14 TEST FIXES * Remove racy check that relies on clock time changing between writes. (Jelmer Vernooij) IMPROVEMENTS * Add porcelain.remote_add. (Jelmer Vernooij)
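A minimal sketch of the porcelain.add() behaviour described under 0.17.0 above (the repository path and file name are illustrative assumptions, not taken from the diff):

    from dulwich import porcelain

    porcelain.init("/tmp/example-repo")              # illustrative scratch path
    with open("/tmp/example-repo/README", "w") as f:
        f.write("hello\n")
    # Per the 0.17.0 entry, porcelain.add() accepts relative paths,
    # while Repo.stage() itself now rejects absolute paths with an exception.
    porcelain.add("/tmp/example-repo", paths=["README"])
    porcelain.commit("/tmp/example-repo", message=b"Add README")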
0.16.2 2017-01-14 IMPROVEMENTS * Fixed failing test-cases on Windows. (Koen Martens) API CHANGES * Repo is now a context manager, so that it can be easily closed using a ``with`` statement. (Søren Løvborg) TEST FIXES * Only run worktree list compat tests against git 2.7.0, when 'git worktree list' was introduced. (Jelmer Vernooij) BUG FIXES * Ignore filemode when building index when core.filemode is false. (Koen Martens) * Initialize core.filemode configuration setting by probing the filesystem for trustable permissions. (Koen Martens) * Fix ``porcelain.reset`` to respect the committish argument. (Koen Martens) * Fix dulwich.porcelain.ls_remote() on Python 3. (#471, Jelmer Vernooij) * Allow both unicode and byte strings for host paths in dulwich.client. (#435, Jelmer Vernooij) * Add remote from porcelain.clone. (#466, Jelmer Vernooij) * Fix unquoting of credentials before passing to urllib2. (#475, Volodymyr Holovko) * Cope with submodules in `build_index_from_tree`. (#477, Jelmer Vernooij) * Handle deleted files in `get_unstaged_changes`. (#483, Doug Hellmann) * Don't overwrite files when they haven't changed in `build_file_from_blob`. (#479, Benoît HERVIER) * Check for existence of index file before opening pack. Fixes a race when new packs are being added. (#482, wme) 0.16.1 2016-12-25 BUG FIXES * Fix python3 compatibility for dulwich.contrib.release_robot. (Jelmer Vernooij) 0.16.0 2016-12-24 IMPROVEMENTS * Add support for worktrees. See `git-worktree(1)` and `gitrepository-layout(5)`. (Laurent Rineau) * Add support for `commondir` file in Git control directories. (Laurent Rineau) * Add support for passwords in HTTP URLs. (Jon Bain, Mika Mäenpää) * Add `release_robot` script to contrib, allowing easy finding of current version based on Git tags. (Mark Mikofski) * Add ``Blob.splitlines`` method. (Jelmer Vernooij) BUG FIXES * Fix handling of ``Commit.tree`` being set to an actual tree object rather than a tree id. (Jelmer Vernooij) * Return remote refs from LocalGitClient.fetch_pack(), consistent with the documentation for that method. (#461, Jelmer Vernooij) * Fix handling of unknown URL schemes in get_transport_and_path. (#465, Jelmer Vernooij) 0.15.0 2016-10-09 BUG FIXES * Allow missing trailing LF when reading service name from HTTP servers. (Jelmer Vernooij, Andrew Shadura, #442) * Fix dulwich.porcelain.pull() on Python3. (Jelmer Vernooij, #451) * Properly pull in tags during dulwich.porcelain.clone. (Jelmer Vernooij, #408) CHANGES * Changed license from "GNU General Public License, version 2.0 or later" to "Apache License, version 2.0 or later or GNU General Public License, version 2.0 or later". (#153) IMPROVEMENTS * Add ``dulwich.porcelain.ls_tree`` implementation. (Jelmer Vernooij) 0.14.1 2016-07-05 BUG FIXES * Fix regression removing untouched refs when pushing over SSH. (Jelmer Vernooij, #441) * Skip Python3 tests for SWIFT contrib module, as it has not yet been ported. 0.14.0 2016-07-03 BUG FIXES * Fix ShaFile.id after modification of a copied ShaFile. (Félix Mattrat, Jelmer Vernooij) * Support removing refs from porcelain.push. (Jelmer Vernooij, #437) * Stop magic protocol ref `capabilities^{}` from leaking out to clients. (Jelmer Vernooij, #254) IMPROVEMENTS * Add `dulwich.config.parse_submodules` function. * Add `RefsContainer.follow` method. (#438) 0.13.0 2016-04-24 IMPROVEMENTS * Support `ssh://` URLs in get_transport_and_path_from_url(). (Jelmer Vernooij, #402) * Support missing empty line after headers in Git commits and tags. (Nicolas Dandrimont, #413) * Fix `dulwich.porcelain.status` when used in empty trees. (Jelmer Vernooij, #415) * Return copies of objects in MemoryObjectStore rather than references, making the behaviour more consistent with that of DiskObjectStore. (Félix Mattrat, Jelmer Vernooij) * Fix ``dulwich.web`` on Python3. (#295, Jonas Haag)
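The URL handling touched by several entries above (ssh:// support in 0.13.0, the unknown-scheme fix in 0.16.0) can be probed through get_transport_and_path_from_url(); a hedged sketch with placeholder URLs:

    from dulwich.client import get_transport_and_path_from_url

    # Each call returns a (client, path) pair appropriate for the scheme;
    # a URL with an unrecognized scheme raises ValueError.
    client, path = get_transport_and_path_from_url("git://example.com/repo.git")
    client, path = get_transport_and_path_from_url("ssh://git@example.com/repo.git")
    client, path = get_transport_and_path_from_url("https://example.com/repo.git")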
CHANGES * Drop support for Python 2.6. * Fix python3 client web support. (Jelmer Vernooij) BUG FIXES * Fix hang on Gzip decompression. (Jonas Haag) * Don't rely on working tell() and seek() methods on wsgi.input. (Jonas Haag) * Support fastexport/fastimport functionality on python3 with newer versions of fastimport (>= 0.9.5). (Jelmer Vernooij, Félix Mattrat) 0.12.0 2015-12-13 IMPROVEMENTS * Add a `dulwich.archive` module that can create tarballs. Based on code from Jonas Haag in klaus. * Add a `dulwich.reflog` module for reading and writing reflogs. (Jelmer Vernooij) * Fix handling of ambiguous refs in `parse_ref` to make it match the behaviour described in https://git-scm.com/docs/gitrevisions. (Chris Bunney) * Support Python3 in C modules. (Lele Gaifax) BUG FIXES * Simplify handling of SSH command invocation. Fixes quoting of paths. Thanks, Thomas Liebetraut. (#384) * Fix inconsistent handling of trailing slashes for DictRefsContainer. (#383) * Add hack to support thin packs during fetch(), albeit while requiring the entire pack file to be loaded into memory. (jsbain) CHANGES * This will be the last release to support Python 2.6. 0.11.2 2015-09-18 IMPROVEMENTS * Add support for agent= capability. (Jelmer Vernooij, #298) * Add support for quiet capability. (Jelmer Vernooij) CHANGES * The ParamikoSSHVendor class has been moved to dulwich.contrib.paramiko_vendor, as it's currently untested. (Jelmer Vernooij, #364) 0.11.1 2015-09-13 Fix-up release to exclude broken blame.py file. 0.11.0 2015-09-13 IMPROVEMENTS * Extended Python3 support to most of the codebase. (Gary van der Merwe, Jelmer Vernooij) * The `Repo` object has a new `close` method that can be called to close any open resources. (Gary van der Merwe) * Support 'git.bat' in SubprocessGitClient on Windows. (Stefan Zimmermann) * Advertise 'ofs-delta' capability in receive-pack server side capabilities. (Jelmer Vernooij) * Switched `default_local_git_client_cls` to `LocalGitClient`. (Gary van der Merwe) * Add `porcelain.ls_remote` and `GitClient.get_refs`. (Michael Edgar) * Add `Repo.discover` method. (B. M. Corser) * Add `dulwich.objectspec.parse_refspec`. (Jelmer Vernooij) * Add `porcelain.pack_objects` and `porcelain.repack`. (Jelmer Vernooij) BUG FIXES * Fix handling of 'done' in graph walker and implement the 'no-done' capability. (Tommy Yu, #88) * Avoid recursion limit issues resolving deltas. (William Grant, #81) * Allow arguments in local client binary path overrides. (Jelmer Vernooij) * Fix handling of commands with arguments in paramiko SSH client. (Andreas Klöckner, Jelmer Vernooij, #363) * Fix parsing of quoted strings in configs. (Jelmer Vernooij, #305) 0.10.1 2015-03-25 BUG FIXES * Return `ApplyDeltaError` when encountering delta errors in both C extensions and native delta application code. (Jelmer Vernooij, #259) 0.10.0 2015-03-22 BUG FIXES * In dulwich.index.build_index_from_tree, by default refuse to create entries that start with .git/. * Fix running of testsuite when installed. (Jelmer Vernooij, #223) * Use a block cache in _find_content_rename_candidates(), improving performance.
(Mike Williams) * Add support for ``core.protectNTFS`` setting. (Jelmer Vernooij) * Fix TypeError when fetching empty updates. (Hwee Miin Koh) * Resolve delta refs when pulling into a MemoryRepo. (Max Shawabkeh, #256) * Fix handling of tags of non-commits in missing object finder. (Augie Fackler, #211) * Explicitly disable mmap on plan9 where it doesn't work. (Jeff Sickel) IMPROVEMENTS * New public method `Repo.reset_index`. (Jelmer Vernooij) * Prevent duplicate parsing of loose files in objects directory when reading. Thanks to David Keijser for the report. (Jelmer Vernooij, #231) 0.9.9 2015-03-20 SECURITY BUG FIXES * Fix buffer overflow in C implementation of pack apply_delta(). (CVE-2015-0838) Thanks to Ivan Fratric of the Google Security Team for reporting this issue. (Jelmer Vernooij) 0.9.8 2014-11-30 BUG FIXES * Various fixes to improve test suite running on Windows. (Gary van der Merwe) * Limit delta copy length to 64K in v2 pack files. (Robert Brown) * Strip newline from final ACKed SHA while fetching packs. (Michael Edgar) * Remove assignment to PyList_SIZE() that was causing segfaults on pypy. (Jelmer Vernooij, #196) IMPROVEMENTS * Add porcelain 'receive-pack' and 'upload-pack'. (Jelmer Vernooij) * Handle SIGINT signals in bin/dulwich. (Jelmer Vernooij) * Add 'status' support to bin/dulwich. (Jelmer Vernooij) * Add 'branch_create', 'branch_list', 'branch_delete' porcelain. (Jelmer Vernooij) * Add 'fetch' porcelain. (Jelmer Vernooij) * Add 'tag_delete' porcelain. (Jelmer Vernooij) * Add support for serializing/deserializing 'gpgsig' attributes in Commit. (Jelmer Vernooij) CHANGES * dul-web is now available as 'dulwich web-daemon'. (Jelmer Vernooij) * dulwich.porcelain.tag has been renamed to tag_create. dulwich.porcelain.list_tags has been renamed to tag_list. (Jelmer Vernooij) API CHANGES * Restore support for Python 2.6. (Jelmer Vernooij, Gary van der Merwe) 0.9.7 2014-06-08 BUG FIXES * Fix tests dependent on hash ordering. (Michael Edgar) * Support staging symbolic links in Repo.stage. (Robert Brown) * Ensure that all file objects are closed when running the test suite. (Gary van der Merwe) * When writing OFS_DELTA pack entries, write correct offset. (Augie Fackler) * Fix handling of larger copy operations in packs. (Augie Fackler) * Various fixes to improve test suite running on Windows. (Gary van der Merwe) * Fix logic for extra adds of identical files in rename detector. (Robert Brown) IMPROVEMENTS * Add porcelain 'status'. (Ryan Faulkner) * Add porcelain 'daemon'. (Jelmer Vernooij) * Add `dulwich.greenthreads` module which provides support for concurrency of some object store operations. (Fabien Boucher) * Various changes to improve compatibility with Python 3. (Gary van der Merwe, Hannu Valtonen, michael-k) * Add OpenStack Swift backed repository implementation in dulwich.contrib. See README.swift for details. (Fabien Boucher) API CHANGES * An optional close function can be passed to the Protocol class. This will be called by its close method. (Gary van der Merwe) * All classes with close methods are now context managers, so that they can be easily closed using a `with` statement. (Gary van der Merwe) * Remove deprecated `num_objects` argument to `write_pack` methods. (Jelmer Vernooij) OTHER CHANGES * The 'dul-daemon' script has been removed. The same functionality is now available as 'dulwich daemon'. (Jelmer Vernooij) 0.9.6 2014-04-23 IMPROVEMENTS * Add support for recursive add in 'git add'. (Ryan Faulkner, Jelmer Vernooij) * Add porcelain 'list_tags'.
(Ryan Faulkner) * Add porcelain 'push'. (Ryan Faulkner) * Add porcelain 'pull'. (Ryan Faulkner) * Support 'http.proxy' in HttpGitClient. (Jelmer Vernooij, #1096030) * Support 'http.useragent' in HttpGitClient. (Jelmer Vernooij) * In server, wait for clients to send empty list of wants when talking to empty repository. (Damien Tournoud) * Various changes to improve compatibility with Python 3. (Gary van der Merwe) BUG FIXES * Support unseekable 'wsgi.input' streams. (Jonas Haag) * Raise TypeError when passing unicode() object to Repo.__getitem__. (Jonas Haag) * Fix handling of `reset` command in dulwich.fastexport. (Jelmer Vernooij, #1249029) * In client, don't wait for server to close connection first. Fixes hang when used against GitHub server implementation. (Siddharth Agarwal) * DeltaChainIterator: fix a corner case where an object is inflated as an object already in the repository. (Damien Tournoud, #135) * Stop leaking file handles during pack reload. (Damien Tournoud) * Avoid reopening packs during pack cache reload. (Jelmer Vernooij) API CHANGES * Drop support for Python 2.6. (Jelmer Vernooij) 0.9.5 2014-02-23 IMPROVEMENTS * Add porcelain 'tag'. (Ryan Faulkner) * New module `dulwich.objectspec` for parsing strings referencing objects and commit ranges. (Jelmer Vernooij) * Add shallow branch support. (milki) * Allow passing urllib2 `opener` into HttpGitClient. (Dov Feldstern, #909037) CHANGES * Drop support for Python 2.4 and 2.5. (Jelmer Vernooij) API CHANGES * Remove long deprecated ``Repo.commit``, ``Repo.get_blob``, ``Repo.tree`` and ``Repo.tag``. (Jelmer Vernooij) * Remove long deprecated ``Repo.revision_history`` and ``Repo.ref``. (Jelmer Vernooij) * Remove long deprecated ``Tree.entries``. (Jelmer Vernooij) BUG FIXES * Raise KeyError rather than TypeError when passing in unicode object of length 20 or 40 to Repo.__getitem__. (Jelmer Vernooij) * Use 'rm' rather than 'unlink' in tests, since the latter does not exist on OpenBSD and other platforms. (Dmitrij D. Czarkoff) 0.9.4 2013-11-30 IMPROVEMENTS * Add ssh_kwargs attribute to ParamikoSSHVendor. (milki) * Add Repo.set_description(). (Víðir Valberg Guðmundsson) * Add a basic `dulwich.porcelain` module. (Jelmer Vernooij, Marcin Kuzminski) * Various performance improvements for object access. (Jelmer Vernooij) * New function `get_transport_and_path_from_url`, similar to `get_transport_and_path` but only supports URLs. (Jelmer Vernooij) * Add support for file:// URLs in `get_transport_and_path_from_url`. (Jelmer Vernooij) * Add LocalGitClient implementation. (Jelmer Vernooij) BUG FIXES * Support filesystems with 64bit inode and device numbers. (André Roth) CHANGES * Ref handling has been moved to dulwich.refs. (Jelmer Vernooij) API CHANGES * Remove long deprecated RefsContainer.set_ref(). (Jelmer Vernooij) * Repo.ref() is now deprecated in favour of Repo.refs[]. (Jelmer Vernooij) FEATURES * Add support for graftpoints. (milki) 0.9.3 2013-09-27 BUG FIXES * Fix path for stdint.h in MANIFEST.in. (Jelmer Vernooij) 0.9.2 2013-09-26 BUG FIXES * Include stdint.h in MANIFEST.in (Mark Mikofski) 0.9.1 2013-09-22 BUG FIXES * Support lookups of 40-character refs in BaseRepo.__getitem__. (Chow Loong Jin, Jelmer Vernooij) * Fix fetching packs with side-band-64k capability disabled. (David Keijser, Jelmer Vernooij) * Several fixes in send-pack protocol behaviour - handling of empty pack files and deletes. (milki, #1063087) * Fix capability negotiation when fetching packs over HTTP. 
(#1072461, William Grant) * Enforce determine_wants returning an empty list rather than None. (Fabien Boucher, Jelmer Vernooij) * In the server, support pushes just removing refs. (Fabien Boucher, Jelmer Vernooij) IMPROVEMENTS * Support passing a single revision to BaseRepo.get_walker() rather than a list of revisions. (Alberto Ruiz) * Add `Repo.get_description` method. (Jelmer Vernooij) * Support thin packs in Pack.iterobjects() and Pack.get_raw(). (William Grant) * Add `MemoryObjectStore.add_pack` and `MemoryObjectStore.add_thin_pack` methods. (David Bennett) * Add paramiko-based SSH vendor. (Aaron O'Mullan) * Support running 'dulwich.server' and 'dulwich.web' using 'python -m'. (Jelmer Vernooij) * Add ObjectStore.close(). (Jelmer Vernooij) * Raise appropriate NotImplementedError when encountering dumb HTTP servers. (Jelmer Vernooij) API CHANGES * SSHVendor.connect_ssh has been renamed to SSHVendor.run_command. (Jelmer Vernooij) * ObjectStore.add_pack() now returns a 3-tuple. The last element will be an abort() method that can be used to cancel the pack operation. (Jelmer Vernooij) 0.9.0 2013-05-31 BUG FIXES * Push efficiency - report missing objects only. (#562676, Artem Tikhomirov) * Use indentation consistent with C Git in config files. (#1031356, Curt Moore, Jelmer Vernooij) * Recognize and skip binary files in diff function. (Takeshi Kanemoto) * Fix handling of relative paths in dulwich.client.get_transport_and_path. (Brian Visel, #1169368) * Preserve ordering of entries in configuration. (Benjamin Pollack) * Support ~ expansion in SSH client paths. (milki, #1083439) * Support relative paths in alternate paths. (milki, Michel Lespinasse, #1175007) * Log all error messages from wsgiref server to the logging module. This makes the test suite quiet again. (Gary van der Merwe) * Support passing None for empty tree in changes_from_tree. (Kevin Watters) * Support fetching empty repository in client. (milki, #1060462) IMPROVEMENTS * Add optional honor_filemode flag to build_index_from_tree. (Mark Mikofski) * Support core/filemode setting when building trees. (Jelmer Vernooij) * Add chapter on tags in tutorial. (Ryan Faulkner) FEATURES * Add support for mergetags. (milki, #963525) * Add support for posix shell hooks. (milki) 0.8.7 2012-11-27 BUG FIXES * Fix use of alternates in ``DiskObjectStore``.{__contains__,__iter__}. (Dmitriy) * Fix compatibility with Python 2.4. (David Carr) 0.8.6 2012-11-09 API CHANGES * dulwich.__init__ no longer imports client, protocol, repo and server modules. (Jelmer Vernooij) FEATURES * ConfigDict now behaves more like a dictionary. (Adam 'Cezar' Jenkins, issue #58) * HTTPGitApplication now takes an optional `fallback_app` argument. (Jonas Haag, issue #67) * Support for large pack index files. (Jameson Nash) TESTING * Make index entry tests a little bit less strict, to cope with slightly different behaviour on various platforms. (Jelmer Vernooij) * ``setup.py test`` (available when setuptools is installed) now runs all tests, not just the basic unit tests. (Jelmer Vernooij) BUG FIXES * Commit._deserialize now actually deserializes the current state rather than the previous one. (Yifan Zhang, issue #59) * Handle None elements in lists of TreeChange objects. (Alex Holmes) * Support cloning repositories without HEAD set. (D-Key, Jelmer Vernooij, issue #69) * Support ``MemoryRepo.get_config``. (Jelmer Vernooij) * In ``get_transport_and_path``, pass extra keyword arguments on to HttpGitClient.
(Jelmer Vernooij) 0.8.5 2012-03-29 BUG FIXES * Avoid use of 'with' in dulwich.index. (Jelmer Vernooij) * Be a little bit strict about OS behaviour in index tests. Should fix the tests on Debian GNU/kFreeBSD. (Jelmer Vernooij) 0.8.4 2012-03-28 BUG FIXES * Options on the same line as sections in config files are now supported. (Jelmer Vernooij, #920553) * Only negotiate capabilities that are also supported by the server. (Rod Cloutier, Risto Kankkunen) * Fix parsing of invalid timezone offsets with two minus signs. (Jason R. Coombs, #697828) * Reset environment variables during tests, to avoid test isolation leaks reading ~/.gitconfig. (Risto Kankkunen) TESTS * $HOME is now explicitly specified for tests that use it to read ``~/.gitconfig``, to prevent test isolation issues. (Jelmer Vernooij, #920330) FEATURES * Additional arguments to get_transport_and_path are now passed on to the constructor of the transport. (Sam Vilain) * The WSGI server now transparently handles when a git client submits data using Content-Encoding: gzip. (David Blewett, Jelmer Vernooij) * Add dulwich.index.build_index_from_tree(). (milki) 0.8.3 2012-01-21 FEATURES * The config parser now supports the git-config file format as described in git-config(1) and can write git config files. (Jelmer Vernooij, #531092, #768687) * ``Repo.do_commit`` will now use the user identity from .git/config or ~/.gitconfig if none was explicitly specified. (Jelmer Vernooij) BUG FIXES * Allow ``determine_wants`` methods to include the zero sha in their return value. (Jelmer Vernooij) 0.8.2 2011-12-18 BUG FIXES * Cope with different zlib buffer sizes in sha1 file parser. (Jelmer Vernooij) * Fix get_transport_and_path for HTTP/HTTPS URLs. (Bruno Renié) * Avoid calling free_objects() on NULL in error cases. (Chris Eberle) * Fix use --bare argument to 'dulwich init'. (Chris Eberle) * Properly abort connections when the determine_wants function raises an exception. (Jelmer Vernooij, #856769) * Tweak xcodebuild hack to deal with more error output. (Jelmer Vernooij, #903840) FEATURES * Add support for retrieving tarballs from remote servers. (Jelmer Vernooij, #379087) * New method ``update_server_info`` which generates data for dumb server access. (Jelmer Vernooij, #731235) 0.8.1 2011-10-31 FEATURES * Repo.do_commit has a new argument 'ref'. * Repo.do_commit has a new argument 'merge_heads'. (Jelmer Vernooij) * New ``Repo.get_walker`` method. (Jelmer Vernooij) * New ``Repo.clone`` method. (Jelmer Vernooij, #725369) * ``GitClient.send_pack`` now supports the 'side-band-64k' capability. (Jelmer Vernooij) * ``HttpGitClient`` which supports the smart server protocol over HTTP. "dumb" access is not yet supported. (Jelmer Vernooij, #373688) * Add basic support for alternates. (Jelmer Vernooij, #810429) CHANGES * unittest2 or python >= 2.7 is now required for the testsuite. testtools is no longer supported. (Jelmer Vernooij, #830713) BUG FIXES * Fix compilation with older versions of MSVC. (Martin gz) * Special case 'refs/stash' as a valid ref. (Jelmer Vernooij, #695577) * Smart protocol clients can now change refs even if they are not uploading new data. (Jelmer Vernooij, #855993) * Don't compile C extensions when running in pypy. (Ronny Pfannschmidt, #881546) * Use different name for strnlen replacement function to avoid clashing with system strnlen. (Jelmer Vernooij, #880362) API CHANGES * ``Repo.revision_history`` is now deprecated in favor of ``Repo.get_walker``. 
(Jelmer Vernooij) 0.8.0 2011-08-07 FEATURES * New DeltaChainIterator abstract class for quickly iterating all objects in a pack, with implementations for pack indexing and inflation. (Dave Borowitz) * New walk module with a Walker class for customizable commit walking. (Dave Borowitz) * New tree_changes_for_merge function in diff_tree. (Dave Borowitz) * Easy rename detection in RenameDetector even without find_copies_harder. (Dave Borowitz) BUG FIXES * Avoid storing all objects in memory when writing pack. (Jelmer Vernooij, #813268) * Support IPv6 for git:// connections. (Jelmer Vernooij, #801543) * Improve performance of Repo.revision_history(). (Timo Schmid, #535118) * Fix use of SubprocessWrapper on Windows. (Paulo Madeira, #670035) * Fix compilation on newer versions of Mac OS X (Lion and up). (Ryan McKern, #794543) * Prevent raising ValueError for correct refs in RefsContainer.__delitem__. * Correctly return a tuple from MemoryObjectStore.get_raw. (Dave Borowitz) * Fix a bug in reading the pack checksum when there are fewer than 20 bytes left in the buffer. (Dave Borowitz) * Support ~ in git:// URL paths. (Jelmer Vernooij, #813555) * Make ShaFile.__eq__ work when other is not a ShaFile. (Dave Borowitz) * ObjectStore.get_graph_walker() now no longer yields the same revision more than once. This significantly improves performance when wide revision graphs are involved. (Jelmer Vernooij, #818168) * Teach ReceivePackHandler how to read empty packs. (Dave Borowitz) * Don't send a pack with duplicates of the same object. (Dave Borowitz) * Teach the server how to serve a clone of an empty repo. (Dave Borowitz) * Correctly advertise capabilities during receive-pack. (Dave Borowitz) * Fix add/add and add/rename conflicts in tree_changes_for_merge. (Dave Borowitz) * Use correct MIME types in web server. (Dave Borowitz) API CHANGES * write_pack no longer takes the num_objects argument and requires an object to be passed in that is iterable (rather than an iterator) and that provides __len__. (Jelmer Vernooij) * write_pack_data has been renamed to write_pack_objects and no longer takes a num_objects argument. (Jelmer Vernooij) * take_msb_bytes, read_zlib_chunks, unpack_objects, and PackStreamReader.read_objects now take an additional argument indicating a crc32 to compute. (Dave Borowitz) * PackObjectIterator was removed; its functionality is still exposed by PackData.iterobjects. (Dave Borowitz) * Add a sha arg to write_pack_object to incrementally compute a SHA. (Dave Borowitz) * Include offset in PackStreamReader results. (Dave Borowitz) * Move PackStreamReader from server to pack. (Dave Borowitz) * Extract check_length_and_checksum, compute_file_sha, and pack_object_header pack helper functions. (Dave Borowitz) * Extract a compute_file_sha function. (Dave Borowitz) * Remove move_in_thin_pack as a separate method; add_thin_pack now completes the thin pack and moves it in in one step. Remove ThinPackData as well. (Dave Borowitz) * Custom buffer size in read_zlib_chunks. (Dave Borowitz) * New UnpackedObject data class that replaces ad-hoc tuples in the return value of unpack_object and various DeltaChainIterator methods. (Dave Borowitz) * Add a lookup_path convenience method to Tree. (Dave Borowitz) * Optionally create RenameDetectors without passing in tree SHAs. (Dave Borowitz) * Optionally include unchanged entries in RenameDetectors. (Dave Borowitz) * Optionally pass a RenameDetector to tree_changes. (Dave Borowitz) * Optionally pass a request object through to server handlers. (Dave Borowitz)
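A brief sketch of the walk API that 0.8.0 above introduces (repository path is an illustrative assumption; later releases also accept a single revision for get_walker(), per the 0.9.1 entry above):

    from dulwich.repo import Repo

    repo = Repo("/tmp/example-repo")     # illustrative path
    # Walk history starting from HEAD, newest commits first;
    # max_entries just keeps this sketch bounded.
    for entry in repo.get_walker(max_entries=10):
        commit = entry.commit
        print(commit.id, commit.message.splitlines()[0])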
TEST CHANGES * If setuptools is installed, "python setup.py test" will now run the testsuite. (Jelmer Vernooij) * Add a new build_pack test utility for building packs from a simple spec. (Dave Borowitz) * Add a new build_commit_graph test utility for building commits from a simple spec. (Dave Borowitz) 0.7.1 2011-04-12 BUG FIXES * Fix double decref in _diff_tree.c. (Ted Horst, #715528) * Fix the build on Windows. (Pascal Quantin) * Fix get_transport_and_path compatibility with pre-2.6.5 versions of Python. (Max Bowsher, #707438) * BaseObjectStore.determine_wants_all no longer breaks on zero SHAs. (Jelmer Vernooij) * write_tree_diff() now supports submodules. (Jelmer Vernooij) * Fix compilation for XCode 4 and older versions of distutils.sysconfig. (Daniele Sluijters) IMPROVEMENTS * Sphinxified documentation. (Lukasz Balcerzak) * Add Pack.keep. (Marc Brinkmann) API CHANGES * The order of the parameters to Tree.add(name, mode, sha) has changed, and is now consistent with the rest of Dulwich. Existing code will still work but print a DeprecationWarning. (Jelmer Vernooij, #663550) * Tree.entries() is now deprecated in favour of Tree.items() and Tree.iteritems(). (Jelmer Vernooij) 0.7.0 2011-01-21 FEATURES * New `dulwich.diff_tree` module for simple content-based rename detection. (Dave Borowitz) * Add Tree.items(). (Jelmer Vernooij) * Add eof() and unread_pkt_line() methods to Protocol. (Dave Borowitz) * Add write_tree_diff(). (Jelmer Vernooij) * Add `serve_command` function for git server commands as executables. (Jelmer Vernooij) * dulwich.client.get_transport_and_path now supports rsync-style repository URLs. (Dave Borowitz, #568493) BUG FIXES * Correct short-circuiting operation for no-op fetches in the server. (Dave Borowitz) * Support parsing git mbox patches without a version tail, as generated by Mercurial. (Jelmer Vernooij) * Fix dul-receive-pack and dul-upload-pack. (Jelmer Vernooij) * Zero-padded file modes in Tree objects no longer trigger an exception but the check code warns about them. (Augie Fackler, #581064) * Repo.init() now honors the mkdir flag. (#671159) * The ref format is now checked when setting a ref rather than when reading it back. (Dave Borowitz, #653527) * Make sure pack files are closed correctly. (Tay Ray Chuan) DOCUMENTATION * Run the tutorial inside the test suite. (Jelmer Vernooij) * Reorganized and updated the tutorial. (Jelmer Vernooij, Dave Borowitz, #610550, #610540) 0.6.2 2010-10-16 BUG FIXES * HTTP server correctly handles empty CONTENT_LENGTH. (Dave Borowitz) * Don't error when creating GitFiles with the default mode. (Dave Borowitz) * ThinPackData.from_file now works with resolve_ext_ref callback. (Dave Borowitz) * Provide strnlen() on mingw32 which doesn't have it. (Hans Kolek) * Set bare=true in the configuration for bare repositories. (Dirk Neumann) FEATURES * Use slots for core objects to save up on memory. (Jelmer Vernooij) * Web server supports streaming progress/pack output. (Dave Borowitz) * New public function dulwich.pack.write_pack_header. (Dave Borowitz) * Distinguish between missing files and read errors in HTTP server. (Dave Borowitz) * Initial work on support for fastimport using python-fastimport. (Jelmer Vernooij) * New dulwich.pack.MemoryPackIndex class. (Jelmer Vernooij) * Delegate SHA peeling to the object store. (Dave Borowitz) TESTS * Use GitFile when modifying packed-refs in tests. (Dave Borowitz) * New tests in test_web with better coverage and fewer ad-hoc mocks. (Dave Borowitz) * Standardize quote delimiters in test_protocol. (Dave Borowitz) * Fix use when testtools is installed. (Jelmer Vernooij) * Add trivial test for write_pack_header. (Jelmer Vernooij) * Refactor some of dulwich.tests.compat.server_utils. (Dave Borowitz) * Allow overwriting id property of objects in test utils. (Dave Borowitz) * Use real in-memory objects rather than stubs for server tests. (Dave Borowitz) * Clean up MissingObjectFinder. (Dave Borowitz)
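The dulwich.diff_tree module introduced in 0.7.0 above can be exercised roughly as follows (a hedged sketch; it assumes a repository whose HEAD commit has a parent, and note that old.path or new.path is None for pure adds and deletes):

    from dulwich.repo import Repo
    from dulwich.diff_tree import tree_changes

    repo = Repo("/tmp/example-repo")        # illustrative path
    head = repo[repo.head()]
    parent = repo[head.parents[0]]          # assumes a non-root commit
    # tree_changes yields TreeChange tuples describing adds, deletes
    # and modifications between the two trees.
    for change in tree_changes(repo.object_store, parent.tree, head.tree):
        print(change.type, change.old.path, change.new.path)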
API CHANGES * ObjectStore.iter_tree_contents now walks contents in depth-first, sorted order. (Dave Borowitz) * ObjectStore.iter_tree_contents can optionally yield tree objects as well. (Dave Borowitz) * Add side-band-64k support to ReceivePackHandler. (Dave Borowitz) * Change server capabilities methods to classmethods. (Dave Borowitz) * Tweak server handler injection. (Dave Borowitz) * PackIndex1 and PackIndex2 now subclass FilePackIndex, which is itself a subclass of PackIndex. (Jelmer Vernooij) DOCUMENTATION * Add docstrings for various functions in dulwich.objects. (Jelmer Vernooij) * Clean up docstrings in dulwich.protocol. (Dave Borowitz) * Explicitly specify allowed protocol commands to ProtocolGraphWalker.read_proto_line. (Dave Borowitz) * Add utility functions to DictRefsContainer. (Dave Borowitz) 0.6.1 2010-07-22 BUG FIXES * Fix memory leak in C implementation of sorted_tree_items. (Dave Borowitz) * Use correct path separators for named repo files. (Dave Borowitz) * python >= 2.7 and testtools-based test runners will now also pick up skipped tests correctly. (Jelmer Vernooij) FEATURES * Move named file initialization to BaseRepo. (Dave Borowitz) * Add logging utilities and git/HTTP server logging. (Dave Borowitz) * The GitClient interface has been cleaned up and instances are now reusable. (Augie Fackler) * Allow overriding paths to executables in GitSSHClient. (Ross Light, Jelmer Vernooij, #585204) * Add PackBasedObjectStore.pack_loose_objects(). (Jelmer Vernooij) TESTS * Add tests for sorted_tree_items and C implementation. (Dave Borowitz) * Add a MemoryRepo that stores everything in memory. (Dave Borowitz) * Quiet logging output from web tests. (Dave Borowitz) * More flexible version checking for compat tests. (Dave Borowitz) * Compat tests for servers with and without side-band-64k. (Dave Borowitz) CLEANUP * Clean up file headers. (Dave Borowitz) TESTS * Use GitFile when modifying packed-refs in tests. (Dave Borowitz) API CHANGES * dulwich.pack.write_pack_index_v{1,2} now take a file-like object rather than a filename. (Jelmer Vernooij) * Make dul-daemon/dul-web trivial wrappers around server functionality. (Dave Borowitz) * Move reference WSGI handler to web.py. (Dave Borowitz) * Factor out _report_status in ReceivePackHandler. (Dave Borowitz) * Factor out a function to convert a line to a pkt-line. (Dave Borowitz) 0.6.0 2010-05-22 note: This list is most likely incomplete for 0.6.0. BUG FIXES * Fix ReceivePackHandler to disallow removing refs without delete-refs. (Dave Borowitz) * Deal with capabilities required by the client, even if they can not be disabled in the server. (Dave Borowitz) * Fix trailing newlines in generated patch files. (Jelmer Vernooij) * Implement RefsContainer.__contains__. (Jelmer Vernooij) * Cope with \r in ref files on Windows. (http://github.com/jelmer/dulwich/issues/#issue/13, Jelmer Vernooij) * Fix GitFile breakage on Windows. (Anatoly Techtonik, #557585) * Support packed ref deletion with no peeled refs. (Augie Fackler) * Fix send pack when there is nothing to fetch.
(Augie Fackler) * Fix fetch if no progress function is specified. (Augie Fackler) * Allow double-staging of files that are deleted in the index. (Dave Borowitz) * Fix RefsContainer.add_if_new to support dangling symrefs. (Dave Borowitz) * Non-existent index files in non-bare repositories are now treated as empty. (Dave Borowitz) * Always update ShaFile.id when the contents of the object get changed. (Jelmer Vernooij) * Various Python2.4-compatibility fixes. (Dave Borowitz) * Fix thin pack handling. (Dave Borowitz) FEATURES * Add include-tag capability to server. (Dave Borowitz) * New dulwich.fastexport module that can generate fastexport streams. (Jelmer Vernooij) * Implemented BaseRepo.__contains__. (Jelmer Vernooij) * Add __setitem__ to DictRefsContainer. (Dave Borowitz) * Overall improvements checking Git objects. (Dave Borowitz) * Packs are now verified while they are received. (Dave Borowitz) TESTS * Add framework for testing compatibility with C Git. (Dave Borowitz) * Add various tests for the use of non-bare repositories. (Dave Borowitz) * Cope with diffstat not being available on all platforms. (Tay Ray Chuan, Jelmer Vernooij) * Add make_object and make_commit convenience functions to test utils. (Dave Borowitz) API BREAKAGES * The 'committer' and 'message' arguments to Repo.do_commit() have been swapped. 'committer' is now optional. (Jelmer Vernooij) * Repo.get_blob, Repo.commit, Repo.tag and Repo.tree are now deprecated. (Jelmer Vernooij) * RefsContainer.set_ref() was renamed to RefsContainer.set_symbolic_ref(), for clarity. (Jelmer Vernooij) API CHANGES * The primary serialization APIs in dulwich.objects now work with chunks of strings rather than with full-text strings. (Jelmer Vernooij) 0.5.0 2010-03-03 BUG FIXES * Support custom fields in commits (readonly). (Jelmer Vernooij) * Improved ref handling. (Dave Borowitz) * Rework server protocol to be smarter and interoperate with cgit client. (Dave Borowitz) * Add a GitFile class that uses the same locking protocol for writes as cgit. (Dave Borowitz) * Cope with forward slashes correctly in the index on Windows. (Jelmer Vernooij, #526793) FEATURES * --pure option to setup.py to allow building/installing without the C extensions. (Hal Wine, Anatoly Techtonik, Jelmer Vernooij, #434326) * Implement Repo.get_config(). (Jelmer Vernooij, Augie Fackler) * HTTP dumb and smart server. (Dave Borowitz) * Add abstract baseclass for Repo that does not require file system operations. (Dave Borowitz) 0.4.1 2010-01-03 FEATURES * Add ObjectStore.iter_tree_contents(). (Jelmer Vernooij) * Add Index.changes_from_tree(). (Jelmer Vernooij) * Add ObjectStore.tree_changes(). (Jelmer Vernooij) * Add functionality for writing patches in dulwich.patch. (Jelmer Vernooij) 0.4.0 2009-10-07 DOCUMENTATION * Added tutorial. API CHANGES * dulwich.object_store.tree_lookup_path will now return the mode and sha of the object found rather than the object itself. BUG FIXES * Use binascii.hexlify / binascii.unhexlify for better performance. * Cope with extra unknown data in index files by ignoring it (for now). * Add proper error message when server unexpectedly hangs up. (#415843) * Correctly write opcode for equal in create_delta. 0.3.3 2009-07-23 FEATURES * Implement ShaFile.__hash__(). * Implement Tree.__len__(). BUG FIXES * Check for 'objects' and 'refs' directories when looking for a Git repository. (#380818) 0.3.2 2009-05-20 BUG FIXES * Support the encoding field in Commits. * Some Windows compatibility fixes. * Fixed several issues in commit support. FEATURES * Basic support for handling submodules.
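The ref access model that 0.3.1 below introduced is still the current API; a minimal sketch (repository path and branch names are illustrative assumptions):

    from dulwich.repo import Repo

    repo = Repo("/tmp/example-repo")               # illustrative path
    # Repo.refs is a dictionary-like container mapping ref names to SHAs.
    head_sha = repo.refs[b"refs/heads/master"]
    commit = repo[head_sha]                        # object lookup by SHA
    repo.refs[b"refs/heads/backup"] = head_sha     # create a new ref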
0.3.1 2009-05-13 FEATURES * Implemented Repo.__getitem__, Repo.__setitem__ and Repo.__delitem__ to access content. API CHANGES * Removed Repo.set_ref, Repo.remove_ref, Repo.tags, Repo.get_refs and Repo.heads in favor of Repo.refs, a dictionary-like object for accessing refs. BUG FIXES * Removed import of 'sha' module in objects.py, which was causing deprecation warnings on Python 2.6. 0.3.0 2009-05-10 FEATURES * A new function 'commit_tree' has been added that can commit a tree based on an index. BUG FIXES * The memory usage when generating indexes has been significantly reduced. * A memory leak in the C implementation of parse_tree has been fixed. * The send-pack smart server command now works. (Thanks Scott Chacon) * The handling of short timestamps (less than 10 digits) has been fixed. * The handling of timezones has been fixed. 0.2.1 2009-04-30 BUG FIXES * Fix compatibility with Python2.4. 0.2.0 2009-04-30 FEATURES * Support for activity reporting in smart protocol client. * Optional C extensions for better performance in a couple of places that are performance-critical. 0.1.1 2009-03-13 BUG FIXES * Fixed regression in Repo.find_missing_objects(). * Don't fetch ^{} objects from remote hosts, as requesting them causes a hangup. * Always write pack to disk completely before calculating checksum. FEATURES * Allow disabling thin packs when talking to remote hosts. 0.1.0 2009-01-24 * Initial release. diff --git a/PKG-INFO b/PKG-INFO index 0e2b3e97..e5831e19 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,29 +1,31 @@ Metadata-Version: 1.1 Name: dulwich -Version: 0.17.1 +Version: 0.17.3 Summary: Python Git Library Home-page: https://www.dulwich.io/ Author: Jelmer Vernooij Author-email: jelmer@jelmer.uk License: Apachev2 or later or GPLv2 Description: Python implementation of the Git file formats and protocols, without the need to have git installed. All functionality is available in pure Python. Optional C extensions can be built for improved performance. The project is named after the part of London that Mr. and Mrs. Git live in in the particular Monty Python sketch. Keywords: git Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: License :: OSI Approved :: Apache Software License Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Operating System :: POSIX Classifier: Topic :: Software Development :: Version Control diff --git a/README.md b/README.md index 146b27f6..876eac02 100644 --- a/README.md +++ b/README.md @@ -1,50 +1,50 @@ [![Build Status](https://travis-ci.org/jelmer/dulwich.png?branch=master)](https://travis-ci.org/jelmer/dulwich) This is the Dulwich project. It aims to provide an interface to git repos (both local and remote) that doesn't call out to git directly but instead uses pure Python. **Main website**: [www.dulwich.io](https://www.dulwich.io/) **License**: Apache License, version 2 or GNU General Public License, version 2 or later. The project is named after the part of London that Mr. and Mrs. Git live in in the particular Monty Python sketch. Installation ------------ By default, Dulwich's setup.py will attempt to build and install the optional C extensions.
The reason for this is that they significantly improve the performance since some low-level operations that are executed often are much slower in CPython. If you don't want to install the C bindings, specify the --pure argument to setup.py:: $ python setup.py --pure install or if you are installing from pip:: $ pip install dulwich --global-option="--pure" Further documentation --------------------- The dulwich documentation can be found in doc/ and [on the web](https://www.dulwich.io/docs/). The API reference can be generated using pydoctor, by running "make pydoctor", or [on the web](https://www.dulwich.io/apidocs). Help ---- There is a *#dulwich* IRC channel on the [Freenode](https://www.freenode.net/), and [dulwich-announce](https://groups.google.com/forum/#!forum/dulwich-announce) and [dulwich-discuss](https://groups.google.com/forum/#!forum/dulwich-discuss) mailing lists. Supported versions of Python ---------------------------- -At the moment, Dulwich supports (and is tested on) CPython 2.7, 3.4, 3.5 and Pypy. +At the moment, Dulwich supports (and is tested on) CPython 2.7, 3.3, 3.4, 3.5, 3.6 and Pypy. diff --git a/dulwich.egg-info/PKG-INFO b/dulwich.egg-info/PKG-INFO index 0e2b3e97..e5831e19 100644 --- a/dulwich.egg-info/PKG-INFO +++ b/dulwich.egg-info/PKG-INFO @@ -1,29 +1,31 @@ Metadata-Version: 1.1 Name: dulwich -Version: 0.17.1 +Version: 0.17.3 Summary: Python Git Library Home-page: https://www.dulwich.io/ Author: Jelmer Vernooij Author-email: jelmer@jelmer.uk License: Apachev2 or later or GPLv2 Description: Python implementation of the Git file formats and protocols, without the need to have git installed. All functionality is available in pure Python. Optional C extensions can be built for improved performance. The project is named after the part of London that Mr. and Mrs. Git live in in the particular Monty Python sketch. Keywords: git Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: License :: OSI Approved :: Apache Software License Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Operating System :: POSIX Classifier: Topic :: Software Development :: Version Control diff --git a/dulwich/__init__.py b/dulwich/__init__.py index 0d073412..ec08b88e 100644 --- a/dulwich/__init__.py +++ b/dulwich/__init__.py @@ -1,25 +1,25 @@ # __init__.py -- The git module of dulwich # Copyright (C) 2007 James Westby # Copyright (C) 2008 Jelmer Vernooij # # Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU # General Public License as public by the Free Software Foundation; version 2.0 # or (at your option) any later version. You can redistribute it and/or # modify it under the terms of either of these two licenses. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # You should have received a copy of the licenses; if not, see # <http://www.gnu.org/licenses/> for a copy of the GNU General Public License # and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache # License, Version 2.0.
# """Python implementation of the Git file formats and protocols.""" -__version__ = (0, 17, 1) +__version__ = (0, 17, 3) diff --git a/dulwich/_diff_tree.c b/dulwich/_diff_tree.c index 149dccd8..2ea5ced7 100644 --- a/dulwich/_diff_tree.c +++ b/dulwich/_diff_tree.c @@ -1,509 +1,508 @@ /* * Copyright (C) 2010 Google, Inc. * * Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU * General Public License as public by the Free Software Foundation; version 2.0 * or (at your option) any later version. You can redistribute it and/or * modify it under the terms of either of these two licenses. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * You should have received a copy of the licenses; if not, see * for a copy of the GNU General Public License * and for a copy of the Apache * License, Version 2.0. */ #include #include #ifdef _MSC_VER typedef unsigned short mode_t; #endif #if (PY_VERSION_HEX < 0x02050000) typedef int Py_ssize_t; #endif #if (PY_VERSION_HEX < 0x02060000) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #endif #if PY_MAJOR_VERSION >= 3 #define PyInt_FromLong PyLong_FromLong #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyString_AS_STRING PyBytes_AS_STRING -#define PyString_AsString PyBytes_AsString #define PyString_AsStringAndSize PyBytes_AsStringAndSize #define PyString_Check PyBytes_Check #define PyString_CheckExact PyBytes_CheckExact #define PyString_FromStringAndSize PyBytes_FromStringAndSize #define PyString_FromString PyBytes_FromString #define PyString_GET_SIZE PyBytes_GET_SIZE #define PyString_Size PyBytes_Size #define _PyString_Join _PyBytes_Join #endif static PyObject *tree_entry_cls = NULL, *null_entry = NULL, *defaultdict_cls = NULL, *int_cls = NULL; static int block_size; /** * Free an array of PyObject pointers, decrementing any references. */ static void free_objects(PyObject **objs, Py_ssize_t n) { Py_ssize_t i; for (i = 0; i < n; i++) Py_XDECREF(objs[i]); PyMem_Free(objs); } /** * Get the entries of a tree, prepending the given path. * * :param path: The path to prepend, without trailing slashes. * :param path_len: The length of path. * :param tree: The Tree object to iterate. * :param n: Set to the length of result. * :return: A (C) array of PyObject pointers to TreeEntry objects for each path * in tree. */ static PyObject **tree_entries(char *path, Py_ssize_t path_len, PyObject *tree, Py_ssize_t *n) { PyObject *iteritems, *items, **result = NULL; PyObject *old_entry, *name, *sha; Py_ssize_t i = 0, name_len, new_path_len; char *new_path; if (tree == Py_None) { *n = 0; result = PyMem_New(PyObject*, 0); if (!result) { PyErr_NoMemory(); return NULL; } return result; } iteritems = PyObject_GetAttrString(tree, "iteritems"); if (!iteritems) return NULL; items = PyObject_CallFunctionObjArgs(iteritems, Py_True, NULL); Py_DECREF(iteritems); if (items == NULL) { return NULL; } /* The C implementation of iteritems returns a list, so depend on that. 
*/ if (!PyList_Check(items)) { PyErr_SetString(PyExc_TypeError, "Tree.iteritems() did not return a list"); return NULL; } *n = PyList_Size(items); result = PyMem_New(PyObject*, *n); if (!result) { PyErr_NoMemory(); goto error; } for (i = 0; i < *n; i++) { old_entry = PyList_GetItem(items, i); if (!old_entry) goto error; sha = PyTuple_GetItem(old_entry, 2); if (!sha) goto error; name = PyTuple_GET_ITEM(old_entry, 0); name_len = PyString_Size(name); if (PyErr_Occurred()) goto error; new_path_len = name_len; if (path_len) new_path_len += path_len + 1; new_path = PyMem_Malloc(new_path_len); if (!new_path) { PyErr_NoMemory(); goto error; } if (path_len) { memcpy(new_path, path, path_len); new_path[path_len] = '/'; memcpy(new_path + path_len + 1, PyString_AS_STRING(name), name_len); } else { memcpy(new_path, PyString_AS_STRING(name), name_len); } #if PY_MAJOR_VERSION >= 3 result[i] = PyObject_CallFunction(tree_entry_cls, "y#OO", new_path, new_path_len, PyTuple_GET_ITEM(old_entry, 1), sha); #else result[i] = PyObject_CallFunction(tree_entry_cls, "s#OO", new_path, new_path_len, PyTuple_GET_ITEM(old_entry, 1), sha); #endif PyMem_Free(new_path); if (!result[i]) { goto error; } } Py_DECREF(items); return result; error: if (result) free_objects(result, i); Py_DECREF(items); return NULL; } /** * Use strcmp to compare the paths of two TreeEntry objects. */ static int entry_path_cmp(PyObject *entry1, PyObject *entry2) { PyObject *path1 = NULL, *path2 = NULL; int result = 0; path1 = PyObject_GetAttrString(entry1, "path"); if (!path1) goto done; if (!PyString_Check(path1)) { PyErr_SetString(PyExc_TypeError, "path is not a (byte)string"); goto done; } path2 = PyObject_GetAttrString(entry2, "path"); if (!path2) goto done; if (!PyString_Check(path2)) { PyErr_SetString(PyExc_TypeError, "path is not a (byte)string"); goto done; } result = strcmp(PyString_AS_STRING(path1), PyString_AS_STRING(path2)); done: Py_XDECREF(path1); Py_XDECREF(path2); return result; } static PyObject *py_merge_entries(PyObject *self, PyObject *args) { PyObject *tree1, *tree2, **entries1 = NULL, **entries2 = NULL; PyObject *e1, *e2, *pair, *result = NULL; Py_ssize_t n1 = 0, n2 = 0, i1 = 0, i2 = 0; int path_len; char *path_str; int cmp; #if PY_MAJOR_VERSION >= 3 if (!PyArg_ParseTuple(args, "y#OO", &path_str, &path_len, &tree1, &tree2)) #else if (!PyArg_ParseTuple(args, "s#OO", &path_str, &path_len, &tree1, &tree2)) #endif return NULL; entries1 = tree_entries(path_str, path_len, tree1, &n1); if (!entries1) goto error; entries2 = tree_entries(path_str, path_len, tree2, &n2); if (!entries2) goto error; result = PyList_New(0); if (!result) goto error; while (i1 < n1 && i2 < n2) { cmp = entry_path_cmp(entries1[i1], entries2[i2]); if (PyErr_Occurred()) goto error; if (!cmp) { e1 = entries1[i1++]; e2 = entries2[i2++]; } else if (cmp < 0) { e1 = entries1[i1++]; e2 = null_entry; } else { e1 = null_entry; e2 = entries2[i2++]; } pair = PyTuple_Pack(2, e1, e2); if (!pair) goto error; PyList_Append(result, pair); Py_DECREF(pair); } while (i1 < n1) { pair = PyTuple_Pack(2, entries1[i1++], null_entry); if (!pair) goto error; PyList_Append(result, pair); Py_DECREF(pair); } while (i2 < n2) { pair = PyTuple_Pack(2, null_entry, entries2[i2++]); if (!pair) goto error; PyList_Append(result, pair); Py_DECREF(pair); } goto done; error: Py_XDECREF(result); result = NULL; done: if (entries1) free_objects(entries1, n1); if (entries2) free_objects(entries2, n2); return result; } static PyObject *py_is_tree(PyObject *self, PyObject *args) { PyObject *entry, *mode, 
*result; long lmode; if (!PyArg_ParseTuple(args, "O", &entry)) return NULL; mode = PyObject_GetAttrString(entry, "mode"); if (!mode) return NULL; if (mode == Py_None) { result = Py_False; Py_INCREF(result); } else { lmode = PyInt_AsLong(mode); if (lmode == -1 && PyErr_Occurred()) { Py_DECREF(mode); return NULL; } result = PyBool_FromLong(S_ISDIR((mode_t)lmode)); } Py_DECREF(mode); return result; } static int add_hash(PyObject *get, PyObject *set, char *str, int n) { PyObject *str_obj = NULL, *hash_obj = NULL, *value = NULL, *set_value = NULL; long hash; /* It would be nice to hash without copying str into a PyString, but that * isn't exposed by the API. */ str_obj = PyString_FromStringAndSize(str, n); if (!str_obj) goto error; hash = PyObject_Hash(str_obj); if (hash == -1) goto error; hash_obj = PyInt_FromLong(hash); if (!hash_obj) goto error; value = PyObject_CallFunctionObjArgs(get, hash_obj, NULL); if (!value) goto error; set_value = PyObject_CallFunction(set, "(Ol)", hash_obj, PyInt_AS_LONG(value) + n); if (!set_value) goto error; Py_DECREF(str_obj); Py_DECREF(hash_obj); Py_DECREF(value); Py_DECREF(set_value); return 0; error: Py_XDECREF(str_obj); Py_XDECREF(hash_obj); Py_XDECREF(value); Py_XDECREF(set_value); return -1; } static PyObject *py_count_blocks(PyObject *self, PyObject *args) { PyObject *obj, *chunks = NULL, *chunk, *counts = NULL, *get = NULL, *set = NULL; char *chunk_str, *block = NULL; Py_ssize_t num_chunks, chunk_len; int i, j, n = 0; char c; if (!PyArg_ParseTuple(args, "O", &obj)) goto error; counts = PyObject_CallFunctionObjArgs(defaultdict_cls, int_cls, NULL); if (!counts) goto error; get = PyObject_GetAttrString(counts, "__getitem__"); set = PyObject_GetAttrString(counts, "__setitem__"); chunks = PyObject_CallMethod(obj, "as_raw_chunks", NULL); if (!chunks) goto error; if (!PyList_Check(chunks)) { PyErr_SetString(PyExc_TypeError, "as_raw_chunks() did not return a list"); goto error; } num_chunks = PyList_GET_SIZE(chunks); block = PyMem_New(char, block_size); if (!block) { PyErr_NoMemory(); goto error; } for (i = 0; i < num_chunks; i++) { chunk = PyList_GET_ITEM(chunks, i); if (!PyString_Check(chunk)) { PyErr_SetString(PyExc_TypeError, "chunk is not a string"); goto error; } if (PyString_AsStringAndSize(chunk, &chunk_str, &chunk_len) == -1) goto error; for (j = 0; j < chunk_len; j++) { c = chunk_str[j]; block[n++] = c; if (c == '\n' || n == block_size) { if (add_hash(get, set, block, n) == -1) goto error; n = 0; } } } if (n && add_hash(get, set, block, n) == -1) goto error; Py_DECREF(chunks); Py_DECREF(get); Py_DECREF(set); PyMem_Free(block); return counts; error: Py_XDECREF(chunks); Py_XDECREF(get); Py_XDECREF(set); Py_XDECREF(counts); PyMem_Free(block); return NULL; } static PyMethodDef py_diff_tree_methods[] = { { "_is_tree", (PyCFunction)py_is_tree, METH_VARARGS, NULL }, { "_merge_entries", (PyCFunction)py_merge_entries, METH_VARARGS, NULL }, { "_count_blocks", (PyCFunction)py_count_blocks, METH_VARARGS, NULL }, { NULL, NULL, 0, NULL } }; static PyObject * moduleinit(void) { PyObject *m, *objects_mod = NULL, *diff_tree_mod = NULL; PyObject *block_size_obj = NULL; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "_diff_tree", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ py_diff_tree_methods, /* m_methods */ NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear*/ NULL, /* m_free */ }; m = PyModule_Create(&moduledef); #else m = Py_InitModule("_diff_tree", py_diff_tree_methods); #endif if (!m) goto error; objects_mod = 
PyImport_ImportModule("dulwich.objects"); if (!objects_mod) goto error; tree_entry_cls = PyObject_GetAttrString(objects_mod, "TreeEntry"); Py_DECREF(objects_mod); if (!tree_entry_cls) goto error; diff_tree_mod = PyImport_ImportModule("dulwich.diff_tree"); if (!diff_tree_mod) goto error; null_entry = PyObject_GetAttrString(diff_tree_mod, "_NULL_ENTRY"); if (!null_entry) goto error; block_size_obj = PyObject_GetAttrString(diff_tree_mod, "_BLOCK_SIZE"); if (!block_size_obj) goto error; block_size = (int)PyInt_AsLong(block_size_obj); if (PyErr_Occurred()) goto error; defaultdict_cls = PyObject_GetAttrString(diff_tree_mod, "defaultdict"); if (!defaultdict_cls) goto error; /* This is kind of hacky, but I don't know of a better way to get the * PyObject* version of int. */ int_cls = PyDict_GetItemString(PyEval_GetBuiltins(), "int"); if (!int_cls) { PyErr_SetString(PyExc_NameError, "int"); goto error; } Py_DECREF(diff_tree_mod); return m; error: Py_XDECREF(objects_mod); Py_XDECREF(diff_tree_mod); Py_XDECREF(null_entry); Py_XDECREF(block_size_obj); Py_XDECREF(defaultdict_cls); Py_XDECREF(int_cls); return NULL; } #if PY_MAJOR_VERSION >= 3 PyMODINIT_FUNC PyInit__diff_tree(void) { return moduleinit(); } #else PyMODINIT_FUNC init_diff_tree(void) { moduleinit(); } #endif diff --git a/dulwich/_pack.c b/dulwich/_pack.c index ddac9e70..e3ae9182 100644 --- a/dulwich/_pack.c +++ b/dulwich/_pack.c @@ -1,310 +1,310 @@ /* * Copyright (C) 2009 Jelmer Vernooij * * Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU * General Public License as public by the Free Software Foundation; version 2.0 * or (at your option) any later version. You can redistribute it and/or * modify it under the terms of either of these two licenses. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * You should have received a copy of the licenses; if not, see * <http://www.gnu.org/licenses/> for a copy of the GNU General Public License * and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache * License, Version 2.0. */
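/* Background sketch (editorial summary of the code in this file, not part of
 * the original source): py_apply_delta() below consumes git's binary delta
 * format. A delta starts with two varint-encoded sizes (source length, then
 * target length), 7 bits per byte with the high bit as a continuation flag;
 * see get_delta_header_size(). Each following opcode byte either has its high
 * bit set, selecting a copy from the source buffer (the low 4 bits say which
 * offset bytes follow, the next 3 bits which size bytes; a decoded size of 0
 * means 0x10000), or is a literal insert of `cmd` bytes taken from the delta
 * stream itself; opcode 0 is reserved and raises ApplyDeltaError. The
 * PyString_* spellings are macros mapping onto PyBytes_* on Python 3; the
 * hunk below swaps the PyString_AsString function for the PyString_AS_STRING
 * macro to work around the pypy cpyext issue referenced in the 0.17.2 NEWS
 * entry above. */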
*/ #include #include #if PY_MAJOR_VERSION >= 3 #define PyInt_FromLong PyLong_FromLong #define PyString_AS_STRING PyBytes_AS_STRING -#define PyString_AsString PyBytes_AsString +#define PyString_AS_STRING PyBytes_AS_STRING #define PyString_Check PyBytes_Check #define PyString_CheckExact PyBytes_CheckExact #define PyString_FromStringAndSize PyBytes_FromStringAndSize #define PyString_FromString PyBytes_FromString #define PyString_GET_SIZE PyBytes_GET_SIZE #define PyString_Size PyBytes_Size #define _PyString_Join _PyBytes_Join #endif static PyObject *PyExc_ApplyDeltaError = NULL; static int py_is_sha(PyObject *sha) { if (!PyString_CheckExact(sha)) return 0; if (PyString_Size(sha) != 20) return 0; return 1; } static size_t get_delta_header_size(uint8_t *delta, size_t *index, size_t length) { size_t size = 0; size_t i = 0; while ((*index) < length) { uint8_t cmd = delta[*index]; (*index)++; size |= (cmd & ~0x80) << i; i += 7; if (!(cmd & 0x80)) break; } return size; } static PyObject *py_chunked_as_string(PyObject *py_buf) { if (PyList_Check(py_buf)) { PyObject *sep = PyString_FromString(""); if (sep == NULL) { PyErr_NoMemory(); return NULL; } py_buf = _PyString_Join(sep, py_buf); Py_DECREF(sep); if (py_buf == NULL) { PyErr_NoMemory(); return NULL; } } else if (PyString_Check(py_buf)) { Py_INCREF(py_buf); } else { PyErr_SetString(PyExc_TypeError, "src_buf is not a string or a list of chunks"); return NULL; } return py_buf; } static PyObject *py_apply_delta(PyObject *self, PyObject *args) { uint8_t *src_buf, *delta; size_t src_buf_len, delta_len; size_t src_size, dest_size; size_t outindex = 0; size_t index; uint8_t *out; PyObject *ret, *py_src_buf, *py_delta, *ret_list; if (!PyArg_ParseTuple(args, "OO", &py_src_buf, &py_delta)) return NULL; py_src_buf = py_chunked_as_string(py_src_buf); if (py_src_buf == NULL) return NULL; py_delta = py_chunked_as_string(py_delta); if (py_delta == NULL) { Py_DECREF(py_src_buf); return NULL; } src_buf = (uint8_t *)PyString_AS_STRING(py_src_buf); src_buf_len = (size_t)PyString_GET_SIZE(py_src_buf); delta = (uint8_t *)PyString_AS_STRING(py_delta); delta_len = (size_t)PyString_GET_SIZE(py_delta); index = 0; src_size = get_delta_header_size(delta, &index, delta_len); if (src_size != src_buf_len) { PyErr_Format(PyExc_ApplyDeltaError, "Unexpected source buffer size: %lu vs %ld", src_size, src_buf_len); Py_DECREF(py_src_buf); Py_DECREF(py_delta); return NULL; } dest_size = get_delta_header_size(delta, &index, delta_len); ret = PyString_FromStringAndSize(NULL, dest_size); if (ret == NULL) { PyErr_NoMemory(); Py_DECREF(py_src_buf); Py_DECREF(py_delta); return NULL; } - out = (uint8_t *)PyString_AsString(ret); + out = (uint8_t *)PyString_AS_STRING(ret); while (index < delta_len) { uint8_t cmd = delta[index]; index++; if (cmd & 0x80) { size_t cp_off = 0, cp_size = 0; int i; for (i = 0; i < 4; i++) { if (cmd & (1 << i)) { uint8_t x = delta[index]; index++; cp_off |= x << (i * 8); } } for (i = 0; i < 3; i++) { if (cmd & (1 << (4+i))) { uint8_t x = delta[index]; index++; cp_size |= x << (i * 8); } } if (cp_size == 0) cp_size = 0x10000; if (cp_off + cp_size < cp_size || cp_off + cp_size > src_size || cp_size > dest_size) break; memcpy(out+outindex, src_buf+cp_off, cp_size); outindex += cp_size; dest_size -= cp_size; } else if (cmd != 0) { if (cmd > dest_size) break; memcpy(out+outindex, delta+index, cmd); outindex += cmd; index += cmd; dest_size -= cmd; } else { PyErr_SetString(PyExc_ApplyDeltaError, "Invalid opcode 0"); Py_DECREF(ret); Py_DECREF(py_delta); 
Py_DECREF(py_src_buf); return NULL; } } Py_DECREF(py_src_buf); Py_DECREF(py_delta); if (index != delta_len) { PyErr_SetString(PyExc_ApplyDeltaError, "delta not empty"); Py_DECREF(ret); return NULL; } if (dest_size != 0) { PyErr_SetString(PyExc_ApplyDeltaError, "dest size incorrect"); Py_DECREF(ret); return NULL; } ret_list = Py_BuildValue("[N]", ret); if (ret_list == NULL) { Py_DECREF(ret); return NULL; } return ret_list; } static PyObject *py_bisect_find_sha(PyObject *self, PyObject *args) { PyObject *unpack_name; char *sha; int sha_len; int start, end; #if PY_MAJOR_VERSION >= 3 if (!PyArg_ParseTuple(args, "iiy#O", &start, &end, &sha, &sha_len, &unpack_name)) #else if (!PyArg_ParseTuple(args, "iis#O", &start, &end, &sha, &sha_len, &unpack_name)) #endif return NULL; if (sha_len != 20) { PyErr_SetString(PyExc_ValueError, "Sha is not 20 bytes long"); return NULL; } if (start > end) { PyErr_SetString(PyExc_AssertionError, "start > end"); return NULL; } while (start <= end) { PyObject *file_sha; int i = (start + end)/2; int cmp; file_sha = PyObject_CallFunction(unpack_name, "i", i); if (file_sha == NULL) { return NULL; } if (!py_is_sha(file_sha)) { PyErr_SetString(PyExc_TypeError, "unpack_name returned non-sha object"); Py_DECREF(file_sha); return NULL; } - cmp = memcmp(PyString_AsString(file_sha), sha, 20); + cmp = memcmp(PyString_AS_STRING(file_sha), sha, 20); Py_DECREF(file_sha); if (cmp < 0) start = i + 1; else if (cmp > 0) end = i - 1; else { return PyInt_FromLong(i); } } Py_RETURN_NONE; } static PyMethodDef py_pack_methods[] = { { "apply_delta", (PyCFunction)py_apply_delta, METH_VARARGS, NULL }, { "bisect_find_sha", (PyCFunction)py_bisect_find_sha, METH_VARARGS, NULL }, { NULL, NULL, 0, NULL } }; static PyObject * moduleinit(void) { PyObject *m; PyObject *errors_module; errors_module = PyImport_ImportModule("dulwich.errors"); if (errors_module == NULL) return NULL; PyExc_ApplyDeltaError = PyObject_GetAttrString(errors_module, "ApplyDeltaError"); Py_DECREF(errors_module); if (PyExc_ApplyDeltaError == NULL) return NULL; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "_pack", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ py_pack_methods, /* m_methods */ NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear*/ NULL, /* m_free */ }; m = PyModule_Create(&moduledef); #else m = Py_InitModule3("_pack", py_pack_methods, NULL); #endif if (m == NULL) return NULL; return m; } #if PY_MAJOR_VERSION >= 3 PyMODINIT_FUNC PyInit__pack(void) { return moduleinit(); } #else PyMODINIT_FUNC init_pack(void) { moduleinit(); } #endif diff --git a/dulwich/config.py b/dulwich/config.py index 0edc68a6..c91514bf 100644 --- a/dulwich/config.py +++ b/dulwich/config.py @@ -1,437 +1,438 @@ # config.py - Reading and writing Git config files # Copyright (C) 2011-2013 Jelmer Vernooij # # Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU # General Public License as public by the Free Software Foundation; version 2.0 # or (at your option) any later version. You can redistribute it and/or # modify it under the terms of either of these two licenses. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
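For readers who want the delta algorithm above without the C plumbing, here is a hedged pure-Python sketch: two varint headers (source size, then target size), followed by a stream of copy opcodes (high bit set, low bits selecting which offset/size bytes follow) and insert opcodes. dulwich ships its own pure-Python implementation in dulwich.pack; this sketch also drops the C code's bounds checks for brevity. (py_bisect_find_sha, also above, is an ordinary binary search over index positions and needs no sketch.)

```python
def _delta_header_size(delta, index):
    # Little-endian base-128 size header, as in get_delta_header_size().
    size = 0
    i = 0
    while True:
        cmd = delta[index]
        index += 1
        size |= (cmd & 0x7f) << i
        i += 7
        if not cmd & 0x80:
            break
    return size, index

def apply_delta(src_buf, delta):
    src_size, index = _delta_header_size(delta, 0)
    assert src_size == len(src_buf), 'unexpected source buffer size'
    dest_size, index = _delta_header_size(delta, index)
    out = bytearray()
    while index < len(delta):
        cmd = delta[index]
        index += 1
        if cmd & 0x80:
            # Copy opcode: optional offset/size bytes selected by the low bits.
            cp_off = cp_size = 0
            for i in range(4):
                if cmd & (1 << i):
                    cp_off |= delta[index] << (i * 8)
                    index += 1
            for i in range(3):
                if cmd & (1 << (4 + i)):
                    cp_size |= delta[index] << (i * 8)
                    index += 1
            if cp_size == 0:
                cp_size = 0x10000
            out += src_buf[cp_off:cp_off + cp_size]
        elif cmd != 0:
            # Insert opcode: the next `cmd` bytes come from the delta itself.
            out += delta[index:index + cmd]
            index += cmd
        else:
            raise ValueError('Invalid opcode 0')
    assert len(out) == dest_size, 'dest size incorrect'
    return bytes(out)
```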
# # You should have received a copy of the licenses; if not, see # for a copy of the GNU General Public License # and for a copy of the Apache # License, Version 2.0. # """Reading and writing Git configuration files. TODO: * preserve formatting when updating configuration files * treat subsection names as case-insensitive for [branch.foo] style subsections """ import errno import os from collections import ( OrderedDict, MutableMapping, ) from dulwich.file import GitFile class Config(object): """A Git configuration.""" def get(self, section, name): """Retrieve the contents of a configuration setting. :param section: Tuple with section name and optional subsection name :param name: Name of the setting :return: Contents of the setting :raise KeyError: if the value is not set """ raise NotImplementedError(self.get) def get_boolean(self, section, name, default=None): """Retrieve a configuration setting as boolean. :param section: Tuple with section name and optional subsection name :param name: Name of the setting, including section and possible subsection. :return: Contents of the setting :raise KeyError: if the value is not set """ try: value = self.get(section, name) except KeyError: return default if value.lower() == b"true": return True elif value.lower() == b"false": return False raise ValueError("not a valid boolean string: %r" % value) def set(self, section, name, value): """Set a configuration value. :param section: Tuple with section name and optional subsection name :param name: Name of the configuration value, including section and optional subsection :param value: Value of the setting """ raise NotImplementedError(self.set) def iteritems(self, section): """Iterate over the configuration pairs for a specific section. :param section: Tuple with section name and optional subsection name :return: Iterator over (name, value) pairs """ raise NotImplementedError(self.iteritems) def itersections(self): """Iterate over the sections. :return: Iterator over section tuples """ raise NotImplementedError(self.itersections) def has_section(self, name): """Check if a specified section exists.
:param name: Name of section to check for :return: boolean indicating whether the section exists """ return (name in self.itersections()) class ConfigDict(Config, MutableMapping): """Git configuration stored in a dictionary.""" def __init__(self, values=None): """Create a new ConfigDict.""" if values is None: values = OrderedDict() self._values = values def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self._values) def __eq__(self, other): return ( isinstance(other, self.__class__) and other._values == self._values) def __getitem__(self, key): return self._values.__getitem__(key) def __setitem__(self, key, value): return self._values.__setitem__(key, value) def __delitem__(self, key): return self._values.__delitem__(key) def __iter__(self): return self._values.__iter__() def __len__(self): return self._values.__len__() @classmethod def _parse_setting(cls, name): parts = name.split(".") if len(parts) == 3: return (parts[0], parts[1], parts[2]) else: return (parts[0], None, parts[1]) def get(self, section, name): if not isinstance(section, tuple): section = (section, ) if len(section) > 1: try: return self._values[section][name] except KeyError: pass return self._values[(section[0],)][name] def set(self, section, name, value): if not isinstance(section, tuple): section = (section, ) if not isinstance(name, bytes): raise TypeError(name) if type(value) not in (bool, bytes): raise TypeError(value) self._values.setdefault(section, OrderedDict())[name] = value def iteritems(self, section): return self._values.get(section, OrderedDict()).items() def itersections(self): return self._values.keys() def _format_string(value): if (value.startswith(b" ") or value.startswith(b"\t") or value.endswith(b" ") or + b'#' in value or value.endswith(b"\t")): return b'"' + _escape_value(value) + b'"' return _escape_value(value) _ESCAPE_TABLE = { ord(b"\\"): ord(b"\\"), ord(b"\""): ord(b"\""), ord(b"n"): ord(b"\n"), ord(b"t"): ord(b"\t"), ord(b"b"): ord(b"\b"), } _COMMENT_CHARS = [ord(b"#"), ord(b";")] _WHITESPACE_CHARS = [ord(b"\t"), ord(b" ")] def _parse_string(value): value = bytearray(value.strip()) ret = bytearray() whitespace = bytearray() in_quotes = False i = 0 while i < len(value): c = value[i] if c == ord(b"\\"): i += 1 try: v = _ESCAPE_TABLE[value[i]] except IndexError: raise ValueError( "escape character in %r at %d before end of string" % (value, i)) except KeyError: raise ValueError( "escape character followed by unknown character %s at %d in %r" % (value[i], i, value)) if whitespace: ret.extend(whitespace) whitespace = bytearray() ret.append(v) elif c == ord(b"\""): in_quotes = (not in_quotes) elif c in _COMMENT_CHARS and not in_quotes: # the rest of the line is a comment break elif c in _WHITESPACE_CHARS: whitespace.append(c) else: if whitespace: ret.extend(whitespace) whitespace = bytearray() ret.append(c) i += 1 if in_quotes: raise ValueError("missing end quote") return bytes(ret) def _escape_value(value): """Escape a value.""" return value.replace(b"\\", b"\\\\").replace(b"\n", b"\\n").replace(b"\t", b"\\t").replace(b"\"", b"\\\"") def _check_variable_name(name): for i in range(len(name)): c = name[i:i+1] if not c.isalnum() and c != b'-': return False return True def _check_section_name(name): for i in range(len(name)): c = name[i:i+1] if not c.isalnum() and c not in (b'-', b'.'): return False return True def _strip_comments(line): line = line.split(b"#")[0] line = line.split(b";")[0] return line class ConfigFile(ConfigDict): """A Git configuration file, like .git/config or 
~/.gitconfig. """ @classmethod def from_file(cls, f): """Read configuration from a file-like object.""" ret = cls() section = None setting = None for lineno, line in enumerate(f.readlines()): line = line.lstrip() if setting is None: # Parse section header ("[bla]") if len(line) > 0 and line[:1] == b"[": line = _strip_comments(line).rstrip() last = line.index(b"]") if last == -1: raise ValueError("expected trailing ]") pts = line[1:last].split(b" ", 1) line = line[last+1:] pts[0] = pts[0].lower() if len(pts) == 2: if pts[1][:1] != b"\"" or pts[1][-1:] != b"\"": raise ValueError( "Invalid subsection %r" % pts[1]) else: pts[1] = pts[1][1:-1] if not _check_section_name(pts[0]): raise ValueError("invalid section name %r" % pts[0]) section = (pts[0], pts[1]) else: if not _check_section_name(pts[0]): raise ValueError("invalid section name %r" % pts[0]) pts = pts[0].split(b".", 1) if len(pts) == 2: section = (pts[0], pts[1]) else: section = (pts[0], ) ret._values[section] = OrderedDict() if _strip_comments(line).strip() == b"": continue if section is None: raise ValueError("setting %r without section" % line) try: setting, value = line.split(b"=", 1) except ValueError: setting = line value = b"true" setting = setting.strip().lower() if not _check_variable_name(setting): raise ValueError("invalid variable name %s" % setting) if value.endswith(b"\\\n"): continuation = value[:-2] else: continuation = None value = _parse_string(value) ret._values[section][setting] = value setting = None else: # continuation line if line.endswith(b"\\\n"): continuation += line[:-2] else: continuation += line value = _parse_string(continuation) ret._values[section][setting] = value continuation = None setting = None return ret @classmethod def from_path(cls, path): """Read configuration from a file on disk.""" with GitFile(path, 'rb') as f: ret = cls.from_file(f) ret.path = path return ret def write_to_path(self, path=None): """Write configuration to a file on disk.""" if path is None: path = self.path with GitFile(path, 'wb') as f: self.write_to_file(f) def write_to_file(self, f): """Write configuration to a file-like object.""" for section, values in self._values.items(): try: section_name, subsection_name = section except ValueError: (section_name, ) = section subsection_name = None if subsection_name is None: f.write(b"[" + section_name + b"]\n") else: f.write(b"[" + section_name + b" \"" + subsection_name + b"\"]\n") for key, value in values.items(): if value is True: value = b"true" elif value is False: value = b"false" else: - value = _escape_value(value) + value = _format_string(value) f.write(b"\t" + key + b" = " + value + b"\n") class StackedConfig(Config): """Configuration which reads from multiple config files..""" def __init__(self, backends, writable=None): self.backends = backends self.writable = writable def __repr__(self): return "<%s for %r>" % (self.__class__.__name__, self.backends) @classmethod def default_backends(cls): """Retrieve the default configuration. See git-config(1) for details on the files searched. 
""" paths = [] paths.append(os.path.expanduser("~/.gitconfig")) xdg_config_home = os.environ.get( "XDG_CONFIG_HOME", os.path.expanduser("~/.config/"), ) paths.append(os.path.join(xdg_config_home, "git", "config")) if "GIT_CONFIG_NOSYSTEM" not in os.environ: paths.append("/etc/gitconfig") backends = [] for path in paths: try: cf = ConfigFile.from_path(path) except (IOError, OSError) as e: if e.errno != errno.ENOENT: raise else: continue backends.append(cf) return backends def get(self, section, name): for backend in self.backends: try: return backend.get(section, name) except KeyError: pass raise KeyError(name) def set(self, section, name, value): if self.writable is None: raise NotImplementedError(self.set) return self.writable.set(section, name, value) def parse_submodules(config): """Parse a gitmodules GitConfig file, returning submodules. :param config: A `ConfigFile` :return: list of tuples (submodule path, url, name), where name is quoted part of the section's name. """ for section in config.keys(): section_kind, section_name = section if section_kind == b'submodule': sm_path = config.get(section, b'path') sm_url = config.get(section, b'url') yield (sm_path, sm_url, section_name) diff --git a/dulwich/contrib/swift.py b/dulwich/contrib/swift.py index 8bf5695d..069ee23f 100644 --- a/dulwich/contrib/swift.py +++ b/dulwich/contrib/swift.py @@ -1,1045 +1,1049 @@ # swift.py -- Repo implementation atop OpenStack SWIFT # Copyright (C) 2013 eNovance SAS # # Author: Fabien Boucher # # Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU # General Public License as public by the Free Software Foundation; version 2.0 # or (at your option) any later version. You can redistribute it and/or # modify it under the terms of either of these two licenses. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # You should have received a copy of the licenses; if not, see # for a copy of the GNU General Public License # and for a copy of the Apache # License, Version 2.0. # """Repo implementation atop OpenStack SWIFT.""" # TODO: Refactor to share more code with dulwich/repo.py. 
# TODO(fbo): Second attempt to _send() must be notified via real log # TODO(fbo): More logs for operations import os import stat import zlib import tempfile import posixpath try: import urlparse except ImportError: import urllib.parse as urlparse from io import BytesIO try: from ConfigParser import ConfigParser except ImportError: from configparser import ConfigParser from geventhttpclient import HTTPClient from dulwich.greenthreads import ( GreenThreadsMissingObjectFinder, GreenThreadsObjectStoreIterator, ) from dulwich.lru_cache import LRUSizeCache from dulwich.objects import ( Blob, Commit, Tree, Tag, S_ISGITLINK, ) from dulwich.object_store import ( PackBasedObjectStore, PACKDIR, INFODIR, ) from dulwich.pack import ( PackData, Pack, PackIndexer, PackStreamCopier, write_pack_header, compute_file_sha, iter_sha1, write_pack_index_v2, load_pack_index_file, read_pack_header, _compute_object_size, unpack_object, write_pack_object, ) from dulwich.protocol import TCP_GIT_PORT from dulwich.refs import ( InfoRefsContainer, read_info_refs, write_info_refs, ) from dulwich.repo import ( BaseRepo, OBJECTDIR, ) from dulwich.server import ( Backend, TCPGitServer, ) try: from simplejson import loads as json_loads from simplejson import dumps as json_dumps except ImportError: from json import loads as json_loads from json import dumps as json_dumps import sys """ # Configuration file sample [swift] # Authentication URL (Keystone or Swift) auth_url = http://127.0.0.1:5000/v2.0 # Authentication version to use auth_ver = 2 # The tenant and username separated by a semicolon username = admin;admin # The user password password = pass # The Object storage region to use (auth v2) (Default RegionOne) region_name = RegionOne # The Object storage endpoint URL to use (auth v2) (Default internalURL) endpoint_type = internalURL # Concurrency to use for parallel tasks (Default 10) concurrency = 10 # Size of the HTTP pool (Default 10) http_pool_length = 10 # Timeout delay for HTTP connections (Default 20) http_timeout = 20 # Chunk size to read from pack (Bytes) (Default 12228) chunk_length = 12228 # Cache size (MBytes) (Default 20) cache_length = 20 """ class PackInfoObjectStoreIterator(GreenThreadsObjectStoreIterator): def __len__(self): while len(self.finder.objects_to_send): for _ in range(0, len(self.finder.objects_to_send)): sha = self.finder.next() self._shas.append(sha) return len(self._shas) class PackInfoMissingObjectFinder(GreenThreadsMissingObjectFinder): def next(self): while True: if not self.objects_to_send: return None (sha, name, leaf) = self.objects_to_send.pop() if sha not in self.sha_done: break if not leaf: info = self.object_store.pack_info_get(sha) if info[0] == Commit.type_num: self.add_todo([(info[2], "", False)]) elif info[0] == Tree.type_num: self.add_todo([tuple(i) for i in info[1]]) elif info[0] == Tag.type_num: self.add_todo([(info[1], None, False)]) if sha in self._tagged: self.add_todo([(self._tagged[sha], None, True)]) self.sha_done.add(sha) self.progress("counting objects: %d\r" % len(self.sha_done)) return (sha, name) def load_conf(path=None, file=None): """Load configuration in global var CONF :param path: The path to the configuration file :param file: If provided read instead the file like object """ conf = ConfigParser() if file: - conf.readfp(file) + try: + conf.read_file(file, path) + except AttributeError: + # read_file only exists in Python3 + conf.readfp(file) return conf confpath = None if not path: try: confpath = os.environ['DULWICH_SWIFT_CFG'] except KeyError: raise 
Exception("You need to specify a configuration file") else: confpath = path if not os.path.isfile(confpath): raise Exception("Unable to read configuration file %s" % confpath) conf.read(confpath) return conf def swift_load_pack_index(scon, filename): """Read a pack index file from Swift :param scon: a `SwiftConnector` instance :param filename: Path to the index file objectise :return: a `PackIndexer` instance """ f = scon.get_object(filename) try: return load_pack_index_file(filename, f) finally: f.close() def pack_info_create(pack_data, pack_index): pack = Pack.from_objects(pack_data, pack_index) info = {} for obj in pack.iterobjects(): # Commit if obj.type_num == Commit.type_num: info[obj.id] = (obj.type_num, obj.parents, obj.tree) # Tree elif obj.type_num == Tree.type_num: shas = [(s, n, not stat.S_ISDIR(m)) for n, m, s in obj.items() if not S_ISGITLINK(m)] info[obj.id] = (obj.type_num, shas) # Blob elif obj.type_num == Blob.type_num: info[obj.id] = None # Tag elif obj.type_num == Tag.type_num: info[obj.id] = (obj.type_num, obj.object[1]) return zlib.compress(json_dumps(info)) def load_pack_info(filename, scon=None, file=None): if not file: f = scon.get_object(filename) else: f = file if not f: return None try: return json_loads(zlib.decompress(f.read())) finally: f.close() class SwiftException(Exception): pass class SwiftConnector(object): """A Connector to swift that manage authentication and errors catching """ def __init__(self, root, conf): """ Initialize a SwiftConnector :param root: The swift container that will act as Git bare repository :param conf: A ConfigParser Object """ self.conf = conf self.auth_ver = self.conf.get("swift", "auth_ver") if self.auth_ver not in ["1", "2"]: raise NotImplementedError( "Wrong authentication version use either 1 or 2") self.auth_url = self.conf.get("swift", "auth_url") self.user = self.conf.get("swift", "username") self.password = self.conf.get("swift", "password") self.concurrency = self.conf.getint('swift', 'concurrency') or 10 self.http_timeout = self.conf.getint('swift', 'http_timeout') or 20 self.http_pool_length = \ self.conf.getint('swift', 'http_pool_length') or 10 self.region_name = self.conf.get("swift", "region_name") or "RegionOne" self.endpoint_type = \ self.conf.get("swift", "endpoint_type") or "internalURL" self.cache_length = self.conf.getint("swift", "cache_length") or 20 self.chunk_length = self.conf.getint("swift", "chunk_length") or 12228 self.root = root block_size = 1024 * 12 # 12KB if self.auth_ver == "1": self.storage_url, self.token = self.swift_auth_v1() else: self.storage_url, self.token = self.swift_auth_v2() token_header = {'X-Auth-Token': str(self.token)} self.httpclient = \ HTTPClient.from_url(str(self.storage_url), concurrency=self.http_pool_length, block_size=block_size, connection_timeout=self.http_timeout, network_timeout=self.http_timeout, headers=token_header) self.base_path = str( posixpath.join(urlparse.urlparse(self.storage_url).path, self.root)) def swift_auth_v1(self): self.user = self.user.replace(";", ":") auth_httpclient = HTTPClient.from_url( self.auth_url, connection_timeout=self.http_timeout, network_timeout=self.http_timeout, ) headers = {'X-Auth-User': self.user, 'X-Auth-Key': self.password} path = urlparse.urlparse(self.auth_url).path ret = auth_httpclient.request('GET', path, headers=headers) # Should do something with redirections (301 in my case) if ret.status_code < 200 or ret.status_code >= 300: raise SwiftException('AUTH v1.0 request failed on ' + '%s with error code %s (%s)' % 
(str(auth_httpclient.get_base_url()) + path, ret.status_code, str(ret.items()))) storage_url = ret['X-Storage-Url'] token = ret['X-Auth-Token'] return storage_url, token def swift_auth_v2(self): self.tenant, self.user = self.user.split(';') auth_dict = {} auth_dict['auth'] = {'passwordCredentials': { 'username': self.user, 'password': self.password, }, 'tenantName': self.tenant} auth_json = json_dumps(auth_dict) headers = {'Content-Type': 'application/json'} auth_httpclient = HTTPClient.from_url( self.auth_url, connection_timeout=self.http_timeout, network_timeout=self.http_timeout, ) path = urlparse.urlparse(self.auth_url).path if not path.endswith('tokens'): path = posixpath.join(path, 'tokens') ret = auth_httpclient.request('POST', path, body=auth_json, headers=headers) if ret.status_code < 200 or ret.status_code >= 300: raise SwiftException('AUTH v2.0 request failed on ' + '%s with error code %s (%s)' % (str(auth_httpclient.get_base_url()) + path, ret.status_code, str(ret.items()))) auth_ret_json = json_loads(ret.read()) token = auth_ret_json['access']['token']['id'] catalogs = auth_ret_json['access']['serviceCatalog'] object_store = [o_store for o_store in catalogs if o_store['type'] == 'object-store'][0] endpoints = object_store['endpoints'] endpoint = [endp for endp in endpoints if endp["region"] == self.region_name][0] return endpoint[self.endpoint_type], token def test_root_exists(self): """Check that the Swift container exists :return: True if it exists, or None if not """ ret = self.httpclient.request('HEAD', self.base_path) if ret.status_code == 404: return None if ret.status_code < 200 or ret.status_code > 300: raise SwiftException('HEAD request failed with error code %s' % ret.status_code) return True def create_root(self): """Create the Swift container :raise: `SwiftException` if unable to create """ if not self.test_root_exists(): ret = self.httpclient.request('PUT', self.base_path) if ret.status_code < 200 or ret.status_code > 300: raise SwiftException('PUT request failed with error code %s' % ret.status_code) def get_container_objects(self): """Retrieve the list of objects in a container :return: A list of dicts describing the objects, or None if the container does not exist """ qs = '?format=json' path = self.base_path + qs ret = self.httpclient.request('GET', path) if ret.status_code == 404: return None if ret.status_code < 200 or ret.status_code > 300: raise SwiftException('GET request failed with error code %s' % ret.status_code) content = ret.read() return json_loads(content) def get_object_stat(self, name): """Retrieve object stat :param name: The object name :return: A dict describing the object, or None if the object does not exist """ path = self.base_path + '/' + name ret = self.httpclient.request('HEAD', path) if ret.status_code == 404: return None if ret.status_code < 200 or ret.status_code > 300: raise SwiftException('HEAD request failed with error code %s' % ret.status_code) resp_headers = {} for header, value in ret.items(): resp_headers[header.lower()] = value return resp_headers def put_object(self, name, content): """Put an object :param name: The object name :param content: A file object :raise: `SwiftException` if unable to create """ content.seek(0) data = content.read() path = self.base_path + '/' + name headers = {'Content-Length': str(len(data))} def _send(): ret = self.httpclient.request('PUT', path, body=data, headers=headers) return ret try: # Sometimes gets a Broken Pipe - dirty workaround ret = _send() except Exception: # Second attempt usually works ret = _send() if
ret.status_code < 200 or ret.status_code > 300: raise SwiftException('PUT request failed with error code %s' % ret.status_code) def get_object(self, name, range=None): """Retrieve an object :param name: The object name :param range: A string range like "0-10" to retrieve specified bytes in object content :return: A file-like instance, or a bytestring if range is specified """ headers = {} if range: headers['Range'] = 'bytes=%s' % range path = self.base_path + '/' + name ret = self.httpclient.request('GET', path, headers=headers) if ret.status_code == 404: return None if ret.status_code < 200 or ret.status_code > 300: raise SwiftException('GET request failed with error code %s' % ret.status_code) content = ret.read() if range: return content return BytesIO(content) def del_object(self, name): """Delete an object :param name: The object name :raise: `SwiftException` if unable to delete """ path = self.base_path + '/' + name ret = self.httpclient.request('DELETE', path) if ret.status_code < 200 or ret.status_code > 300: raise SwiftException('DELETE request failed with error code %s' % ret.status_code) def del_root(self): """Delete the root container by removing container content :raise: `SwiftException` if unable to delete """ for obj in self.get_container_objects(): self.del_object(obj['name']) ret = self.httpclient.request('DELETE', self.base_path) if ret.status_code < 200 or ret.status_code > 300: raise SwiftException('DELETE request failed with error code %s' % ret.status_code) class SwiftPackReader(object): """A SwiftPackReader that mimics file read and seek methods The reader reads a specified number of bytes from a given offset of a Swift object. A read offset is kept internally. The reader fetches a specified amount of data from Swift to fill its internal buffer. chunk_length specifies the amount of data to read from Swift. """ def __init__(self, scon, filename, pack_length): """Initialize a SwiftPackReader :param scon: a `SwiftConnector` instance :param filename: the pack filename :param pack_length: The size of the pack object """ self.scon = scon self.filename = filename self.pack_length = pack_length self.offset = 0 self.base_offset = 0 self.buff = b'' self.buff_length = self.scon.chunk_length def _read(self, more=False): if more: self.buff_length = self.buff_length * 2 l = self.base_offset r = min(self.base_offset + self.buff_length, self.pack_length) ret = self.scon.get_object(self.filename, range="%s-%s" % (l, r)) self.buff = ret def read(self, length): """Read a specified number of bytes from the pack object :param length: amount of bytes to read :return: bytestring """ end = self.offset+length if self.base_offset + end > self.pack_length: data = self.buff[self.offset:] self.offset = end return data if end > len(self.buff): # Need to read more from swift self._read(more=True) return self.read(length) data = self.buff[self.offset:end] self.offset = end return data def seek(self, offset): """Seek to a specified offset :param offset: the offset to seek to """ self.base_offset = offset self._read() self.offset = 0 def read_checksum(self): """Read the checksum from the pack :return: the checksum bytestring """ return self.scon.get_object(self.filename, range="-20") class SwiftPackData(PackData): """The data contained in a packfile. We use the SwiftPackReader to read bytes from packs stored in Swift using the Range header feature of Swift.
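To make SwiftPackReader's buffering concrete, here is a toy run against a stub connector. FakeScon is invented for this illustration; it serves the 'l-r' ranges the reader issues as a plain slice, matching how the reader sizes its buffer window. Importing dulwich.contrib.swift assumes geventhttpclient is installed, since the module imports it at load time.

```python
from dulwich.contrib.swift import SwiftPackReader

class FakeScon(object):
    chunk_length = 4  # tiny buffer so read() must refetch and double it

    def __init__(self, data):
        self.data = data

    def get_object(self, filename, range=None):
        # The reader always passes range as "start-end"; treat it as a slice.
        start, end = (int(x) for x in range.split('-'))
        return self.data[start:end]

data = b'0123456789'
reader = SwiftPackReader(FakeScon(data), 'pack-x.pack', len(data))
reader.seek(2)                      # fetches bytes 2-6 into the buffer
assert reader.read(6) == b'234567'  # buffer too small: refetched as 2-10
```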
""" def __init__(self, scon, filename): """ Initialize a SwiftPackReader :param scon: a `SwiftConnector` instance :param filename: the pack filename """ self.scon = scon self._filename = filename self._header_size = 12 headers = self.scon.get_object_stat(self._filename) self.pack_length = int(headers['content-length']) pack_reader = SwiftPackReader(self.scon, self._filename, self.pack_length) (version, self._num_objects) = read_pack_header(pack_reader.read) self._offset_cache = LRUSizeCache(1024*1024*self.scon.cache_length, compute_size=_compute_object_size) self.pack = None def get_object_at(self, offset): if offset in self._offset_cache: return self._offset_cache[offset] assert offset >= self._header_size pack_reader = SwiftPackReader(self.scon, self._filename, self.pack_length) pack_reader.seek(offset) unpacked, _ = unpack_object(pack_reader.read) return (unpacked.pack_type_num, unpacked._obj()) def get_stored_checksum(self): pack_reader = SwiftPackReader(self.scon, self._filename, self.pack_length) return pack_reader.read_checksum() def close(self): pass class SwiftPack(Pack): """A Git pack object. Same implementation as pack.Pack except that _idx_load and _data_load are bounded to Swift version of load_pack_index and PackData. """ def __init__(self, *args, **kwargs): self.scon = kwargs['scon'] del kwargs['scon'] super(SwiftPack, self).__init__(*args, **kwargs) self._pack_info_path = self._basename + '.info' self._pack_info = None self._pack_info_load = lambda: load_pack_info(self._pack_info_path, self.scon) self._idx_load = lambda: swift_load_pack_index(self.scon, self._idx_path) self._data_load = lambda: SwiftPackData(self.scon, self._data_path) @property def pack_info(self): """The pack data object being used.""" if self._pack_info is None: self._pack_info = self._pack_info_load() return self._pack_info class SwiftObjectStore(PackBasedObjectStore): """A Swift Object Store Allow to manage a bare Git repository from Openstack Swift. This object store only supports pack files and not loose objects. """ def __init__(self, scon): """Open a Swift object store. :param scon: A `SwiftConnector` instance """ super(SwiftObjectStore, self).__init__() self.scon = scon self.root = self.scon.root self.pack_dir = posixpath.join(OBJECTDIR, PACKDIR) self._alternates = None @property def packs(self): """List with pack objects.""" if not self._pack_cache: self._update_pack_cache() return self._pack_cache.values() def _update_pack_cache(self): for pack in self._load_packs(): self._pack_cache[pack._basename] = pack def _iter_loose_objects(self): """Loose objects are not supported by this repository """ return [] def iter_shas(self, finder): """An iterator over pack's ObjectStore. 
:return: a `ObjectStoreIterator` or `GreenThreadsObjectStoreIterator` instance if gevent is enabled """ shas = iter(finder.next, None) return PackInfoObjectStoreIterator( self, shas, finder, self.scon.concurrency) def find_missing_objects(self, *args, **kwargs): kwargs['concurrency'] = self.scon.concurrency return PackInfoMissingObjectFinder(self, *args, **kwargs) def _load_packs(self): """Load all packs from Swift :return: a list of `SwiftPack` instances """ objects = self.scon.get_container_objects() pack_files = [o['name'].replace(".pack", "") for o in objects if o['name'].endswith(".pack")] return [SwiftPack(pack, scon=self.scon) for pack in pack_files] def pack_info_get(self, sha): for pack in self.packs: if sha in pack: return pack.pack_info[sha] def _collect_ancestors(self, heads, common=set()): def _find_parents(commit): for pack in self.packs: if commit in pack: try: parents = pack.pack_info[commit][1] except KeyError: # Seems to have no parents return [] return parents bases = set() commits = set() queue = [] queue.extend(heads) while queue: e = queue.pop(0) if e in common: bases.add(e) elif e not in commits: commits.add(e) parents = _find_parents(e) queue.extend(parents) return (commits, bases) def add_pack(self): """Add a new pack to this object store. :return: Fileobject to write to and a commit function to call when the pack is finished. """ f = BytesIO() def commit(): f.seek(0) pack = PackData(file=f, filename="") entries = pack.sorted_entries() if len(entries): basename = posixpath.join(self.pack_dir, "pack-%s" % iter_sha1(entry[0] for entry in entries)) index = BytesIO() write_pack_index_v2(index, entries, pack.get_stored_checksum()) self.scon.put_object(basename + ".pack", f) f.close() self.scon.put_object(basename + ".idx", index) index.close() final_pack = SwiftPack(basename, scon=self.scon) final_pack.check_length_and_checksum() self._add_known_pack(basename, final_pack) return final_pack else: return None def abort(): pass return f, commit, abort def add_object(self, obj): self.add_objects([(obj, None), ]) def _pack_cache_stale(self): return False def _get_loose_object(self, sha): return None def add_thin_pack(self, read_all, read_some): """Read a thin pack Read it from a stream and complete it in a temporary file. Then the pack and the corresponding index file are uploaded to Swift. """ fd, path = tempfile.mkstemp(prefix='tmp_pack_') f = os.fdopen(fd, 'w+b') try: indexer = PackIndexer(f, resolve_ext_ref=self.get_raw) copier = PackStreamCopier(read_all, read_some, f, delta_iter=indexer) copier.verify() return self._complete_thin_pack(f, path, copier, indexer) finally: f.close() os.unlink(path) def _complete_thin_pack(self, f, path, copier, indexer): entries = list(indexer) # Update the header with the new number of objects. f.seek(0) write_pack_header(f, len(entries) + len(indexer.ext_refs())) # Must flush before reading (http://bugs.python.org/issue3207) f.flush() # Rescan the rest of the pack, computing the SHA with the new header. new_sha = compute_file_sha(f, end_ofs=-20) # Must reposition before writing (http://bugs.python.org/issue3207) f.seek(0, os.SEEK_CUR) # Complete the pack. for ext_sha in indexer.ext_refs(): assert len(ext_sha) == 20 type_num, data = self.get_raw(ext_sha) offset = f.tell() crc32 = write_pack_object(f, type_num, data, sha=new_sha) entries.append((ext_sha, offset, crc32)) pack_sha = new_sha.digest() f.write(pack_sha) f.flush() # Move the pack in. 
entries.sort() pack_base_name = posixpath.join( self.pack_dir, 'pack-' + iter_sha1(e[0] for e in entries).decode(sys.getfilesystemencoding())) self.scon.put_object(pack_base_name + '.pack', f) # Write the index. filename = pack_base_name + '.idx' index_file = BytesIO() write_pack_index_v2(index_file, entries, pack_sha) self.scon.put_object(filename, index_file) # Write pack info. f.seek(0) pack_data = PackData(filename="", file=f) index_file.seek(0) pack_index = load_pack_index_file('', index_file) serialized_pack_info = pack_info_create(pack_data, pack_index) f.close() index_file.close() pack_info_file = BytesIO(serialized_pack_info) filename = pack_base_name + '.info' self.scon.put_object(filename, pack_info_file) pack_info_file.close() # Add the pack to the store and return it. final_pack = SwiftPack(pack_base_name, scon=self.scon) final_pack.check_length_and_checksum() self._add_known_pack(pack_base_name, final_pack) return final_pack class SwiftInfoRefsContainer(InfoRefsContainer): """Manage references in info/refs object. """ def __init__(self, scon, store): self.scon = scon self.filename = 'info/refs' self.store = store f = self.scon.get_object(self.filename) if not f: f = BytesIO(b'') super(SwiftInfoRefsContainer, self).__init__(f) def _load_check_ref(self, name, old_ref): self._check_refname(name) f = self.scon.get_object(self.filename) if not f: return {} refs = read_info_refs(f) if old_ref is not None: if refs[name] != old_ref: return False return refs def _write_refs(self, refs): f = BytesIO() f.writelines(write_info_refs(refs, self.store)) self.scon.put_object(self.filename, f) def set_if_equals(self, name, old_ref, new_ref): """Set a refname to new_ref only if it currently equals old_ref. """ if name == 'HEAD': return True refs = self._load_check_ref(name, old_ref) if not isinstance(refs, dict): return False refs[name] = new_ref self._write_refs(refs) self._refs[name] = new_ref return True def remove_if_equals(self, name, old_ref): """Remove a refname only if it currently equals old_ref. """ if name == 'HEAD': return True refs = self._load_check_ref(name, old_ref) if not isinstance(refs, dict): return False del refs[name] self._write_refs(refs) del self._refs[name] return True def allkeys(self): try: self._refs['HEAD'] = self._refs['refs/heads/master'] except KeyError: pass return self._refs.keys() class SwiftRepo(BaseRepo): def __init__(self, root, conf): """Init a Git bare Repository on top of a Swift container. References are managed in info/refs objects by `SwiftInfoRefsContainer`. The root attribute is the Swift container that contain the Git bare repository. :param root: The container which contains the bare repo :param conf: A ConfigParser object """ self.root = root.lstrip('/') self.conf = conf self.scon = SwiftConnector(self.root, self.conf) objects = self.scon.get_container_objects() if not objects: raise Exception('There is not any GIT repo here : %s' % self.root) objects = [o['name'].split('/')[0] for o in objects] if OBJECTDIR not in objects: raise Exception('This repository (%s) is not bare.' % self.root) self.bare = True self._controldir = self.root object_store = SwiftObjectStore(self.scon) refs = SwiftInfoRefsContainer(self.scon, object_store) BaseRepo.__init__(self, object_store, refs) def _determine_file_mode(self): """Probe the file-system to determine whether permissions can be trusted. :return: True if permissions can be trusted, False otherwise. 
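SwiftInfoRefsContainer above keeps all refs in a single 'info/refs' object, which appears to use the same one-ref-per-line, tab-separated layout git produces for dumb transports; the real parsing and writing is done by dulwich.refs.read_info_refs and write_info_refs, which are not shown in this diff. A minimal sketch of that layout:

```python
def parse_info_refs(data):
    # data is the raw bytes of an info/refs object: b'<hexsha>\t<refname>\n' lines.
    refs = {}
    for line in data.splitlines():
        sha, name = line.split(b'\t', 1)
        refs[name] = sha
    return refs

sample = b'a' * 40 + b'\trefs/heads/master\n'
assert parse_info_refs(sample) == {b'refs/heads/master': b'a' * 40}
```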
""" return False def _put_named_file(self, filename, contents): """Put an object in a Swift container :param filename: the path to the object to put on Swift :param contents: the content as bytestring """ f = BytesIO() f.write(contents) self.scon.put_object(filename, f) f.close() @classmethod def init_bare(cls, scon, conf): """Create a new bare repository. :param scon: a `SwiftConnector` instance :param conf: a ConfigParser object :return: a `SwiftRepo` instance """ scon.create_root() for obj in [posixpath.join(OBJECTDIR, PACKDIR), posixpath.join(INFODIR, 'refs')]: scon.put_object(obj, BytesIO(b'')) ret = cls(scon.root, conf) ret._init_files(True) return ret class SwiftSystemBackend(Backend): def __init__(self, logger, conf): self.conf = conf self.logger = logger def open_repository(self, path): self.logger.info('opening repository at %s', path) return SwiftRepo(path, self.conf) def cmd_daemon(args): """Entry point for starting a TCP git server.""" import optparse parser = optparse.OptionParser() parser.add_option("-l", "--listen_address", dest="listen_address", default="127.0.0.1", help="Binding IP address.") parser.add_option("-p", "--port", dest="port", type=int, default=TCP_GIT_PORT, help="Binding TCP port.") parser.add_option("-c", "--swift_config", dest="swift_config", default="", help="Path to the configuration file for Swift backend.") options, args = parser.parse_args(args) try: import gevent import geventhttpclient except ImportError: print("gevent and geventhttpclient libraries are mandatory " " for use the Swift backend.") sys.exit(1) import gevent.monkey gevent.monkey.patch_socket() from dulwich.contrib.swift import load_conf from dulwich import log_utils logger = log_utils.getLogger(__name__) conf = load_conf(options.swift_config) backend = SwiftSystemBackend(logger, conf) log_utils.default_logging_config() server = TCPGitServer(backend, options.listen_address, port=options.port) server.serve_forever() def cmd_init(args): import optparse parser = optparse.OptionParser() parser.add_option("-c", "--swift_config", dest="swift_config", default="", help="Path to the configuration file for Swift backend.") options, args = parser.parse_args(args) conf = load_conf(options.swift_config) if args == []: parser.error("missing repository name") repo = args[0] scon = SwiftConnector(repo, conf) SwiftRepo.init_bare(scon, conf) def main(argv=sys.argv): commands = { "init": cmd_init, "daemon": cmd_daemon, } if len(sys.argv) < 2: print("Usage: %s <%s> [OPTIONS...]" % (sys.argv[0], "|".join(commands.keys()))) sys.exit(1) cmd = sys.argv[1] if not cmd in commands: print("No such subcommand: %s" % cmd) sys.exit(1) commands[cmd](sys.argv[2:]) if __name__ == '__main__': main() diff --git a/dulwich/objects.py b/dulwich/objects.py index b807644c..de004dea 100644 --- a/dulwich/objects.py +++ b/dulwich/objects.py @@ -1,1313 +1,1313 @@ # objects.py -- Access to base git objects # Copyright (C) 2007 James Westby # Copyright (C) 2008-2013 Jelmer Vernooij # # Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU # General Public License as public by the Free Software Foundation; version 2.0 # or (at your option) any later version. You can redistribute it and/or # modify it under the terms of either of these two licenses. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # # You should have received a copy of the licenses; if not, see # for a copy of the GNU General Public License # and for a copy of the Apache # License, Version 2.0. # """Access to base git objects.""" import binascii from io import BytesIO from collections import namedtuple import os import posixpath import stat import warnings import zlib from hashlib import sha1 from dulwich.errors import ( ChecksumMismatch, NotBlobError, NotCommitError, NotTagError, NotTreeError, ObjectFormatException, ) from dulwich.file import GitFile ZERO_SHA = b'0' * 40 # Header fields for commits _TREE_HEADER = b'tree' _PARENT_HEADER = b'parent' _AUTHOR_HEADER = b'author' _COMMITTER_HEADER = b'committer' _ENCODING_HEADER = b'encoding' _MERGETAG_HEADER = b'mergetag' _GPGSIG_HEADER = b'gpgsig' # Header fields for objects _OBJECT_HEADER = b'object' _TYPE_HEADER = b'type' _TAG_HEADER = b'tag' _TAGGER_HEADER = b'tagger' S_IFGITLINK = 0o160000 def S_ISGITLINK(m): """Check if a mode indicates a submodule. :param m: Mode to check :return: a ``boolean`` """ return (stat.S_IFMT(m) == S_IFGITLINK) def _decompress(string): dcomp = zlib.decompressobj() dcomped = dcomp.decompress(string) dcomped += dcomp.flush() return dcomped def sha_to_hex(sha): """Takes a string and returns the hex of the sha within""" hexsha = binascii.hexlify(sha) assert len(hexsha) == 40, "Incorrect length of sha1 string: %d" % hexsha return hexsha def hex_to_sha(hex): """Takes a hex sha and returns a binary sha""" assert len(hex) == 40, "Incorrect length of hexsha: %s" % hex try: return binascii.unhexlify(hex) except TypeError as exc: if not isinstance(hex, bytes): raise raise ValueError(exc.args[0]) def valid_hexsha(hex): if len(hex) != 40: return False try: binascii.unhexlify(hex) except (TypeError, binascii.Error): return False else: return True def hex_to_filename(path, hex): """Takes a hex sha and returns its filename relative to the given path.""" # os.path.join accepts bytes or unicode, but all args must be of the same # type. Make sure that hex which is expected to be bytes, is the same type # as path. if getattr(path, 'encode', None) is not None: hex = hex.decode('ascii') dir = hex[:2] file = hex[2:] # Check from object dir return os.path.join(path, dir, file) def filename_to_hex(filename): """Takes an object filename and returns its corresponding hex sha.""" # grab the last (up to) two path components names = filename.rsplit(os.path.sep, 2)[-2:] errmsg = "Invalid object filename: %s" % filename assert len(names) == 2, errmsg base, rest = names assert len(base) == 2 and len(rest) == 38, errmsg hex = (base + rest).encode('ascii') hex_to_sha(hex) return hex def object_header(num_type, length): """Return an object header for the given numeric type and text length.""" return object_class(num_type).type_name + b' ' + str(length).encode('ascii') + b'\0' def serializable_property(name, docstring=None): """A property that helps tracking whether serialization is necessary. """ def set(obj, value): setattr(obj, "_"+name, value) obj._needs_serialization = True def get(obj): return getattr(obj, "_"+name) return property(get, set, doc=docstring) def object_class(type): """Get the object class corresponding to the given type. :param type: Either a type name string or a numeric type. :return: The ShaFile subclass corresponding to the given type, or None if type is not a valid type name/number. 
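A few self-checking examples of the object helpers defined above; the hex_to_filename assertion assumes a POSIX path separator:

```python
import stat
from dulwich.objects import (
    S_ISGITLINK, hex_to_filename, hex_to_sha, object_header, sha_to_hex)

assert S_ISGITLINK(0o160000)                 # submodule (gitlink) mode
assert not S_ISGITLINK(stat.S_IFREG | 0o644)
assert sha_to_hex(b'\xab' * 20) == b'ab' * 20
assert hex_to_sha(b'ab' * 20) == b'\xab' * 20
# The first two hex digits become the fan-out directory:
assert hex_to_filename('/objects', b'ab' * 20) == '/objects/ab/' + 'ab' * 19
assert object_header(3, 7) == b'blob 7\x00'  # type_num 3 is Blob
```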
""" return _TYPE_MAP.get(type, None) def check_hexsha(hex, error_msg): """Check if a string is a valid hex sha string. :param hex: Hex string to check :param error_msg: Error message to use in exception :raise ObjectFormatException: Raised when the string is not valid """ if not valid_hexsha(hex): raise ObjectFormatException("%s %s" % (error_msg, hex)) def check_identity(identity, error_msg): """Check if the specified identity is valid. This will raise an exception if the identity is not valid. :param identity: Identity string :param error_msg: Error message to use in exception """ email_start = identity.find(b'<') email_end = identity.find(b'>') if (email_start < 0 or email_end < 0 or email_end <= email_start or identity.find(b'<', email_start + 1) >= 0 or identity.find(b'>', email_end + 1) >= 0 or not identity.endswith(b'>')): raise ObjectFormatException(error_msg) def git_line(*items): """Formats items into a space sepreated line.""" return b' '.join(items) + b'\n' class FixedSha(object): """SHA object that behaves like hashlib's but is given a fixed value.""" __slots__ = ('_hexsha', '_sha') def __init__(self, hexsha): if getattr(hexsha, 'encode', None) is not None: hexsha = hexsha.encode('ascii') if not isinstance(hexsha, bytes): raise TypeError('Expected bytes for hexsha, got %r' % hexsha) self._hexsha = hexsha self._sha = hex_to_sha(hexsha) def digest(self): """Return the raw SHA digest.""" return self._sha def hexdigest(self): """Return the hex SHA digest.""" return self._hexsha.decode('ascii') class ShaFile(object): """A git SHA file.""" __slots__ = ('_chunked_text', '_sha', '_needs_serialization') @staticmethod def _parse_legacy_object_header(magic, f): """Parse a legacy object, creating it but not reading the file.""" bufsize = 1024 decomp = zlib.decompressobj() header = decomp.decompress(magic) start = 0 end = -1 while end < 0: extra = f.read(bufsize) header += decomp.decompress(extra) magic += extra end = header.find(b'\0', start) start = len(header) header = header[:end] type_name, size = header.split(b' ', 1) size = int(size) # sanity check obj_class = object_class(type_name) if not obj_class: raise ObjectFormatException("Not a known type: %s" % type_name) return obj_class() def _parse_legacy_object(self, map): """Parse a legacy object, setting the raw string.""" text = _decompress(map) header_end = text.find(b'\0') if header_end < 0: raise ObjectFormatException("Invalid object header, no \\0") self.set_raw_string(text[header_end+1:]) def as_legacy_object_chunks(self): """Return chunks representing the object in the experimental format. :return: List of strings """ compobj = zlib.compressobj() yield compobj.compress(self._header()) for chunk in self.as_raw_chunks(): yield compobj.compress(chunk) yield compobj.flush() def as_legacy_object(self): """Return string representing the object in the experimental format. """ return b''.join(self.as_legacy_object_chunks()) def as_raw_chunks(self): """Return chunks with serialization of the object. :return: List of strings, not necessarily one per line """ if self._needs_serialization: self._sha = None self._chunked_text = self._serialize() self._needs_serialization = False return self._chunked_text def as_raw_string(self): """Return raw string with serialization of the object. 
:return: String object """ return b''.join(self.as_raw_chunks()) def __str__(self): """Return raw string serialization of this object.""" return self.as_raw_string() def __hash__(self): """Return unique hash for this object.""" return hash(self.id) def as_pretty_string(self): """Return a string representing this object, fit for display.""" return self.as_raw_string() def set_raw_string(self, text, sha=None): """Set the contents of this object from a serialized string.""" if not isinstance(text, bytes): raise TypeError('Expected bytes for text, got %r' % text) self.set_raw_chunks([text], sha) def set_raw_chunks(self, chunks, sha=None): """Set the contents of this object from a list of chunks.""" self._chunked_text = chunks self._deserialize(chunks) if sha is None: self._sha = None else: self._sha = FixedSha(sha) self._needs_serialization = False @staticmethod def _parse_object_header(magic, f): """Parse a new style object, creating it but not reading the file.""" num_type = (ord(magic[0:1]) >> 4) & 7 obj_class = object_class(num_type) if not obj_class: raise ObjectFormatException("Not a known type %d" % num_type) return obj_class() def _parse_object(self, map): """Parse a new style object, setting self._text.""" # skip type and size; type must have already been determined, and # we trust zlib to fail if it's otherwise corrupted byte = ord(map[0:1]) used = 1 while (byte & 0x80) != 0: byte = ord(map[used:used+1]) used += 1 raw = map[used:] self.set_raw_string(_decompress(raw)) @classmethod def _is_legacy_object(cls, magic): b0 = ord(magic[0:1]) b1 = ord(magic[1:2]) word = (b0 << 8) + b1 return (b0 & 0x8F) == 0x08 and (word % 31) == 0 @classmethod def _parse_file(cls, f): map = f.read() if cls._is_legacy_object(map): obj = cls._parse_legacy_object_header(map, f) obj._parse_legacy_object(map) else: obj = cls._parse_object_header(map, f) obj._parse_object(map) return obj def __init__(self): """Don't call this directly""" self._sha = None self._chunked_text = [] self._needs_serialization = True def _deserialize(self, chunks): raise NotImplementedError(self._deserialize) def _serialize(self): raise NotImplementedError(self._serialize) @classmethod def from_path(cls, path): """Open a SHA file from disk.""" with GitFile(path, 'rb') as f: return cls.from_file(f) @classmethod def from_file(cls, f): """Get the contents of a SHA file on disk.""" try: obj = cls._parse_file(f) obj._sha = None return obj except (IndexError, ValueError): raise ObjectFormatException("invalid object header") @staticmethod def from_raw_string(type_num, string, sha=None): """Creates an object of the indicated type from the raw string given. :param type_num: The numeric type of the object. :param string: The raw uncompressed contents. :param sha: Optional known sha for the object """ obj = object_class(type_num)() obj.set_raw_string(string, sha) return obj @staticmethod def from_raw_chunks(type_num, chunks, sha=None): """Creates an object of the indicated type from the raw chunks given. :param type_num: The numeric type of the object. :param chunks: An iterable of the raw uncompressed contents. :param sha: Optional known sha for the object """ obj = object_class(type_num)() obj.set_raw_chunks(chunks, sha) return obj @classmethod def from_string(cls, string): """Create a ShaFile from a string.""" obj = cls() obj.set_raw_string(string) return obj def _check_has_member(self, member, error_msg): """Check that the object has a given member variable. 
:param member: the member variable to check for :param error_msg: the message for an error if the member is missing :raise ObjectFormatException: with the given error_msg if member is missing or is None """ if getattr(self, member, None) is None: raise ObjectFormatException(error_msg) def check(self): """Check this object for internal consistency. :raise ObjectFormatException: if the object is malformed in some way :raise ChecksumMismatch: if the object was created with a SHA that does not match its contents """ # TODO: if we find that error-checking during object parsing is a # performance bottleneck, those checks should be moved to the class's # check() method during optimization so we can still check the object # when necessary. old_sha = self.id try: self._deserialize(self.as_raw_chunks()) self._sha = None new_sha = self.id except Exception as e: raise ObjectFormatException(e) if old_sha != new_sha: raise ChecksumMismatch(new_sha, old_sha) def _header(self): return object_header(self.type, self.raw_length()) def raw_length(self): """Returns the length of the raw string of this object.""" ret = 0 for chunk in self.as_raw_chunks(): ret += len(chunk) return ret def sha(self): """The SHA1 object that is the name of this object.""" if self._sha is None or self._needs_serialization: # this is a local because as_raw_chunks() overwrites self._sha new_sha = sha1() new_sha.update(self._header()) for chunk in self.as_raw_chunks(): new_sha.update(chunk) self._sha = new_sha return self._sha def copy(self): """Create a new copy of this SHA1 object from its raw string""" obj_class = object_class(self.get_type()) return obj_class.from_raw_string( self.get_type(), self.as_raw_string(), self.id) @property def id(self): """The hex SHA of this object.""" return self.sha().hexdigest().encode('ascii') def get_type(self): """Return the type number for this object class.""" return self.type_num def set_type(self, type): """Set the type number for this object class.""" self.type_num = type # DEPRECATED: use type_num or type_name as needed. type = property(get_type, set_type) def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self.id) def __ne__(self, other): return not isinstance(other, ShaFile) or self.id != other.id def __eq__(self, other): """Return True if the SHAs of the two objects match. It doesn't make sense to talk about an order on ShaFiles, so we don't override the rich comparison methods (__le__, etc.). 
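As the comparison methods above show, ShaFile identity is the hex sha of the serialized object, so equal content means equal objects. A quick check using Blob, defined just below (the expected sha is the well-known git hash of b'hello\n'):

```python
from dulwich.objects import Blob

b1 = Blob.from_string(b'hello\n')
b2 = Blob.from_string(b'hello\n')
assert b1 == b2 and hash(b1) == hash(b2)  # identity is the content sha
assert b1.id == b'ce013625030ba8dba906f756967f9e9ca394464a'
assert b1.raw_length() == 6
```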
""" return isinstance(other, ShaFile) and self.id == other.id def __lt__(self, other): if not isinstance(other, ShaFile): raise TypeError return self.id < other.id def __le__(self, other): if not isinstance(other, ShaFile): raise TypeError return self.id <= other.id def __cmp__(self, other): if not isinstance(other, ShaFile): raise TypeError return cmp(self.id, other.id) class Blob(ShaFile): """A Git Blob object.""" __slots__ = () type_name = b'blob' type_num = 3 def __init__(self): super(Blob, self).__init__() self._chunked_text = [] self._needs_serialization = False def _get_data(self): return self.as_raw_string() def _set_data(self, data): self.set_raw_string(data) data = property(_get_data, _set_data, "The text contained within the blob object.") def _get_chunked(self): return self._chunked_text def _set_chunked(self, chunks): self._chunked_text = chunks def _serialize(self): return self._chunked_text def _deserialize(self, chunks): self._chunked_text = chunks chunked = property(_get_chunked, _set_chunked, "The text within the blob object, as chunks (not necessarily lines).") @classmethod def from_path(cls, path): blob = ShaFile.from_path(path) if not isinstance(blob, cls): raise NotBlobError(path) return blob def check(self): """Check this object for internal consistency. :raise ObjectFormatException: if the object is malformed in some way """ super(Blob, self).check() def splitlines(self): """Return list of lines in this blob. This preserves the original line endings. """ chunks = self.chunked if not chunks: return [] if len(chunks) == 1: return chunks[0].splitlines(True) remaining = None ret = [] for chunk in chunks: lines = chunk.splitlines(True) if len(lines) > 1: ret.append((remaining or b"") + lines[0]) ret.extend(lines[1:-1]) remaining = lines[-1] elif len(lines) == 1: if remaining is None: remaining = lines.pop() else: remaining += lines.pop() if remaining is not None: ret.append(remaining) return ret def _parse_message(chunks): """Parse a message with a list of fields and a body. :param chunks: the raw chunks of the tag or commit object. :return: iterator of tuples of (field, value), one per header line, in the order read from the text, possibly including duplicates. Includes a field named None for the freeform tag/commit text. """ f = BytesIO(b''.join(chunks)) k = None v = "" eof = False def _strip_last_newline(value): """Strip the last newline from value""" if value and value.endswith(b'\n'): return value[:-1] return value # Parse the headers # # Headers can contain newlines. The next line is indented with a space. # We store the latest key as 'k', and the accumulated value as 'v'. for l in f: if l.startswith(b' '): # Indented continuation of the previous line v += l[1:] else: if k is not None: # We parsed a new header, return its value yield (k, _strip_last_newline(v)) if l == b'\n': # Empty line indicates end of headers break (k, v) = l.split(b' ', 1) else: # We reached end of file before the headers ended. We still need to # return the previous header, then we need to return a None field for # the text. eof = True if k is not None: yield (k, _strip_last_newline(v)) yield (None, None) if not eof: # We didn't reach the end of file while parsing headers. We can return # the rest of the file as a message. 
yield (None, f.read()) f.close() class Tag(ShaFile): """A Git Tag object.""" type_name = b'tag' type_num = 4 __slots__ = ('_tag_timezone_neg_utc', '_name', '_object_sha', '_object_class', '_tag_time', '_tag_timezone', '_tagger', '_message') def __init__(self): super(Tag, self).__init__() self._tagger = None self._tag_time = None self._tag_timezone = None self._tag_timezone_neg_utc = False @classmethod def from_path(cls, filename): tag = ShaFile.from_path(filename) if not isinstance(tag, cls): raise NotTagError(filename) return tag def check(self): """Check this object for internal consistency. :raise ObjectFormatException: if the object is malformed in some way """ super(Tag, self).check() self._check_has_member("_object_sha", "missing object sha") self._check_has_member("_object_class", "missing object type") self._check_has_member("_name", "missing tag name") if not self._name: raise ObjectFormatException("empty tag name") check_hexsha(self._object_sha, "invalid object sha") if getattr(self, "_tagger", None): check_identity(self._tagger, "invalid tagger") last = None for field, _ in _parse_message(self._chunked_text): if field == _OBJECT_HEADER and last is not None: raise ObjectFormatException("unexpected object") elif field == _TYPE_HEADER and last != _OBJECT_HEADER: raise ObjectFormatException("unexpected type") elif field == _TAG_HEADER and last != _TYPE_HEADER: raise ObjectFormatException("unexpected tag name") elif field == _TAGGER_HEADER and last != _TAG_HEADER: raise ObjectFormatException("unexpected tagger") last = field def _serialize(self): chunks = [] chunks.append(git_line(_OBJECT_HEADER, self._object_sha)) chunks.append(git_line(_TYPE_HEADER, self._object_class.type_name)) chunks.append(git_line(_TAG_HEADER, self._name)) if self._tagger: if self._tag_time is None: chunks.append(git_line(_TAGGER_HEADER, self._tagger)) else: chunks.append(git_line( _TAGGER_HEADER, self._tagger, str(self._tag_time).encode('ascii'), format_timezone(self._tag_timezone, self._tag_timezone_neg_utc))) if self._message is not None: chunks.append(b'\n') # To close headers chunks.append(self._message) return chunks def _deserialize(self, chunks): """Grab the metadata attached to the tag""" self._tagger = None self._tag_time = None self._tag_timezone = None self._tag_timezone_neg_utc = False for field, value in _parse_message(chunks): if field == _OBJECT_HEADER: self._object_sha = value elif field == _TYPE_HEADER: obj_class = object_class(value) if not obj_class: raise ObjectFormatException("Not a known type: %s" % value) self._object_class = obj_class elif field == _TAG_HEADER: self._name = value elif field == _TAGGER_HEADER: try: sep = value.index(b'> ') except ValueError: self._tagger = value self._tag_time = None self._tag_timezone = None self._tag_timezone_neg_utc = False else: self._tagger = value[0:sep+1] try: (timetext, timezonetext) = value[sep+2:].rsplit(b' ', 1) self._tag_time = int(timetext) self._tag_timezone, self._tag_timezone_neg_utc = \ parse_timezone(timezonetext) except ValueError as e: raise ObjectFormatException(e) elif field is None: self._message = value else: raise ObjectFormatException("Unknown field %s" % field) def _get_object(self): """Get the object pointed to by this tag. :return: tuple of (object class, sha). 
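To make the header parsing in _parse_message concrete, here is a small round-trip; _parse_message is module-private, so this is illustrative rather than a supported API, and the header names are invented:

    from dulwich.objects import _parse_message

    raw = [b'tree abc\n', b'note first line\n second line\n', b'\n', b'body\n']
    print(list(_parse_message(raw)))
    # [(b'tree', b'abc'), (b'note', b'first line\nsecond line'), (None, b'body\n')]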
""" return (self._object_class, self._object_sha) def _set_object(self, value): (self._object_class, self._object_sha) = value self._needs_serialization = True object = property(_get_object, _set_object) name = serializable_property("name", "The name of this tag") tagger = serializable_property("tagger", "Returns the name of the person who created this tag") tag_time = serializable_property("tag_time", "The creation timestamp of the tag. As the number of seconds " "since the epoch") tag_timezone = serializable_property("tag_timezone", "The timezone that tag_time is in.") message = serializable_property( "message", "The message attached to this tag") class TreeEntry(namedtuple('TreeEntry', ['path', 'mode', 'sha'])): """Named tuple encapsulating a single tree entry.""" def in_path(self, path): """Return a copy of this entry with the given path prepended.""" if not isinstance(self.path, bytes): raise TypeError('Expected bytes for path, got %r' % path) return TreeEntry(posixpath.join(path, self.path), self.mode, self.sha) def parse_tree(text, strict=False): """Parse a tree text. :param text: Serialized text to parse :return: iterator of tuples of (name, mode, sha) :raise ObjectFormatException: if the object was malformed in some way """ count = 0 l = len(text) while count < l: mode_end = text.index(b' ', count) mode_text = text[count:mode_end] if strict and mode_text.startswith(b'0'): raise ObjectFormatException("Invalid mode '%s'" % mode_text) try: mode = int(mode_text, 8) except ValueError: raise ObjectFormatException("Invalid mode '%s'" % mode_text) name_end = text.index(b'\0', mode_end) name = text[mode_end+1:name_end] count = name_end+21 sha = text[name_end+1:count] if len(sha) != 20: raise ObjectFormatException("Sha has invalid length") hexsha = sha_to_hex(sha) yield (name, mode, hexsha) def serialize_tree(items): """Serialize the items in a tree to a text. :param items: Sorted iterable over (name, mode, sha) tuples :return: Serialized tree text as chunks """ for name, mode, hexsha in items: yield ("%04o" % mode).encode('ascii') + b' ' + name + b'\0' + hex_to_sha(hexsha) def sorted_tree_items(entries, name_order): """Iterate over a tree entries dictionary. :param name_order: If True, iterate entries in order of their name. If False, iterate entries in tree order, that is, treat subtree entries as having '/' appended. :param entries: Dictionary mapping names to (mode, sha) tuples :return: Iterator over (name, mode, hexsha) """ key_func = name_order and key_entry_name_order or key_entry for name, entry in sorted(entries.items(), key=key_func): mode, hexsha = entry # Stricter type checks than normal to mirror checks in the C version. mode = int(mode) if not isinstance(hexsha, bytes): raise TypeError('Expected bytes for SHA, got %r' % hexsha) yield TreeEntry(name, mode, hexsha) def key_entry(entry): """Sort key for tree entry. :param entry: (name, value) tuplee """ (name, value) = entry if stat.S_ISDIR(value[0]): name += b'/' return name def key_entry_name_order(entry): """Sort key for tree entry in name order.""" return entry[0] def pretty_format_tree_entry(name, mode, hexsha, encoding="utf-8"): """Pretty format tree entry. 
:param name: Name of the directory entry :param mode: Mode of entry :param hexsha: Hexsha of the referenced object :return: string describing the tree entry """ if mode & stat.S_IFDIR: kind = "tree" else: kind = "blob" return "%04o %s %s\t%s\n" % ( mode, kind, hexsha.decode('ascii'), name.decode(encoding, 'replace')) class Tree(ShaFile): """A Git tree object""" type_name = b'tree' type_num = 2 __slots__ = ('_entries',) def __init__(self): super(Tree, self).__init__() self._entries = {} @classmethod def from_path(cls, filename): tree = ShaFile.from_path(filename) if not isinstance(tree, cls): raise NotTreeError(filename) return tree def __contains__(self, name): return name in self._entries def __getitem__(self, name): return self._entries[name] def __setitem__(self, name, value): """Set a tree entry by name. :param name: The name of the entry, as a string. :param value: A tuple of (mode, hexsha), where mode is the mode of the entry as an integral type and hexsha is the hex SHA of the entry as a string. """ mode, hexsha = value self._entries[name] = (mode, hexsha) self._needs_serialization = True def __delitem__(self, name): del self._entries[name] self._needs_serialization = True def __len__(self): return len(self._entries) def __iter__(self): return iter(self._entries) def add(self, name, mode, hexsha): """Add an entry to the tree. :param mode: The mode of the entry as an integral type. Not all possible modes are supported by git; see check() for details. :param name: The name of the entry, as a string. :param hexsha: The hex SHA of the entry as a string. """ if isinstance(name, int) and isinstance(mode, bytes): (name, mode) = (mode, name) warnings.warn( "Please use Tree.add(name, mode, hexsha)", category=DeprecationWarning, stacklevel=2) self._entries[name] = mode, hexsha self._needs_serialization = True def iteritems(self, name_order=False): """Iterate over entries. :param name_order: If True, iterate in name order instead of tree order. :return: Iterator over (name, mode, sha) tuples """ return sorted_tree_items(self._entries, name_order) def items(self): """Return the sorted entries in this tree. :return: List with (name, mode, sha) tuples """ return list(self.iteritems()) def _deserialize(self, chunks): """Grab the entries in the tree""" try: parsed_entries = parse_tree(b''.join(chunks)) except ValueError as e: raise ObjectFormatException(e) # TODO: list comprehension is for efficiency in the common (small) # case; if memory efficiency in the large case is a concern, use a genexp. self._entries = dict([(n, (m, s)) for n, m, s in parsed_entries]) def check(self): """Check this object for internal consistency.
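The '/' suffix that key_entry appends to directory names is what makes tree order differ from plain name order when names share a prefix; through the Tree class above, with dummy shas:

    from dulwich.objects import Tree

    t = Tree()
    t.add(b'a', 0o40000, b'0' * 40)       # subtree entry
    t.add(b'a.c', 0o100644, b'1' * 40)
    t.add(b'a.py', 0o100644, b'2' * 40)
    print([e.path for e in t.iteritems()])                  # [b'a.c', b'a.py', b'a']
    print([e.path for e in t.iteritems(name_order=True)])   # [b'a', b'a.c', b'a.py']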
:raise ObjectFormatException: if the object is malformed in some way """ super(Tree, self).check() last = None allowed_modes = (stat.S_IFREG | 0o755, stat.S_IFREG | 0o644, stat.S_IFLNK, stat.S_IFDIR, S_IFGITLINK, # TODO: optionally exclude as in git fsck --strict stat.S_IFREG | 0o664) for name, mode, sha in parse_tree(b''.join(self._chunked_text), True): check_hexsha(sha, 'invalid sha %s' % sha) if b'/' in name or name in (b'', b'.', b'..'): raise ObjectFormatException('invalid name %s' % name) if mode not in allowed_modes: raise ObjectFormatException('invalid mode %06o' % mode) entry = (name, (mode, sha)) if last: if key_entry(last) > key_entry(entry): raise ObjectFormatException('entries not sorted') if name == last[0]: raise ObjectFormatException('duplicate entry %s' % name) last = entry def _serialize(self): return list(serialize_tree(self.iteritems())) def as_pretty_string(self): text = [] for name, mode, hexsha in self.iteritems(): text.append(pretty_format_tree_entry(name, mode, hexsha)) return "".join(text) def lookup_path(self, lookup_obj, path): """Look up an object in a Git tree. :param lookup_obj: Callback for retrieving object by SHA1 :param path: Path to lookup :return: A tuple of (mode, SHA) of the resulting path. """ parts = path.split(b'/') sha = self.id mode = None for p in parts: if not p: continue obj = lookup_obj(sha) if not isinstance(obj, Tree): raise NotTreeError(sha) mode, sha = obj[p] return mode, sha def parse_timezone(text): """Parse a timezone text fragment (e.g. '+0100'). :param text: Text to parse. :return: Tuple with timezone as seconds difference to UTC and a boolean indicating whether this was a UTC timezone prefixed with a negative sign (-0000). """ # cgit parses the first character as the sign, and the rest # as an integer (using strtol), which could also be negative. # We do the same for compatibility. See #697828. if text[0] not in b'+-': raise ValueError("Timezone must start with + or - (%(text)s)" % vars()) sign = text[:1] offset = int(text[1:]) if sign == b'-': offset = -offset unnecessary_negative_timezone = (offset >= 0 and sign == b'-') signum = (offset < 0) and -1 or 1 offset = abs(offset) hours = offset // 100 minutes = offset % 100 return (signum * (hours * 3600 + minutes * 60), unnecessary_negative_timezone) def format_timezone(offset, unnecessary_negative_timezone=False): """Format a timezone for Git serialization. :param offset: Timezone offset as seconds difference to UTC :param unnecessary_negative_timezone: Whether to use a minus sign for UTC or positive timezones (-0000 and -0700 rather than +0000 / +0700). """ if offset % 60 != 0: raise ValueError("Unable to handle non-minute offset.") if offset < 0 or unnecessary_negative_timezone: sign = '-' offset = -offset else: sign = '+' return ('%c%02d%02d' % (sign, offset // 3600, (offset // 60) % 60)).encode('ascii') def parse_commit(chunks): """Parse a commit object from chunks.
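parse_timezone and format_timezone round-trip, with the boolean preserving the -0000 quirk described above:

    from dulwich.objects import format_timezone, parse_timezone

    offset, neg_utc = parse_timezone(b'-0500')
    assert (offset, neg_utc) == (-18000, False)
    assert format_timezone(offset) == b'-0500'
    # A negative-zero timezone survives via the extra flag:
    assert parse_timezone(b'-0000') == (0, True)
    assert format_timezone(0, True) == b'-0000'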
:param chunks: Chunks to parse :return: Tuple of (tree, parents, author_info, commit_info, encoding, mergetag, gpgsig, message, extra) """ parents = [] extra = [] tree = None author_info = (None, None, (None, None)) commit_info = (None, None, (None, None)) encoding = None mergetag = [] message = None gpgsig = None for field, value in _parse_message(chunks): # TODO(jelmer): Enforce ordering if field == _TREE_HEADER: tree = value elif field == _PARENT_HEADER: parents.append(value) elif field == _AUTHOR_HEADER: author, timetext, timezonetext = value.rsplit(b' ', 2) author_time = int(timetext) author_info = (author, author_time, parse_timezone(timezonetext)) elif field == _COMMITTER_HEADER: committer, timetext, timezonetext = value.rsplit(b' ', 2) commit_time = int(timetext) commit_info = (committer, commit_time, parse_timezone(timezonetext)) elif field == _ENCODING_HEADER: encoding = value elif field == _MERGETAG_HEADER: mergetag.append(Tag.from_string(value + b'\n')) elif field == _GPGSIG_HEADER: gpgsig = value elif field is None: message = value else: extra.append((field, value)) return (tree, parents, author_info, commit_info, encoding, mergetag, gpgsig, message, extra) class Commit(ShaFile): """A git commit object""" type_name = b'commit' type_num = 1 __slots__ = ('_parents', '_encoding', '_extra', '_author_timezone_neg_utc', '_commit_timezone_neg_utc', '_commit_time', '_author_time', '_author_timezone', '_commit_timezone', - '_author', '_committer', '_parents', '_extra', - '_encoding', '_tree', '_message', '_mergetag', '_gpgsig') + '_author', '_committer', '_tree', '_message', + '_mergetag', '_gpgsig') def __init__(self): super(Commit, self).__init__() self._parents = [] self._encoding = None self._mergetag = [] self._gpgsig = None self._extra = [] self._author_timezone_neg_utc = False self._commit_timezone_neg_utc = False @classmethod def from_path(cls, path): commit = ShaFile.from_path(path) if not isinstance(commit, cls): raise NotCommitError(path) return commit def _deserialize(self, chunks): (self._tree, self._parents, author_info, commit_info, self._encoding, self._mergetag, self._gpgsig, self._message, self._extra) = ( parse_commit(chunks)) (self._author, self._author_time, (self._author_timezone, self._author_timezone_neg_utc)) = author_info (self._committer, self._commit_time, (self._commit_timezone, self._commit_timezone_neg_utc)) = commit_info def check(self): """Check this object for internal consistency. 
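parse_commit is the read side; writing goes through the attribute properties further down. A minimal commit built by hand (identity and timestamps invented):

    from dulwich.objects import Commit, Tree

    tree = Tree()   # empty tree
    commit = Commit()
    commit.tree = tree.id
    commit.author = commit.committer = b'Jane Doe <jane@example.com>'
    commit.author_time = commit.commit_time = 1490000000
    commit.author_timezone = commit.commit_timezone = 0
    commit.message = b'Initial commit\n'
    print(commit.id)   # deterministic hex sha for this content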
:raise ObjectFormatException: if the object is malformed in some way """ super(Commit, self).check() self._check_has_member("_tree", "missing tree") self._check_has_member("_author", "missing author") self._check_has_member("_committer", "missing committer") # times are currently checked when set for parent in self._parents: check_hexsha(parent, "invalid parent sha") check_hexsha(self._tree, "invalid tree sha") check_identity(self._author, "invalid author") check_identity(self._committer, "invalid committer") last = None for field, _ in _parse_message(self._chunked_text): if field == _TREE_HEADER and last is not None: raise ObjectFormatException("unexpected tree") elif field == _PARENT_HEADER and last not in (_PARENT_HEADER, _TREE_HEADER): raise ObjectFormatException("unexpected parent") elif field == _AUTHOR_HEADER and last not in (_TREE_HEADER, _PARENT_HEADER): raise ObjectFormatException("unexpected author") elif field == _COMMITTER_HEADER and last != _AUTHOR_HEADER: raise ObjectFormatException("unexpected committer") elif field == _ENCODING_HEADER and last != _COMMITTER_HEADER: raise ObjectFormatException("unexpected encoding") last = field # TODO: optionally check for duplicate parents def _serialize(self): chunks = [] tree_bytes = self._tree.id if isinstance(self._tree, Tree) else self._tree chunks.append(git_line(_TREE_HEADER, tree_bytes)) for p in self._parents: chunks.append(git_line(_PARENT_HEADER, p)) chunks.append(git_line( _AUTHOR_HEADER, self._author, str(self._author_time).encode('ascii'), format_timezone(self._author_timezone, self._author_timezone_neg_utc))) chunks.append(git_line( _COMMITTER_HEADER, self._committer, str(self._commit_time).encode('ascii'), format_timezone(self._commit_timezone, self._commit_timezone_neg_utc))) if self.encoding: chunks.append(git_line(_ENCODING_HEADER, self.encoding)) for mergetag in self.mergetag: mergetag_chunks = mergetag.as_raw_string().split(b'\n') chunks.append(git_line(_MERGETAG_HEADER, mergetag_chunks[0])) # Embedded extra header needs leading space for chunk in mergetag_chunks[1:]: chunks.append(b' ' + chunk + b'\n') # No trailing empty line if chunks[-1].endswith(b' \n'): chunks[-1] = chunks[-1][:-2] for k, v in self.extra: if b'\n' in k or b'\n' in v: raise AssertionError( "newline in extra data: %r -> %r" % (k, v)) chunks.append(git_line(k, v)) if self.gpgsig: sig_chunks = self.gpgsig.split(b'\n') chunks.append(git_line(_GPGSIG_HEADER, sig_chunks[0])) for chunk in sig_chunks[1:]: chunks.append(git_line(b'', chunk)) chunks.append(b'\n') # There must be a new line after the headers chunks.append(self._message) return chunks tree = serializable_property( "tree", "Tree that is the state of this commit") def _get_parents(self): """Return a list of parents of this commit.""" return self._parents def _set_parents(self, value): """Set a list of parents of this commit.""" self._needs_serialization = True self._parents = value parents = property(_get_parents, _set_parents, doc="Parents of this commit, by their SHA1.") def _get_extra(self): """Return extra settings of this commit.""" return self._extra extra = property(_get_extra, doc="Extra header fields not understood (presumably added in a " "newer version of git). Kept verbatim so the object can " "be correctly reserialized. 
For private commit metadata, use " "pseudo-headers in Commit.message, rather than this field.") author = serializable_property("author", "The name of the author of the commit") committer = serializable_property("committer", "The name of the committer of the commit") message = serializable_property( "message", "The commit message") commit_time = serializable_property("commit_time", "The timestamp of the commit. As the number of seconds since the epoch.") commit_timezone = serializable_property("commit_timezone", "The zone the commit time is in") author_time = serializable_property("author_time", "The timestamp the commit was written. As the number of " "seconds since the epoch.") author_timezone = serializable_property( "author_timezone", "Returns the zone the author time is in.") encoding = serializable_property( "encoding", "Encoding of the commit message.") mergetag = serializable_property( "mergetag", "Associated signed tag.") gpgsig = serializable_property( "gpgsig", "GPG Signature.") OBJECT_CLASSES = ( Commit, Tree, Blob, Tag, ) _TYPE_MAP = {} for cls in OBJECT_CLASSES: _TYPE_MAP[cls.type_name] = cls _TYPE_MAP[cls.type_num] = cls # Hold on to the pure-python implementations for testing _parse_tree_py = parse_tree _sorted_tree_items_py = sorted_tree_items try: # Try to import C versions from dulwich._objects import parse_tree, sorted_tree_items except ImportError: pass diff --git a/dulwich/tests/test_config.py b/dulwich/tests/test_config.py index c60e1f7b..1aba056d 100644 --- a/dulwich/tests/test_config.py +++ b/dulwich/tests/test_config.py @@ -1,319 +1,326 @@ # test_config.py -- Tests for reading and writing configuration files # Copyright (C) 2011 Jelmer Vernooij # # Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU # General Public License as public by the Free Software Foundation; version 2.0 # or (at your option) any later version. You can redistribute it and/or # modify it under the terms of either of these two licenses. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # You should have received a copy of the licenses; if not, see # for a copy of the GNU General Public License # and for a copy of the Apache # License, Version 2.0. 
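_TYPE_MAP above backs object_class(), which ShaFile.from_raw_string uses for dispatch; type numbers 1 through 4 are commit, tree, blob and tag:

    from dulwich.objects import Blob, ShaFile

    obj = ShaFile.from_raw_string(3, b'test 1\n')   # 3 == Blob.type_num
    assert isinstance(obj, Blob)
    assert obj.id == b'6f670c0fb53f9463760b7295fbb814e965fb20c8'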
# """Tests for reading and writing configuration files.""" from io import BytesIO import os from dulwich.config import ( ConfigDict, ConfigFile, StackedConfig, _check_section_name, _check_variable_name, _format_string, _escape_value, _parse_string, parse_submodules, ) from dulwich.tests import ( TestCase, ) class ConfigFileTests(TestCase): def from_file(self, text): return ConfigFile.from_file(BytesIO(text)) def test_empty(self): ConfigFile() def test_eq(self): self.assertEqual(ConfigFile(), ConfigFile()) def test_default_config(self): cf = self.from_file(b"""[core] repositoryformatversion = 0 filemode = true bare = false logallrefupdates = true """) self.assertEqual(ConfigFile({(b"core", ): { b"repositoryformatversion": b"0", b"filemode": b"true", b"bare": b"false", b"logallrefupdates": b"true"}}), cf) def test_from_file_empty(self): cf = self.from_file(b"") self.assertEqual(ConfigFile(), cf) def test_empty_line_before_section(self): cf = self.from_file(b"\n[section]\n") self.assertEqual(ConfigFile({(b"section", ): {}}), cf) def test_comment_before_section(self): cf = self.from_file(b"# foo\n[section]\n") self.assertEqual(ConfigFile({(b"section", ): {}}), cf) def test_comment_after_section(self): cf = self.from_file(b"[section] # foo\n") self.assertEqual(ConfigFile({(b"section", ): {}}), cf) def test_comment_after_variable(self): cf = self.from_file(b"[section]\nbar= foo # a comment\n") self.assertEqual(ConfigFile({(b"section", ): {b"bar": b"foo"}}), cf) def test_from_file_section(self): cf = self.from_file(b"[core]\nfoo = bar\n") self.assertEqual(b"bar", cf.get((b"core", ), b"foo")) self.assertEqual(b"bar", cf.get((b"core", b"foo"), b"foo")) def test_from_file_section_case_insensitive(self): cf = self.from_file(b"[cOre]\nfOo = bar\n") self.assertEqual(b"bar", cf.get((b"core", ), b"foo")) self.assertEqual(b"bar", cf.get((b"core", b"foo"), b"foo")) def test_from_file_with_mixed_quoted(self): cf = self.from_file(b"[core]\nfoo = \"bar\"la\n") self.assertEqual(b"barla", cf.get((b"core", ), b"foo")) def test_from_file_with_open_quoted(self): self.assertRaises(ValueError, self.from_file, b"[core]\nfoo = \"bar\n") def test_from_file_with_quotes(self): cf = self.from_file( b"[core]\n" b'foo = " bar"\n') self.assertEqual(b" bar", cf.get((b"core", ), b"foo")) def test_from_file_with_interrupted_line(self): cf = self.from_file( b"[core]\n" b'foo = bar\\\n' b' la\n') self.assertEqual(b"barla", cf.get((b"core", ), b"foo")) def test_from_file_with_boolean_setting(self): cf = self.from_file( b"[core]\n" b'foo\n') self.assertEqual(b"true", cf.get((b"core", ), b"foo")) def test_from_file_subsection(self): cf = self.from_file(b"[branch \"foo\"]\nfoo = bar\n") self.assertEqual(b"bar", cf.get((b"branch", b"foo"), b"foo")) def test_from_file_subsection_invalid(self): self.assertRaises(ValueError, self.from_file, b"[branch \"foo]\nfoo = bar\n") def test_from_file_subsection_not_quoted(self): cf = self.from_file(b"[branch.foo]\nfoo = bar\n") self.assertEqual(b"bar", cf.get((b"branch", b"foo"), b"foo")) def test_write_to_file_empty(self): c = ConfigFile() f = BytesIO() c.write_to_file(f) self.assertEqual(b"", f.getvalue()) def test_write_to_file_section(self): c = ConfigFile() c.set((b"core", ), b"foo", b"bar") f = BytesIO() c.write_to_file(f) self.assertEqual(b"[core]\n\tfoo = bar\n", f.getvalue()) def test_write_to_file_subsection(self): c = ConfigFile() c.set((b"branch", b"blie"), b"foo", b"bar") f = BytesIO() c.write_to_file(f) self.assertEqual(b"[branch \"blie\"]\n\tfoo = bar\n", f.getvalue()) def 
test_same_line(self): cf = self.from_file(b"[branch.foo] foo = bar\n") self.assertEqual(b"bar", cf.get((b"branch", b"foo"), b"foo")) def test_quoted(self): cf = self.from_file(b"""[gui] fontdiff = -family \\\"Ubuntu Mono\\\" -size 11 -weight normal -slant roman -underline 0 -overstrike 0 """) self.assertEqual(ConfigFile({(b'gui', ): { b'fontdiff': b'-family "Ubuntu Mono" -size 11 -weight normal -slant roman -underline 0 -overstrike 0', }}), cf) def test_quoted_multiline(self): cf = self.from_file(b"""[alias] who = \"!who() {\\ git log --no-merges --pretty=format:'%an - %ae' $@ | sort | uniq -c | sort -rn;\\ };\\ who\" """) self.assertEqual(ConfigFile({(b'alias', ): { b'who': b"!who() {git log --no-merges --pretty=format:'%an - %ae' $@ | sort | uniq -c | sort -rn;};who"}}), cf) + def test_set_hash_gets_quoted(self): + c = ConfigFile() + c.set(b"xandikos", b"color", b"#665544") + f = BytesIO() + c.write_to_file(f) + self.assertEqual(b"[xandikos]\n\tcolor = \"#665544\"\n", f.getvalue()) + class ConfigDictTests(TestCase): def test_get_set(self): cd = ConfigDict() self.assertRaises(KeyError, cd.get, b"foo", b"core") cd.set((b"core", ), b"foo", b"bla") self.assertEqual(b"bla", cd.get((b"core", ), b"foo")) cd.set((b"core", ), b"foo", b"bloe") self.assertEqual(b"bloe", cd.get((b"core", ), b"foo")) def test_get_boolean(self): cd = ConfigDict() cd.set((b"core", ), b"foo", b"true") self.assertTrue(cd.get_boolean((b"core", ), b"foo")) cd.set((b"core", ), b"foo", b"false") self.assertFalse(cd.get_boolean((b"core", ), b"foo")) cd.set((b"core", ), b"foo", b"invalid") self.assertRaises(ValueError, cd.get_boolean, (b"core", ), b"foo") def test_dict(self): cd = ConfigDict() cd.set((b"core", ), b"foo", b"bla") cd.set((b"core2", ), b"foo", b"bloe") self.assertEqual([(b"core", ), (b"core2", )], list(cd.keys())) self.assertEqual(cd[(b"core", )], {b'foo': b'bla'}) cd[b'a'] = b'b' self.assertEqual(cd[b'a'], b'b') def test_iteritems(self): cd = ConfigDict() cd.set((b"core", ), b"foo", b"bla") cd.set((b"core2", ), b"foo", b"bloe") self.assertEqual( [(b'foo', b'bla')], list(cd.iteritems((b"core", )))) def test_iteritems_nonexistant(self): cd = ConfigDict() cd.set((b"core2", ), b"foo", b"bloe") self.assertEqual([], list(cd.iteritems((b"core", )))) def test_itersections(self): cd = ConfigDict() cd.set((b"core2", ), b"foo", b"bloe") self.assertEqual([(b"core2", )], list(cd.itersections())) class StackedConfigTests(TestCase): def test_default_backends(self): StackedConfig.default_backends() class EscapeValueTests(TestCase): def test_nothing(self): self.assertEqual(b"foo", _escape_value(b"foo")) def test_backslash(self): self.assertEqual(b"foo\\\\", _escape_value(b"foo\\")) def test_newline(self): self.assertEqual(b"foo\\n", _escape_value(b"foo\n")) class FormatStringTests(TestCase): def test_quoted(self): self.assertEqual(b'" foo"', _format_string(b" foo")) self.assertEqual(b'"\\tfoo"', _format_string(b"\tfoo")) def test_not_quoted(self): self.assertEqual(b'foo', _format_string(b"foo")) self.assertEqual(b'foo bar', _format_string(b"foo bar")) class ParseStringTests(TestCase): def test_quoted(self): self.assertEqual(b' foo', _parse_string(b'" foo"')) self.assertEqual(b'\tfoo', _parse_string(b'"\\tfoo"')) def test_not_quoted(self): self.assertEqual(b'foo', _parse_string(b"foo")) self.assertEqual(b'foo bar', _parse_string(b"foo bar")) def test_nothing(self): self.assertEqual(b"", _parse_string(b'')) def test_tab(self): self.assertEqual(b"\tbar\t", _parse_string(b"\\tbar\\t")) def test_newline(self): 
self.assertEqual(b"\nbar\t", _parse_string(b"\\nbar\\t\t")) def test_quote(self): self.assertEqual(b"\"foo\"", _parse_string(b"\\\"foo\\\"")) class CheckVariableNameTests(TestCase): def test_invalid(self): self.assertFalse(_check_variable_name(b"foo ")) self.assertFalse(_check_variable_name(b"bar,bar")) self.assertFalse(_check_variable_name(b"bar.bar")) def test_valid(self): self.assertTrue(_check_variable_name(b"FOO")) self.assertTrue(_check_variable_name(b"foo")) self.assertTrue(_check_variable_name(b"foo-bar")) class CheckSectionNameTests(TestCase): def test_invalid(self): self.assertFalse(_check_section_name(b"foo ")) self.assertFalse(_check_section_name(b"bar,bar")) def test_valid(self): self.assertTrue(_check_section_name(b"FOO")) self.assertTrue(_check_section_name(b"foo")) self.assertTrue(_check_section_name(b"foo-bar")) self.assertTrue(_check_section_name(b"bar.bar")) class SubmodulesTests(TestCase): def testSubmodules(self): cf = ConfigFile.from_file(BytesIO(b"""\ [submodule "core/lib"] path = core/lib url = https://github.com/phhusson/QuasselC.git """)) got = list(parse_submodules(cf)) self.assertEqual([ (b'core/lib', b'https://github.com/phhusson/QuasselC.git', b'core/lib')], got) diff --git a/dulwich/tests/test_pack.py b/dulwich/tests/test_pack.py index 84401ac4..3cf060ae 100644 --- a/dulwich/tests/test_pack.py +++ b/dulwich/tests/test_pack.py @@ -1,1056 +1,1086 @@ # test_pack.py -- Tests for the handling of git packs. # Copyright (C) 2007 James Westby # Copyright (C) 2008 Jelmer Vernooij # # Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU # General Public License as public by the Free Software Foundation; version 2.0 # or (at your option) any later version. You can redistribute it and/or # modify it under the terms of either of these two licenses. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # You should have received a copy of the licenses; if not, see # for a copy of the GNU General Public License # and for a copy of the Apache # License, Version 2.0. 
# """Tests for Dulwich packs.""" from io import BytesIO from hashlib import sha1 import os import shutil import tempfile import zlib from dulwich.errors import ( ApplyDeltaError, ChecksumMismatch, ) from dulwich.file import ( GitFile, ) from dulwich.object_store import ( MemoryObjectStore, ) from dulwich.objects import ( hex_to_sha, sha_to_hex, Commit, Tree, Blob, ) from dulwich.pack import ( OFS_DELTA, REF_DELTA, MemoryPackIndex, Pack, PackData, apply_delta, create_delta, deltify_pack_objects, load_pack_index, UnpackedObject, read_zlib_chunks, write_pack_header, write_pack_index_v1, write_pack_index_v2, write_pack_object, write_pack, unpack_object, compute_file_sha, PackStreamReader, DeltaChainIterator, _delta_encode_size, _encode_copy_operation, ) from dulwich.tests import ( TestCase, ) from dulwich.tests.utils import ( make_object, build_pack, ) pack1_sha = b'bc63ddad95e7321ee734ea11a7a62d314e0d7481' a_sha = b'6f670c0fb53f9463760b7295fbb814e965fb20c8' tree_sha = b'b2a2766a2879c209ab1176e7e778b81ae422eeaa' commit_sha = b'f18faa16531ac570a3fdc8c7ca16682548dafd12' class PackTests(TestCase): """Base class for testing packs""" def setUp(self): super(PackTests, self).setUp() self.tempdir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tempdir) datadir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data/packs')) def get_pack_index(self, sha): """Returns a PackIndex from the datadir with the given sha""" return load_pack_index(os.path.join(self.datadir, 'pack-%s.idx' % sha.decode('ascii'))) def get_pack_data(self, sha): """Returns a PackData object from the datadir with the given sha""" return PackData(os.path.join(self.datadir, 'pack-%s.pack' % sha.decode('ascii'))) def get_pack(self, sha): return Pack(os.path.join(self.datadir, 'pack-%s' % sha.decode('ascii'))) def assertSucceeds(self, func, *args, **kwargs): try: func(*args, **kwargs) except ChecksumMismatch as e: self.fail(e) class PackIndexTests(PackTests): """Class that tests the index of packfiles""" def test_object_index(self): """Tests that the correct object offset is returned from the index.""" p = self.get_pack_index(pack1_sha) self.assertRaises(KeyError, p.object_index, pack1_sha) self.assertEqual(p.object_index(a_sha), 178) self.assertEqual(p.object_index(tree_sha), 138) self.assertEqual(p.object_index(commit_sha), 12) def test_index_len(self): p = self.get_pack_index(pack1_sha) self.assertEqual(3, len(p)) def test_get_stored_checksum(self): p = self.get_pack_index(pack1_sha) self.assertEqual(b'f2848e2ad16f329ae1c92e3b95e91888daa5bd01', sha_to_hex(p.get_stored_checksum())) self.assertEqual(b'721980e866af9a5f93ad674144e1459b8ba3e7b7', sha_to_hex(p.get_pack_checksum())) def test_index_check(self): p = self.get_pack_index(pack1_sha) self.assertSucceeds(p.check) def test_iterentries(self): p = self.get_pack_index(pack1_sha) entries = [(sha_to_hex(s), o, c) for s, o, c in p.iterentries()] self.assertEqual([ (b'6f670c0fb53f9463760b7295fbb814e965fb20c8', 178, None), (b'b2a2766a2879c209ab1176e7e778b81ae422eeaa', 138, None), (b'f18faa16531ac570a3fdc8c7ca16682548dafd12', 12, None) ], entries) def test_iter(self): p = self.get_pack_index(pack1_sha) self.assertEqual(set([tree_sha, commit_sha, a_sha]), set(p)) class TestPackDeltas(TestCase): test_string1 = b'The answer was flailing in the wind' test_string2 = b'The answer was falling down the pipe' test_string3 = b'zzzzz' test_string_empty = b'' test_string_big = b'Z' * 8192 test_string_huge = b'Z' * 100000 def _test_roundtrip(self, base, target): self.assertEqual(target, 
b''.join(apply_delta(base, create_delta(base, target)))) def test_nochange(self): self._test_roundtrip(self.test_string1, self.test_string1) def test_nochange_huge(self): self._test_roundtrip(self.test_string_huge, self.test_string_huge) def test_change(self): self._test_roundtrip(self.test_string1, self.test_string2) def test_rewrite(self): self._test_roundtrip(self.test_string1, self.test_string3) def test_empty_to_big(self): self._test_roundtrip(self.test_string_empty, self.test_string_big) def test_empty_to_huge(self): self._test_roundtrip(self.test_string_empty, self.test_string_huge) def test_huge_copy(self): self._test_roundtrip(self.test_string_huge + self.test_string1, self.test_string_huge + self.test_string2) def test_dest_overflow(self): self.assertRaises( ApplyDeltaError, apply_delta, b'a'*0x10000, b'\x80\x80\x04\x80\x80\x04\x80' + b'a'*0x10000) self.assertRaises( ApplyDeltaError, apply_delta, b'', b'\x00\x80\x02\xb0\x11\x11') + def test_pypy_issue(self): + # Test for https://github.com/jelmer/dulwich/issues/509 / + # https://bitbucket.org/pypy/pypy/issues/2499/cpyext-pystring_asstring-doesnt-work + chunks = [ + b'tree 03207ccf58880a748188836155ceed72f03d65d6\n' + b'parent 408fbab530fd4abe49249a636a10f10f44d07a21\n' + b'author Victor Stinner 1421355207 +0100\n' + b'committer Victor Stinner 1421355207 +0100\n' + b'\n' + b'Backout changeset 3a06020af8cf\n' + b'\nStreamWriter: close() now clears the reference to the transport\n' + b'\nStreamWriter now raises an exception if it is closed: write(), writelines(),\n' + b'write_eof(), can_write_eof(), get_extra_info(), drain().\n'] + delta = [ + b'\xcd\x03\xad\x03]tree ff3c181a393d5a7270cddc01ea863818a8621ca8\n' + b'parent 20a103cc90135494162e819f98d0edfc1f1fba6b\x91]7\x0510738' + b'\x91\x99@\x0b10738 +0100\x93\x04\x01\xc9'] + res = apply_delta(chunks, delta) + expected = [ + b'tree ff3c181a393d5a7270cddc01ea863818a8621ca8\n' + b'parent 20a103cc90135494162e819f98d0edfc1f1fba6b', + b'\nauthor Victor Stinner 14213', + b'10738', + b' +0100\ncommitter Victor Stinner 14213', + b'10738 +0100', + b'\n\nStreamWriter: close() now clears the reference to the transport\n\n' + b'StreamWriter now raises an exception if it is closed: write(), writelines(),\n' + b'write_eof(), can_write_eof(), get_extra_info(), drain().\n'] + self.assertEqual(b''.join(expected), b''.join(res)) + class TestPackData(PackTests): """Tests getting the data from the packfile.""" def test_create_pack(self): self.get_pack_data(pack1_sha).close() def test_from_file(self): path = os.path.join(self.datadir, 'pack-%s.pack' % pack1_sha.decode('ascii')) with open(path, 'rb') as f: PackData.from_file(f, os.path.getsize(path)) def test_pack_len(self): with self.get_pack_data(pack1_sha) as p: self.assertEqual(3, len(p)) def test_index_check(self): with self.get_pack_data(pack1_sha) as p: self.assertSucceeds(p.check) def test_iterobjects(self): with self.get_pack_data(pack1_sha) as p: commit_data = (b'tree b2a2766a2879c209ab1176e7e778b81ae422eeaa\n' b'author James Westby ' b'1174945067 +0100\n' b'committer James Westby ' b'1174945067 +0100\n' b'\n' b'Test commit\n') blob_sha = b'6f670c0fb53f9463760b7295fbb814e965fb20c8' tree_data = b'100644 a\0' + hex_to_sha(blob_sha) actual = [] for offset, type_num, chunks, crc32 in p.iterobjects(): actual.append((offset, type_num, b''.join(chunks), crc32)) self.assertEqual([ (12, 1, commit_data, 3775879613), (138, 2, tree_data, 912998690), (178, 3, b'test 1\n', 1373561701) ], actual) def test_iterentries(self): with self.get_pack_data(pack1_sha) 
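The roundtrip helper in TestPackDeltas reduces to: create_delta emits a delta that apply_delta expands back, chunk-wise (and, per the new test_pypy_issue above, apply_delta also accepts the base as a list of chunks):

    from dulwich.pack import apply_delta, create_delta

    base = b'The answer was flailing in the wind'
    target = b'The answer was falling down the pipe'
    delta = create_delta(base, target)
    assert b''.join(apply_delta(base, delta)) == target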
as p: entries = set((sha_to_hex(s), o, c) for s, o, c in p.iterentries()) self.assertEqual(set([ (b'6f670c0fb53f9463760b7295fbb814e965fb20c8', 178, 1373561701), (b'b2a2766a2879c209ab1176e7e778b81ae422eeaa', 138, 912998690), (b'f18faa16531ac570a3fdc8c7ca16682548dafd12', 12, 3775879613), ]), entries) def test_create_index_v1(self): with self.get_pack_data(pack1_sha) as p: filename = os.path.join(self.tempdir, 'v1test.idx') p.create_index_v1(filename) idx1 = load_pack_index(filename) idx2 = self.get_pack_index(pack1_sha) self.assertEqual(idx1, idx2) def test_create_index_v2(self): with self.get_pack_data(pack1_sha) as p: filename = os.path.join(self.tempdir, 'v2test.idx') p.create_index_v2(filename) idx1 = load_pack_index(filename) idx2 = self.get_pack_index(pack1_sha) self.assertEqual(idx1, idx2) def test_compute_file_sha(self): f = BytesIO(b'abcd1234wxyz') self.assertEqual(sha1(b'abcd1234wxyz').hexdigest(), compute_file_sha(f).hexdigest()) self.assertEqual(sha1(b'abcd1234wxyz').hexdigest(), compute_file_sha(f, buffer_size=5).hexdigest()) self.assertEqual(sha1(b'abcd1234').hexdigest(), compute_file_sha(f, end_ofs=-4).hexdigest()) self.assertEqual(sha1(b'1234wxyz').hexdigest(), compute_file_sha(f, start_ofs=4).hexdigest()) self.assertEqual( sha1(b'1234').hexdigest(), compute_file_sha(f, start_ofs=4, end_ofs=-4).hexdigest()) def test_compute_file_sha_short_file(self): f = BytesIO(b'abcd1234wxyz') self.assertRaises(AssertionError, compute_file_sha, f, end_ofs=-20) self.assertRaises(AssertionError, compute_file_sha, f, end_ofs=20) self.assertRaises(AssertionError, compute_file_sha, f, start_ofs=10, end_ofs=-12) class TestPack(PackTests): def test_len(self): with self.get_pack(pack1_sha) as p: self.assertEqual(3, len(p)) def test_contains(self): with self.get_pack(pack1_sha) as p: self.assertTrue(tree_sha in p) def test_get(self): with self.get_pack(pack1_sha) as p: self.assertEqual(type(p[tree_sha]), Tree) def test_iter(self): with self.get_pack(pack1_sha) as p: self.assertEqual(set([tree_sha, commit_sha, a_sha]), set(p)) def test_iterobjects(self): with self.get_pack(pack1_sha) as p: expected = set([p[s] for s in [commit_sha, tree_sha, a_sha]]) self.assertEqual(expected, set(list(p.iterobjects()))) def test_pack_tuples(self): with self.get_pack(pack1_sha) as p: tuples = p.pack_tuples() expected = set([(p[s], None) for s in [commit_sha, tree_sha, a_sha]]) self.assertEqual(expected, set(list(tuples))) self.assertEqual(expected, set(list(tuples))) self.assertEqual(3, len(tuples)) def test_get_object_at(self): """Tests random access for non-delta objects""" with self.get_pack(pack1_sha) as p: obj = p[a_sha] self.assertEqual(obj.type_name, b'blob') self.assertEqual(obj.sha().hexdigest().encode('ascii'), a_sha) obj = p[tree_sha] self.assertEqual(obj.type_name, b'tree') self.assertEqual(obj.sha().hexdigest().encode('ascii'), tree_sha) obj = p[commit_sha] self.assertEqual(obj.type_name, b'commit') self.assertEqual(obj.sha().hexdigest().encode('ascii'), commit_sha) def test_copy(self): with self.get_pack(pack1_sha) as origpack: self.assertSucceeds(origpack.index.check) basename = os.path.join(self.tempdir, 'Elch') write_pack(basename, origpack.pack_tuples()) with Pack(basename) as newpack: self.assertEqual(origpack, newpack) self.assertSucceeds(newpack.index.check) self.assertEqual(origpack.name(), newpack.name()) self.assertEqual(origpack.index.get_pack_checksum(), newpack.index.get_pack_checksum()) wrong_version = origpack.index.version != newpack.index.version orig_checksum = 
origpack.index.get_stored_checksum() new_checksum = newpack.index.get_stored_checksum() self.assertTrue(wrong_version or orig_checksum == new_checksum) def test_commit_obj(self): with self.get_pack(pack1_sha) as p: commit = p[commit_sha] self.assertEqual(b'James Westby ', commit.author) self.assertEqual([], commit.parents) def _copy_pack(self, origpack): basename = os.path.join(self.tempdir, 'somepack') write_pack(basename, origpack.pack_tuples()) return Pack(basename) def test_keep_no_message(self): with self.get_pack(pack1_sha) as p: p = self._copy_pack(p) with p: keepfile_name = p.keep() # file should exist self.assertTrue(os.path.exists(keepfile_name)) with open(keepfile_name, 'r') as f: buf = f.read() self.assertEqual('', buf) def test_keep_message(self): with self.get_pack(pack1_sha) as p: p = self._copy_pack(p) msg = b'some message' with p: keepfile_name = p.keep(msg) # file should exist self.assertTrue(os.path.exists(keepfile_name)) # and contain the right message, with a linefeed with open(keepfile_name, 'rb') as f: buf = f.read() self.assertEqual(msg + b'\n', buf) def test_name(self): with self.get_pack(pack1_sha) as p: self.assertEqual(pack1_sha, p.name()) def test_length_mismatch(self): with self.get_pack_data(pack1_sha) as data: index = self.get_pack_index(pack1_sha) Pack.from_objects(data, index).check_length_and_checksum() data._file.seek(12) bad_file = BytesIO() write_pack_header(bad_file, 9999) bad_file.write(data._file.read()) bad_file = BytesIO(bad_file.getvalue()) bad_data = PackData('', file=bad_file) bad_pack = Pack.from_lazy_objects(lambda: bad_data, lambda: index) self.assertRaises(AssertionError, lambda: bad_pack.data) self.assertRaises(AssertionError, lambda: bad_pack.check_length_and_checksum()) def test_checksum_mismatch(self): with self.get_pack_data(pack1_sha) as data: index = self.get_pack_index(pack1_sha) Pack.from_objects(data, index).check_length_and_checksum() data._file.seek(0) bad_file = BytesIO(data._file.read()[:-20] + (b'\xff' * 20)) bad_data = PackData('', file=bad_file) bad_pack = Pack.from_lazy_objects(lambda: bad_data, lambda: index) self.assertRaises(ChecksumMismatch, lambda: bad_pack.data) self.assertRaises(ChecksumMismatch, lambda: bad_pack.check_length_and_checksum()) def test_iterobjects_2(self): with self.get_pack(pack1_sha) as p: objs = dict((o.id, o) for o in p.iterobjects()) self.assertEqual(3, len(objs)) self.assertEqual(sorted(objs), sorted(p.index)) self.assertTrue(isinstance(objs[a_sha], Blob)) self.assertTrue(isinstance(objs[tree_sha], Tree)) self.assertTrue(isinstance(objs[commit_sha], Commit)) class TestThinPack(PackTests): def setUp(self): super(TestThinPack, self).setUp() self.store = MemoryObjectStore() self.blobs = {} for blob in (b'foo', b'bar', b'foo1234', b'bar2468'): self.blobs[blob] = make_object(Blob, data=blob) self.store.add_object(self.blobs[b'foo']) self.store.add_object(self.blobs[b'bar']) # Build a thin pack. 'foo' is as an external reference, 'bar' an # internal reference. self.pack_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.pack_dir) self.pack_prefix = os.path.join(self.pack_dir, 'pack') with open(self.pack_prefix + '.pack', 'wb') as f: build_pack(f, [ (REF_DELTA, (self.blobs[b'foo'].id, b'foo1234')), (Blob.type_num, b'bar'), (REF_DELTA, (self.blobs[b'bar'].id, b'bar2468'))], store=self.store) # Index the new pack. 
with self.make_pack(True) as pack: with PackData(pack._data_path) as data: data.pack = pack data.create_index(self.pack_prefix + '.idx') del self.store[self.blobs[b'bar'].id] def make_pack(self, resolve_ext_ref): return Pack( self.pack_prefix, resolve_ext_ref=self.store.get_raw if resolve_ext_ref else None) def test_get_raw(self): with self.make_pack(False) as p: self.assertRaises( KeyError, p.get_raw, self.blobs[b'foo1234'].id) with self.make_pack(True) as p: self.assertEqual( (3, b'foo1234'), p.get_raw(self.blobs[b'foo1234'].id)) def test_iterobjects(self): with self.make_pack(False) as p: self.assertRaises(KeyError, list, p.iterobjects()) with self.make_pack(True) as p: self.assertEqual( sorted([self.blobs[b'foo1234'].id, self.blobs[b'bar'].id, self.blobs[b'bar2468'].id]), sorted(o.id for o in p.iterobjects())) class WritePackTests(TestCase): def test_write_pack_header(self): f = BytesIO() write_pack_header(f, 42) self.assertEqual(b'PACK\x00\x00\x00\x02\x00\x00\x00*', f.getvalue()) def test_write_pack_object(self): f = BytesIO() f.write(b'header') offset = f.tell() crc32 = write_pack_object(f, Blob.type_num, b'blob') self.assertEqual(crc32, zlib.crc32(f.getvalue()[6:]) & 0xffffffff) f.write(b'x') # unpack_object needs extra trailing data. f.seek(offset) unpacked, unused = unpack_object(f.read, compute_crc32=True) self.assertEqual(Blob.type_num, unpacked.pack_type_num) self.assertEqual(Blob.type_num, unpacked.obj_type_num) self.assertEqual([b'blob'], unpacked.decomp_chunks) self.assertEqual(crc32, unpacked.crc32) self.assertEqual(b'x', unused) def test_write_pack_object_sha(self): f = BytesIO() f.write(b'header') offset = f.tell() sha_a = sha1(b'foo') sha_b = sha_a.copy() write_pack_object(f, Blob.type_num, b'blob', sha=sha_a) self.assertNotEqual(sha_a.digest(), sha_b.digest()) sha_b.update(f.getvalue()[offset:]) self.assertEqual(sha_a.digest(), sha_b.digest()) pack_checksum = hex_to_sha('721980e866af9a5f93ad674144e1459b8ba3e7b7') class BaseTestPackIndexWriting(object): def assertSucceeds(self, func, *args, **kwargs): try: func(*args, **kwargs) except ChecksumMismatch as e: self.fail(e) def index(self, filename, entries, pack_checksum): raise NotImplementedError(self.index) def test_empty(self): idx = self.index('empty.idx', [], pack_checksum) self.assertEqual(idx.get_pack_checksum(), pack_checksum) self.assertEqual(0, len(idx)) def test_large(self): entry1_sha = hex_to_sha('4e6388232ec39792661e2e75db8fb117fc869ce6') entry2_sha = hex_to_sha('e98f071751bd77f59967bfa671cd2caebdccc9a2') entries = [(entry1_sha, 0xf2972d0830529b87, 24), (entry2_sha, (~0xf2972d0830529b87)&(2**64-1), 92)] if not self._supports_large: self.assertRaises(TypeError, self.index, 'single.idx', entries, pack_checksum) return idx = self.index('single.idx', entries, pack_checksum) self.assertEqual(idx.get_pack_checksum(), pack_checksum) self.assertEqual(2, len(idx)) actual_entries = list(idx.iterentries()) self.assertEqual(len(entries), len(actual_entries)) for mine, actual in zip(entries, actual_entries): my_sha, my_offset, my_crc = mine actual_sha, actual_offset, actual_crc = actual self.assertEqual(my_sha, actual_sha) self.assertEqual(my_offset, actual_offset) if self._has_crc32_checksum: self.assertEqual(my_crc, actual_crc) else: self.assertTrue(actual_crc is None) def test_single(self): entry_sha = hex_to_sha('6f670c0fb53f9463760b7295fbb814e965fb20c8') my_entries = [(entry_sha, 178, 42)] idx = self.index('single.idx', my_entries, pack_checksum) self.assertEqual(idx.get_pack_checksum(), pack_checksum) 
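write_pack_header, exercised in WritePackTests above, emits the fixed 12-byte pack preamble: the b'PACK' magic, format version 2, and the object count, all big-endian:

    from io import BytesIO
    from dulwich.pack import write_pack_header

    f = BytesIO()
    write_pack_header(f, 42)
    assert f.getvalue() == b'PACK\x00\x00\x00\x02\x00\x00\x00*'   # 42 == 0x2a == b'*'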
self.assertEqual(1, len(idx)) actual_entries = list(idx.iterentries()) self.assertEqual(len(my_entries), len(actual_entries)) for mine, actual in zip(my_entries, actual_entries): my_sha, my_offset, my_crc = mine actual_sha, actual_offset, actual_crc = actual self.assertEqual(my_sha, actual_sha) self.assertEqual(my_offset, actual_offset) if self._has_crc32_checksum: self.assertEqual(my_crc, actual_crc) else: self.assertTrue(actual_crc is None) class BaseTestFilePackIndexWriting(BaseTestPackIndexWriting): def setUp(self): self.tempdir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.tempdir) def index(self, filename, entries, pack_checksum): path = os.path.join(self.tempdir, filename) self.writeIndex(path, entries, pack_checksum) idx = load_pack_index(path) self.assertSucceeds(idx.check) self.assertEqual(idx.version, self._expected_version) return idx def writeIndex(self, filename, entries, pack_checksum): # FIXME: Write to BytesIO instead rather than hitting disk ? with GitFile(filename, "wb") as f: self._write_fn(f, entries, pack_checksum) class TestMemoryIndexWriting(TestCase, BaseTestPackIndexWriting): def setUp(self): TestCase.setUp(self) self._has_crc32_checksum = True self._supports_large = True def index(self, filename, entries, pack_checksum): return MemoryPackIndex(entries, pack_checksum) def tearDown(self): TestCase.tearDown(self) class TestPackIndexWritingv1(TestCase, BaseTestFilePackIndexWriting): def setUp(self): TestCase.setUp(self) BaseTestFilePackIndexWriting.setUp(self) self._has_crc32_checksum = False self._expected_version = 1 self._supports_large = False self._write_fn = write_pack_index_v1 def tearDown(self): TestCase.tearDown(self) BaseTestFilePackIndexWriting.tearDown(self) class TestPackIndexWritingv2(TestCase, BaseTestFilePackIndexWriting): def setUp(self): TestCase.setUp(self) BaseTestFilePackIndexWriting.setUp(self) self._has_crc32_checksum = True self._supports_large = True self._expected_version = 2 self._write_fn = write_pack_index_v2 def tearDown(self): TestCase.tearDown(self) BaseTestFilePackIndexWriting.tearDown(self) class ReadZlibTests(TestCase): decomp = ( b'tree 4ada885c9196b6b6fa08744b5862bf92896fc002\n' b'parent None\n' b'author Jelmer Vernooij 1228980214 +0000\n' b'committer Jelmer Vernooij 1228980214 +0000\n' b'\n' b"Provide replacement for mmap()'s offset argument.") comp = zlib.compress(decomp) extra = b'nextobject' def setUp(self): super(ReadZlibTests, self).setUp() self.read = BytesIO(self.comp + self.extra).read self.unpacked = UnpackedObject(Tree.type_num, None, len(self.decomp), 0) def test_decompress_size(self): good_decomp_len = len(self.decomp) self.unpacked.decomp_len = -1 self.assertRaises(ValueError, read_zlib_chunks, self.read, self.unpacked) self.unpacked.decomp_len = good_decomp_len - 1 self.assertRaises(zlib.error, read_zlib_chunks, self.read, self.unpacked) self.unpacked.decomp_len = good_decomp_len + 1 self.assertRaises(zlib.error, read_zlib_chunks, self.read, self.unpacked) def test_decompress_truncated(self): read = BytesIO(self.comp[:10]).read self.assertRaises(zlib.error, read_zlib_chunks, read, self.unpacked) read = BytesIO(self.comp).read self.assertRaises(zlib.error, read_zlib_chunks, read, self.unpacked) def test_decompress_empty(self): unpacked = UnpackedObject(Tree.type_num, None, 0, None) comp = zlib.compress(b'') read = BytesIO(comp + self.extra).read unused = read_zlib_chunks(read, unpacked) self.assertEqual(b'', b''.join(unpacked.decomp_chunks)) self.assertNotEqual(b'', unused) self.assertEqual(self.extra, 
unused + read()) def test_decompress_no_crc32(self): self.unpacked.crc32 = None read_zlib_chunks(self.read, self.unpacked) self.assertEqual(None, self.unpacked.crc32) def _do_decompress_test(self, buffer_size, **kwargs): unused = read_zlib_chunks(self.read, self.unpacked, buffer_size=buffer_size, **kwargs) self.assertEqual(self.decomp, b''.join(self.unpacked.decomp_chunks)) self.assertEqual(zlib.crc32(self.comp), self.unpacked.crc32) self.assertNotEqual(b'', unused) self.assertEqual(self.extra, unused + self.read()) def test_simple_decompress(self): self._do_decompress_test(4096) self.assertEqual(None, self.unpacked.comp_chunks) # These buffer sizes are not intended to be realistic, but rather simulate # larger buffer sizes that may end at various places. def test_decompress_buffer_size_1(self): self._do_decompress_test(1) def test_decompress_buffer_size_2(self): self._do_decompress_test(2) def test_decompress_buffer_size_3(self): self._do_decompress_test(3) def test_decompress_buffer_size_4(self): self._do_decompress_test(4) def test_decompress_include_comp(self): self._do_decompress_test(4096, include_comp=True) self.assertEqual(self.comp, b''.join(self.unpacked.comp_chunks)) class DeltifyTests(TestCase): def test_empty(self): self.assertEqual([], list(deltify_pack_objects([]))) def test_single(self): b = Blob.from_string(b"foo") self.assertEqual( [(b.type_num, b.sha().digest(), None, b.as_raw_string())], list(deltify_pack_objects([(b, b"")]))) def test_simple_delta(self): b1 = Blob.from_string(b"a" * 101) b2 = Blob.from_string(b"a" * 100) delta = create_delta(b1.as_raw_string(), b2.as_raw_string()) self.assertEqual([ (b1.type_num, b1.sha().digest(), None, b1.as_raw_string()), (b2.type_num, b2.sha().digest(), b1.sha().digest(), delta) ], list(deltify_pack_objects([(b1, b""), (b2, b"")]))) class TestPackStreamReader(TestCase): def test_read_objects_emtpy(self): f = BytesIO() build_pack(f, []) reader = PackStreamReader(f.read) self.assertEqual(0, len(list(reader.read_objects()))) def test_read_objects(self): f = BytesIO() entries = build_pack(f, [ (Blob.type_num, b'blob'), (OFS_DELTA, (0, b'blob1')), ]) reader = PackStreamReader(f.read) objects = list(reader.read_objects(compute_crc32=True)) self.assertEqual(2, len(objects)) unpacked_blob, unpacked_delta = objects self.assertEqual(entries[0][0], unpacked_blob.offset) self.assertEqual(Blob.type_num, unpacked_blob.pack_type_num) self.assertEqual(Blob.type_num, unpacked_blob.obj_type_num) self.assertEqual(None, unpacked_blob.delta_base) self.assertEqual(b'blob', b''.join(unpacked_blob.decomp_chunks)) self.assertEqual(entries[0][4], unpacked_blob.crc32) self.assertEqual(entries[1][0], unpacked_delta.offset) self.assertEqual(OFS_DELTA, unpacked_delta.pack_type_num) self.assertEqual(None, unpacked_delta.obj_type_num) self.assertEqual(unpacked_delta.offset - unpacked_blob.offset, unpacked_delta.delta_base) delta = create_delta(b'blob', b'blob1') self.assertEqual(delta, b''.join(unpacked_delta.decomp_chunks)) self.assertEqual(entries[1][4], unpacked_delta.crc32) def test_read_objects_buffered(self): f = BytesIO() build_pack(f, [ (Blob.type_num, b'blob'), (OFS_DELTA, (0, b'blob1')), ]) reader = PackStreamReader(f.read, zlib_bufsize=4) self.assertEqual(2, len(list(reader.read_objects()))) def test_read_objects_empty(self): reader = PackStreamReader(BytesIO().read) self.assertEqual([], list(reader.read_objects())) class TestPackIterator(DeltaChainIterator): _compute_crc32 = True def __init__(self, *args, **kwargs): super(TestPackIterator, 
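DeltifyTests above fixes the shape deltify_pack_objects yields: (type_num, sha digest, delta base or None, payload); here the second object is stored as a delta against the first:

    from dulwich.objects import Blob
    from dulwich.pack import deltify_pack_objects

    b1 = Blob.from_string(b'a' * 101)
    b2 = Blob.from_string(b'a' * 100)
    out = list(deltify_pack_objects([(b1, b''), (b2, b'')]))
    assert out[0][2] is None                    # first object stored whole
    assert out[1][2] == b1.sha().digest()       # second is a delta against the first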
class TestPackIterator(DeltaChainIterator):

    _compute_crc32 = True

    def __init__(self, *args, **kwargs):
        super(TestPackIterator, self).__init__(*args, **kwargs)
        self._unpacked_offsets = set()

    def _result(self, unpacked):
        """Return entries in the same format as build_pack."""
        return (unpacked.offset, unpacked.obj_type_num,
                b''.join(unpacked.obj_chunks), unpacked.sha(),
                unpacked.crc32)

    def _resolve_object(self, offset, pack_type_num, base_chunks):
        assert offset not in self._unpacked_offsets, (
            'Attempted to re-inflate offset %i' % offset)
        self._unpacked_offsets.add(offset)
        return super(TestPackIterator, self)._resolve_object(
            offset, pack_type_num, base_chunks)


class DeltaChainIteratorTests(TestCase):

    def setUp(self):
        super(DeltaChainIteratorTests, self).setUp()
        self.store = MemoryObjectStore()
        self.fetched = set()

    def store_blobs(self, blobs_data):
        blobs = []
        for data in blobs_data:
            blob = make_object(Blob, data=data)
            blobs.append(blob)
            self.store.add_object(blob)
        return blobs

    def get_raw_no_repeat(self, bin_sha):
        """Wrapper around store.get_raw that doesn't allow repeat lookups."""
        hex_sha = sha_to_hex(bin_sha)
        self.assertFalse(hex_sha in self.fetched,
                         'Attempted to re-fetch object %s' % hex_sha)
        self.fetched.add(hex_sha)
        return self.store.get_raw(hex_sha)

    def make_pack_iter(self, f, thin=None):
        if thin is None:
            thin = bool(list(self.store))
        resolve_ext_ref = self.get_raw_no_repeat if thin else None
        data = PackData('test.pack', file=f)
        return TestPackIterator.for_pack_data(
            data, resolve_ext_ref=resolve_ext_ref)

    def assertEntriesMatch(self, expected_indexes, entries, pack_iter):
        expected = [entries[i] for i in expected_indexes]
        self.assertEqual(expected, list(pack_iter._walk_all_chains()))

    def test_no_deltas(self):
        f = BytesIO()
        entries = build_pack(f, [
            (Commit.type_num, b'commit'),
            (Blob.type_num, b'blob'),
            (Tree.type_num, b'tree'),
            ])
        self.assertEntriesMatch([0, 1, 2], entries, self.make_pack_iter(f))

    def test_ofs_deltas(self):
        f = BytesIO()
        entries = build_pack(f, [
            (Blob.type_num, b'blob'),
            (OFS_DELTA, (0, b'blob1')),
            (OFS_DELTA, (0, b'blob2')),
            ])
        self.assertEntriesMatch([0, 1, 2], entries, self.make_pack_iter(f))

    def test_ofs_deltas_chain(self):
        f = BytesIO()
        entries = build_pack(f, [
            (Blob.type_num, b'blob'),
            (OFS_DELTA, (0, b'blob1')),
            (OFS_DELTA, (1, b'blob2')),
            ])
        self.assertEntriesMatch([0, 1, 2], entries, self.make_pack_iter(f))

    def test_ref_deltas(self):
        f = BytesIO()
        entries = build_pack(f, [
            (REF_DELTA, (1, b'blob1')),
            (Blob.type_num, b'blob'),
            (REF_DELTA, (1, b'blob2')),
            ])
        self.assertEntriesMatch([1, 0, 2], entries, self.make_pack_iter(f))

    def test_ref_deltas_chain(self):
        f = BytesIO()
        entries = build_pack(f, [
            (REF_DELTA, (2, b'blob1')),
            (Blob.type_num, b'blob'),
            (REF_DELTA, (1, b'blob2')),
            ])
        self.assertEntriesMatch([1, 2, 0], entries, self.make_pack_iter(f))

    def test_ofs_and_ref_deltas(self):
        # Deltas depending on this offset are popped before deltas depending
        # on this ref.
        f = BytesIO()
        entries = build_pack(f, [
            (REF_DELTA, (1, b'blob1')),
            (Blob.type_num, b'blob'),
            (OFS_DELTA, (1, b'blob2')),
            ])
        self.assertEntriesMatch([1, 2, 0], entries, self.make_pack_iter(f))

    def test_mixed_chain(self):
        f = BytesIO()
        entries = build_pack(f, [
            (Blob.type_num, b'blob'),
            (REF_DELTA, (2, b'blob2')),
            (OFS_DELTA, (0, b'blob1')),
            (OFS_DELTA, (1, b'blob3')),
            (OFS_DELTA, (0, b'bob')),
            ])
        self.assertEntriesMatch([0, 2, 4, 1, 3], entries,
                                self.make_pack_iter(f))

    def test_long_chain(self):
        n = 100
        objects_spec = [(Blob.type_num, b'blob')]
        for i in range(n):
            objects_spec.append(
                (OFS_DELTA, (i, b'blob' + str(i).encode('ascii'))))
        f = BytesIO()
        entries = build_pack(f, objects_spec)
        self.assertEntriesMatch(range(n + 1), entries, self.make_pack_iter(f))

    def test_branchy_chain(self):
        n = 100
        objects_spec = [(Blob.type_num, b'blob')]
        for i in range(n):
            objects_spec.append(
                (OFS_DELTA, (0, b'blob' + str(i).encode('ascii'))))
        f = BytesIO()
        entries = build_pack(f, objects_spec)
        self.assertEntriesMatch(range(n + 1), entries, self.make_pack_iter(f))

    def test_ext_ref(self):
        blob, = self.store_blobs([b'blob'])
        f = BytesIO()
        entries = build_pack(f, [(REF_DELTA, (blob.id, b'blob1'))],
                             store=self.store)
        pack_iter = self.make_pack_iter(f)
        self.assertEntriesMatch([0], entries, pack_iter)
        self.assertEqual([hex_to_sha(blob.id)], pack_iter.ext_refs())

    def test_ext_ref_chain(self):
        blob, = self.store_blobs([b'blob'])
        f = BytesIO()
        entries = build_pack(f, [
            (REF_DELTA, (1, b'blob2')),
            (REF_DELTA, (blob.id, b'blob1')),
            ], store=self.store)
        pack_iter = self.make_pack_iter(f)
        self.assertEntriesMatch([1, 0], entries, pack_iter)
        self.assertEqual([hex_to_sha(blob.id)], pack_iter.ext_refs())
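
    # Note on the ext-ref tests above and below (an editorial gloss, not
    # original test text): a "thin" pack may contain REF_DELTAs whose bases
    # are absent from the pack itself, so make_pack_iter passes
    # get_raw_no_repeat as resolve_ext_ref to fetch those bases from
    # self.store, asserting along the way that each external base is fetched
    # at most once.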
    def test_ext_ref_chain_degenerate(self):
        # Test a degenerate case where the sender is sending a REF_DELTA
        # object that expands to an object already in the repository.
        blob, = self.store_blobs([b'blob'])
        blob2, = self.store_blobs([b'blob2'])
        assert blob.id < blob2.id

        f = BytesIO()
        entries = build_pack(f, [
            (REF_DELTA, (blob.id, b'blob2')),
            (REF_DELTA, (0, b'blob3')),
            ], store=self.store)
        pack_iter = self.make_pack_iter(f)
        self.assertEntriesMatch([0, 1], entries, pack_iter)
        self.assertEqual([hex_to_sha(blob.id)], pack_iter.ext_refs())

    def test_ext_ref_multiple_times(self):
        blob, = self.store_blobs([b'blob'])
        f = BytesIO()
        entries = build_pack(f, [
            (REF_DELTA, (blob.id, b'blob1')),
            (REF_DELTA, (blob.id, b'blob2')),
            ], store=self.store)
        pack_iter = self.make_pack_iter(f)
        self.assertEntriesMatch([0, 1], entries, pack_iter)
        self.assertEqual([hex_to_sha(blob.id)], pack_iter.ext_refs())

    def test_multiple_ext_refs(self):
        b1, b2 = self.store_blobs([b'foo', b'bar'])
        f = BytesIO()
        entries = build_pack(f, [
            (REF_DELTA, (b1.id, b'foo1')),
            (REF_DELTA, (b2.id, b'bar2')),
            ], store=self.store)
        pack_iter = self.make_pack_iter(f)
        self.assertEntriesMatch([0, 1], entries, pack_iter)
        self.assertEqual([hex_to_sha(b1.id), hex_to_sha(b2.id)],
                         pack_iter.ext_refs())

    def test_bad_ext_ref_non_thin_pack(self):
        blob, = self.store_blobs([b'blob'])
        f = BytesIO()
        build_pack(f, [(REF_DELTA, (blob.id, b'blob1'))], store=self.store)
        pack_iter = self.make_pack_iter(f, thin=False)
        try:
            list(pack_iter._walk_all_chains())
            self.fail()
        except KeyError as e:
            self.assertEqual(([blob.id],), e.args)

    def test_bad_ext_ref_thin_pack(self):
        b1, b2, b3 = self.store_blobs([b'foo', b'bar', b'baz'])
        f = BytesIO()
        build_pack(f, [
            (REF_DELTA, (1, b'foo99')),
            (REF_DELTA, (b1.id, b'foo1')),
            (REF_DELTA, (b2.id, b'bar2')),
            (REF_DELTA, (b3.id, b'baz3')),
            ], store=self.store)
        del self.store[b2.id]
        del self.store[b3.id]
        pack_iter = self.make_pack_iter(f)
        try:
            list(pack_iter._walk_all_chains())
            self.fail()
        except KeyError as e:
            self.assertEqual((sorted([b2.id, b3.id]),), (sorted(e.args[0]),))


class DeltaEncodeSizeTests(TestCase):

    def test_basic(self):
        self.assertEqual(b'\x00', _delta_encode_size(0))
        self.assertEqual(b'\x01', _delta_encode_size(1))
        self.assertEqual(b'\xfa\x01', _delta_encode_size(250))
        self.assertEqual(b'\xe8\x07', _delta_encode_size(1000))
        self.assertEqual(b'\xa0\x8d\x06', _delta_encode_size(100000))


class EncodeCopyOperationTests(TestCase):

    def test_basic(self):
        self.assertEqual(b'\x80', _encode_copy_operation(0, 0))
        self.assertEqual(b'\x91\x01\x0a', _encode_copy_operation(1, 10))
        self.assertEqual(b'\xb1\x64\xe8\x03',
                         _encode_copy_operation(100, 1000))
        self.assertEqual(b'\x93\xe8\x03\x01', _encode_copy_operation(1000, 1))
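
# A worked example of the encodings checked above (editorial; derived from
# the expected byte strings in the two tests, not from the original module):
#
#   _delta_encode_size emits a little-endian base-128 varint: 7 payload bits
#   per byte, with 0x80 set on every byte except the last. So
#   1000 = 0b111_1101000 becomes 0xe8 (0x68 | 0x80) followed by 0x07.
#
#   _encode_copy_operation sets bit 0x80, then uses bits 0x01-0x08 to flag
#   which little-endian offset bytes follow and bits 0x10-0x40 for the size
#   bytes, omitting zero bytes. So copy(offset=1, size=10) is 0x91 0x01 0x0a,
#   i.e. 0x80 | 0x01 (one offset byte) | 0x10 (one size byte).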
diff --git a/dulwich/tests/test_repository.py b/dulwich/tests/test_repository.py
index 9c853576..52a6a8c7 100644
--- a/dulwich/tests/test_repository.py
+++ b/dulwich/tests/test_repository.py
@@ -1,864 +1,876 @@
# -*- coding: utf-8 -*-
# test_repository.py -- tests for repository.py
# Copyright (C) 2007 James Westby <jw+debian@jameswestby.net>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#

"""Tests for the repository."""

import locale
import os
import stat
import shutil
import sys
import tempfile
import warnings

from dulwich import errors
from dulwich.object_store import (
    tree_lookup_path,
    )
from dulwich import objects
from dulwich.config import Config
from dulwich.errors import NotGitRepository
from dulwich.repo import (
    Repo,
    MemoryRepo,
    )
from dulwich.tests import (
    TestCase,
    skipIf,
    )
from dulwich.tests.utils import (
    open_repo,
    tear_down_repo,
    setup_warning_catcher,
    )

missing_sha = b'b91fa4d900e17e99b433218e988c4eb4a3e9a097'


class CreateRepositoryTests(TestCase):

    def assertFileContentsEqual(self, expected, repo, path):
        f = repo.get_named_file(path)
        if not f:
            self.assertEqual(expected, None)
        else:
            with f:
                self.assertEqual(expected, f.read())

    def _check_repo_contents(self, repo, expect_bare):
        self.assertEqual(expect_bare, repo.bare)
        self.assertFileContentsEqual(b'Unnamed repository', repo,
                                     'description')
        self.assertFileContentsEqual(b'', repo,
                                     os.path.join('info', 'exclude'))
        self.assertFileContentsEqual(None, repo, 'nonexistent file')
        barestr = b'bare = ' + str(expect_bare).lower().encode('ascii')
        with repo.get_named_file('config') as f:
            config_text = f.read()
            self.assertTrue(barestr in config_text, "%r" % config_text)
        expect_filemode = sys.platform != 'win32'
        barestr = b'filemode = ' + str(expect_filemode).lower().encode('ascii')
        with repo.get_named_file('config') as f:
            config_text = f.read()
            self.assertTrue(barestr in config_text, "%r" % config_text)

    def test_create_memory(self):
        repo = MemoryRepo.init_bare([], {})
        self._check_repo_contents(repo, True)

    def test_create_disk_bare(self):
        tmp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tmp_dir)
        repo = Repo.init_bare(tmp_dir)
        self.assertEqual(tmp_dir, repo._controldir)
        self._check_repo_contents(repo, True)

    def test_create_disk_non_bare(self):
        tmp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tmp_dir)
        repo = Repo.init(tmp_dir)
        self.assertEqual(os.path.join(tmp_dir, '.git'), repo._controldir)
        self._check_repo_contents(repo, False)


class MemoryRepoTests(TestCase):

    def test_set_description(self):
        r = MemoryRepo.init_bare([], {})
        description = b"Some description"
        r.set_description(description)
        self.assertEqual(description, r.get_description())


class RepositoryRootTests(TestCase):

    def mkdtemp(self):
        return tempfile.mkdtemp()

    def open_repo(self, name):
        temp_dir = self.mkdtemp()
        repo = open_repo(name, temp_dir)
        self.addCleanup(tear_down_repo, repo)
        return repo

    def test_simple_props(self):
        r = self.open_repo('a.git')
        self.assertEqual(r.controldir(), r.path)

    def test_setitem(self):
        r = self.open_repo('a.git')
        r[b"refs/tags/foo"] = b'a90fa2d900a17e99b433217e988c4eb4a2e9a097'
        self.assertEqual(b'a90fa2d900a17e99b433217e988c4eb4a2e9a097',
                         r[b"refs/tags/foo"].id)

    def test_getitem_unicode(self):
        r = self.open_repo('a.git')

        test_keys = [
            (b'refs/heads/master', True),
            (b'a90fa2d900a17e99b433217e988c4eb4a2e9a097', True),
            (b'11' * 19 + b'--', False),
            ]

        for k, contained in test_keys:
            self.assertEqual(k in r, contained)

        # Avoid deprecation warning under Py3.2+
        if getattr(self, 'assertRaisesRegex', None):
            assertRaisesRegexp = self.assertRaisesRegex
        else:
            assertRaisesRegexp = self.assertRaisesRegexp
        for k, _ in test_keys:
            assertRaisesRegexp(
                TypeError, "'name' must be bytestring, not int",
                r.__getitem__, 12
            )

    def test_delitem(self):
        r = self.open_repo('a.git')

        del r[b'refs/heads/master']
        self.assertRaises(KeyError, lambda: r[b'refs/heads/master'])

        del r[b'HEAD']
        self.assertRaises(KeyError, lambda: r[b'HEAD'])

        self.assertRaises(ValueError, r.__delitem__, b'notrefs/foo')

    def test_get_refs(self):
        r = self.open_repo('a.git')
        self.assertEqual({
            b'HEAD': b'a90fa2d900a17e99b433217e988c4eb4a2e9a097',
            b'refs/heads/master': b'a90fa2d900a17e99b433217e988c4eb4a2e9a097',
            b'refs/tags/mytag': b'28237f4dc30d0d462658d6b937b08a0f0b6ef55a',
            b'refs/tags/mytag-packed':
                b'b0931cadc54336e78a1d980420e3268903b57a50',
            }, r.get_refs())

    def test_head(self):
        r = self.open_repo('a.git')
        self.assertEqual(r.head(), b'a90fa2d900a17e99b433217e988c4eb4a2e9a097')

    def test_get_object(self):
        r = self.open_repo('a.git')
        obj = r.get_object(r.head())
        self.assertEqual(obj.type_name, b'commit')

    def test_get_object_non_existent(self):
        r = self.open_repo('a.git')
        self.assertRaises(KeyError, r.get_object, missing_sha)

    def test_contains_object(self):
        r = self.open_repo('a.git')
        self.assertTrue(r.head() in r)

    def test_contains_ref(self):
        r = self.open_repo('a.git')
        self.assertTrue(b"HEAD" in r)

    def test_get_no_description(self):
        r = self.open_repo('a.git')
        self.assertIs(None, r.get_description())

    def test_get_description(self):
        r = self.open_repo('a.git')
        with open(os.path.join(r.path, 'description'), 'wb') as f:
            f.write(b"Some description")
        self.assertEqual(b"Some description", r.get_description())

    def test_set_description(self):
        r = self.open_repo('a.git')
        description = b"Some description"
        r.set_description(description)
        self.assertEqual(description, r.get_description())

    def test_contains_missing(self):
        r = self.open_repo('a.git')
        self.assertFalse(b"bar" in r)

    def test_get_peeled(self):
        # unpacked ref
        r = self.open_repo('a.git')
        tag_sha = b'28237f4dc30d0d462658d6b937b08a0f0b6ef55a'
        self.assertNotEqual(r[tag_sha].sha().hexdigest(), r.head())
        self.assertEqual(r.get_peeled(b'refs/tags/mytag'), r.head())

        # packed ref with cached peeled value
        packed_tag_sha = b'b0931cadc54336e78a1d980420e3268903b57a50'
        parent_sha = r[r.head()].parents[0]
        self.assertNotEqual(r[packed_tag_sha].sha().hexdigest(), parent_sha)
        self.assertEqual(r.get_peeled(b'refs/tags/mytag-packed'), parent_sha)

        # TODO: add more corner cases to test repo

    def test_get_peeled_not_tag(self):
        r = self.open_repo('a.git')
        self.assertEqual(r.get_peeled(b'HEAD'), r.head())

    def test_get_walker(self):
        r = self.open_repo('a.git')
        # include defaults to [r.head()]
        self.assertEqual(
            [e.commit.id for e in r.get_walker()],
            [r.head(), b'2a72d929692c41d8554c07f6301757ba18a65d91'])
        self.assertEqual(
            [e.commit.id for e in r.get_walker(
                [b'2a72d929692c41d8554c07f6301757ba18a65d91'])],
            [b'2a72d929692c41d8554c07f6301757ba18a65d91'])
        self.assertEqual(
            [e.commit.id for e in r.get_walker(
                b'2a72d929692c41d8554c07f6301757ba18a65d91')],
            [b'2a72d929692c41d8554c07f6301757ba18a65d91'])
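
    # Illustrative aside (not original test code): get_walker forwards its
    # keyword arguments to dulwich's Walker, so a capped, newest-first
    # history walk would look roughly like:
    #
    #     with Repo('a.git') as repo:            # hypothetical path
    #         for entry in repo.get_walker(max_entries=2):
    #             print(entry.commit.id)
    #
    # max_entries is assumed here from the Walker API.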
    def test_clone(self):
        r = self.open_repo('a.git')
        tmp_dir = self.mkdtemp()
        self.addCleanup(shutil.rmtree, tmp_dir)
        with r.clone(tmp_dir, mkdir=False) as t:
            self.assertEqual({
                b'HEAD': b'a90fa2d900a17e99b433217e988c4eb4a2e9a097',
                b'refs/remotes/origin/master':
                    b'a90fa2d900a17e99b433217e988c4eb4a2e9a097',
                b'refs/heads/master':
                    b'a90fa2d900a17e99b433217e988c4eb4a2e9a097',
                b'refs/tags/mytag':
                    b'28237f4dc30d0d462658d6b937b08a0f0b6ef55a',
                b'refs/tags/mytag-packed':
                    b'b0931cadc54336e78a1d980420e3268903b57a50',
                }, t.refs.as_dict())
            shas = [e.commit.id for e in r.get_walker()]
            self.assertEqual(shas, [t.head(),
                             b'2a72d929692c41d8554c07f6301757ba18a65d91'])
            c = t.get_config()
            encoded_path = r.path
            if not isinstance(encoded_path, bytes):
                encoded_path = encoded_path.encode(sys.getfilesystemencoding())
            self.assertEqual(encoded_path,
                             c.get((b'remote', b'origin'), b'url'))
            self.assertEqual(
                b'+refs/heads/*:refs/remotes/origin/*',
                c.get((b'remote', b'origin'), b'fetch'))

    def test_clone_no_head(self):
        temp_dir = self.mkdtemp()
        self.addCleanup(shutil.rmtree, temp_dir)
        repo_dir = os.path.join(os.path.dirname(__file__), 'data', 'repos')
        dest_dir = os.path.join(temp_dir, 'a.git')
        shutil.copytree(os.path.join(repo_dir, 'a.git'), dest_dir,
                        symlinks=True)
        r = Repo(dest_dir)
        del r.refs[b"refs/heads/master"]
        del r.refs[b"HEAD"]
        t = r.clone(os.path.join(temp_dir, 'b.git'), mkdir=True)
        self.assertEqual({
            b'refs/tags/mytag': b'28237f4dc30d0d462658d6b937b08a0f0b6ef55a',
            b'refs/tags/mytag-packed':
                b'b0931cadc54336e78a1d980420e3268903b57a50',
            }, t.refs.as_dict())

    def test_clone_empty(self):
        """Test clone() doesn't crash if HEAD points to a non-existing ref.

        This simulates cloning a server-side bare repository either when it
        is still empty or when the user renames the master branch and pushes
        a private repo to the server. A non-bare repo's HEAD always points
        to an existing ref.
        """
        r = self.open_repo('empty.git')
        tmp_dir = self.mkdtemp()
        self.addCleanup(shutil.rmtree, tmp_dir)
        r.clone(tmp_dir, mkdir=False, bare=True)

    def test_merge_history(self):
        r = self.open_repo('simple_merge.git')
        shas = [e.commit.id for e in r.get_walker()]
        self.assertEqual(shas, [b'5dac377bdded4c9aeb8dff595f0faeebcc8498cc',
                                b'ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd',
                                b'4cffe90e0a41ad3f5190079d7c8f036bde29cbe6',
                                b'60dacdc733de308bb77bb76ce0fb0f9b44c9769e',
                                b'0d89f20333fbb1d2f3a94da77f4981373d8f4310'])

    def test_out_of_order_merge(self):
        """Test that revision history is ordered by date, not parent order."""
        r = self.open_repo('ooo_merge.git')
        shas = [e.commit.id for e in r.get_walker()]
        self.assertEqual(shas, [b'7601d7f6231db6a57f7bbb79ee52e4d462fd44d1',
                                b'f507291b64138b875c28e03469025b1ea20bc614',
                                b'fb5b0425c7ce46959bec94d54b9a157645e114f5',
                                b'f9e39b120c68182a4ba35349f832d0e4e61f485c'])

    def test_get_tags_empty(self):
        r = self.open_repo('ooo_merge.git')
        self.assertEqual({}, r.refs.as_dict(b'refs/tags'))

    def test_get_config(self):
        r = self.open_repo('ooo_merge.git')
        self.assertIsInstance(r.get_config(), Config)

    def test_get_config_stack(self):
        r = self.open_repo('ooo_merge.git')
        self.assertIsInstance(r.get_config_stack(), Config)

    @skipIf(not getattr(os, 'symlink', None), 'Requires symlink support')
    def test_submodule(self):
        temp_dir = self.mkdtemp()
        self.addCleanup(shutil.rmtree, temp_dir)
        repo_dir = os.path.join(os.path.dirname(__file__), 'data', 'repos')
        shutil.copytree(os.path.join(repo_dir, 'a.git'),
                        os.path.join(temp_dir, 'a.git'), symlinks=True)
        rel = os.path.relpath(os.path.join(repo_dir, 'submodule'), temp_dir)
        os.symlink(os.path.join(rel, 'dotgit'), os.path.join(temp_dir, '.git'))
        with Repo(temp_dir) as r:
            self.assertEqual(r.head(),
                             b'a90fa2d900a17e99b433217e988c4eb4a2e9a097')

    def test_common_revisions(self):
        """
        This test demonstrates that ``find_common_revisions()`` actually
        returns common heads, not revisions; dulwich already uses
        ``find_common_revisions()`` in such a manner (see
        ``Repo.fetch_objects()``).
        """
        expected_shas = set([b'60dacdc733de308bb77bb76ce0fb0f9b44c9769e'])

        # Source for objects.
        r_base = self.open_repo('simple_merge.git')

        # Re-create each side of the merge in simple_merge.git.
        #
        # Since the trees and blobs are missing, the repository created is
        # corrupted, but we're only checking for commits for the purpose of
        # this test, so it's immaterial.
        r1_dir = self.mkdtemp()
        self.addCleanup(shutil.rmtree, r1_dir)
        r1_commits = [b'ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd',  # HEAD
                      b'60dacdc733de308bb77bb76ce0fb0f9b44c9769e',
                      b'0d89f20333fbb1d2f3a94da77f4981373d8f4310']

        r2_dir = self.mkdtemp()
        self.addCleanup(shutil.rmtree, r2_dir)
        r2_commits = [b'4cffe90e0a41ad3f5190079d7c8f036bde29cbe6',  # HEAD
                      b'60dacdc733de308bb77bb76ce0fb0f9b44c9769e',
                      b'0d89f20333fbb1d2f3a94da77f4981373d8f4310']

        r1 = Repo.init_bare(r1_dir)
        for c in r1_commits:
            r1.object_store.add_object(r_base.get_object(c))
        r1.refs[b'HEAD'] = r1_commits[0]

        r2 = Repo.init_bare(r2_dir)
        for c in r2_commits:
            r2.object_store.add_object(r_base.get_object(c))
        r2.refs[b'HEAD'] = r2_commits[0]

        # Finally, the 'real' testing!
        shas = r2.object_store.find_common_revisions(r1.get_graph_walker())
        self.assertEqual(set(shas), expected_shas)

        shas = r1.object_store.find_common_revisions(r2.get_graph_walker())
        self.assertEqual(set(shas), expected_shas)

    def test_shell_hook_pre_commit(self):
        if os.name != 'posix':
            self.skipTest('shell hook tests require a POSIX shell')

        pre_commit_fail = """#!/bin/sh
exit 1
"""

        pre_commit_success = """#!/bin/sh
exit 0
"""

        repo_dir = os.path.join(self.mkdtemp())
-        r = Repo.init(repo_dir)
        self.addCleanup(shutil.rmtree, repo_dir)
+        r = Repo.init(repo_dir)
+        self.addCleanup(r.close)

        pre_commit = os.path.join(r.controldir(), 'hooks', 'pre-commit')

        with open(pre_commit, 'w') as f:
            f.write(pre_commit_fail)
        os.chmod(pre_commit, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)

        self.assertRaises(errors.CommitError, r.do_commit, 'failed commit',
                          committer='Test Committer <test@nodomain.com>',
                          author='Test Author <test@nodomain.com>',
                          commit_timestamp=12345, commit_timezone=0,
                          author_timestamp=12345, author_timezone=0)

        with open(pre_commit, 'w') as f:
            f.write(pre_commit_success)
        os.chmod(pre_commit, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)

        commit_sha = r.do_commit(
            b'empty commit',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12395, commit_timezone=0,
            author_timestamp=12395, author_timezone=0)
        self.assertEqual([], r[commit_sha].parents)

    def test_shell_hook_commit_msg(self):
        if os.name != 'posix':
            self.skipTest('shell hook tests require a POSIX shell')

        commit_msg_fail = """#!/bin/sh
exit 1
"""

        commit_msg_success = """#!/bin/sh
exit 0
"""

        repo_dir = self.mkdtemp()
-        r = Repo.init(repo_dir)
        self.addCleanup(shutil.rmtree, repo_dir)
+        r = Repo.init(repo_dir)
+        self.addCleanup(r.close)

        commit_msg = os.path.join(r.controldir(), 'hooks', 'commit-msg')

        with open(commit_msg, 'w') as f:
            f.write(commit_msg_fail)
        os.chmod(commit_msg, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)

        self.assertRaises(errors.CommitError, r.do_commit, b'failed commit',
                          committer=b'Test Committer <test@nodomain.com>',
                          author=b'Test Author <test@nodomain.com>',
                          commit_timestamp=12345, commit_timezone=0,
                          author_timestamp=12345, author_timezone=0)

        with open(commit_msg, 'w') as f:
            f.write(commit_msg_success)
        os.chmod(commit_msg, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)

        commit_sha = r.do_commit(
            b'empty commit',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12395, commit_timezone=0,
            author_timestamp=12395, author_timezone=0)
        self.assertEqual([], r[commit_sha].parents)

    def test_shell_hook_post_commit(self):
        if os.name != 'posix':
            self.skipTest('shell hook tests require a POSIX shell')

        repo_dir = self.mkdtemp()
+        self.addCleanup(shutil.rmtree, repo_dir)
        r = Repo.init(repo_dir)
-        self.addCleanup(shutil.rmtree, repo_dir)
+        self.addCleanup(r.close)

        (fd, path) = tempfile.mkstemp(dir=repo_dir)
        os.close(fd)
        post_commit_msg = """#!/bin/sh
rm """ + path + """
"""

        root_sha = r.do_commit(
            b'empty commit',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12345, commit_timezone=0,
            author_timestamp=12345, author_timezone=0)
        self.assertEqual([], r[root_sha].parents)

        post_commit = os.path.join(r.controldir(), 'hooks', 'post-commit')

        with open(post_commit, 'wb') as f:
            f.write(post_commit_msg.encode(locale.getpreferredencoding()))
        os.chmod(post_commit, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)

        commit_sha = r.do_commit(
            b'empty commit',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12345, commit_timezone=0,
            author_timestamp=12345, author_timezone=0)
        self.assertEqual([root_sha], r[commit_sha].parents)

        self.assertFalse(os.path.exists(path))

        post_commit_msg_fail = """#!/bin/sh
exit 1
"""
        with open(post_commit, 'w') as f:
            f.write(post_commit_msg_fail)
        os.chmod(post_commit, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)

        warnings.simplefilter("always", UserWarning)
        self.addCleanup(warnings.resetwarnings)
        warnings_list, restore_warnings = setup_warning_catcher()
        self.addCleanup(restore_warnings)

        commit_sha2 = r.do_commit(
            b'empty commit',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12345, commit_timezone=0,
            author_timestamp=12345, author_timezone=0)
-        self.assertEqual(len(warnings_list), 1, warnings_list)
-        self.assertIsInstance(warnings_list[-1], UserWarning)
-        self.assertTrue("post-commit hook failed: " in str(warnings_list[-1]))
+        expected_warning = UserWarning(
+            'post-commit hook failed: Hook post-commit exited with '
+            'non-zero status',)
+        for w in warnings_list:
+            if (type(w) == type(expected_warning) and
+                    w.args == expected_warning.args):
+                break
+        else:
+            raise AssertionError('Expected warning %r not in %r' %
+                                 (expected_warning, warnings_list))
        self.assertEqual([commit_sha], r[commit_sha2].parents)

    def test_as_dict(self):
        def check(repo):
            self.assertEqual(repo.refs.subkeys(b'refs/tags'),
                             repo.refs.subkeys(b'refs/tags/'))
            self.assertEqual(repo.refs.as_dict(b'refs/tags'),
                             repo.refs.as_dict(b'refs/tags/'))
            self.assertEqual(repo.refs.as_dict(b'refs/heads'),
                             repo.refs.as_dict(b'refs/heads/'))

        bare = self.open_repo('a.git')
        tmp_dir = self.mkdtemp()
        self.addCleanup(shutil.rmtree, tmp_dir)
        with bare.clone(tmp_dir, mkdir=False) as nonbare:
            check(nonbare)
            check(bare)

    def test_working_tree(self):
        temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, temp_dir)
        worktree_temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, worktree_temp_dir)
        r = Repo.init(temp_dir)
+        self.addCleanup(r.close)
        root_sha = r.do_commit(
            b'empty commit',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12345, commit_timezone=0,
            author_timestamp=12345, author_timezone=0)
        r.refs[b'refs/heads/master'] = root_sha
        w = Repo._init_new_working_directory(worktree_temp_dir, r)
+        self.addCleanup(w.close)
        new_sha = w.do_commit(
            b'new commit',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12345, commit_timezone=0,
            author_timestamp=12345, author_timezone=0)
        w.refs[b'HEAD'] = new_sha
        self.assertEqual(os.path.abspath(r.controldir()),
                         os.path.abspath(w.commondir()))
        self.assertEqual(r.refs.keys(), w.refs.keys())
        self.assertNotEqual(r.head(), w.head())
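
# A note on the cleanup changes in the hook tests above (editorial, hedged):
# a Repo holds open pack file handles until close() is called, so the patch
# registers r.close with addCleanup before the directory is removed. The
# context-manager form used elsewhere in this file is equivalent:
#
#     with Repo.init(repo_dir) as r:
#         ...  # handles are released when the block exits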
class BuildRepoRootTests(TestCase):
    """Tests that build on-disk repos from scratch.

    Repos live in a temp dir and are torn down after each test. They start
    with a single commit in master having a single file named 'a'.
    """

    def get_repo_dir(self):
        return os.path.join(tempfile.mkdtemp(), 'test')

    def setUp(self):
        super(BuildRepoRootTests, self).setUp()
        self._repo_dir = self.get_repo_dir()
        os.makedirs(self._repo_dir)
        r = self._repo = Repo.init(self._repo_dir)
        self.addCleanup(tear_down_repo, r)
        self.assertFalse(r.bare)
        self.assertEqual(b'ref: refs/heads/master', r.refs.read_ref(b'HEAD'))
        self.assertRaises(KeyError, lambda: r.refs[b'refs/heads/master'])

        with open(os.path.join(r.path, 'a'), 'wb') as f:
            f.write(b'file contents')
        r.stage(['a'])
        commit_sha = r.do_commit(
            b'msg',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12345, commit_timezone=0,
            author_timestamp=12345, author_timezone=0)
        self.assertEqual([], r[commit_sha].parents)
        self._root_commit = commit_sha

    def test_build_repo(self):
        r = self._repo
        self.assertEqual(b'ref: refs/heads/master', r.refs.read_ref(b'HEAD'))
        self.assertEqual(self._root_commit, r.refs[b'refs/heads/master'])
        expected_blob = objects.Blob.from_string(b'file contents')
        self.assertEqual(expected_blob.data, r[expected_blob.id].data)
        actual_commit = r[self._root_commit]
        self.assertEqual(b'msg', actual_commit.message)

    def test_commit_modified(self):
        r = self._repo
        with open(os.path.join(r.path, 'a'), 'wb') as f:
            f.write(b'new contents')
        r.stage(['a'])
        commit_sha = r.do_commit(
            b'modified a',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12395, commit_timezone=0,
            author_timestamp=12395, author_timezone=0)
        self.assertEqual([self._root_commit], r[commit_sha].parents)
        a_mode, a_id = tree_lookup_path(r.get_object, r[commit_sha].tree, b'a')
        self.assertEqual(stat.S_IFREG | 0o644, a_mode)
        self.assertEqual(b'new contents', r[a_id].data)

    @skipIf(not getattr(os, 'symlink', None), 'Requires symlink support')
    def test_commit_symlink(self):
        r = self._repo
        os.symlink('a', os.path.join(r.path, 'b'))
        r.stage(['a', 'b'])
        commit_sha = r.do_commit(
            b'Symlink b',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12395, commit_timezone=0,
            author_timestamp=12395, author_timezone=0)
        self.assertEqual([self._root_commit], r[commit_sha].parents)
        b_mode, b_id = tree_lookup_path(r.get_object, r[commit_sha].tree, b'b')
        self.assertTrue(stat.S_ISLNK(b_mode))
        self.assertEqual(b'a', r[b_id].data)

    def test_commit_deleted(self):
        r = self._repo
        os.remove(os.path.join(r.path, 'a'))
        r.stage(['a'])
        commit_sha = r.do_commit(
            b'deleted a',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12395, commit_timezone=0,
            author_timestamp=12395, author_timezone=0)
        self.assertEqual([self._root_commit], r[commit_sha].parents)
        self.assertEqual([], list(r.open_index()))
        tree = r[r[commit_sha].tree]
        self.assertEqual([], list(tree.iteritems()))

    def test_commit_follows(self):
        r = self._repo
        r.refs.set_symbolic_ref(b'HEAD', b'refs/heads/bla')
        commit_sha = r.do_commit(
            b'commit with strange character',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12395, commit_timezone=0,
            author_timestamp=12395, author_timezone=0,
            ref=b'HEAD')
        self.assertEqual(commit_sha, r[b'refs/heads/bla'].id)

    def test_commit_encoding(self):
        r = self._repo
        commit_sha = r.do_commit(
            b'commit with strange character \xee',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12395, commit_timezone=0,
            author_timestamp=12395, author_timezone=0,
            encoding=b"iso8859-1")
        self.assertEqual(b"iso8859-1", r[commit_sha].encoding)

    def test_commit_config_identity(self):
        # commit falls back to the user's identity if it wasn't specified
        r = self._repo
        c = r.get_config()
        c.set((b"user", ), b"name", b"Jelmer")
        c.set((b"user", ), b"email", b"jelmer@apache.org")
        c.write_to_path()
        commit_sha = r.do_commit(b'message')
        self.assertEqual(
            b"Jelmer <jelmer@apache.org>",
            r[commit_sha].author)
        self.assertEqual(
            b"Jelmer <jelmer@apache.org>",
            r[commit_sha].committer)

    def test_commit_config_identity_in_memoryrepo(self):
        # commit falls back to the user's identity if it wasn't specified
        r = MemoryRepo.init_bare([], {})
        c = r.get_config()
        c.set((b"user", ), b"name", b"Jelmer")
        c.set((b"user", ), b"email", b"jelmer@apache.org")
        commit_sha = r.do_commit(b'message', tree=objects.Tree().id)
        self.assertEqual(
            b"Jelmer <jelmer@apache.org>",
            r[commit_sha].author)
        self.assertEqual(
            b"Jelmer <jelmer@apache.org>",
            r[commit_sha].committer)

    def test_commit_fail_ref(self):
        r = self._repo

        def set_if_equals(name, old_ref, new_ref):
            return False
        r.refs.set_if_equals = set_if_equals

        def add_if_new(name, new_ref):
            self.fail('Unexpected call to add_if_new')
        r.refs.add_if_new = add_if_new

        old_shas = set(r.object_store)
        self.assertRaises(errors.CommitError, r.do_commit, b'failed commit',
                          committer=b'Test Committer <test@nodomain.com>',
                          author=b'Test Author <test@nodomain.com>',
                          commit_timestamp=12345, commit_timezone=0,
                          author_timestamp=12345, author_timezone=0)
        new_shas = set(r.object_store) - old_shas
        self.assertEqual(1, len(new_shas))
        # Check that the new commit (now garbage) was added.
        new_commit = r[new_shas.pop()]
        self.assertEqual(r[self._root_commit].tree, new_commit.tree)
        self.assertEqual(b'failed commit', new_commit.message)

    def test_commit_branch(self):
        r = self._repo

        commit_sha = r.do_commit(
            b'commit to branch',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12395, commit_timezone=0,
            author_timestamp=12395, author_timezone=0,
            ref=b"refs/heads/new_branch")
        self.assertEqual(self._root_commit, r[b"HEAD"].id)
        self.assertEqual(commit_sha, r[b"refs/heads/new_branch"].id)
        self.assertEqual([], r[commit_sha].parents)
        self.assertTrue(b"refs/heads/new_branch" in r)

        new_branch_head = commit_sha

        commit_sha = r.do_commit(
            b'commit to branch 2',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12395, commit_timezone=0,
            author_timestamp=12395, author_timezone=0,
            ref=b"refs/heads/new_branch")
        self.assertEqual(self._root_commit, r[b"HEAD"].id)
        self.assertEqual(commit_sha, r[b"refs/heads/new_branch"].id)
        self.assertEqual([new_branch_head], r[commit_sha].parents)

    def test_commit_merge_heads(self):
        r = self._repo
        merge_1 = r.do_commit(
            b'commit to branch 2',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12395, commit_timezone=0,
            author_timestamp=12395, author_timezone=0,
            ref=b"refs/heads/new_branch")
        commit_sha = r.do_commit(
            b'commit with merge',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12395, commit_timezone=0,
            author_timestamp=12395, author_timezone=0,
            merge_heads=[merge_1])
        self.assertEqual(
            [self._root_commit, merge_1],
            r[commit_sha].parents)

    def test_commit_dangling_commit(self):
        r = self._repo

        old_shas = set(r.object_store)
        old_refs = r.get_refs()
        commit_sha = r.do_commit(
            b'commit with no ref',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12395, commit_timezone=0,
            author_timestamp=12395, author_timezone=0,
            ref=None)
        new_shas = set(r.object_store) - old_shas

        # New sha is added, but no new refs
        self.assertEqual(1, len(new_shas))
        new_commit = r[new_shas.pop()]
        self.assertEqual(r[self._root_commit].tree, new_commit.tree)
        self.assertEqual([], r[commit_sha].parents)
        self.assertEqual(old_refs, r.get_refs())
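
    # (Editorial aside: a commit written with ref=None is referenced by no
    # ref, i.e. "dangling"; git's garbage collection would eventually prune
    # it. This test and the next only assert that the object reaches the
    # store without moving any refs.)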
    def test_commit_dangling_commit_with_parents(self):
        r = self._repo

        old_shas = set(r.object_store)
        old_refs = r.get_refs()
        commit_sha = r.do_commit(
            b'commit with no ref',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12395, commit_timezone=0,
            author_timestamp=12395, author_timezone=0,
            ref=None, merge_heads=[self._root_commit])
        new_shas = set(r.object_store) - old_shas

        # New sha is added, but no new refs
        self.assertEqual(1, len(new_shas))
        new_commit = r[new_shas.pop()]
        self.assertEqual(r[self._root_commit].tree, new_commit.tree)
        self.assertEqual([self._root_commit], r[commit_sha].parents)
        self.assertEqual(old_refs, r.get_refs())

    def test_stage_absolute(self):
        r = self._repo
        os.remove(os.path.join(r.path, 'a'))
        self.assertRaises(ValueError, r.stage, [os.path.join(r.path, 'a')])

    def test_stage_deleted(self):
        r = self._repo
        os.remove(os.path.join(r.path, 'a'))
        r.stage(['a'])
        r.stage(['a'])  # double-stage a deleted path

    def test_commit_no_encode_decode(self):
        r = self._repo
        repo_path_bytes = r.path.encode(sys.getfilesystemencoding())
        encodings = ('utf8', 'latin1')
        names = [u'À'.encode(encoding) for encoding in encodings]
        for name, encoding in zip(names, encodings):
            full_path = os.path.join(repo_path_bytes, name)
            with open(full_path, 'wb') as f:
                f.write(encoding.encode('ascii'))
            # These files break tear_down_repo, so clean them up ourselves.
            self.addCleanup(os.remove, full_path)

        r.stage(names)
        commit_sha = r.do_commit(
            b'Files with different encodings',
            committer=b'Test Committer <test@nodomain.com>',
            author=b'Test Author <test@nodomain.com>',
            commit_timestamp=12395, commit_timezone=0,
            author_timestamp=12395, author_timezone=0,
            ref=None, merge_heads=[self._root_commit])

        for name, encoding in zip(names, encodings):
            mode, id = tree_lookup_path(r.get_object, r[commit_sha].tree,
                                        name)
            self.assertEqual(stat.S_IFREG | 0o644, mode)
            self.assertEqual(encoding.encode('ascii'), r[id].data)

    def test_discover_intended(self):
        path = os.path.join(self._repo_dir, 'b/c')
        r = Repo.discover(path)
        self.assertEqual(r.head(), self._repo.head())

    def test_discover_isrepo(self):
        r = Repo.discover(self._repo_dir)
        self.assertEqual(r.head(), self._repo.head())

    def test_discover_notrepo(self):
        with self.assertRaises(NotGitRepository):
            Repo.discover('/')
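
# Illustrative note on the discover tests (editorial; semantics inferred
# from the assertions above): Repo.discover walks upward from the given
# path until it finds a repository, raising NotGitRepository if it reaches
# the filesystem root first, e.g.:
#
#     r = Repo.discover('/path/to/repo/some/subdir')   # hypothetical path
#     assert r.controldir().startswith('/path/to/repo')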
diff --git a/setup.py b/setup.py
index 5a3ad387..8c7407bc 100755
--- a/setup.py
+++ b/setup.py
@@ -1,106 +1,108 @@
#!/usr/bin/python
# encoding: utf-8
# Setup file for dulwich
# Copyright (C) 2008-2016 Jelmer Vernooij <jelmer@jelmer.uk>

try:
    from setuptools import setup, Extension
except ImportError:
    from distutils.core import setup, Extension
from distutils.core import Distribution

-dulwich_version_string = '0.17.1'
+dulwich_version_string = '0.17.3'

include_dirs = []

# Windows MSVC support
import os
import sys
if sys.platform == 'win32':
    include_dirs.append('dulwich')


class DulwichDistribution(Distribution):

    def is_pure(self):
        if self.pure:
            return True

    def has_ext_modules(self):
        return not self.pure

    global_options = Distribution.global_options + [
        ('pure', None, "use pure Python code instead of C "
                       "extensions (slower on CPython)")]

    pure = False

if sys.platform == 'darwin' and os.path.exists('/usr/bin/xcodebuild'):
    # XCode 4.0 dropped support for ppc architecture, which is hardcoded in
    # distutils.sysconfig
    import subprocess
    p = subprocess.Popen(
        ['/usr/bin/xcodebuild', '-version'], stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, env={})
    out, err = p.communicate()
    for l in out.splitlines():
        l = l.decode("utf8")
        # Also parse only the first digit, because 3.2.1 can't be parsed
        # nicely.
        if l.startswith('Xcode') and int(l.split()[1].split('.')[0]) >= 4:
            os.environ['ARCHFLAGS'] = ''

tests_require = ['fastimport']

if '__pypy__' not in sys.modules and sys.platform != 'win32':
    tests_require.extend([
        'gevent', 'geventhttpclient', 'mock', 'setuptools>=17.1'])

if sys.version_info[0] > 2 and sys.platform == 'win32':
    # C modules don't build for python3 windows, and prevent tests from
    # running.
    ext_modules = []
else:
    ext_modules = [
        Extension('dulwich._objects', ['dulwich/_objects.c'],
                  include_dirs=include_dirs),
        Extension('dulwich._pack', ['dulwich/_pack.c'],
                  include_dirs=include_dirs),
        Extension('dulwich._diff_tree', ['dulwich/_diff_tree.c'],
                  include_dirs=include_dirs),
    ]

setup(name='dulwich',
      description='Python Git Library',
      keywords='git',
      version=dulwich_version_string,
      url='https://www.dulwich.io/',
      license='Apachev2 or later or GPLv2',
      author='Jelmer Vernooij',
      author_email='jelmer@jelmer.uk',
      long_description="""
      Python implementation of the Git file formats and protocols,
      without the need to have git installed.

      All functionality is available in pure Python. Optional
      C extensions can be built for improved performance.

      The project is named after the part of London that Mr. and Mrs. Git
      live in, in the particular Monty Python sketch.
      """,
      packages=['dulwich', 'dulwich.tests', 'dulwich.tests.compat',
                'dulwich.contrib'],
      package_data={'': ['../docs/tutorial/*.txt']},
      scripts=['bin/dulwich', 'bin/dul-receive-pack', 'bin/dul-upload-pack'],
      classifiers=[
          'Development Status :: 4 - Beta',
          'License :: OSI Approved :: Apache Software License',
          'Programming Language :: Python :: 2.7',
+          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
+          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: Implementation :: CPython',
          'Programming Language :: Python :: Implementation :: PyPy',
          'Operating System :: POSIX',
          'Topic :: Software Development :: Version Control',
      ],
      ext_modules=ext_modules,
      test_suite='dulwich.tests.test_suite',
      tests_require=tests_require,
      distclass=DulwichDistribution,
      include_package_data=True,
      )
diff --git a/tox.ini b/tox.ini
index 46888a20..b75a4b70 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,24 +1,27 @@
[tox]
downloadcache = {toxworkdir}/cache/
-envlist = py27, pypy, py27-noext, pypy-noext, py34, py34-noext, py35, py35-noext, py36, py36-noext
+envlist = py27, pypy, py27-noext, pypy-noext, py33, py33-noext, py34, py34-noext, py35, py35-noext, py36, py36-noext

[testenv]
commands = make check
recreate = True
whitelist_externals = make

[testenv:py27-noext]
commands = make check-noextensions

[testenv:pypy-noext]
commands = make check-noextensions

+[testenv:py33-noext]
+commands = make check-noextensions
+
[testenv:py34-noext]
commands = make check-noextensions

[testenv:py35-noext]
commands = make check-noextensions

[testenv:py36-noext]
commands = make check-noextensions
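
# Editorial usage note (not part of the patch): with the envlist above, the
# newly listed interpreter can be exercised on its own via, e.g.:
#
#   tox -e py33,py33-noext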