diff --git a/dulwich/objects.py b/dulwich/objects.py index 05d1a586..6d6aaec6 100644 --- a/dulwich/objects.py +++ b/dulwich/objects.py @@ -1,1300 +1,1300 @@ # objects.py -- Access to base git objects # Copyright (C) 2007 James Westby # Copyright (C) 2008-2013 Jelmer Vernooij # # Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU # General Public License as public by the Free Software Foundation; version 2.0 # or (at your option) any later version. You can redistribute it and/or # modify it under the terms of either of these two licenses. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # You should have received a copy of the licenses; if not, see # for a copy of the GNU General Public License # and for a copy of the Apache # License, Version 2.0. # """Access to base git objects.""" import binascii from io import BytesIO from collections import namedtuple import os import posixpath import stat import warnings import zlib from hashlib import sha1 from dulwich.errors import ( ChecksumMismatch, NotBlobError, NotCommitError, NotTagError, NotTreeError, ObjectFormatException, ) from dulwich.file import GitFile ZERO_SHA = b'0' * 40 # Header fields for commits _TREE_HEADER = b'tree' _PARENT_HEADER = b'parent' _AUTHOR_HEADER = b'author' _COMMITTER_HEADER = b'committer' _ENCODING_HEADER = b'encoding' _MERGETAG_HEADER = b'mergetag' _GPGSIG_HEADER = b'gpgsig' # Header fields for objects _OBJECT_HEADER = b'object' _TYPE_HEADER = b'type' _TAG_HEADER = b'tag' _TAGGER_HEADER = b'tagger' S_IFGITLINK = 0o160000 def S_ISGITLINK(m): """Check if a mode indicates a submodule. :param m: Mode to check :return: a ``boolean`` """ return (stat.S_IFMT(m) == S_IFGITLINK) def _decompress(string): dcomp = zlib.decompressobj() dcomped = dcomp.decompress(string) dcomped += dcomp.flush() return dcomped def sha_to_hex(sha): """Takes a string and returns the hex of the sha within""" hexsha = binascii.hexlify(sha) assert len(hexsha) == 40, "Incorrect length of sha1 string: %d" % hexsha return hexsha def hex_to_sha(hex): """Takes a hex sha and returns a binary sha""" assert len(hex) == 40, "Incorrect length of hexsha: %s" % hex try: return binascii.unhexlify(hex) except TypeError as exc: if not isinstance(hex, bytes): raise raise ValueError(exc.args[0]) def valid_hexsha(hex): if len(hex) != 40: return False try: binascii.unhexlify(hex) except (TypeError, binascii.Error): return False else: return True def hex_to_filename(path, hex): """Takes a hex sha and returns its filename relative to the given path.""" # os.path.join accepts bytes or unicode, but all args must be of the same # type. Make sure that hex which is expected to be bytes, is the same type # as path. 
if getattr(path, 'encode', None) is not None: hex = hex.decode('ascii') dir = hex[:2] file = hex[2:] # Check from object dir return os.path.join(path, dir, file) def filename_to_hex(filename): """Takes an object filename and returns its corresponding hex sha.""" # grab the last (up to) two path components names = filename.rsplit(os.path.sep, 2)[-2:] errmsg = "Invalid object filename: %s" % filename assert len(names) == 2, errmsg base, rest = names assert len(base) == 2 and len(rest) == 38, errmsg hex = (base + rest).encode('ascii') hex_to_sha(hex) return hex def object_header(num_type, length): """Return an object header for the given numeric type and text length.""" return object_class(num_type).type_name + b' ' + str(length).encode('ascii') + b'\0' def serializable_property(name, docstring=None): """A property that helps tracking whether serialization is necessary. """ def set(obj, value): setattr(obj, "_"+name, value) obj._needs_serialization = True def get(obj): return getattr(obj, "_"+name) return property(get, set, doc=docstring) def object_class(type): """Get the object class corresponding to the given type. :param type: Either a type name string or a numeric type. :return: The ShaFile subclass corresponding to the given type, or None if type is not a valid type name/number. """ return _TYPE_MAP.get(type, None) def check_hexsha(hex, error_msg): """Check if a string is a valid hex sha string. :param hex: Hex string to check :param error_msg: Error message to use in exception :raise ObjectFormatException: Raised when the string is not valid """ if not valid_hexsha(hex): raise ObjectFormatException("%s %s" % (error_msg, hex)) def check_identity(identity, error_msg): """Check if the specified identity is valid. This will raise an exception if the identity is not valid. 
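# A minimal usage sketch (not part of the diff itself): how hex_to_filename()
# and filename_to_hex() above map a hex SHA to its loose-object path and back.
# The repository path used here is only an example.
from dulwich.objects import filename_to_hex, hex_to_filename

hexsha = b'1' * 40
path = hex_to_filename('/tmp/example/.git/objects', hexsha)
# The first two hex digits become the fan-out directory and the other 38 the
# file name, e.g. '/tmp/example/.git/objects/11/111...1'.
assert filename_to_hex(path) == hexsha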
:param identity: Identity string :param error_msg: Error message to use in exception """ email_start = identity.find(b'<') email_end = identity.find(b'>') if (email_start < 0 or email_end < 0 or email_end <= email_start or identity.find(b'<', email_start + 1) >= 0 or identity.find(b'>', email_end + 1) >= 0 or not identity.endswith(b'>')): raise ObjectFormatException(error_msg) def git_line(*items): """Formats items into a space sepreated line.""" return b' '.join(items) + b'\n' class FixedSha(object): """SHA object that behaves like hashlib's but is given a fixed value.""" __slots__ = ('_hexsha', '_sha') def __init__(self, hexsha): if getattr(hexsha, 'encode', None) is not None: hexsha = hexsha.encode('ascii') if not isinstance(hexsha, bytes): raise TypeError('Expected bytes for hexsha, got %r' % hexsha) self._hexsha = hexsha self._sha = hex_to_sha(hexsha) def digest(self): """Return the raw SHA digest.""" return self._sha def hexdigest(self): """Return the hex SHA digest.""" return self._hexsha.decode('ascii') class ShaFile(object): """A git SHA file.""" __slots__ = ('_chunked_text', '_sha', '_needs_serialization') @staticmethod def _parse_legacy_object_header(magic, f): """Parse a legacy object, creating it but not reading the file.""" bufsize = 1024 decomp = zlib.decompressobj() header = decomp.decompress(magic) start = 0 end = -1 while end < 0: extra = f.read(bufsize) header += decomp.decompress(extra) magic += extra end = header.find(b'\0', start) start = len(header) header = header[:end] type_name, size = header.split(b' ', 1) size = int(size) # sanity check obj_class = object_class(type_name) if not obj_class: raise ObjectFormatException("Not a known type: %s" % type_name) return obj_class() def _parse_legacy_object(self, map): """Parse a legacy object, setting the raw string.""" text = _decompress(map) header_end = text.find(b'\0') if header_end < 0: raise ObjectFormatException("Invalid object header, no \\0") self.set_raw_string(text[header_end+1:]) def as_legacy_object_chunks(self): """Return chunks representing the object in the experimental format. :return: List of strings """ compobj = zlib.compressobj() yield compobj.compress(self._header()) for chunk in self.as_raw_chunks(): yield compobj.compress(chunk) yield compobj.flush() def as_legacy_object(self): """Return string representing the object in the experimental format. """ return b''.join(self.as_legacy_object_chunks()) def as_raw_chunks(self): """Return chunks with serialization of the object. :return: List of strings, not necessarily one per line """ if self._needs_serialization: self._sha = None self._chunked_text = self._serialize() self._needs_serialization = False return self._chunked_text def as_raw_string(self): """Return raw string with serialization of the object. 
:return: String object """ return b''.join(self.as_raw_chunks()) def __str__(self): """Return raw string serialization of this object.""" return self.as_raw_string() def __hash__(self): """Return unique hash for this object.""" return hash(self.id) def as_pretty_string(self): """Return a string representing this object, fit for display.""" return self.as_raw_string() def set_raw_string(self, text, sha=None): """Set the contents of this object from a serialized string.""" if not isinstance(text, bytes): raise TypeError('Expected bytes for text, got %r' % text) self.set_raw_chunks([text], sha) def set_raw_chunks(self, chunks, sha=None): """Set the contents of this object from a list of chunks.""" self._chunked_text = chunks self._deserialize(chunks) if sha is None: self._sha = None else: self._sha = FixedSha(sha) self._needs_serialization = False @staticmethod def _parse_object_header(magic, f): """Parse a new style object, creating it but not reading the file.""" num_type = (ord(magic[0:1]) >> 4) & 7 obj_class = object_class(num_type) if not obj_class: raise ObjectFormatException("Not a known type %d" % num_type) return obj_class() def _parse_object(self, map): """Parse a new style object, setting self._text.""" # skip type and size; type must have already been determined, and # we trust zlib to fail if it's otherwise corrupted byte = ord(map[0:1]) used = 1 while (byte & 0x80) != 0: byte = ord(map[used:used+1]) used += 1 raw = map[used:] self.set_raw_string(_decompress(raw)) @classmethod def _is_legacy_object(cls, magic): b0 = ord(magic[0:1]) b1 = ord(magic[1:2]) word = (b0 << 8) + b1 return (b0 & 0x8F) == 0x08 and (word % 31) == 0 @classmethod def _parse_file(cls, f): map = f.read() if cls._is_legacy_object(map): obj = cls._parse_legacy_object_header(map, f) obj._parse_legacy_object(map) else: obj = cls._parse_object_header(map, f) obj._parse_object(map) return obj def __init__(self): """Don't call this directly""" self._sha = None self._chunked_text = [] self._needs_serialization = True def _deserialize(self, chunks): raise NotImplementedError(self._deserialize) def _serialize(self): raise NotImplementedError(self._serialize) @classmethod def from_path(cls, path): """Open a SHA file from disk.""" with GitFile(path, 'rb') as f: return cls.from_file(f) @classmethod def from_file(cls, f): """Get the contents of a SHA file on disk.""" try: obj = cls._parse_file(f) obj._sha = None return obj except (IndexError, ValueError): raise ObjectFormatException("invalid object header") @staticmethod def from_raw_string(type_num, string, sha=None): """Creates an object of the indicated type from the raw string given. :param type_num: The numeric type of the object. :param string: The raw uncompressed contents. :param sha: Optional known sha for the object """ obj = object_class(type_num)() obj.set_raw_string(string, sha) return obj @staticmethod def from_raw_chunks(type_num, chunks, sha=None): """Creates an object of the indicated type from the raw chunks given. :param type_num: The numeric type of the object. :param chunks: An iterable of the raw uncompressed contents. :param sha: Optional known sha for the object """ obj = object_class(type_num)() obj.set_raw_chunks(chunks, sha) return obj @classmethod def from_string(cls, string): """Create a ShaFile from a string.""" obj = cls() obj.set_raw_string(string) return obj def _check_has_member(self, member, error_msg): """Check that the object has a given member variable. 
:param member: the member variable to check for :param error_msg: the message for an error if the member is missing :raise ObjectFormatException: with the given error_msg if member is missing or is None """ if getattr(self, member, None) is None: raise ObjectFormatException(error_msg) def check(self): """Check this object for internal consistency. :raise ObjectFormatException: if the object is malformed in some way :raise ChecksumMismatch: if the object was created with a SHA that does not match its contents """ # TODO: if we find that error-checking during object parsing is a # performance bottleneck, those checks should be moved to the class's # check() method during optimization so we can still check the object # when necessary. old_sha = self.id try: self._deserialize(self.as_raw_chunks()) self._sha = None new_sha = self.id except Exception as e: raise ObjectFormatException(e) if old_sha != new_sha: raise ChecksumMismatch(new_sha, old_sha) def _header(self): return object_header(self.type, self.raw_length()) def raw_length(self): """Returns the length of the raw string of this object.""" ret = 0 for chunk in self.as_raw_chunks(): ret += len(chunk) return ret def sha(self): """The SHA1 object that is the name of this object.""" if self._sha is None or self._needs_serialization: # this is a local because as_raw_chunks() overwrites self._sha new_sha = sha1() new_sha.update(self._header()) for chunk in self.as_raw_chunks(): new_sha.update(chunk) self._sha = new_sha return self._sha def copy(self): """Create a new copy of this SHA1 object from its raw string""" obj_class = object_class(self.get_type()) return obj_class.from_raw_string( self.get_type(), self.as_raw_string(), self.id) @property def id(self): """The hex SHA of this object.""" return self.sha().hexdigest().encode('ascii') def get_type(self): """Return the type number for this object class.""" return self.type_num def set_type(self, type): """Set the type number for this object class.""" self.type_num = type # DEPRECATED: use type_num or type_name as needed. type = property(get_type, set_type) def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self.id) def __ne__(self, other): return not isinstance(other, ShaFile) or self.id != other.id def __eq__(self, other): """Return True if the SHAs of the two objects match. It doesn't make sense to talk about an order on ShaFiles, so we don't override the rich comparison methods (__le__, etc.). 
""" return isinstance(other, ShaFile) and self.id == other.id def __lt__(self, other): if not isinstance(other, ShaFile): raise TypeError return self.id < other.id def __le__(self, other): if not isinstance(other, ShaFile): raise TypeError return self.id <= other.id def __cmp__(self, other): if not isinstance(other, ShaFile): raise TypeError return cmp(self.id, other.id) class Blob(ShaFile): """A Git Blob object.""" __slots__ = () type_name = b'blob' type_num = 3 def __init__(self): super(Blob, self).__init__() self._chunked_text = [] self._needs_serialization = False def _get_data(self): return self.as_raw_string() def _set_data(self, data): self.set_raw_string(data) data = property(_get_data, _set_data, "The text contained within the blob object.") def _get_chunked(self): return self._chunked_text def _set_chunked(self, chunks): self._chunked_text = chunks def _serialize(self): return self._chunked_text def _deserialize(self, chunks): self._chunked_text = chunks chunked = property(_get_chunked, _set_chunked, "The text within the blob object, as chunks (not necessarily lines).") @classmethod def from_path(cls, path): blob = ShaFile.from_path(path) if not isinstance(blob, cls): raise NotBlobError(path) return blob def check(self): """Check this object for internal consistency. :raise ObjectFormatException: if the object is malformed in some way """ super(Blob, self).check() def splitlines(self): """Return list of lines in this blob. This preserves the original line endings. """ chunks = self.chunked if not chunks: return [] if len(chunks) == 1: return chunks[0].splitlines(True) remaining = None ret = [] for chunk in chunks: lines = chunk.splitlines(True) if len(lines) > 1: - ret.append((remaining or "") + lines[0]) + ret.append((remaining or b"") + lines[0]) ret.extend(lines[1:-1]) remaining = lines[-1] elif len(lines) == 1: if remaining is None: remaining = lines.pop() else: remaining += lines.pop() if remaining is not None: ret.append(remaining) return ret def _parse_message(chunks): """Parse a message with a list of fields and a body. :param chunks: the raw chunks of the tag or commit object. :return: iterator of tuples of (field, value), one per header line, in the order read from the text, possibly including duplicates. Includes a field named None for the freeform tag/commit text. """ f = BytesIO(b''.join(chunks)) k = None v = "" eof = False # Parse the headers # # Headers can contain newlines. The next line is indented with a space. # We store the latest key as 'k', and the accumulated value as 'v'. for l in f: if l.startswith(b' '): # Indented continuation of the previous line v += l[1:] else: if k is not None: # We parsed a new header, return its value yield (k, v.rstrip(b'\n')) if l == b'\n': # Empty line indicates end of headers break (k, v) = l.split(b' ', 1) else: # We reached end of file before the headers ended. We still need to # return the previous header, then we need to return a None field for # the text. eof = True if k is not None: yield (k, v.rstrip(b'\n')) yield (None, None) if not eof: # We didn't reach the end of file while parsing headers. We can return # the rest of the file as a message. 
yield (None, f.read()) f.close() class Tag(ShaFile): """A Git Tag object.""" type_name = b'tag' type_num = 4 __slots__ = ('_tag_timezone_neg_utc', '_name', '_object_sha', '_object_class', '_tag_time', '_tag_timezone', '_tagger', '_message') def __init__(self): super(Tag, self).__init__() self._tag_timezone_neg_utc = False @classmethod def from_path(cls, filename): tag = ShaFile.from_path(filename) if not isinstance(tag, cls): raise NotTagError(filename) return tag def check(self): """Check this object for internal consistency. :raise ObjectFormatException: if the object is malformed in some way """ super(Tag, self).check() self._check_has_member("_object_sha", "missing object sha") self._check_has_member("_object_class", "missing object type") self._check_has_member("_name", "missing tag name") if not self._name: raise ObjectFormatException("empty tag name") check_hexsha(self._object_sha, "invalid object sha") if getattr(self, "_tagger", None): check_identity(self._tagger, "invalid tagger") last = None for field, _ in _parse_message(self._chunked_text): if field == _OBJECT_HEADER and last is not None: raise ObjectFormatException("unexpected object") elif field == _TYPE_HEADER and last != _OBJECT_HEADER: raise ObjectFormatException("unexpected type") elif field == _TAG_HEADER and last != _TYPE_HEADER: raise ObjectFormatException("unexpected tag name") elif field == _TAGGER_HEADER and last != _TAG_HEADER: raise ObjectFormatException("unexpected tagger") last = field def _serialize(self): chunks = [] chunks.append(git_line(_OBJECT_HEADER, self._object_sha)) chunks.append(git_line(_TYPE_HEADER, self._object_class.type_name)) chunks.append(git_line(_TAG_HEADER, self._name)) if self._tagger: if self._tag_time is None: chunks.append(git_line(_TAGGER_HEADER, self._tagger)) else: chunks.append(git_line( _TAGGER_HEADER, self._tagger, str(self._tag_time).encode('ascii'), format_timezone(self._tag_timezone, self._tag_timezone_neg_utc))) if self._message is not None: chunks.append(b'\n') # To close headers chunks.append(self._message) return chunks def _deserialize(self, chunks): """Grab the metadata attached to the tag""" self._tagger = None for field, value in _parse_message(chunks): if field == _OBJECT_HEADER: self._object_sha = value elif field == _TYPE_HEADER: obj_class = object_class(value) if not obj_class: raise ObjectFormatException("Not a known type: %s" % value) self._object_class = obj_class elif field == _TAG_HEADER: self._name = value elif field == _TAGGER_HEADER: try: sep = value.index(b'> ') except ValueError: self._tagger = value self._tag_time = None self._tag_timezone = None self._tag_timezone_neg_utc = False else: self._tagger = value[0:sep+1] try: (timetext, timezonetext) = value[sep+2:].rsplit(b' ', 1) self._tag_time = int(timetext) self._tag_timezone, self._tag_timezone_neg_utc = \ parse_timezone(timezonetext) except ValueError as e: raise ObjectFormatException(e) elif field is None: self._message = value else: raise ObjectFormatException("Unknown field %s" % field) def _get_object(self): """Get the object pointed to by this tag. :return: tuple of (object class, sha). 
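# A small sketch of the Blob.splitlines() change above (not part of the diff):
# with a chunked blob, ``remaining`` starts out as None, so the filler must be
# b"" rather than "" -- on Python 3, concatenating str and bytes raises
# TypeError.
from dulwich.objects import Blob

blob = Blob()
blob.chunked = [b'first\nsecond\nthi', b'rd\n']
# Lines are reassembled across chunk boundaries with their endings preserved.
assert blob.splitlines() == [b'first\n', b'second\n', b'third\n']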
""" return (self._object_class, self._object_sha) def _set_object(self, value): (self._object_class, self._object_sha) = value self._needs_serialization = True object = property(_get_object, _set_object) name = serializable_property("name", "The name of this tag") tagger = serializable_property("tagger", "Returns the name of the person who created this tag") tag_time = serializable_property("tag_time", "The creation timestamp of the tag. As the number of seconds " "since the epoch") tag_timezone = serializable_property("tag_timezone", "The timezone that tag_time is in.") message = serializable_property( "message", "The message attached to this tag") class TreeEntry(namedtuple('TreeEntry', ['path', 'mode', 'sha'])): """Named tuple encapsulating a single tree entry.""" def in_path(self, path): """Return a copy of this entry with the given path prepended.""" if not isinstance(self.path, bytes): raise TypeError('Expected bytes for path, got %r' % path) return TreeEntry(posixpath.join(path, self.path), self.mode, self.sha) def parse_tree(text, strict=False): """Parse a tree text. :param text: Serialized text to parse :return: iterator of tuples of (name, mode, sha) :raise ObjectFormatException: if the object was malformed in some way """ count = 0 l = len(text) while count < l: mode_end = text.index(b' ', count) mode_text = text[count:mode_end] if strict and mode_text.startswith(b'0'): raise ObjectFormatException("Invalid mode '%s'" % mode_text) try: mode = int(mode_text, 8) except ValueError: raise ObjectFormatException("Invalid mode '%s'" % mode_text) name_end = text.index(b'\0', mode_end) name = text[mode_end+1:name_end] count = name_end+21 sha = text[name_end+1:count] if len(sha) != 20: raise ObjectFormatException("Sha has invalid length") hexsha = sha_to_hex(sha) yield (name, mode, hexsha) def serialize_tree(items): """Serialize the items in a tree to a text. :param items: Sorted iterable over (name, mode, sha) tuples :return: Serialized tree text as chunks """ for name, mode, hexsha in items: yield ("%04o" % mode).encode('ascii') + b' ' + name + b'\0' + hex_to_sha(hexsha) def sorted_tree_items(entries, name_order): """Iterate over a tree entries dictionary. :param name_order: If True, iterate entries in order of their name. If False, iterate entries in tree order, that is, treat subtree entries as having '/' appended. :param entries: Dictionary mapping names to (mode, sha) tuples :return: Iterator over (name, mode, hexsha) """ key_func = name_order and key_entry_name_order or key_entry for name, entry in sorted(entries.items(), key=key_func): mode, hexsha = entry # Stricter type checks than normal to mirror checks in the C version. mode = int(mode) if not isinstance(hexsha, bytes): raise TypeError('Expected bytes for SHA, got %r' % hexsha) yield TreeEntry(name, mode, hexsha) def key_entry(entry): """Sort key for tree entry. :param entry: (name, value) tuplee """ (name, value) = entry if stat.S_ISDIR(value[0]): name += b'/' return name def key_entry_name_order(entry): """Sort key for tree entry in name order.""" return entry[0] def pretty_format_tree_entry(name, mode, hexsha, encoding="utf-8"): """Pretty format tree entry. 
:param name: Name of the directory entry :param mode: Mode of entry :param hexsha: Hexsha of the referenced object :return: string describing the tree entry """ if mode & stat.S_IFDIR: kind = "tree" else: kind = "blob" return "%04o %s %s\t%s\n" % ( mode, kind, hexsha.decode('ascii'), name.decode(encoding, 'replace')) class Tree(ShaFile): """A Git tree object""" type_name = b'tree' type_num = 2 __slots__ = ('_entries') def __init__(self): super(Tree, self).__init__() self._entries = {} @classmethod def from_path(cls, filename): tree = ShaFile.from_path(filename) if not isinstance(tree, cls): raise NotTreeError(filename) return tree def __contains__(self, name): return name in self._entries def __getitem__(self, name): return self._entries[name] def __setitem__(self, name, value): """Set a tree entry by name. :param name: The name of the entry, as a string. :param value: A tuple of (mode, hexsha), where mode is the mode of the entry as an integral type and hexsha is the hex SHA of the entry as a string. """ mode, hexsha = value self._entries[name] = (mode, hexsha) self._needs_serialization = True def __delitem__(self, name): del self._entries[name] self._needs_serialization = True def __len__(self): return len(self._entries) def __iter__(self): return iter(self._entries) def add(self, name, mode, hexsha): """Add an entry to the tree. :param mode: The mode of the entry as an integral type. Not all possible modes are supported by git; see check() for details. :param name: The name of the entry, as a string. :param hexsha: The hex SHA of the entry as a string. """ if isinstance(name, int) and isinstance(mode, bytes): (name, mode) = (mode, name) warnings.warn( "Please use Tree.add(name, mode, hexsha)", category=DeprecationWarning, stacklevel=2) self._entries[name] = mode, hexsha self._needs_serialization = True def iteritems(self, name_order=False): """Iterate over entries. :param name_order: If True, iterate in name order instead of tree order. :return: Iterator over (name, mode, sha) tuples """ return sorted_tree_items(self._entries, name_order) def items(self): """Return the sorted entries in this tree. :return: List with (name, mode, sha) tuples """ return list(self.iteritems()) def _deserialize(self, chunks): """Grab the entries in the tree""" try: parsed_entries = parse_tree(b''.join(chunks)) except ValueError as e: raise ObjectFormatException(e) # TODO: list comprehension is for efficiency in the common (small) # case; if memory efficiency in the large case is a concern, use a genexp. self._entries = dict([(n, (m, s)) for n, m, s in parsed_entries]) def check(self): """Check this object for internal consistency. 
:raise ObjectFormatException: if the object is malformed in some way """ super(Tree, self).check() last = None allowed_modes = (stat.S_IFREG | 0o755, stat.S_IFREG | 0o644, stat.S_IFLNK, stat.S_IFDIR, S_IFGITLINK, # TODO: optionally exclude as in git fsck --strict stat.S_IFREG | 0o664) for name, mode, sha in parse_tree(b''.join(self._chunked_text), True): check_hexsha(sha, 'invalid sha %s' % sha) if b'/' in name or name in (b'', b'.', b'..'): raise ObjectFormatException('invalid name %s' % name) if mode not in allowed_modes: raise ObjectFormatException('invalid mode %06o' % mode) entry = (name, (mode, sha)) if last: if key_entry(last) > key_entry(entry): raise ObjectFormatException('entries not sorted') if name == last[0]: raise ObjectFormatException('duplicate entry %s' % name) last = entry def _serialize(self): return list(serialize_tree(self.iteritems())) def as_pretty_string(self): text = [] for name, mode, hexsha in self.iteritems(): text.append(pretty_format_tree_entry(name, mode, hexsha)) return "".join(text) def lookup_path(self, lookup_obj, path): """Look up an object in a Git tree. :param lookup_obj: Callback for retrieving object by SHA1 :param path: Path to lookup :return: A tuple of (mode, SHA) of the resulting path. """ parts = path.split(b'/') sha = self.id mode = None for p in parts: if not p: continue obj = lookup_obj(sha) if not isinstance(obj, Tree): raise NotTreeError(sha) mode, sha = obj[p] return mode, sha def parse_timezone(text): """Parse a timezone text fragment (e.g. '+0100'). :param text: Text to parse. :return: Tuple with timezone as seconds difference to UTC and a boolean indicating whether this was a UTC timezone prefixed with a negative sign (-0000). """ # cgit parses the first character as the sign, and the rest # as an integer (using strtol), which could also be negative. # We do the same for compatibility. See #697828. if not text[0] in b'+-': raise ValueError("Timezone must start with + or - (%(text)s)" % vars()) sign = text[:1] offset = int(text[1:]) if sign == b'-': offset = -offset unnecessary_negative_timezone = (offset >= 0 and sign == b'-') signum = (offset < 0) and -1 or 1 offset = abs(offset) hours = int(offset / 100) minutes = (offset % 100) return (signum * (hours * 3600 + minutes * 60), unnecessary_negative_timezone) def format_timezone(offset, unnecessary_negative_timezone=False): """Format a timezone for Git serialization. :param offset: Timezone offset as seconds difference to UTC :param unnecessary_negative_timezone: Whether to use a minus sign for UTC or positive timezones (-0000 and --700 rather than +0000 / +0700). """ if offset % 60 != 0: raise ValueError("Unable to handle non-minute offset.") if offset < 0 or unnecessary_negative_timezone: sign = '-' offset = -offset else: sign = '+' return ('%c%02d%02d' % (sign, offset / 3600, (offset / 60) % 60)).encode('ascii') def parse_commit(chunks): """Parse a commit object from chunks. 
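# A usage sketch for parse_timezone()/format_timezone() above (not part of
# the diff): offsets are seconds east of UTC, and the boolean records the
# unusual "-0000" spelling so it can be reproduced on serialization.
from dulwich.objects import format_timezone, parse_timezone

offset, neg_utc = parse_timezone(b'+0530')
assert (offset, neg_utc) == (5 * 3600 + 30 * 60, False)
assert format_timezone(offset) == b'+0530'

offset, neg_utc = parse_timezone(b'-0000')
assert (offset, neg_utc) == (0, True)
assert format_timezone(offset, neg_utc) == b'-0000'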
:param chunks: Chunks to parse :return: Tuple of (tree, parents, author_info, commit_info, encoding, mergetag, gpgsig, message, extra) """ parents = [] extra = [] tree = None author_info = (None, None, (None, None)) commit_info = (None, None, (None, None)) encoding = None mergetag = [] message = None gpgsig = None for field, value in _parse_message(chunks): # TODO(jelmer): Enforce ordering if field == _TREE_HEADER: tree = value elif field == _PARENT_HEADER: parents.append(value) elif field == _AUTHOR_HEADER: author, timetext, timezonetext = value.rsplit(b' ', 2) author_time = int(timetext) author_info = (author, author_time, parse_timezone(timezonetext)) elif field == _COMMITTER_HEADER: committer, timetext, timezonetext = value.rsplit(b' ', 2) commit_time = int(timetext) commit_info = (committer, commit_time, parse_timezone(timezonetext)) elif field == _ENCODING_HEADER: encoding = value elif field == _MERGETAG_HEADER: mergetag.append(Tag.from_string(value + b'\n')) elif field == _GPGSIG_HEADER: gpgsig = value elif field is None: message = value else: extra.append((field, value)) return (tree, parents, author_info, commit_info, encoding, mergetag, gpgsig, message, extra) class Commit(ShaFile): """A git commit object""" type_name = b'commit' type_num = 1 __slots__ = ('_parents', '_encoding', '_extra', '_author_timezone_neg_utc', '_commit_timezone_neg_utc', '_commit_time', '_author_time', '_author_timezone', '_commit_timezone', '_author', '_committer', '_parents', '_extra', '_encoding', '_tree', '_message', '_mergetag', '_gpgsig') def __init__(self): super(Commit, self).__init__() self._parents = [] self._encoding = None self._mergetag = [] self._gpgsig = None self._extra = [] self._author_timezone_neg_utc = False self._commit_timezone_neg_utc = False @classmethod def from_path(cls, path): commit = ShaFile.from_path(path) if not isinstance(commit, cls): raise NotCommitError(path) return commit def _deserialize(self, chunks): (self._tree, self._parents, author_info, commit_info, self._encoding, self._mergetag, self._gpgsig, self._message, self._extra) = ( parse_commit(chunks)) (self._author, self._author_time, (self._author_timezone, self._author_timezone_neg_utc)) = author_info (self._committer, self._commit_time, (self._commit_timezone, self._commit_timezone_neg_utc)) = commit_info def check(self): """Check this object for internal consistency. 
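# A sketch of building a Commit with the fields parse_commit() handles above
# (not part of the diff): header values are bytes, times are seconds since
# the epoch, timezones are offsets in seconds. Names and content are examples.
from dulwich.objects import Blob, Commit, Tree

blob = Blob.from_string(b'content\n')
tree = Tree()
tree.add(b'file.txt', 0o100644, blob.id)

commit = Commit()
commit.tree = tree.id
commit.author = commit.committer = b'Example Author <author@example.com>'
commit.author_time = commit.commit_time = 1420070400
commit.author_timezone = commit.commit_timezone = 0
commit.message = b'Initial commit'
commit.check()  # raises ObjectFormatException if a header is malformed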
:raise ObjectFormatException: if the object is malformed in some way """ super(Commit, self).check() self._check_has_member("_tree", "missing tree") self._check_has_member("_author", "missing author") self._check_has_member("_committer", "missing committer") # times are currently checked when set for parent in self._parents: check_hexsha(parent, "invalid parent sha") check_hexsha(self._tree, "invalid tree sha") check_identity(self._author, "invalid author") check_identity(self._committer, "invalid committer") last = None for field, _ in _parse_message(self._chunked_text): if field == _TREE_HEADER and last is not None: raise ObjectFormatException("unexpected tree") elif field == _PARENT_HEADER and last not in (_PARENT_HEADER, _TREE_HEADER): raise ObjectFormatException("unexpected parent") elif field == _AUTHOR_HEADER and last not in (_TREE_HEADER, _PARENT_HEADER): raise ObjectFormatException("unexpected author") elif field == _COMMITTER_HEADER and last != _AUTHOR_HEADER: raise ObjectFormatException("unexpected committer") elif field == _ENCODING_HEADER and last != _COMMITTER_HEADER: raise ObjectFormatException("unexpected encoding") last = field # TODO: optionally check for duplicate parents def _serialize(self): chunks = [] tree_bytes = self._tree.id if isinstance(self._tree, Tree) else self._tree chunks.append(git_line(_TREE_HEADER, tree_bytes)) for p in self._parents: chunks.append(git_line(_PARENT_HEADER, p)) chunks.append(git_line( _AUTHOR_HEADER, self._author, str(self._author_time).encode('ascii'), format_timezone(self._author_timezone, self._author_timezone_neg_utc))) chunks.append(git_line( _COMMITTER_HEADER, self._committer, str(self._commit_time).encode('ascii'), format_timezone(self._commit_timezone, self._commit_timezone_neg_utc))) if self.encoding: chunks.append(git_line(_ENCODING_HEADER, self.encoding)) for mergetag in self.mergetag: mergetag_chunks = mergetag.as_raw_string().split(b'\n') chunks.append(git_line(_MERGETAG_HEADER, mergetag_chunks[0])) # Embedded extra header needs leading space for chunk in mergetag_chunks[1:]: chunks.append(b' ' + chunk + b'\n') # No trailing empty line chunks[-1] = chunks[-1].rstrip(b' \n') for k, v in self.extra: if b'\n' in k or b'\n' in v: raise AssertionError( "newline in extra data: %r -> %r" % (k, v)) chunks.append(git_line(k, v)) if self.gpgsig: sig_chunks = self.gpgsig.split(b'\n') chunks.append(git_line(_GPGSIG_HEADER, sig_chunks[0])) for chunk in sig_chunks[1:]: chunks.append(git_line(b'', chunk)) chunks.append(b'\n') # There must be a new line after the headers chunks.append(self._message) return chunks tree = serializable_property( "tree", "Tree that is the state of this commit") def _get_parents(self): """Return a list of parents of this commit.""" return self._parents def _set_parents(self, value): """Set a list of parents of this commit.""" self._needs_serialization = True self._parents = value parents = property(_get_parents, _set_parents, doc="Parents of this commit, by their SHA1.") def _get_extra(self): """Return extra settings of this commit.""" return self._extra extra = property(_get_extra, doc="Extra header fields not understood (presumably added in a " "newer version of git). Kept verbatim so the object can " "be correctly reserialized. 
For private commit metadata, use " "pseudo-headers in Commit.message, rather than this field.") author = serializable_property("author", "The name of the author of the commit") committer = serializable_property("committer", "The name of the committer of the commit") message = serializable_property( "message", "The commit message") commit_time = serializable_property("commit_time", "The timestamp of the commit. As the number of seconds since the epoch.") commit_timezone = serializable_property("commit_timezone", "The zone the commit time is in") author_time = serializable_property("author_time", "The timestamp the commit was written. As the number of " "seconds since the epoch.") author_timezone = serializable_property( "author_timezone", "Returns the zone the author time is in.") encoding = serializable_property( "encoding", "Encoding of the commit message.") mergetag = serializable_property( "mergetag", "Associated signed tag.") gpgsig = serializable_property( "gpgsig", "GPG Signature.") OBJECT_CLASSES = ( Commit, Tree, Blob, Tag, ) _TYPE_MAP = {} for cls in OBJECT_CLASSES: _TYPE_MAP[cls.type_name] = cls _TYPE_MAP[cls.type_num] = cls # Hold on to the pure-python implementations for testing _parse_tree_py = parse_tree _sorted_tree_items_py = sorted_tree_items try: # Try to import C versions from dulwich._objects import parse_tree, sorted_tree_items except ImportError: pass diff --git a/dulwich/porcelain.py b/dulwich/porcelain.py index fb3a5fb8..a290bfdf 100644 --- a/dulwich/porcelain.py +++ b/dulwich/porcelain.py @@ -1,965 +1,965 @@ # porcelain.py -- Porcelain-like layer on top of Dulwich # Copyright (C) 2013 Jelmer Vernooij # # Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU # General Public License as public by the Free Software Foundation; version 2.0 # or (at your option) any later version. You can redistribute it and/or # modify it under the terms of either of these two licenses. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # You should have received a copy of the licenses; if not, see # for a copy of the GNU General Public License # and for a copy of the Apache # License, Version 2.0. # """Simple wrapper that provides porcelain-like functions on top of Dulwich. Currently implemented: * archive * add * branch{_create,_delete,_list} * clone * commit * commit-tree * daemon * diff-tree * fetch * init * ls-remote * ls-tree * pull * push * rm * receive-pack * reset * rev-list * tag{_create,_delete,_list} * upload-pack * update-server-info * status * symbolic-ref These functions are meant to behave similarly to the git subcommands. Differences in behaviour are considered bugs. 
""" __docformat__ = 'restructuredText' from collections import namedtuple from contextlib import ( closing, contextmanager, ) import os import posixpath import stat import sys import time from dulwich.archive import ( tar_stream, ) from dulwich.client import ( get_transport_and_path, ) from dulwich.diff_tree import ( CHANGE_ADD, CHANGE_DELETE, CHANGE_MODIFY, CHANGE_RENAME, CHANGE_COPY, RENAME_CHANGE_TYPES, ) from dulwich.errors import ( SendPackError, UpdateRefsError, ) from dulwich.index import get_unstaged_changes from dulwich.objects import ( Commit, Tag, format_timezone, parse_timezone, pretty_format_tree_entry, ) from dulwich.objectspec import ( parse_object, parse_reftuples, ) from dulwich.pack import ( write_pack_index, write_pack_objects, ) from dulwich.patch import write_tree_diff from dulwich.protocol import ( Protocol, ZERO_SHA, ) from dulwich.repo import (BaseRepo, Repo) from dulwich.server import ( FileSystemBackend, TCPGitServer, ReceivePackHandler, UploadPackHandler, update_server_info as server_update_server_info, ) # Module level tuple definition for status output GitStatus = namedtuple('GitStatus', 'staged unstaged untracked') default_bytes_out_stream = getattr(sys.stdout, 'buffer', sys.stdout) default_bytes_err_stream = getattr(sys.stderr, 'buffer', sys.stderr) DEFAULT_ENCODING = 'utf-8' def encode_path(path, default_encoding=DEFAULT_ENCODING): """Encode a path as bytestring.""" if not isinstance(path, bytes): path = path.encode(default_encoding) return path def open_repo(path_or_repo): """Open an argument that can be a repository or a path for a repository.""" if isinstance(path_or_repo, BaseRepo): return path_or_repo return Repo(path_or_repo) @contextmanager def _noop_context_manager(obj): """Context manager that has the same api as closing but does nothing.""" yield obj def open_repo_closing(path_or_repo): """Open an argument that can be a repository or a path for a repository. returns a context manager that will close the repo on exit if the argument is a path, else does nothing if the argument is a repo. """ if isinstance(path_or_repo, BaseRepo): return _noop_context_manager(path_or_repo) return closing(Repo(path_or_repo)) def archive(repo, committish=None, outstream=default_bytes_out_stream, errstream=default_bytes_err_stream): """Create an archive. :param repo: Path of repository for which to generate an archive. :param committish: Commit SHA1 or ref to use :param outstream: Output stream (defaults to stdout) :param errstream: Error stream (defaults to stderr) """ if committish is None: committish = "HEAD" with open_repo_closing(repo) as repo_obj: c = repo_obj[committish] tree = c.tree for chunk in tar_stream(repo_obj.object_store, repo_obj.object_store[c.tree], c.commit_time): outstream.write(chunk) def update_server_info(repo="."): """Update server info files for a repository. :param repo: path to the repository """ with open_repo_closing(repo) as r: server_update_server_info(r) def symbolic_ref(repo, ref_name, force=False): """Set git symbolic ref into HEAD. :param repo: path to the repository :param ref_name: short name of the new ref :param force: force settings without checking if it exists in refs/heads """ with open_repo_closing(repo) as repo_obj: ref_path = b'refs/heads/' + ref_name if not force and ref_path not in repo_obj.refs.keys(): raise ValueError('fatal: ref `%s` is not a ref' % ref_name) repo_obj.refs.set_symbolic_ref(b'HEAD', ref_path) def commit(repo=".", message=None, author=None, committer=None): """Create a new commit. 
:param repo: Path to repository :param message: Optional commit message :param author: Optional author name and email :param committer: Optional committer name and email :return: SHA1 of the new commit """ # FIXME: Support --all argument # FIXME: Support --signoff argument with open_repo_closing(repo) as r: return r.do_commit(message=message, author=author, committer=committer) def commit_tree(repo, tree, message=None, author=None, committer=None): """Create a new commit object. :param repo: Path to repository :param tree: An existing tree object :param author: Optional author name and email :param committer: Optional committer name and email """ with open_repo_closing(repo) as r: return r.do_commit(message=message, tree=tree, committer=committer, author=author) def init(path=".", bare=False): """Create a new git repository. :param path: Path to repository. :param bare: Whether to create a bare repository. :return: A Repo instance """ if not os.path.exists(path): os.mkdir(path) if bare: return Repo.init_bare(path) else: return Repo.init(path) def clone(source, target=None, bare=False, checkout=None, errstream=default_bytes_err_stream, outstream=None, origin=b"origin"): """Clone a local or remote git repository. :param source: Path or URL for source repository :param target: Path to target repository (optional) :param bare: Whether or not to create a bare repository :param errstream: Optional stream to write progress to :param outstream: Optional stream to write progress to (deprecated) :return: The new repository """ if outstream is not None: import warnings warnings.warn("outstream= has been deprecated in favour of errstream=.", DeprecationWarning, stacklevel=3) errstream = outstream if checkout is None: checkout = (not bare) if checkout and bare: raise ValueError("checkout and bare are incompatible") client, host_path = get_transport_and_path(source) if target is None: target = host_path.split("/")[-1] if not os.path.exists(target): os.mkdir(target) if bare: r = Repo.init_bare(target) else: r = Repo.init(target) try: remote_refs = client.fetch(host_path, r, determine_wants=r.object_store.determine_wants_all, progress=errstream.write) r.refs.import_refs( b'refs/remotes/' + origin, {n[len(b'refs/heads/'):]: v for (n, v) in remote_refs.items() if n.startswith(b'refs/heads/')}) r.refs.import_refs( b'refs/tags', {n[len(b'refs/tags/'):]: v for (n, v) in remote_refs.items() if n.startswith(b'refs/tags/')}) r[b"HEAD"] = remote_refs[b"HEAD"] if checkout: errstream.write(b'Checking out HEAD\n') r.reset_index() except: r.close() raise return r def add(repo=".", paths=None): """Add files to the staging area. :param repo: Repository for the files :param paths: Paths to add. No value passed stages all modified files. """ # FIXME: Support patterns, directories. with open_repo_closing(repo) as r: if not paths: # If nothing is specified, add all non-ignored files. paths = [] for dirpath, dirnames, filenames in os.walk(r.path): # Skip .git and below. if '.git' in dirnames: dirnames.remove('.git') for filename in filenames: paths.append(os.path.join(dirpath[len(r.path)+1:], filename)) r.stage(paths) def rm(repo=".", paths=None): """Remove files from the staging area. 
    :param repo: Repository for the files
    :param paths: Paths to remove
    """
    with open_repo_closing(repo) as r:
        index = r.open_index()
        for p in paths:
            del index[p.encode(sys.getfilesystemencoding())]
        index.write()


def commit_decode(commit, contents, default_encoding=DEFAULT_ENCODING):
    if commit.encoding is not None:
        return contents.decode(commit.encoding, "replace")
    return contents.decode(default_encoding, "replace")


def print_commit(commit, decode, outstream=sys.stdout):
    """Write a human-readable commit log entry.

    :param commit: A `Commit` object
    :param outstream: A stream file to write to
    """
    outstream.write("-" * 50 + "\n")
    outstream.write("commit: " + commit.id.decode('ascii') + "\n")
    if len(commit.parents) > 1:
        outstream.write("merge: " +
            "...".join([c.decode('ascii') for c in commit.parents[1:]]) + "\n")
    outstream.write("Author: " + decode(commit.author) + "\n")
    if commit.author != commit.committer:
        outstream.write("Committer: " + decode(commit.committer) + "\n")

    time_tuple = time.gmtime(commit.author_time + commit.author_timezone)
    time_str = time.strftime("%a %b %d %Y %H:%M:%S", time_tuple)
-    timezone_str = format_timezone(commit.author_timezone)
+    timezone_str = format_timezone(commit.author_timezone).decode('ascii')
    outstream.write("Date: " + time_str + " " + timezone_str + "\n")
    outstream.write("\n")
    outstream.write(decode(commit.message) + "\n")
    outstream.write("\n")


def print_tag(tag, decode, outstream=sys.stdout):
    """Write a human-readable tag.

    :param tag: A `Tag` object
    :param decode: Function for decoding bytes to unicode string
    :param outstream: A stream to write to
    """
    outstream.write("Tagger: " + decode(tag.tagger) + "\n")
    outstream.write("Date: " + decode(tag.tag_time) + "\n")
    outstream.write("\n")
    outstream.write(decode(tag.message) + "\n")
    outstream.write("\n")


def show_blob(repo, blob, decode, outstream=sys.stdout):
    """Write a blob to a stream.

    :param repo: A `Repo` object
    :param blob: A `Blob` object
    :param decode: Function for decoding bytes to unicode string
    :param outstream: A stream file to write to
    """
    outstream.write(decode(blob.data))


def show_commit(repo, commit, decode, outstream=sys.stdout):
    """Show a commit to a stream.

    :param repo: A `Repo` object
    :param commit: A `Commit` object
    :param decode: Function for decoding bytes to unicode string
    :param outstream: Stream to write to
    """
    print_commit(commit, decode=decode, outstream=outstream)
    parent_commit = repo[commit.parents[0]]
    write_tree_diff(outstream, repo.object_store, parent_commit.tree, commit.tree)


def show_tree(repo, tree, decode, outstream=sys.stdout):
    """Print a tree to a stream.

    :param repo: A `Repo` object
    :param tree: A `Tree` object
    :param decode: Function for decoding bytes to unicode string
    :param outstream: Stream to write to
    """
    for n in tree:
        outstream.write(decode(n) + "\n")


def show_tag(repo, tag, decode, outstream=sys.stdout):
    """Print a tag to a stream.

    :param repo: A `Repo` object
    :param tag: A `Tag` object
    :param decode: Function for decoding bytes to unicode string
    :param outstream: Stream to write to
    """
    print_tag(tag, decode, outstream)
    show_object(repo, repo[tag.object[1]], outstream)


def show_object(repo, obj, decode, outstream):
    return {
        b"tree": show_tree,
        b"blob": show_blob,
        b"commit": show_commit,
        b"tag": show_tag,
        }[obj.type_name](repo, obj, decode, outstream)


def print_name_status(changes):
    """Print a simple status summary, listing changed files.
""" for change in changes: if not change: continue if type(change) is list: change = change[0] if change.type == CHANGE_ADD: path1 = change.new.path path2 = '' kind = 'A' elif change.type == CHANGE_DELETE: path1 = change.old.path path2 = '' kind = 'D' elif change.type == CHANGE_MODIFY: path1 = change.new.path path2 = '' kind = 'M' elif change.type in RENAME_CHANGE_TYPES: path1 = change.old.path path2 = change.new.path if change.type == CHANGE_RENAME: kind = 'R' elif change.type == CHANGE_COPY: kind = 'C' yield '%-8s%-20s%-20s' % (kind, path1, path2) def log(repo=".", paths=None, outstream=sys.stdout, max_entries=None, reverse=False, name_status=False): """Write commit logs. :param repo: Path to repository :param paths: Optional set of specific paths to print entries for :param outstream: Stream to write log output to :param reverse: Reverse order in which entries are printed :param name_status: Print name status :param max_entries: Optional maximum number of entries to display """ with open_repo_closing(repo) as r: walker = r.get_walker( max_entries=max_entries, paths=paths, reverse=reverse) for entry in walker: decode = lambda x: commit_decode(entry.commit, x) print_commit(entry.commit, decode, outstream) if name_status: outstream.writelines( [l+'\n' for l in print_name_status(entry.changes())]) # TODO(jelmer): better default for encoding? def show(repo=".", objects=None, outstream=sys.stdout, default_encoding=DEFAULT_ENCODING): """Print the changes in a commit. :param repo: Path to repository :param objects: Objects to show (defaults to [HEAD]) :param outstream: Stream to write to :param default_encoding: Default encoding to use if none is set in the commit """ if objects is None: objects = ["HEAD"] if not isinstance(objects, list): objects = [objects] with open_repo_closing(repo) as r: for objectish in objects: o = parse_object(r, objectish) if isinstance(o, Commit): decode = lambda x: commit_decode(o, x, default_encoding) else: decode = lambda x: x.decode(default_encoding) show_object(r, o, decode, outstream) def diff_tree(repo, old_tree, new_tree, outstream=sys.stdout): """Compares the content and mode of blobs found via two tree objects. :param repo: Path to repository :param old_tree: Id of old tree :param new_tree: Id of new tree :param outstream: Stream to write to """ with open_repo_closing(repo) as r: write_tree_diff(outstream, r.object_store, old_tree, new_tree) def rev_list(repo, commits, outstream=sys.stdout): """Lists commit objects in reverse chronological order. 
:param repo: Path to repository :param commits: Commits over which to iterate :param outstream: Stream to write to """ with open_repo_closing(repo) as r: for entry in r.get_walker(include=[r[c].id for c in commits]): outstream.write(entry.commit.id + b"\n") def tag(*args, **kwargs): import warnings warnings.warn("tag has been deprecated in favour of tag_create.", DeprecationWarning) return tag_create(*args, **kwargs) def tag_create(repo, tag, author=None, message=None, annotated=False, objectish="HEAD", tag_time=None, tag_timezone=None): """Creates a tag in git via dulwich calls: :param repo: Path to repository :param tag: tag string :param author: tag author (optional, if annotated is set) :param message: tag message (optional) :param annotated: whether to create an annotated tag :param objectish: object the tag should point at, defaults to HEAD :param tag_time: Optional time for annotated tag :param tag_timezone: Optional timezone for annotated tag """ with open_repo_closing(repo) as r: object = parse_object(r, objectish) if annotated: # Create the tag object tag_obj = Tag() if author is None: # TODO(jelmer): Don't use repo private method. author = r._get_user_identity() tag_obj.tagger = author tag_obj.message = message tag_obj.name = tag tag_obj.object = (type(object), object.id) if tag_time is None: tag_time = int(time.time()) tag_obj.tag_time = tag_time if tag_timezone is None: # TODO(jelmer) Use current user timezone rather than UTC tag_timezone = 0 elif isinstance(tag_timezone, str): tag_timezone = parse_timezone(tag_timezone) tag_obj.tag_timezone = tag_timezone r.object_store.add_object(tag_obj) tag_id = tag_obj.id else: tag_id = object.id r.refs[b'refs/tags/' + tag] = tag_id def list_tags(*args, **kwargs): import warnings warnings.warn("list_tags has been deprecated in favour of tag_list.", DeprecationWarning) return tag_list(*args, **kwargs) def tag_list(repo, outstream=sys.stdout): """List all tags. :param repo: Path to repository :param outstream: Stream to write tags to """ with open_repo_closing(repo) as r: tags = list(r.refs.as_dict(b"refs/tags")) tags.sort() return tags def tag_delete(repo, name): """Remove a tag. :param repo: Path to repository :param name: Name of tag to remove """ with open_repo_closing(repo) as r: if isinstance(name, bytes): names = [name] elif isinstance(name, list): names = name else: raise TypeError("Unexpected tag name type %r" % name) for name in names: del r.refs[b"refs/tags/" + name] def reset(repo, mode, committish="HEAD"): """Reset current HEAD to the specified state. 
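# A short sketch of why print_commit() in this diff now decodes the timezone
# (not part of the diff): format_timezone() returns bytes, while the
# surrounding log text is str, so the value must be decoded before
# concatenation on Python 3.
from dulwich.objects import format_timezone

tz = format_timezone(2 * 3600)
assert tz == b'+0200'
# "Date: " + tz would raise TypeError; decoding first gives a printable line:
line = "Date: 2015-01-01 12:00:00 " + tz.decode('ascii')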
:param repo: Path to repository :param mode: Mode ("hard", "soft", "mixed") """ if mode != "hard": raise ValueError("hard is the only mode currently supported") with open_repo_closing(repo) as r: tree = r[committish].tree r.reset_index() def push(repo, remote_location, refspecs=None, outstream=default_bytes_out_stream, errstream=default_bytes_err_stream): """Remote push with dulwich via dulwich.client :param repo: Path to repository :param remote_location: Location of the remote :param refspecs: relative path to the refs to push to remote :param outstream: A stream file to write output :param errstream: A stream file to write errors """ # Open the repo with open_repo_closing(repo) as r: # Get the client and path client, path = get_transport_and_path(remote_location) selected_refs = [] def update_refs(refs): selected_refs.extend(parse_reftuples(r.refs, refs, refspecs)) new_refs = {} # TODO: Handle selected_refs == {None: None} for (lh, rh, force) in selected_refs: if lh is None: new_refs[rh] = ZERO_SHA else: new_refs[rh] = r.refs[lh] return new_refs err_encoding = getattr(errstream, 'encoding', None) or DEFAULT_ENCODING remote_location_bytes = client.get_url(path).encode(err_encoding) try: client.send_pack(path, update_refs, r.object_store.generate_pack_contents, progress=errstream.write) errstream.write(b"Push to " + remote_location_bytes + b" successful.\n") except (UpdateRefsError, SendPackError) as e: errstream.write(b"Push to " + remote_location_bytes + b" failed -> " + e.message.encode(err_encoding) + b"\n") def pull(repo, remote_location, refspecs=None, outstream=default_bytes_out_stream, errstream=default_bytes_err_stream): """Pull from remote via dulwich.client :param repo: Path to repository :param remote_location: Location of the remote :param refspec: refspecs to fetch :param outstream: A stream file to write to output :param errstream: A stream file to write to errors """ # Open the repo with open_repo_closing(repo) as r: if refspecs is None: refspecs = [b"HEAD"] selected_refs = [] def determine_wants(remote_refs): selected_refs.extend(parse_reftuples(remote_refs, r.refs, refspecs)) return [remote_refs[lh] for (lh, rh, force) in selected_refs] client, path = get_transport_and_path(remote_location) remote_refs = client.fetch(path, r, progress=errstream.write, determine_wants=determine_wants) for (lh, rh, force) in selected_refs: r.refs[rh] = remote_refs[lh] if selected_refs: r[b'HEAD'] = remote_refs[selected_refs[0][1]] # Perform 'git checkout .' - syncs staged changes tree = r[b"HEAD"].tree r.reset_index() def status(repo="."): """Returns staged, unstaged, and untracked changes relative to the HEAD. :param repo: Path to repository or repository object :return: GitStatus tuple, staged - list of staged paths (diff index/HEAD) unstaged - list of unstaged paths (diff index/working-tree) untracked - list of untracked, un-ignored & non-.git paths """ with open_repo_closing(repo) as r: # 1. Get status of staged tracked_changes = get_tree_changes(r) # 2. Get status of unstaged unstaged_changes = list(get_unstaged_changes(r.open_index(), r.path)) # TODO - Status of untracked - add untracked changes, need gitignore. untracked_changes = [] return GitStatus(tracked_changes, unstaged_changes, untracked_changes) def get_tree_changes(repo): """Return add/delete/modify changes to tree by comparing index to HEAD. 
:param repo: repo path or object :return: dict with lists for each type of change """ with open_repo_closing(repo) as r: index = r.open_index() # Compares the Index to the HEAD & determines changes # Iterate through the changes and report add/delete/modify # TODO: call out to dulwich.diff_tree somehow. tracked_changes = { 'add': [], 'delete': [], 'modify': [], } try: tree_id = r[b'HEAD'].tree except KeyError: tree_id = None for change in index.changes_from_tree(r.object_store, tree_id): if not change[0][0]: tracked_changes['add'].append(change[0][1]) elif not change[0][1]: tracked_changes['delete'].append(change[0][0]) elif change[0][0] == change[0][1]: tracked_changes['modify'].append(change[0][0]) else: raise AssertionError('git mv ops not yet supported') return tracked_changes def daemon(path=".", address=None, port=None): """Run a daemon serving Git requests over TCP/IP. :param path: Path to the directory to serve. :param address: Optional address to listen on (defaults to ::) :param port: Optional port to listen on (defaults to TCP_GIT_PORT) """ # TODO(jelmer): Support git-daemon-export-ok and --export-all. backend = FileSystemBackend(path) server = TCPGitServer(backend, address, port) server.serve_forever() def web_daemon(path=".", address=None, port=None): """Run a daemon serving Git requests over HTTP. :param path: Path to the directory to serve :param address: Optional address to listen on (defaults to ::) :param port: Optional port to listen on (defaults to 80) """ from dulwich.web import ( make_wsgi_chain, make_server, WSGIRequestHandlerLogger, WSGIServerLogger) backend = FileSystemBackend(path) app = make_wsgi_chain(backend) server = make_server(address, port, app, handler_class=WSGIRequestHandlerLogger, server_class=WSGIServerLogger) server.serve_forever() def upload_pack(path=".", inf=None, outf=None): """Upload a pack file after negotiating its contents using smart protocol. :param path: Path to the repository :param inf: Input stream to communicate with client :param outf: Output stream to communicate with client """ if outf is None: outf = getattr(sys.stdout, 'buffer', sys.stdout) if inf is None: inf = getattr(sys.stdin, 'buffer', sys.stdin) backend = FileSystemBackend(path) def send_fn(data): outf.write(data) outf.flush() proto = Protocol(inf.read, send_fn) handler = UploadPackHandler(backend, [path], proto) # FIXME: Catch exceptions and write a single-line summary to outf. handler.handle() return 0 def receive_pack(path=".", inf=None, outf=None): """Receive a pack file after negotiating its contents using smart protocol. :param path: Path to the repository :param inf: Input stream to communicate with client :param outf: Output stream to communicate with client """ if outf is None: outf = getattr(sys.stdout, 'buffer', sys.stdout) if inf is None: inf = getattr(sys.stdin, 'buffer', sys.stdin) backend = FileSystemBackend(path) def send_fn(data): outf.write(data) outf.flush() proto = Protocol(inf.read, send_fn) handler = ReceivePackHandler(backend, [path], proto) # FIXME: Catch exceptions and write a single-line summary to outf. handler.handle() return 0 def branch_delete(repo, name): """Delete a branch. 
def branch_delete(repo, name):
    """Delete a branch.

    :param repo: Path to the repository
    :param name: Name of the branch
    """
    with open_repo_closing(repo) as r:
        if isinstance(name, bytes):
            names = [name]
        elif isinstance(name, list):
            names = name
        else:
            raise TypeError("Unexpected branch name type %r" % name)
        for name in names:
            del r.refs[b"refs/heads/" + name]


def branch_create(repo, name, objectish=None, force=False):
    """Create a branch.

    :param repo: Path to the repository
    :param name: Name of the new branch
    :param objectish: Target object to point new branch at (defaults to HEAD)
    :param force: Force creation of branch, even if it already exists
    """
    with open_repo_closing(repo) as r:
        if isinstance(name, bytes):
            names = [name]
        elif isinstance(name, list):
            names = name
        else:
            raise TypeError("Unexpected branch name type %r" % name)
        if objectish is None:
            objectish = "HEAD"
        object = parse_object(r, objectish)
        refname = b"refs/heads/" + name
        if refname in r.refs and not force:
            raise KeyError("Branch with name %s already exists." % name)
        r.refs[refname] = object.id


def branch_list(repo):
    """List all branches.

    :param repo: Path to the repository
    """
    with open_repo_closing(repo) as r:
        return r.refs.keys(base=b"refs/heads/")


def fetch(repo, remote_location, outstream=sys.stdout,
          errstream=default_bytes_err_stream):
    """Fetch objects from a remote server.

    :param repo: Path to the repository
    :param remote_location: String identifying a remote server
    :param outstream: Output stream (defaults to stdout)
    :param errstream: Error stream (defaults to stderr)
    :return: Dictionary with refs on the remote
    """
    with open_repo_closing(repo) as r:
        client, path = get_transport_and_path(remote_location)
        remote_refs = client.fetch(path, r, progress=errstream.write)
        return remote_refs


def ls_remote(remote):
    client, host_path = get_transport_and_path(remote)
    return client.get_refs(encode_path(host_path))


def repack(repo):
    """Repack loose files in a repository.

    Currently this only packs loose objects.

    :param repo: Path to the repository
    """
    with open_repo_closing(repo) as r:
        r.object_store.pack_loose_objects()


def pack_objects(repo, object_ids, packf, idxf, delta_window_size=None):
    """Pack objects into a file.

    :param repo: Path to the repository
    :param object_ids: List of object ids to write
    :param packf: File-like object to write to
    :param idxf: File-like object to write to (can be None)
    """
    with open_repo_closing(repo) as r:
        entries, data_sum = write_pack_objects(
            packf,
            r.object_store.iter_shas((oid, None) for oid in object_ids),
            delta_window_size=delta_window_size)
    if idxf is not None:
        entries = [(k, v[0], v[1]) for (k, v) in entries.items()]
        entries.sort()
        write_pack_index(idxf, entries, data_sum)
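# Illustrative sketch, not part of the change above: combining the branch and
# pack helpers defined here.  The paths, branch name, file names and object id
# are hypothetical placeholders.
#
#     from dulwich import porcelain
#
#     porcelain.branch_create('/path/to/repo', b'feature')
#     print(porcelain.branch_list('/path/to/repo'))
#     with open('objs.pack', 'wb') as packf, open('objs.idx', 'wb') as idxf:
#         porcelain.pack_objects('/path/to/repo', [b'<40-hex-sha>'],
#                                packf, idxf)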
def ls_tree(repo, tree_ish=None, outstream=sys.stdout, recursive=False,
            name_only=False):
    """List contents of a tree.

    :param repo: Path to the repository
    :param tree_ish: Tree id to list
    :param outstream: Output stream (defaults to stdout)
    :param recursive: Whether to recursively list files
    :param name_only: Only print item name
    """
    def list_tree(store, treeid, base):
        for (name, mode, sha) in store[treeid].iteritems():
            if base:
                name = posixpath.join(base, name)
            if name_only:
                outstream.write(name + b"\n")
            else:
                outstream.write(pretty_format_tree_entry(name, mode, sha))
            if stat.S_ISDIR(mode):
                list_tree(store, sha, name)

    if tree_ish is None:
        tree_ish = "HEAD"
    with open_repo_closing(repo) as r:
        c = r[tree_ish]
        treeid = c.tree
        list_tree(r.object_store, treeid, "")
diff --git a/dulwich/tests/test_objects.py b/dulwich/tests/test_objects.py
index 67e6b1e6..bdb1842d 100644
--- a/dulwich/tests/test_objects.py
+++ b/dulwich/tests/test_objects.py
@@ -1,1170 +1,1170 @@
# test_objects.py -- tests for objects.py
# Copyright (C) 2007 James Westby
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as published by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# for a copy of the GNU General Public License
# and for a copy of the Apache
# License, Version 2.0.
#

"""Tests for git base objects."""

# TODO: Round-trip parse-serialize-parse and serialize-parse-serialize tests.
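# One possible shape for such a round-trip test (an illustrative sketch only;
# ExampleRoundTripTests is a hypothetical name, not a test added by this
# change):
#
#     class ExampleRoundTripTests(TestCase):
#
#         def test_blob_round_trip(self):
#             b1 = Blob.from_string(b'some data')
#             b2 = ShaFile.from_raw_string(b1.type_num, b1.as_raw_string())
#             self.assertEqual(b1, b2)
#             self.assertEqual(b1.as_raw_string(), b2.as_raw_string())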
from io import BytesIO import datetime from itertools import ( permutations, ) import os import stat import warnings from contextlib import contextmanager from dulwich.errors import ( ObjectFormatException, ) from dulwich.objects import ( Blob, Tree, Commit, ShaFile, Tag, TreeEntry, format_timezone, hex_to_sha, sha_to_hex, hex_to_filename, check_hexsha, check_identity, object_class, parse_timezone, pretty_format_tree_entry, parse_tree, _parse_tree_py, sorted_tree_items, _sorted_tree_items_py, ) from dulwich.tests import ( TestCase, ) from dulwich.tests.utils import ( make_commit, make_object, functest_builder, ext_functest_builder, ) a_sha = b'6f670c0fb53f9463760b7295fbb814e965fb20c8' b_sha = b'2969be3e8ee1c0222396a5611407e4769f14e54b' c_sha = b'954a536f7819d40e6f637f849ee187dd10066349' tree_sha = b'70c190eb48fa8bbb50ddc692a17b44cb781af7f6' tag_sha = b'71033db03a03c6a36721efcf1968dd8f8e0cf023' class TestHexToSha(TestCase): def test_simple(self): self.assertEqual(b'\xab\xcd' * 10, hex_to_sha(b'abcd' * 10)) def test_reverse(self): self.assertEqual(b'abcd' * 10, sha_to_hex(b'\xab\xcd' * 10)) class BlobReadTests(TestCase): """Test decompression of blobs""" def get_sha_file(self, cls, base, sha): dir = os.path.join(os.path.dirname(__file__), 'data', base) return cls.from_path(hex_to_filename(dir, sha)) def get_blob(self, sha): """Return the blob named sha from the test data dir""" return self.get_sha_file(Blob, 'blobs', sha) def get_tree(self, sha): return self.get_sha_file(Tree, 'trees', sha) def get_tag(self, sha): return self.get_sha_file(Tag, 'tags', sha) def commit(self, sha): return self.get_sha_file(Commit, 'commits', sha) def test_decompress_simple_blob(self): b = self.get_blob(a_sha) self.assertEqual(b.data, b'test 1\n') self.assertEqual(b.sha().hexdigest().encode('ascii'), a_sha) def test_hash(self): b = self.get_blob(a_sha) self.assertEqual(hash(b.id), hash(b)) def test_parse_empty_blob_object(self): sha = b'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391' b = self.get_blob(sha) self.assertEqual(b.data, b'') self.assertEqual(b.id, sha) self.assertEqual(b.sha().hexdigest().encode('ascii'), sha) def test_create_blob_from_string(self): string = b'test 2\n' b = Blob.from_string(string) self.assertEqual(b.data, string) self.assertEqual(b.sha().hexdigest().encode('ascii'), b_sha) def test_legacy_from_file(self): b1 = Blob.from_string(b'foo') b_raw = b1.as_legacy_object() b2 = b1.from_file(BytesIO(b_raw)) self.assertEqual(b1, b2) def test_chunks(self): string = b'test 5\n' b = Blob.from_string(string) self.assertEqual([string], b.chunked) def test_splitlines(self): for case in [ [], - ['foo\nbar\n'], - ['bl\na', 'blie'], - ['bl\na', 'blie', 'bloe\n'], - ['', 'bl\na', 'blie', 'bloe\n'], - ['', '', '', 'bla\n'], - ['', '', '', 'bla\n', ''], - ['bl', '', 'a\naaa'], - ['a\naaa', 'a'], + [b'foo\nbar\n'], + [b'bl\na', b'blie'], + [b'bl\na', b'blie', b'bloe\n'], + [b'', b'bl\na', b'blie', b'bloe\n'], + [b'', b'', b'', b'bla\n'], + [b'', b'', b'', b'bla\n', b''], + [b'bl', b'', b'a\naaa'], + [b'a\naaa', b'a'], ]: b = Blob() b.chunked = case self.assertEqual(b.data.splitlines(True), b.splitlines()) def test_set_chunks(self): b = Blob() b.chunked = [b'te', b'st', b' 5\n'] self.assertEqual(b'test 5\n', b.data) b.chunked = [b'te', b'st', b' 6\n'] self.assertEqual(b'test 6\n', b.as_raw_string()) def test_parse_legacy_blob(self): string = b'test 3\n' b = self.get_blob(c_sha) self.assertEqual(b.data, string) self.assertEqual(b.sha().hexdigest().encode('ascii'), c_sha) def test_eq(self): blob1 = 
self.get_blob(a_sha) blob2 = self.get_blob(a_sha) self.assertEqual(blob1, blob2) def test_read_tree_from_file(self): t = self.get_tree(tree_sha) self.assertEqual(t.items()[0], (b'a', 33188, a_sha)) self.assertEqual(t.items()[1], (b'b', 33188, b_sha)) def test_read_tree_from_file_parse_count(self): old_deserialize = Tree._deserialize def reset_deserialize(): Tree._deserialize = old_deserialize self.addCleanup(reset_deserialize) self.deserialize_count = 0 def counting_deserialize(*args, **kwargs): self.deserialize_count += 1 return old_deserialize(*args, **kwargs) Tree._deserialize = counting_deserialize t = self.get_tree(tree_sha) self.assertEqual(t.items()[0], (b'a', 33188, a_sha)) self.assertEqual(t.items()[1], (b'b', 33188, b_sha)) self.assertEqual(self.deserialize_count, 1) def test_read_tag_from_file(self): t = self.get_tag(tag_sha) self.assertEqual(t.object, (Commit, b'51b668fd5bf7061b7d6fa525f88803e6cfadaa51')) self.assertEqual(t.name, b'signed') self.assertEqual(t.tagger, b'Ali Sabil ') self.assertEqual(t.tag_time, 1231203091) self.assertEqual(t.message, b'This is a signed tag\n-----BEGIN PGP SIGNATURE-----\nVersion: GnuPG v1.4.9 (GNU/Linux)\n\niEYEABECAAYFAkliqx8ACgkQqSMmLy9u/kcx5ACfakZ9NnPl02tOyYP6pkBoEkU1\n5EcAn0UFgokaSvS371Ym/4W9iJj6vh3h\n=ql7y\n-----END PGP SIGNATURE-----\n') def test_read_commit_from_file(self): sha = b'60dacdc733de308bb77bb76ce0fb0f9b44c9769e' c = self.commit(sha) self.assertEqual(c.tree, tree_sha) self.assertEqual(c.parents, [b'0d89f20333fbb1d2f3a94da77f4981373d8f4310']) self.assertEqual(c.author, b'James Westby ') self.assertEqual(c.committer, b'James Westby ') self.assertEqual(c.commit_time, 1174759230) self.assertEqual(c.commit_timezone, 0) self.assertEqual(c.author_timezone, 0) self.assertEqual(c.message, b'Test commit\n') def test_read_commit_no_parents(self): sha = b'0d89f20333fbb1d2f3a94da77f4981373d8f4310' c = self.commit(sha) self.assertEqual(c.tree, b'90182552c4a85a45ec2a835cadc3451bebdfe870') self.assertEqual(c.parents, []) self.assertEqual(c.author, b'James Westby ') self.assertEqual(c.committer, b'James Westby ') self.assertEqual(c.commit_time, 1174758034) self.assertEqual(c.commit_timezone, 0) self.assertEqual(c.author_timezone, 0) self.assertEqual(c.message, b'Test commit\n') def test_read_commit_two_parents(self): sha = b'5dac377bdded4c9aeb8dff595f0faeebcc8498cc' c = self.commit(sha) self.assertEqual(c.tree, b'd80c186a03f423a81b39df39dc87fd269736ca86') self.assertEqual(c.parents, [b'ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd', b'4cffe90e0a41ad3f5190079d7c8f036bde29cbe6']) self.assertEqual(c.author, b'James Westby ') self.assertEqual(c.committer, b'James Westby ') self.assertEqual(c.commit_time, 1174773719) self.assertEqual(c.commit_timezone, 0) self.assertEqual(c.author_timezone, 0) self.assertEqual(c.message, b'Merge ../b\n') def test_stub_sha(self): sha = b'5' * 40 c = make_commit(id=sha, message=b'foo') self.assertTrue(isinstance(c, Commit)) self.assertEqual(sha, c.id) self.assertNotEqual(sha, c.sha()) class ShaFileCheckTests(TestCase): def assertCheckFails(self, cls, data): obj = cls() def do_check(): obj.set_raw_string(data) obj.check() self.assertRaises(ObjectFormatException, do_check) def assertCheckSucceeds(self, cls, data): obj = cls() obj.set_raw_string(data) self.assertEqual(None, obj.check()) small_buffer_zlib_object = ( b'\x48\x89\x15\xcc\x31\x0e\xc2\x30\x0c\x40\x51\xe6' b'\x9c\xc2\x3b\xaa\x64\x37\xc4\xc1\x12\x42\x5c\xc5' b'\x49\xac\x52\xd4\x92\xaa\x78\xe1\xf6\x94\xed\xeb' b'\x0d\xdf\x75\x02\xa2\x7c\xea\xe5\x65\xd5\x81\x8b' 
b'\x9a\x61\xba\xa0\xa9\x08\x36\xc9\x4c\x1a\xad\x88' b'\x16\xba\x46\xc4\xa8\x99\x6a\x64\xe1\xe0\xdf\xcd' b'\xa0\xf6\x75\x9d\x3d\xf8\xf1\xd0\x77\xdb\xfb\xdc' b'\x86\xa3\x87\xf1\x2f\x93\xed\x00\xb7\xc7\xd2\xab' b'\x2e\xcf\xfe\xf1\x3b\x50\xa4\x91\x53\x12\x24\x38' b'\x23\x21\x86\xf0\x03\x2f\x91\x24\x52' ) class ShaFileTests(TestCase): def test_deflated_smaller_window_buffer(self): # zlib on some systems uses smaller buffers, # resulting in a different header. # See https://github.com/libgit2/libgit2/pull/464 sf = ShaFile.from_file(BytesIO(small_buffer_zlib_object)) self.assertEqual(sf.type_name, b'tag') self.assertEqual(sf.tagger, b' <@localhost>') class CommitSerializationTests(TestCase): def make_commit(self, **kwargs): attrs = {'tree': b'd80c186a03f423a81b39df39dc87fd269736ca86', 'parents': [b'ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd', b'4cffe90e0a41ad3f5190079d7c8f036bde29cbe6'], 'author': b'James Westby ', 'committer': b'James Westby ', 'commit_time': 1174773719, 'author_time': 1174773719, 'commit_timezone': 0, 'author_timezone': 0, 'message': b'Merge ../b\n'} attrs.update(kwargs) return make_commit(**attrs) def test_encoding(self): c = self.make_commit(encoding=b'iso8859-1') self.assertTrue(b'encoding iso8859-1\n' in c.as_raw_string()) def test_short_timestamp(self): c = self.make_commit(commit_time=30) c1 = Commit() c1.set_raw_string(c.as_raw_string()) self.assertEqual(30, c1.commit_time) def test_full_tree(self): c = self.make_commit(commit_time=30) t = Tree() t.add(b'data-x', 0o644, Blob().id) c.tree = t c1 = Commit() c1.set_raw_string(c.as_raw_string()) self.assertEqual(t.id, c1.tree) self.assertEqual(c.as_raw_string(), c1.as_raw_string()) def test_raw_length(self): c = self.make_commit() self.assertEqual(len(c.as_raw_string()), c.raw_length()) def test_simple(self): c = self.make_commit() self.assertEqual(c.id, b'5dac377bdded4c9aeb8dff595f0faeebcc8498cc') self.assertEqual( b'tree d80c186a03f423a81b39df39dc87fd269736ca86\n' b'parent ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd\n' b'parent 4cffe90e0a41ad3f5190079d7c8f036bde29cbe6\n' b'author James Westby ' b'1174773719 +0000\n' b'committer James Westby ' b'1174773719 +0000\n' b'\n' b'Merge ../b\n', c.as_raw_string()) def test_timezone(self): c = self.make_commit(commit_timezone=(5 * 60)) self.assertTrue(b' +0005\n' in c.as_raw_string()) def test_neg_timezone(self): c = self.make_commit(commit_timezone=(-1 * 3600)) self.assertTrue(b' -0100\n' in c.as_raw_string()) def test_deserialize(self): c = self.make_commit() d = Commit() d._deserialize(c.as_raw_chunks()) self.assertEqual(c, d) def test_serialize_gpgsig(self): commit = self.make_commit(gpgsig=b"""-----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQIcBAABCgAGBQJULCdfAAoJEACAbyvXKaRXuKwP/RyP9PA49uAvu8tQVCC/uBa8 vi975+xvO14R8Pp8k2nps7lSxCdtCd+xVT1VRHs0wNhOZo2YCVoU1HATkPejqSeV NScTHcxnk4/+bxyfk14xvJkNp7FlQ3npmBkA+lbV0Ubr33rvtIE5jiJPyz+SgWAg xdBG2TojV0squj00GoH/euK6aX7GgZtwdtpTv44haCQdSuPGDcI4TORqR6YSqvy3 GPE+3ZqXPFFb+KILtimkxitdwB7CpwmNse2vE3rONSwTvi8nq3ZoQYNY73CQGkUy qoFU0pDtw87U3niFin1ZccDgH0bB6624sLViqrjcbYJeg815Htsu4rmzVaZADEVC XhIO4MThebusdk0AcNGjgpf3HRHk0DPMDDlIjm+Oao0cqovvF6VyYmcb0C+RmhJj dodLXMNmbqErwTk3zEkW0yZvNIYXH7m9SokPCZa4eeIM7be62X6h1mbt0/IU6Th+ v18fS0iTMP/Viug5und+05C/v04kgDo0CPphAbXwWMnkE4B6Tl9sdyUYXtvQsL7x 0+WP1gL27ANqNZiI07Kz/BhbBAQI/+2TFT7oGr0AnFPQ5jHp+3GpUf6OKuT1wT3H ND189UFuRuubxb42vZhpcXRbqJVWnbECTKVUPsGZqat3enQUB63uM4i6/RdONDZA fDeF1m4qYs+cUXKNUZ03 =X6RT -----END PGP SIGNATURE-----""") self.maxDiff = None self.assertEqual(b"""\ tree 
d80c186a03f423a81b39df39dc87fd269736ca86 parent ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd parent 4cffe90e0a41ad3f5190079d7c8f036bde29cbe6 author James Westby 1174773719 +0000 committer James Westby 1174773719 +0000 gpgsig -----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQIcBAABCgAGBQJULCdfAAoJEACAbyvXKaRXuKwP/RyP9PA49uAvu8tQVCC/uBa8 vi975+xvO14R8Pp8k2nps7lSxCdtCd+xVT1VRHs0wNhOZo2YCVoU1HATkPejqSeV NScTHcxnk4/+bxyfk14xvJkNp7FlQ3npmBkA+lbV0Ubr33rvtIE5jiJPyz+SgWAg xdBG2TojV0squj00GoH/euK6aX7GgZtwdtpTv44haCQdSuPGDcI4TORqR6YSqvy3 GPE+3ZqXPFFb+KILtimkxitdwB7CpwmNse2vE3rONSwTvi8nq3ZoQYNY73CQGkUy qoFU0pDtw87U3niFin1ZccDgH0bB6624sLViqrjcbYJeg815Htsu4rmzVaZADEVC XhIO4MThebusdk0AcNGjgpf3HRHk0DPMDDlIjm+Oao0cqovvF6VyYmcb0C+RmhJj dodLXMNmbqErwTk3zEkW0yZvNIYXH7m9SokPCZa4eeIM7be62X6h1mbt0/IU6Th+ v18fS0iTMP/Viug5und+05C/v04kgDo0CPphAbXwWMnkE4B6Tl9sdyUYXtvQsL7x 0+WP1gL27ANqNZiI07Kz/BhbBAQI/+2TFT7oGr0AnFPQ5jHp+3GpUf6OKuT1wT3H ND189UFuRuubxb42vZhpcXRbqJVWnbECTKVUPsGZqat3enQUB63uM4i6/RdONDZA fDeF1m4qYs+cUXKNUZ03 =X6RT -----END PGP SIGNATURE----- Merge ../b """, commit.as_raw_string()) def test_serialize_mergetag(self): tag = make_object( Tag, object=(Commit, b'a38d6181ff27824c79fc7df825164a212eff6a3f'), object_type_name=b'commit', name=b'v2.6.22-rc7', tag_time=1183319674, tag_timezone=0, tagger=b'Linus Torvalds ', message=default_message) commit = self.make_commit(mergetag=[tag]) self.assertEqual(b"""tree d80c186a03f423a81b39df39dc87fd269736ca86 parent ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd parent 4cffe90e0a41ad3f5190079d7c8f036bde29cbe6 author James Westby 1174773719 +0000 committer James Westby 1174773719 +0000 mergetag object a38d6181ff27824c79fc7df825164a212eff6a3f type commit tag v2.6.22-rc7 tagger Linus Torvalds 1183319674 +0000 Linux 2.6.22-rc7 -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.7 (GNU/Linux) iD8DBQBGiAaAF3YsRnbiHLsRAitMAKCiLboJkQECM/jpYsY3WPfvUgLXkACgg3ql OK2XeQOiEeXtT76rV4t2WR4= =ivrA -----END PGP SIGNATURE----- Merge ../b """, commit.as_raw_string()) def test_serialize_mergetags(self): tag = make_object( Tag, object=(Commit, b'a38d6181ff27824c79fc7df825164a212eff6a3f'), object_type_name=b'commit', name=b'v2.6.22-rc7', tag_time=1183319674, tag_timezone=0, tagger=b'Linus Torvalds ', message=default_message) commit = self.make_commit(mergetag=[tag, tag]) self.assertEqual(b"""tree d80c186a03f423a81b39df39dc87fd269736ca86 parent ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd parent 4cffe90e0a41ad3f5190079d7c8f036bde29cbe6 author James Westby 1174773719 +0000 committer James Westby 1174773719 +0000 mergetag object a38d6181ff27824c79fc7df825164a212eff6a3f type commit tag v2.6.22-rc7 tagger Linus Torvalds 1183319674 +0000 Linux 2.6.22-rc7 -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.7 (GNU/Linux) iD8DBQBGiAaAF3YsRnbiHLsRAitMAKCiLboJkQECM/jpYsY3WPfvUgLXkACgg3ql OK2XeQOiEeXtT76rV4t2WR4= =ivrA -----END PGP SIGNATURE----- mergetag object a38d6181ff27824c79fc7df825164a212eff6a3f type commit tag v2.6.22-rc7 tagger Linus Torvalds 1183319674 +0000 Linux 2.6.22-rc7 -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.7 (GNU/Linux) iD8DBQBGiAaAF3YsRnbiHLsRAitMAKCiLboJkQECM/jpYsY3WPfvUgLXkACgg3ql OK2XeQOiEeXtT76rV4t2WR4= =ivrA -----END PGP SIGNATURE----- Merge ../b """, commit.as_raw_string()) def test_deserialize_mergetag(self): tag = make_object( Tag, object=(Commit, b'a38d6181ff27824c79fc7df825164a212eff6a3f'), object_type_name=b'commit', name=b'v2.6.22-rc7', tag_time=1183319674, tag_timezone=0, tagger=b'Linus Torvalds ', message=default_message) commit = self.make_commit(mergetag=[tag]) d = Commit() 
d._deserialize(commit.as_raw_chunks()) self.assertEqual(commit, d) def test_deserialize_mergetags(self): tag = make_object( Tag, object=(Commit, b'a38d6181ff27824c79fc7df825164a212eff6a3f'), object_type_name=b'commit', name=b'v2.6.22-rc7', tag_time=1183319674, tag_timezone=0, tagger=b'Linus Torvalds ', message=default_message) commit = self.make_commit(mergetag=[tag, tag]) d = Commit() d._deserialize(commit.as_raw_chunks()) self.assertEqual(commit, d) default_committer = b'James Westby 1174773719 +0000' class CommitParseTests(ShaFileCheckTests): def make_commit_lines(self, tree=b'd80c186a03f423a81b39df39dc87fd269736ca86', parents=[b'ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd', b'4cffe90e0a41ad3f5190079d7c8f036bde29cbe6'], author=default_committer, committer=default_committer, encoding=None, message=b'Merge ../b\n', extra=None): lines = [] if tree is not None: lines.append(b'tree ' + tree) if parents is not None: lines.extend(b'parent ' + p for p in parents) if author is not None: lines.append(b'author ' + author) if committer is not None: lines.append(b'committer ' + committer) if encoding is not None: lines.append(b'encoding ' + encoding) if extra is not None: for name, value in sorted(extra.items()): lines.append(name + b' ' + value) lines.append(b'') if message is not None: lines.append(message) return lines def make_commit_text(self, **kwargs): return b'\n'.join(self.make_commit_lines(**kwargs)) def test_simple(self): c = Commit.from_string(self.make_commit_text()) self.assertEqual(b'Merge ../b\n', c.message) self.assertEqual(b'James Westby ', c.author) self.assertEqual(b'James Westby ', c.committer) self.assertEqual(b'd80c186a03f423a81b39df39dc87fd269736ca86', c.tree) self.assertEqual([b'ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd', b'4cffe90e0a41ad3f5190079d7c8f036bde29cbe6'], c.parents) expected_time = datetime.datetime(2007, 3, 24, 22, 1, 59) self.assertEqual(expected_time, datetime.datetime.utcfromtimestamp(c.commit_time)) self.assertEqual(0, c.commit_timezone) self.assertEqual(expected_time, datetime.datetime.utcfromtimestamp(c.author_time)) self.assertEqual(0, c.author_timezone) self.assertEqual(None, c.encoding) def test_custom(self): c = Commit.from_string(self.make_commit_text( extra={b'extra-field': b'data'})) self.assertEqual([(b'extra-field', b'data')], c.extra) def test_encoding(self): c = Commit.from_string(self.make_commit_text(encoding=b'UTF-8')) self.assertEqual(b'UTF-8', c.encoding) def test_check(self): self.assertCheckSucceeds(Commit, self.make_commit_text()) self.assertCheckSucceeds(Commit, self.make_commit_text(parents=None)) self.assertCheckSucceeds(Commit, self.make_commit_text(encoding=b'UTF-8')) self.assertCheckFails(Commit, self.make_commit_text(tree=b'xxx')) self.assertCheckFails(Commit, self.make_commit_text( parents=[a_sha, b'xxx'])) bad_committer = b'some guy without an email address 1174773719 +0000' self.assertCheckFails(Commit, self.make_commit_text(committer=bad_committer)) self.assertCheckFails(Commit, self.make_commit_text(author=bad_committer)) self.assertCheckFails(Commit, self.make_commit_text(author=None)) self.assertCheckFails(Commit, self.make_commit_text(committer=None)) self.assertCheckFails(Commit, self.make_commit_text( author=None, committer=None)) def test_check_duplicates(self): # duplicate each of the header fields for i in range(5): lines = self.make_commit_lines(parents=[a_sha], encoding=b'UTF-8') lines.insert(i, lines[i]) text = b'\n'.join(lines) if lines[i].startswith(b'parent'): # duplicate parents are ok for now 
self.assertCheckSucceeds(Commit, text) else: self.assertCheckFails(Commit, text) def test_check_order(self): lines = self.make_commit_lines(parents=[a_sha], encoding=b'UTF-8') headers = lines[:5] rest = lines[5:] # of all possible permutations, ensure only the original succeeds for perm in permutations(headers): perm = list(perm) text = b'\n'.join(perm + rest) if perm == headers: self.assertCheckSucceeds(Commit, text) else: self.assertCheckFails(Commit, text) def test_parse_gpgsig(self): c = Commit.from_string(b"""tree aaff74984cccd156a469afa7d9ab10e4777beb24 author Jelmer Vernooij 1412179807 +0200 committer Jelmer Vernooij 1412179807 +0200 gpgsig -----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQIcBAABCgAGBQJULCdfAAoJEACAbyvXKaRXuKwP/RyP9PA49uAvu8tQVCC/uBa8 vi975+xvO14R8Pp8k2nps7lSxCdtCd+xVT1VRHs0wNhOZo2YCVoU1HATkPejqSeV NScTHcxnk4/+bxyfk14xvJkNp7FlQ3npmBkA+lbV0Ubr33rvtIE5jiJPyz+SgWAg xdBG2TojV0squj00GoH/euK6aX7GgZtwdtpTv44haCQdSuPGDcI4TORqR6YSqvy3 GPE+3ZqXPFFb+KILtimkxitdwB7CpwmNse2vE3rONSwTvi8nq3ZoQYNY73CQGkUy qoFU0pDtw87U3niFin1ZccDgH0bB6624sLViqrjcbYJeg815Htsu4rmzVaZADEVC XhIO4MThebusdk0AcNGjgpf3HRHk0DPMDDlIjm+Oao0cqovvF6VyYmcb0C+RmhJj dodLXMNmbqErwTk3zEkW0yZvNIYXH7m9SokPCZa4eeIM7be62X6h1mbt0/IU6Th+ v18fS0iTMP/Viug5und+05C/v04kgDo0CPphAbXwWMnkE4B6Tl9sdyUYXtvQsL7x 0+WP1gL27ANqNZiI07Kz/BhbBAQI/+2TFT7oGr0AnFPQ5jHp+3GpUf6OKuT1wT3H ND189UFuRuubxb42vZhpcXRbqJVWnbECTKVUPsGZqat3enQUB63uM4i6/RdONDZA fDeF1m4qYs+cUXKNUZ03 =X6RT -----END PGP SIGNATURE----- foo """) self.assertEqual(b'foo\n', c.message) self.assertEqual([], c.extra) self.assertEqual(b"""-----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQIcBAABCgAGBQJULCdfAAoJEACAbyvXKaRXuKwP/RyP9PA49uAvu8tQVCC/uBa8 vi975+xvO14R8Pp8k2nps7lSxCdtCd+xVT1VRHs0wNhOZo2YCVoU1HATkPejqSeV NScTHcxnk4/+bxyfk14xvJkNp7FlQ3npmBkA+lbV0Ubr33rvtIE5jiJPyz+SgWAg xdBG2TojV0squj00GoH/euK6aX7GgZtwdtpTv44haCQdSuPGDcI4TORqR6YSqvy3 GPE+3ZqXPFFb+KILtimkxitdwB7CpwmNse2vE3rONSwTvi8nq3ZoQYNY73CQGkUy qoFU0pDtw87U3niFin1ZccDgH0bB6624sLViqrjcbYJeg815Htsu4rmzVaZADEVC XhIO4MThebusdk0AcNGjgpf3HRHk0DPMDDlIjm+Oao0cqovvF6VyYmcb0C+RmhJj dodLXMNmbqErwTk3zEkW0yZvNIYXH7m9SokPCZa4eeIM7be62X6h1mbt0/IU6Th+ v18fS0iTMP/Viug5und+05C/v04kgDo0CPphAbXwWMnkE4B6Tl9sdyUYXtvQsL7x 0+WP1gL27ANqNZiI07Kz/BhbBAQI/+2TFT7oGr0AnFPQ5jHp+3GpUf6OKuT1wT3H ND189UFuRuubxb42vZhpcXRbqJVWnbECTKVUPsGZqat3enQUB63uM4i6/RdONDZA fDeF1m4qYs+cUXKNUZ03 =X6RT -----END PGP SIGNATURE-----""", c.gpgsig) _TREE_ITEMS = { b'a.c': (0o100755, b'd80c186a03f423a81b39df39dc87fd269736ca86'), b'a': (stat.S_IFDIR, b'd80c186a03f423a81b39df39dc87fd269736ca86'), b'a/c': (stat.S_IFDIR, b'd80c186a03f423a81b39df39dc87fd269736ca86'), } _SORTED_TREE_ITEMS = [ TreeEntry(b'a.c', 0o100755, b'd80c186a03f423a81b39df39dc87fd269736ca86'), TreeEntry(b'a', stat.S_IFDIR, b'd80c186a03f423a81b39df39dc87fd269736ca86'), TreeEntry(b'a/c', stat.S_IFDIR, b'd80c186a03f423a81b39df39dc87fd269736ca86'), ] class TreeTests(ShaFileCheckTests): def test_add(self): myhexsha = b'd80c186a03f423a81b39df39dc87fd269736ca86' x = Tree() x.add(b'myname', 0o100755, myhexsha) self.assertEqual(x[b'myname'], (0o100755, myhexsha)) self.assertEqual(b'100755 myname\0' + hex_to_sha(myhexsha), x.as_raw_string()) def test_add_old_order(self): myhexsha = b'd80c186a03f423a81b39df39dc87fd269736ca86' x = Tree() warnings.simplefilter("ignore", DeprecationWarning) try: x.add(0o100755, b'myname', myhexsha) finally: warnings.resetwarnings() self.assertEqual(x[b'myname'], (0o100755, myhexsha)) self.assertEqual(b'100755 myname\0' + hex_to_sha(myhexsha), x.as_raw_string()) def test_simple(self): myhexsha 
= b'd80c186a03f423a81b39df39dc87fd269736ca86' x = Tree() x[b'myname'] = (0o100755, myhexsha) self.assertEqual(b'100755 myname\0' + hex_to_sha(myhexsha), x.as_raw_string()) def test_tree_update_id(self): x = Tree() x[b'a.c'] = (0o100755, b'd80c186a03f423a81b39df39dc87fd269736ca86') self.assertEqual(b'0c5c6bc2c081accfbc250331b19e43b904ab9cdd', x.id) x[b'a.b'] = (stat.S_IFDIR, b'd80c186a03f423a81b39df39dc87fd269736ca86') self.assertEqual(b'07bfcb5f3ada15bbebdfa3bbb8fd858a363925c8', x.id) def test_tree_iteritems_dir_sort(self): x = Tree() for name, item in _TREE_ITEMS.items(): x[name] = item self.assertEqual(_SORTED_TREE_ITEMS, x.items()) def test_tree_items_dir_sort(self): x = Tree() for name, item in _TREE_ITEMS.items(): x[name] = item self.assertEqual(_SORTED_TREE_ITEMS, x.items()) def _do_test_parse_tree(self, parse_tree): dir = os.path.join(os.path.dirname(__file__), 'data', 'trees') o = Tree.from_path(hex_to_filename(dir, tree_sha)) self.assertEqual([(b'a', 0o100644, a_sha), (b'b', 0o100644, b_sha)], list(parse_tree(o.as_raw_string()))) # test a broken tree that has a leading 0 on the file mode broken_tree = b'0100644 foo\0' + hex_to_sha(a_sha) def eval_parse_tree(*args, **kwargs): return list(parse_tree(*args, **kwargs)) self.assertEqual([(b'foo', 0o100644, a_sha)], eval_parse_tree(broken_tree)) self.assertRaises(ObjectFormatException, eval_parse_tree, broken_tree, strict=True) test_parse_tree = functest_builder(_do_test_parse_tree, _parse_tree_py) test_parse_tree_extension = ext_functest_builder(_do_test_parse_tree, parse_tree) def _do_test_sorted_tree_items(self, sorted_tree_items): def do_sort(entries): return list(sorted_tree_items(entries, False)) actual = do_sort(_TREE_ITEMS) self.assertEqual(_SORTED_TREE_ITEMS, actual) self.assertTrue(isinstance(actual[0], TreeEntry)) # C/Python implementations may differ in specific error types, but # should all error on invalid inputs. # For example, the C implementation has stricter type checks, so may # raise TypeError where the Python implementation raises AttributeError. 
errors = (TypeError, ValueError, AttributeError) self.assertRaises(errors, do_sort, b'foo') self.assertRaises(errors, do_sort, {b'foo': (1, 2, 3)}) myhexsha = b'd80c186a03f423a81b39df39dc87fd269736ca86' self.assertRaises(errors, do_sort, {b'foo': (b'xxx', myhexsha)}) self.assertRaises(errors, do_sort, {b'foo': (0o100755, 12345)}) test_sorted_tree_items = functest_builder(_do_test_sorted_tree_items, _sorted_tree_items_py) test_sorted_tree_items_extension = ext_functest_builder( _do_test_sorted_tree_items, sorted_tree_items) def _do_test_sorted_tree_items_name_order(self, sorted_tree_items): self.assertEqual([ TreeEntry(b'a', stat.S_IFDIR, b'd80c186a03f423a81b39df39dc87fd269736ca86'), TreeEntry(b'a.c', 0o100755, b'd80c186a03f423a81b39df39dc87fd269736ca86'), TreeEntry(b'a/c', stat.S_IFDIR, b'd80c186a03f423a81b39df39dc87fd269736ca86'), ], list(sorted_tree_items(_TREE_ITEMS, True))) test_sorted_tree_items_name_order = functest_builder( _do_test_sorted_tree_items_name_order, _sorted_tree_items_py) test_sorted_tree_items_name_order_extension = ext_functest_builder( _do_test_sorted_tree_items_name_order, sorted_tree_items) def test_check(self): t = Tree sha = hex_to_sha(a_sha) # filenames self.assertCheckSucceeds(t, b'100644 .a\0' + sha) self.assertCheckFails(t, b'100644 \0' + sha) self.assertCheckFails(t, b'100644 .\0' + sha) self.assertCheckFails(t, b'100644 a/a\0' + sha) self.assertCheckFails(t, b'100644 ..\0' + sha) # modes self.assertCheckSucceeds(t, b'100644 a\0' + sha) self.assertCheckSucceeds(t, b'100755 a\0' + sha) self.assertCheckSucceeds(t, b'160000 a\0' + sha) # TODO more whitelisted modes self.assertCheckFails(t, b'123456 a\0' + sha) self.assertCheckFails(t, b'123abc a\0' + sha) # should fail check, but parses ok self.assertCheckFails(t, b'0100644 foo\0' + sha) # shas self.assertCheckFails(t, b'100644 a\0' + (b'x' * 5)) self.assertCheckFails(t, b'100644 a\0' + (b'x' * 18) + b'\0') self.assertCheckFails(t, b'100644 a\0' + (b'x' * 21) + b'\n100644 b\0' + sha) # ordering sha2 = hex_to_sha(b_sha) self.assertCheckSucceeds(t, b'100644 a\0' + sha + b'\n100644 b\0' + sha) self.assertCheckSucceeds(t, b'100644 a\0' + sha + b'\n100644 b\0' + sha2) self.assertCheckFails(t, b'100644 a\0' + sha + b'\n100755 a\0' + sha2) self.assertCheckFails(t, b'100644 b\0' + sha2 + b'\n100644 a\0' + sha) def test_iter(self): t = Tree() t[b'foo'] = (0o100644, a_sha) self.assertEqual(set([b'foo']), set(t)) class TagSerializeTests(TestCase): def test_serialize_simple(self): x = make_object(Tag, tagger=b'Jelmer Vernooij ', name=b'0.1', message=b'Tag 0.1', object=(Blob, b'd80c186a03f423a81b39df39dc87fd269736ca86'), tag_time=423423423, tag_timezone=0) self.assertEqual((b'object d80c186a03f423a81b39df39dc87fd269736ca86\n' b'type blob\n' b'tag 0.1\n' b'tagger Jelmer Vernooij ' b'423423423 +0000\n' b'\n' b'Tag 0.1'), x.as_raw_string()) def test_serialize_none_message(self): x = make_object(Tag, tagger=b'Jelmer Vernooij ', name=b'0.1', message=None, object=(Blob, b'd80c186a03f423a81b39df39dc87fd269736ca86'), tag_time=423423423, tag_timezone=0) self.assertEqual((b'object d80c186a03f423a81b39df39dc87fd269736ca86\n' b'type blob\n' b'tag 0.1\n' b'tagger Jelmer Vernooij ' b'423423423 +0000\n'), x.as_raw_string()) default_tagger = (b'Linus Torvalds ' b'1183319674 -0700') default_message = b"""Linux 2.6.22-rc7 -----BEGIN PGP SIGNATURE----- Version: GnuPG v1.4.7 (GNU/Linux) iD8DBQBGiAaAF3YsRnbiHLsRAitMAKCiLboJkQECM/jpYsY3WPfvUgLXkACgg3ql OK2XeQOiEeXtT76rV4t2WR4= =ivrA -----END PGP SIGNATURE----- """ class 
TagParseTests(ShaFileCheckTests): def make_tag_lines(self, object_sha=b'a38d6181ff27824c79fc7df825164a212eff6a3f', object_type_name=b'commit', name=b'v2.6.22-rc7', tagger=default_tagger, message=default_message): lines = [] if object_sha is not None: lines.append(b'object ' + object_sha) if object_type_name is not None: lines.append(b'type ' + object_type_name) if name is not None: lines.append(b'tag ' + name) if tagger is not None: lines.append(b'tagger ' + tagger) if message is not None: lines.append(b'') lines.append(message) return lines def make_tag_text(self, **kwargs): return b'\n'.join(self.make_tag_lines(**kwargs)) def test_parse(self): x = Tag() x.set_raw_string(self.make_tag_text()) self.assertEqual( b'Linus Torvalds ', x.tagger) self.assertEqual(b'v2.6.22-rc7', x.name) object_type, object_sha = x.object self.assertEqual(b'a38d6181ff27824c79fc7df825164a212eff6a3f', object_sha) self.assertEqual(Commit, object_type) self.assertEqual(datetime.datetime.utcfromtimestamp(x.tag_time), datetime.datetime(2007, 7, 1, 19, 54, 34)) self.assertEqual(-25200, x.tag_timezone) def test_parse_no_tagger(self): x = Tag() x.set_raw_string(self.make_tag_text(tagger=None)) self.assertEqual(None, x.tagger) self.assertEqual(b'v2.6.22-rc7', x.name) def test_parse_no_message(self): x = Tag() x.set_raw_string(self.make_tag_text(message=None)) self.assertEqual(None, x.message) self.assertEqual( b'Linus Torvalds ', x.tagger) self.assertEqual(datetime.datetime.utcfromtimestamp(x.tag_time), datetime.datetime(2007, 7, 1, 19, 54, 34)) self.assertEqual(-25200, x.tag_timezone) self.assertEqual(b'v2.6.22-rc7', x.name) def test_check(self): self.assertCheckSucceeds(Tag, self.make_tag_text()) self.assertCheckFails(Tag, self.make_tag_text(object_sha=None)) self.assertCheckFails(Tag, self.make_tag_text(object_type_name=None)) self.assertCheckFails(Tag, self.make_tag_text(name=None)) self.assertCheckFails(Tag, self.make_tag_text(name=b'')) self.assertCheckFails(Tag, self.make_tag_text( object_type_name=b'foobar')) self.assertCheckFails(Tag, self.make_tag_text( tagger=b'some guy without an email address 1183319674 -0700')) self.assertCheckFails(Tag, self.make_tag_text( tagger=(b'Linus Torvalds ' b'Sun 7 Jul 2007 12:54:34 +0700'))) self.assertCheckFails(Tag, self.make_tag_text(object_sha=b'xxx')) def test_check_duplicates(self): # duplicate each of the header fields for i in range(4): lines = self.make_tag_lines() lines.insert(i, lines[i]) self.assertCheckFails(Tag, b'\n'.join(lines)) def test_check_order(self): lines = self.make_tag_lines() headers = lines[:4] rest = lines[4:] # of all possible permutations, ensure only the original succeeds for perm in permutations(headers): perm = list(perm) text = b'\n'.join(perm + rest) if perm == headers: self.assertCheckSucceeds(Tag, text) else: self.assertCheckFails(Tag, text) def test_tree_copy_after_update(self): """Check Tree.id is correctly updated when the tree is copied after updated. 
""" shas = [] tree = Tree() shas.append(tree.id) tree.add(b'data', 0o644, Blob().id) copied = tree.copy() shas.append(tree.id) shas.append(copied.id) self.assertNotIn(shas[0], shas[1:]) self.assertEqual(shas[1], shas[2]) class CheckTests(TestCase): def test_check_hexsha(self): check_hexsha(a_sha, "failed to check good sha") self.assertRaises(ObjectFormatException, check_hexsha, b'1' * 39, 'sha too short') self.assertRaises(ObjectFormatException, check_hexsha, b'1' * 41, 'sha too long') self.assertRaises(ObjectFormatException, check_hexsha, b'x' * 40, 'invalid characters') def test_check_identity(self): check_identity(b'Dave Borowitz ', "failed to check good identity") check_identity(b'', "failed to check good identity") self.assertRaises(ObjectFormatException, check_identity, b'Dave Borowitz', "no email") self.assertRaises(ObjectFormatException, check_identity, b'Dave Borowitz ', "incomplete email") self.assertRaises(ObjectFormatException, check_identity, b'Dave Borowitz <', "typo") self.assertRaises(ObjectFormatException, check_identity, b'Dave Borowitz >', "typo") self.assertRaises(ObjectFormatException, check_identity, b'Dave Borowitz xxx', "trailing characters") class TimezoneTests(TestCase): def test_parse_timezone_utc(self): self.assertEqual((0, False), parse_timezone(b'+0000')) def test_parse_timezone_utc_negative(self): self.assertEqual((0, True), parse_timezone(b'-0000')) def test_generate_timezone_utc(self): self.assertEqual(b'+0000', format_timezone(0)) def test_generate_timezone_utc_negative(self): self.assertEqual(b'-0000', format_timezone(0, True)) def test_parse_timezone_cet(self): self.assertEqual((60 * 60, False), parse_timezone(b'+0100')) def test_format_timezone_cet(self): self.assertEqual(b'+0100', format_timezone(60 * 60)) def test_format_timezone_pdt(self): self.assertEqual(b'-0400', format_timezone(-4 * 60 * 60)) def test_parse_timezone_pdt(self): self.assertEqual((-4 * 60 * 60, False), parse_timezone(b'-0400')) def test_format_timezone_pdt_half(self): self.assertEqual(b'-0440', format_timezone(int(((-4 * 60) - 40) * 60))) def test_format_timezone_double_negative(self): self.assertEqual(b'--700', format_timezone(int(((7 * 60)) * 60), True)) def test_parse_timezone_pdt_half(self): self.assertEqual((((-4 * 60) - 40) * 60, False), parse_timezone(b'-0440')) def test_parse_timezone_double_negative(self): self.assertEqual( (int(((7 * 60)) * 60), False), parse_timezone(b'+700')) self.assertEqual( (int(((7 * 60)) * 60), True), parse_timezone(b'--700')) class ShaFileCopyTests(TestCase): def assert_copy(self, orig): oclass = object_class(orig.type_num) copy = orig.copy() self.assertTrue(isinstance(copy, oclass)) self.assertEqual(copy, orig) self.assertTrue(copy is not orig) def test_commit_copy(self): attrs = {'tree': b'd80c186a03f423a81b39df39dc87fd269736ca86', 'parents': [b'ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd', b'4cffe90e0a41ad3f5190079d7c8f036bde29cbe6'], 'author': b'James Westby ', 'committer': b'James Westby ', 'commit_time': 1174773719, 'author_time': 1174773719, 'commit_timezone': 0, 'author_timezone': 0, 'message': b'Merge ../b\n'} commit = make_commit(**attrs) self.assert_copy(commit) def test_blob_copy(self): blob = make_object(Blob, data=b'i am a blob') self.assert_copy(blob) def test_tree_copy(self): blob = make_object(Blob, data=b'i am a blob') tree = Tree() tree[b'blob'] = (stat.S_IFREG, blob.id) self.assert_copy(tree) def test_tag_copy(self): tag = make_object( Tag, name=b'tag', message=b'', tagger=b'Tagger ', tag_time=12345, tag_timezone=0, object=(Commit, 
b'0' * 40)) self.assert_copy(tag) class ShaFileSerializeTests(TestCase): """ Test that `ShaFile` objects only gets serialized once if they haven't changed. """ @contextmanager def assert_serialization_on_change(self, obj, needs_serialization_after_change=True): old_id = obj.id self.assertFalse(obj._needs_serialization) yield obj if needs_serialization_after_change: self.assertTrue(obj._needs_serialization) else: self.assertFalse(obj._needs_serialization) new_id = obj.id self.assertFalse(obj._needs_serialization) self.assertNotEqual(old_id, new_id) def test_commit_serialize(self): attrs = {'tree': b'd80c186a03f423a81b39df39dc87fd269736ca86', 'parents': [b'ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd', b'4cffe90e0a41ad3f5190079d7c8f036bde29cbe6'], 'author': b'James Westby ', 'committer': b'James Westby ', 'commit_time': 1174773719, 'author_time': 1174773719, 'commit_timezone': 0, 'author_timezone': 0, 'message': b'Merge ../b\n'} commit = make_commit(**attrs) with self.assert_serialization_on_change(commit): commit.parents = [b'ab64bbdcc51b170d21588e5c5d391ee5c0c96dfd'] def test_blob_serialize(self): blob = make_object(Blob, data=b'i am a blob') with self.assert_serialization_on_change(blob, needs_serialization_after_change=False): blob.data = b'i am another blob' def test_tree_serialize(self): blob = make_object(Blob, data=b'i am a blob') tree = Tree() tree[b'blob'] = (stat.S_IFREG, blob.id) with self.assert_serialization_on_change(tree): tree[b'blob2'] = (stat.S_IFREG, blob.id) def test_tag_serialize(self): tag = make_object( Tag, name=b'tag', message=b'', tagger=b'Tagger ', tag_time=12345, tag_timezone=0, object=(Commit, b'0' * 40)) with self.assert_serialization_on_change(tag): tag.message = b'new message' class PrettyFormatTreeEntryTests(TestCase): def test_format(self): self.assertEqual( '40000 tree 40820c38cfb182ce6c8b261555410d8382a5918b\tfoo\n', pretty_format_tree_entry(b"foo", 0o40000, b"40820c38cfb182ce6c8b261555410d8382a5918b"))
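# Illustrative sketch, not part of the change above: pretty_format_tree_entry
# produces `git ls-tree`-style lines, so a quick manual check of a tree built
# by hand (hypothetical example values) might look like:
#
#     t = Tree()
#     t.add(b'foo', 0o100644, b'd80c186a03f423a81b39df39dc87fd269736ca86')
#     for name, mode, sha in t.iteritems():
#         print(pretty_format_tree_entry(name, mode, sha))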