diff --git a/dulwich/pack.py b/dulwich/pack.py index ff78af75..db728175 100644 --- a/dulwich/pack.py +++ b/dulwich/pack.py @@ -1,1905 +1,1909 @@ # pack.py -- For dealing with packed git objects. # Copyright (C) 2007 James Westby # Copyright (C) 2008-2013 Jelmer Vernooij # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 # of the License or (at your option) a later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. """Classes for dealing with packed git objects. A pack is a compact representation of a bunch of objects, stored using deltas where possible. They have two parts, the pack file, which stores the data, and an index that tells you where the data is. To find an object you look in all of the index files 'til you find a match for the object name. You then use the pointer got from this as a pointer in to the corresponding packfile. """ from collections import defaultdict import binascii from cStringIO import ( StringIO, ) from collections import ( deque, ) import difflib from itertools import ( chain, imap, izip, ) try: import mmap except ImportError: has_mmap = False else: has_mmap = True from hashlib import sha1 import os from os import ( SEEK_CUR, SEEK_END, ) import struct from struct import unpack_from import sys import warnings import zlib from dulwich.errors import ( ApplyDeltaError, ChecksumMismatch, ) from dulwich.file import GitFile from dulwich.lru_cache import ( LRUSizeCache, ) from dulwich.objects import ( ShaFile, hex_to_sha, sha_to_hex, object_header, ) supports_mmap_offset = (sys.version_info[0] >= 3 or (sys.version_info[0] == 2 and sys.version_info[1] >= 6)) OFS_DELTA = 6 REF_DELTA = 7 DELTA_TYPES = (OFS_DELTA, REF_DELTA) def take_msb_bytes(read, crc32=None): """Read bytes marked with most significant bit. :param read: Read function """ ret = [] while len(ret) == 0 or ret[-1] & 0x80: b = read(1) if crc32 is not None: crc32 = binascii.crc32(b, crc32) ret.append(ord(b)) return ret, crc32 class UnpackedObject(object): """Class encapsulating an object unpacked from a pack file. These objects should only be created from within unpack_object. Most members start out as empty and are filled in at various points by read_zlib_chunks, unpack_object, DeltaChainIterator, etc. End users of this object should take care that the function they're getting this object from is guaranteed to set the members they need. """ __slots__ = [ 'offset', # Offset in its pack. '_sha', # Cached binary SHA. 'obj_type_num', # Type of this object. 'obj_chunks', # Decompressed and delta-resolved chunks. 'pack_type_num', # Type of this object in the pack (may be a delta). 'delta_base', # Delta base offset or SHA. 'comp_chunks', # Compressed object chunks. 'decomp_chunks', # Decompressed object chunks. 'decomp_len', # Decompressed length of this object. 'crc32', # CRC32. ] # TODO(dborowitz): read_zlib_chunks and unpack_object could very well be # methods of this object. 
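# A minimal sketch (illustrative helper, not dulwich API) of the MSB
# continuation encoding that take_msb_bytes() consumes: every byte carries 7
# payload bits, and a set high bit means another byte follows. unpack_object()
# special-cases the first byte, which holds the type in bits 4-6 and only the
# low 4 bits of the size.
def _demo_decode_type_and_size(data):
    """Decode (type_num, size, bytes_used) from a pack object header."""
    byte = ord(data[0])
    type_num = (byte >> 4) & 0x07
    size = byte & 0x0f
    i, shift = 1, 4
    while byte & 0x80:
        byte = ord(data[i])
        size += (byte & 0x7f) << shift
        shift += 7
        i += 1
    return type_num, size, i

# '\x95\x0b': continuation bit set, type 1 (commit), size 5 + (11 << 4) = 181.
assert _demo_decode_type_and_size('\x95\x0b') == (1, 181, 2)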
def __init__(self, pack_type_num, delta_base, decomp_len, crc32): self.offset = None self._sha = None self.pack_type_num = pack_type_num self.delta_base = delta_base self.comp_chunks = None self.decomp_chunks = [] self.decomp_len = decomp_len self.crc32 = crc32 if pack_type_num in DELTA_TYPES: self.obj_type_num = None self.obj_chunks = None else: self.obj_type_num = pack_type_num self.obj_chunks = self.decomp_chunks self.delta_base = delta_base def sha(self): """Return the binary SHA of this object.""" if self._sha is None: self._sha = obj_sha(self.obj_type_num, self.obj_chunks) return self._sha def sha_file(self): """Return a ShaFile from this object.""" return ShaFile.from_raw_chunks(self.obj_type_num, self.obj_chunks) # Only provided for backwards compatibility with code that expects either # chunks or a delta tuple. def _obj(self): """Return the decompressed chunks, or (delta base, delta chunks).""" if self.pack_type_num in DELTA_TYPES: return (self.delta_base, self.decomp_chunks) else: return self.decomp_chunks def __eq__(self, other): if not isinstance(other, UnpackedObject): return False for slot in self.__slots__: if getattr(self, slot) != getattr(other, slot): return False return True def __ne__(self, other): return not (self == other) def __repr__(self): data = ['%s=%r' % (s, getattr(self, s)) for s in self.__slots__] return '%s(%s)' % (self.__class__.__name__, ', '.join(data)) _ZLIB_BUFSIZE = 4096 def read_zlib_chunks(read_some, unpacked, include_comp=False, buffer_size=_ZLIB_BUFSIZE): """Read zlib data from a buffer. This function requires that the buffer have additional data following the compressed data, which is guaranteed to be the case for git pack files. :param read_some: Read function that returns at least one byte, but may return less than the requested size. :param unpacked: An UnpackedObject to write result data to. If its crc32 attr is not None, the CRC32 of the compressed bytes will be computed using this starting CRC32. After this function, will have the following attrs set: * comp_chunks (if include_comp is True) * decomp_chunks * decomp_len * crc32 :param include_comp: If True, include compressed data in the result. :param buffer_size: Size of the read buffer. :return: Leftover unused data from the decompression. :raise zlib.error: if a decompression error occurred. """ if unpacked.decomp_len <= -1: raise ValueError('non-negative zlib data stream size expected') decomp_obj = zlib.decompressobj() comp_chunks = [] decomp_chunks = unpacked.decomp_chunks decomp_len = 0 crc32 = unpacked.crc32 while True: add = read_some(buffer_size) if not add: raise zlib.error('EOF before end of zlib stream') comp_chunks.append(add) decomp = decomp_obj.decompress(add) decomp_len += len(decomp) decomp_chunks.append(decomp) unused = decomp_obj.unused_data if unused: left = len(unused) if crc32 is not None: crc32 = binascii.crc32(add[:-left], crc32) if include_comp: comp_chunks[-1] = add[:-left] break elif crc32 is not None: crc32 = binascii.crc32(add, crc32) if crc32 is not None: crc32 &= 0xffffffff if decomp_len != unpacked.decomp_len: raise zlib.error('decompressed data does not match expected size') unpacked.crc32 = crc32 if include_comp: unpacked.comp_chunks = comp_chunks return unused def iter_sha1(iter): """Return the hexdigest of the SHA1 over a set of names. :param iter: Iterator over string objects :return: 40-byte hex sha1 digest """ sha = sha1() for name in iter: sha.update(name) return sha.hexdigest() def load_pack_index(path): """Load an index file by path. 
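# A short sketch of the zlib property that read_zlib_chunks() relies on: pack
# entries are deflate streams stored back to back, and decompressobj exposes
# the bytes belonging to the *next* entry via unused_data once the current
# stream ends. The helper below is illustrative only.
import zlib as _zlib

def _demo_split_zlib_stream(buf):
    """Return (decompressed, leftover) for one zlib stream plus trailing data."""
    d = _zlib.decompressobj()
    out = d.decompress(buf)
    return out, d.unused_data

_out, _rest = _demo_split_zlib_stream(_zlib.compress('hello') + 'NEXT-ENTRY')
assert _out == 'hello' and _rest == 'NEXT-ENTRY'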
:param path: Path to the index file :return: A PackIndex loaded from the given path """ f = GitFile(path, 'rb') try: return load_pack_index_file(path, f) finally: f.close() def _load_file_contents(f, size=None): fileno = getattr(f, 'fileno', None) # Attempt to use mmap if possible if fileno is not None: fd = f.fileno() if size is None: size = os.fstat(fd).st_size if has_mmap: try: contents = mmap.mmap(fd, size, access=mmap.ACCESS_READ) except mmap.error: # Perhaps a socket? pass else: return contents, size contents = f.read() size = len(contents) return contents, size def load_pack_index_file(path, f): """Load an index file from a file-like object. :param path: Path for the index file :param f: File-like object :return: A PackIndex loaded from the given file """ contents, size = _load_file_contents(f) if contents[:4] == '\377tOc': version = struct.unpack('>L', contents[4:8])[0] if version == 2: return PackIndex2(path, file=f, contents=contents, size=size) else: raise KeyError('Unknown pack index format %d' % version) else: return PackIndex1(path, file=f, contents=contents, size=size) def bisect_find_sha(start, end, sha, unpack_name): """Find a SHA in a data blob with sorted SHAs. :param start: Start index of range to search :param end: End index of range to search :param sha: Sha to find :param unpack_name: Callback to retrieve SHA by index :return: Index of the SHA, or None if it wasn't found """ assert start <= end while start <= end: i = (start + end)/2 file_sha = unpack_name(i) x = cmp(file_sha, sha) if x < 0: start = i + 1 elif x > 0: end = i - 1 else: return i return None class PackIndex(object): """An index in to a packfile. Given a sha id of an object a pack index can tell you the location in the packfile of that object if it has it. """ def __eq__(self, other): if not isinstance(other, PackIndex): return False for (name1, _, _), (name2, _, _) in izip(self.iterentries(), other.iterentries()): if name1 != name2: return False return True def __ne__(self, other): return not self.__eq__(other) def __len__(self): """Return the number of entries in this pack index.""" raise NotImplementedError(self.__len__) def __iter__(self): """Iterate over the SHAs in this pack.""" return imap(sha_to_hex, self._itersha()) def iterentries(self): """Iterate over the entries in this pack index. :return: iterator over tuples with object name, offset in packfile and crc32 checksum. """ raise NotImplementedError(self.iterentries) def get_pack_checksum(self): """Return the SHA1 checksum stored for the corresponding packfile. :return: 20-byte binary digest """ raise NotImplementedError(self.get_pack_checksum) def object_index(self, sha): """Return the index in to the corresponding packfile for the object. Given the name of an object it will return the offset that object lives at within the corresponding pack file. If the pack file doesn't have the object then None will be returned. """ if len(sha) == 40: sha = hex_to_sha(sha) return self._object_index(sha) def _object_index(self, sha): """See object_index. :param sha: A *binary* SHA string. (20 characters long)_ """ raise NotImplementedError(self._object_index) def objects_sha1(self): """Return the hex SHA1 over all the shas of all objects in this pack. :note: This is used for the filename of the pack.
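# A quick illustration of bisect_find_sha() over a sorted name table. The
# unpack_name callback hides *where* the i-th SHA lives (v1 and v2 indexes lay
# their tables out differently), which is why the search takes a callback
# instead of a buffer. The three fake 20-byte names below are illustrative.
_demo_names = ['\x01' * 20, '\x7f' * 20, '\xff' * 20]
assert bisect_find_sha(0, len(_demo_names) - 1, '\x7f' * 20,
                       lambda i: _demo_names[i]) == 1
assert bisect_find_sha(0, len(_demo_names) - 1, '\x02' * 20,
                       lambda i: _demo_names[i]) is None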
""" return iter_sha1(self._itersha()) def _itersha(self): """Yield all the SHA1's of the objects in the index, sorted.""" raise NotImplementedError(self._itersha) class MemoryPackIndex(PackIndex): """Pack index that is stored entirely in memory.""" def __init__(self, entries, pack_checksum=None): """Create a new MemoryPackIndex. :param entries: Sequence of name, idx, crc32 (sorted) :param pack_checksum: Optional pack checksum """ self._by_sha = {} for name, idx, crc32 in entries: self._by_sha[name] = idx self._entries = entries self._pack_checksum = pack_checksum def get_pack_checksum(self): return self._pack_checksum def __len__(self): return len(self._entries) def _object_index(self, sha): return self._by_sha[sha][0] def _itersha(self): return iter(self._by_sha) def iterentries(self): return iter(self._entries) class FilePackIndex(PackIndex): """Pack index that is based on a file. To do the loop it opens the file, and indexes first 256 4 byte groups with the first byte of the sha id. The value in the four byte group indexed is the end of the group that shares the same starting byte. Subtract one from the starting byte and index again to find the start of the group. The values are sorted by sha id within the group, so do the math to find the start and end offset and then bisect in to find if the value is present. """ def __init__(self, filename, file=None, contents=None, size=None): """Create a pack index object. Provide it with the name of the index file to consider, and it will map it whenever required. """ self._filename = filename # Take the size now, so it can be checked each time we map the file to # ensure that it hasn't changed. if file is None: self._file = GitFile(filename, 'rb') else: self._file = file if contents is None: self._contents, self._size = _load_file_contents(self._file, size) else: self._contents, self._size = (contents, size) def __eq__(self, other): # Quick optimization: if (isinstance(other, FilePackIndex) and self._fan_out_table != other._fan_out_table): return False return super(FilePackIndex, self).__eq__(other) def close(self): self._file.close() if getattr(self._contents, "close", None) is not None: self._contents.close() def __len__(self): """Return the number of entries in this pack index.""" return self._fan_out_table[-1] def _unpack_entry(self, i): """Unpack the i-th entry in the index file. :return: Tuple with object name (SHA), offset in pack file and CRC32 checksum (if known). """ raise NotImplementedError(self._unpack_entry) def _unpack_name(self, i): """Unpack the i-th name from the index file.""" raise NotImplementedError(self._unpack_name) def _unpack_offset(self, i): """Unpack the i-th object offset from the index file.""" raise NotImplementedError(self._unpack_offset) def _unpack_crc32_checksum(self, i): """Unpack the crc32 checksum for the i-th object from the index file.""" raise NotImplementedError(self._unpack_crc32_checksum) def _itersha(self): for i in range(len(self)): yield self._unpack_name(i) def iterentries(self): """Iterate over the entries in this pack index. :return: iterator over tuples with object name, offset in packfile and crc32 checksum. 
""" for i in range(len(self)): yield self._unpack_entry(i) def _read_fan_out_table(self, start_offset): ret = [] for i in range(0x100): fanout_entry = self._contents[start_offset+i*4:start_offset+(i+1)*4] ret.append(struct.unpack('>L', fanout_entry)[0]) return ret def check(self): """Check that the stored checksum matches the actual checksum.""" actual = self.calculate_checksum() stored = self.get_stored_checksum() if actual != stored: raise ChecksumMismatch(stored, actual) def calculate_checksum(self): """Calculate the SHA1 checksum over this pack index. :return: This is a 20-byte binary digest """ return sha1(self._contents[:-20]).digest() def get_pack_checksum(self): """Return the SHA1 checksum stored for the corresponding packfile. :return: 20-byte binary digest """ return str(self._contents[-40:-20]) def get_stored_checksum(self): """Return the SHA1 checksum stored for this index. :return: 20-byte binary digest """ return str(self._contents[-20:]) def _object_index(self, sha): """See object_index. :param sha: A *binary* SHA string. (20 characters long)_ """ assert len(sha) == 20 idx = ord(sha[0]) if idx == 0: start = 0 else: start = self._fan_out_table[idx-1] end = self._fan_out_table[idx] i = bisect_find_sha(start, end, sha, self._unpack_name) if i is None: raise KeyError(sha) return self._unpack_offset(i) class PackIndex1(FilePackIndex): """Version 1 Pack Index file.""" def __init__(self, filename, file=None, contents=None, size=None): super(PackIndex1, self).__init__(filename, file, contents, size) self.version = 1 self._fan_out_table = self._read_fan_out_table(0) def _unpack_entry(self, i): (offset, name) = unpack_from('>L20s', self._contents, (0x100 * 4) + (i * 24)) return (name, offset, None) def _unpack_name(self, i): offset = (0x100 * 4) + (i * 24) + 4 return self._contents[offset:offset+20] def _unpack_offset(self, i): offset = (0x100 * 4) + (i * 24) return unpack_from('>L', self._contents, offset)[0] def _unpack_crc32_checksum(self, i): # Not stored in v1 index files return None class PackIndex2(FilePackIndex): """Version 2 Pack Index file.""" def __init__(self, filename, file=None, contents=None, size=None): super(PackIndex2, self).__init__(filename, file, contents, size) if self._contents[:4] != '\377tOc': raise AssertionError('Not a v2 pack index file') (self.version, ) = unpack_from('>L', self._contents, 4) if self.version != 2: raise AssertionError('Version was %d' % self.version) self._fan_out_table = self._read_fan_out_table(8) self._name_table_offset = 8 + 0x100 * 4 self._crc32_table_offset = self._name_table_offset + 20 * len(self) self._pack_offset_table_offset = (self._crc32_table_offset + 4 * len(self)) self._pack_offset_largetable_offset = (self._pack_offset_table_offset + 4 * len(self)) def _unpack_entry(self, i): return (self._unpack_name(i), self._unpack_offset(i), self._unpack_crc32_checksum(i)) def _unpack_name(self, i): offset = self._name_table_offset + i * 20 return self._contents[offset:offset+20] def _unpack_offset(self, i): offset = self._pack_offset_table_offset + i * 4 offset = unpack_from('>L', self._contents, offset)[0] if offset & (2**31): offset = self._pack_offset_largetable_offset + (offset&(2**31-1)) * 8L offset = unpack_from('>Q', self._contents, offset)[0] return offset def _unpack_crc32_checksum(self, i): return unpack_from('>L', self._contents, self._crc32_table_offset + i * 4)[0] def read_pack_header(read): """Read the header of a pack file. :param read: Read function :return: Tuple of (pack version, number of objects). 
If no data is available to read, returns (None, None). """ header = read(12) if not header: return None, None if header[:4] != 'PACK': raise AssertionError('Invalid pack header %r' % header) (version,) = unpack_from('>L', header, 4) if version not in (2, 3): raise AssertionError('Version was %d' % version) (num_objects,) = unpack_from('>L', header, 8) return (version, num_objects) def chunks_length(chunks): return sum(imap(len, chunks)) def unpack_object(read_all, read_some=None, compute_crc32=False, include_comp=False, zlib_bufsize=_ZLIB_BUFSIZE): """Unpack a Git object. :param read_all: Read function that blocks until the number of requested bytes are read. :param read_some: Read function that returns at least one byte, but may not return the number of bytes requested. :param compute_crc32: If True, compute the CRC32 of the compressed data. If False, the returned CRC32 will be None. :param include_comp: If True, include compressed data in the result. :param zlib_bufsize: An optional buffer size for zlib operations. :return: A tuple of (unpacked, unused), where unused is the unused data leftover from decompression, and unpacked in an UnpackedObject with the following attrs set: * obj_chunks (for non-delta types) * pack_type_num * delta_base (for delta types) * comp_chunks (if include_comp is True) * decomp_chunks * decomp_len * crc32 (if compute_crc32 is True) """ if read_some is None: read_some = read_all if compute_crc32: crc32 = 0 else: crc32 = None bytes, crc32 = take_msb_bytes(read_all, crc32=crc32) type_num = (bytes[0] >> 4) & 0x07 size = bytes[0] & 0x0f for i, byte in enumerate(bytes[1:]): size += (byte & 0x7f) << ((i * 7) + 4) raw_base = len(bytes) if type_num == OFS_DELTA: bytes, crc32 = take_msb_bytes(read_all, crc32=crc32) raw_base += len(bytes) if bytes[-1] & 0x80: raise AssertionError delta_base_offset = bytes[0] & 0x7f for byte in bytes[1:]: delta_base_offset += 1 delta_base_offset <<= 7 delta_base_offset += (byte & 0x7f) delta_base = delta_base_offset elif type_num == REF_DELTA: delta_base = read_all(20) if compute_crc32: crc32 = binascii.crc32(delta_base, crc32) raw_base += 20 else: delta_base = None unpacked = UnpackedObject(type_num, delta_base, size, crc32) unused = read_zlib_chunks(read_some, unpacked, buffer_size=zlib_bufsize, include_comp=include_comp) return unpacked, unused def _compute_object_size((num, obj)): """Compute the size of a unresolved object for use with LRUSizeCache.""" if num in DELTA_TYPES: return chunks_length(obj[1]) return chunks_length(obj) class PackStreamReader(object): """Class to read a pack stream. The pack is read from a ReceivableProtocol using read() or recv() as appropriate. """ def __init__(self, read_all, read_some=None, zlib_bufsize=_ZLIB_BUFSIZE): self.read_all = read_all if read_some is None: self.read_some = read_all else: self.read_some = read_some self.sha = sha1() self._offset = 0 self._rbuf = StringIO() # trailer is a deque to avoid memory allocation on small reads self._trailer = deque() self._zlib_bufsize = zlib_bufsize def _read(self, read, size): """Read up to size bytes using the given callback. As a side effect, update the verifier's hash (excluding the last 20 bytes read). :param read: The read callback to read from. :param size: The maximum number of bytes to read; the particular behavior is callback-specific. 
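# An illustration of the OFS_DELTA offset encoding decoded by unpack_object()
# above. The "+1 before shifting" bias guarantees every value has exactly one
# encoding; pack_object_header(), later in this file, performs the
# mirror-image subtraction when writing. The helper is a sketch, not dulwich
# API.
def _demo_decode_ofs_delta_base(data):
    value = ord(data[0]) & 0x7f
    for c in data[1:]:
        value += 1
        value = (value << 7) + (ord(c) & 0x7f)
    return value

# '\x81\x00' -> ((1 + 1) << 7) + 0 = 256 bytes back from the delta's offset.
assert _demo_decode_ofs_delta_base('\x81\x00') == 256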
""" data = read(size) # maintain a trailer of the last 20 bytes we've read n = len(data) self._offset += n tn = len(self._trailer) if n >= 20: to_pop = tn to_add = 20 else: to_pop = max(n + tn - 20, 0) to_add = n for _ in xrange(to_pop): self.sha.update(self._trailer.popleft()) self._trailer.extend(data[-to_add:]) # hash everything but the trailer self.sha.update(data[:-to_add]) return data def _buf_len(self): buf = self._rbuf start = buf.tell() buf.seek(0, SEEK_END) end = buf.tell() buf.seek(start) return end - start @property def offset(self): return self._offset - self._buf_len() def read(self, size): """Read, blocking until size bytes are read.""" buf_len = self._buf_len() if buf_len >= size: return self._rbuf.read(size) buf_data = self._rbuf.read() self._rbuf = StringIO() return buf_data + self._read(self.read_all, size - buf_len) def recv(self, size): """Read up to size bytes, blocking until one byte is read.""" buf_len = self._buf_len() if buf_len: data = self._rbuf.read(size) if size >= buf_len: self._rbuf = StringIO() return data return self._read(self.read_some, size) def __len__(self): return self._num_objects def read_objects(self, compute_crc32=False): """Read the objects in this pack file. :param compute_crc32: If True, compute the CRC32 of the compressed data. If False, the returned CRC32 will be None. :return: Iterator over UnpackedObjects with the following members set: offset obj_type_num obj_chunks (for non-delta types) delta_base (for delta types) decomp_chunks decomp_len crc32 (if compute_crc32 is True) :raise ChecksumMismatch: if the checksum of the pack contents does not match the checksum in the pack trailer. :raise zlib.error: if an error occurred during zlib decompression. :raise IOError: if an error occurred writing to the output file. """ pack_version, self._num_objects = read_pack_header(self.read) if pack_version is None: return for i in xrange(self._num_objects): offset = self.offset unpacked, unused = unpack_object( self.read, read_some=self.recv, compute_crc32=compute_crc32, zlib_bufsize=self._zlib_bufsize) unpacked.offset = offset # prepend any unused data to current read buffer buf = StringIO() buf.write(unused) buf.write(self._rbuf.read()) buf.seek(0) self._rbuf = buf yield unpacked if self._buf_len() < 20: # If the read buffer is full, then the last read() got the whole # trailer off the wire. If not, it means there is still some of the # trailer to read. We need to read() all 20 bytes; N come from the # read buffer and (20 - N) come from the wire. self.read(20) pack_sha = ''.join(self._trailer) if pack_sha != self.sha.digest(): raise ChecksumMismatch(sha_to_hex(pack_sha), self.sha.hexdigest()) class PackStreamCopier(PackStreamReader): """Class to verify a pack stream as it is being read. The pack is read from a ReceivableProtocol using read() or recv() as appropriate and written out to the given file-like object. """ def __init__(self, read_all, read_some, outfile, delta_iter=None): """Initialize the copier. :param read_all: Read function that blocks until the number of requested bytes are read. :param read_some: Read function that returns at least one byte, but may not return the number of bytes requested. :param outfile: File-like object to write output through. :param delta_iter: Optional DeltaChainIterator to record deltas as we read them. 
""" super(PackStreamCopier, self).__init__(read_all, read_some=read_some) self.outfile = outfile self._delta_iter = delta_iter def _read(self, read, size): """Read data from the read callback and write it to the file.""" data = super(PackStreamCopier, self)._read(read, size) self.outfile.write(data) return data def verify(self): """Verify a pack stream and write it to the output file. See PackStreamReader.iterobjects for a list of exceptions this may throw. """ if self._delta_iter: for unpacked in self.read_objects(): self._delta_iter.record(unpacked) else: for _ in self.read_objects(): pass def obj_sha(type, chunks): """Compute the SHA for a numeric type and object chunks.""" sha = sha1() sha.update(object_header(type, chunks_length(chunks))) for chunk in chunks: sha.update(chunk) return sha.digest() def compute_file_sha(f, start_ofs=0, end_ofs=0, buffer_size=1<<16): """Hash a portion of a file into a new SHA. :param f: A file-like object to read from that supports seek(). :param start_ofs: The offset in the file to start reading at. :param end_ofs: The offset in the file to end reading at, relative to the end of the file. :param buffer_size: A buffer size for reading. :return: A new SHA object updated with data read from the file. """ sha = sha1() f.seek(0, SEEK_END) todo = f.tell() + end_ofs - start_ofs f.seek(start_ofs) while todo: data = f.read(min(todo, buffer_size)) sha.update(data) todo -= len(data) return sha class PackData(object): """The data contained in a packfile. Pack files can be accessed both sequentially for exploding a pack, and directly with the help of an index to retrieve a specific object. The objects within are either complete or a delta aginst another. The header is variable length. If the MSB of each byte is set then it indicates that the subsequent byte is still part of the header. For the first byte the next MS bits are the type, which tells you the type of object, and whether it is a delta. The LS byte is the lowest bits of the size. For each subsequent byte the LS 7 bits are the next MS bits of the size, i.e. the last byte of the header contains the MS bits of the size. For the complete objects the data is stored as zlib deflated data. The size in the header is the uncompressed object size, so to uncompress you need to just keep feeding data to zlib until you get an object back, or it errors on bad data. This is done here by just giving the complete buffer from the start of the deflated object on. This is bad, but until I get mmap sorted out it will have to do. Currently there are no integrity checks done. Also no attempt is made to try and detect the delta case, or a request for an object at the wrong position. It will all just throw a zlib or KeyError. """ def __init__(self, filename, file=None, size=None): """Create a PackData object representing the pack in the given filename. The file must exist and stay readable until the object is disposed of. It must also stay the same size. It will be mapped whenever needed. Currently there is a restriction on the size of the pack as the python mmap implementation is flawed. 
""" self._filename = filename self._size = size self._header_size = 12 if file is None: self._file = GitFile(self._filename, 'rb') else: self._file = file (version, self._num_objects) = read_pack_header(self._file.read) self._offset_cache = LRUSizeCache(1024*1024*20, compute_size=_compute_object_size) self.pack = None + @property + def filename(self): + return os.path.basename(self._filename) + @classmethod def from_file(cls, file, size): return cls(str(file), file=file, size=size) @classmethod def from_path(cls, path): return cls(filename=path) def close(self): self._file.close() def _get_size(self): if self._size is not None: return self._size self._size = os.path.getsize(self._filename) if self._size < self._header_size: errmsg = ('%s is too small for a packfile (%d < %d)' % (self._filename, self._size, self._header_size)) raise AssertionError(errmsg) return self._size def __len__(self): """Returns the number of objects in this pack.""" return self._num_objects def calculate_checksum(self): """Calculate the checksum for this pack. :return: 20-byte binary SHA1 digest """ return compute_file_sha(self._file, end_ofs=-20).digest() def get_ref(self, sha): """Get the object for a ref SHA, only looking in this pack.""" # TODO: cache these results if self.pack is None: raise KeyError(sha) try: offset = self.pack.index.object_index(sha) except KeyError: offset = None if offset: type, obj = self.get_object_at(offset) elif self.pack is not None and self.pack.resolve_ext_ref: type, obj = self.pack.resolve_ext_ref(sha) else: raise KeyError(sha) return offset, type, obj def resolve_object(self, offset, type, obj, get_ref=None): """Resolve an object, possibly resolving deltas when necessary. :return: Tuple with object type and contents. """ if type not in DELTA_TYPES: return type, obj if get_ref is None: get_ref = self.get_ref if type == OFS_DELTA: (delta_offset, delta) = obj # TODO: clean up asserts and replace with nicer error messages assert isinstance(offset, int) or isinstance(offset, long) assert isinstance(delta_offset, int) or isinstance(offset, long) base_offset = offset-delta_offset type, base_obj = self.get_object_at(base_offset) assert isinstance(type, int) elif type == REF_DELTA: (basename, delta) = obj assert isinstance(basename, str) and len(basename) == 20 base_offset, type, base_obj = get_ref(basename) assert isinstance(type, int) type, base_chunks = self.resolve_object(base_offset, type, base_obj) chunks = apply_delta(base_chunks, delta) # TODO(dborowitz): This can result in poor performance if large base # objects are separated from deltas in the pack. We should reorganize # so that we apply deltas to all objects in a chain one after the other # to optimize cache performance. if offset is not None: self._offset_cache[offset] = type, chunks return type, chunks def iterobjects(self, progress=None, compute_crc32=True): self._file.seek(self._header_size) for i in xrange(1, self._num_objects + 1): offset = self._file.tell() unpacked, unused = unpack_object( self._file.read, compute_crc32=compute_crc32) if progress is not None: progress(i, self._num_objects) yield (offset, unpacked.pack_type_num, unpacked._obj(), unpacked.crc32) self._file.seek(-len(unused), SEEK_CUR) # Back up over unused data. def _iter_unpacked(self): # TODO(dborowitz): Merge this with iterobjects, if we can change its # return type. 
self._file.seek(self._header_size) for _ in xrange(self._num_objects): offset = self._file.tell() unpacked, unused = unpack_object( self._file.read, compute_crc32=False) unpacked.offset = offset yield unpacked self._file.seek(-len(unused), SEEK_CUR) # Back up over unused data. def iterentries(self, progress=None): """Yield entries summarizing the contents of this pack. :param progress: Progress function, called with current and total object count. :return: iterator of tuples with (sha, offset, crc32) """ num_objects = self._num_objects resolve_ext_ref = ( self.pack.resolve_ext_ref if self.pack is not None else None) indexer = PackIndexer.for_pack_data( self, resolve_ext_ref=resolve_ext_ref) for i, result in enumerate(indexer): if progress is not None: progress(i, num_objects) yield result def sorted_entries(self, progress=None): """Return entries in this pack, sorted by SHA. :param progress: Progress function, called with current and total object count :return: List of tuples with (sha, offset, crc32) """ ret = list(self.iterentries(progress=progress)) ret.sort() return ret def create_index_v1(self, filename, progress=None): """Create a version 1 file for this data file. :param filename: Index filename. :param progress: Progress report function :return: Checksum of index file """ entries = self.sorted_entries(progress=progress) f = GitFile(filename, 'wb') try: return write_pack_index_v1(f, entries, self.calculate_checksum()) finally: f.close() def create_index_v2(self, filename, progress=None): """Create a version 2 index file for this data file. :param filename: Index filename. :param progress: Progress report function :return: Checksum of index file """ entries = self.sorted_entries(progress=progress) f = GitFile(filename, 'wb') try: return write_pack_index_v2(f, entries, self.calculate_checksum()) finally: f.close() def create_index(self, filename, progress=None, version=2): """Create an index file for this data file. :param filename: Index filename. :param progress: Progress report function :return: Checksum of index file """ if version == 1: return self.create_index_v1(filename, progress) elif version == 2: return self.create_index_v2(filename, progress) else: raise ValueError('unknown index format %d' % version) def get_stored_checksum(self): """Return the expected checksum stored in this pack.""" self._file.seek(-20, SEEK_END) return self._file.read(20) def check(self): """Check the consistency of this pack.""" actual = self.calculate_checksum() stored = self.get_stored_checksum() if actual != stored: raise ChecksumMismatch(stored, actual) def get_object_at(self, offset): """Given an offset in to the packfile return the object that is there. Using the associated index the location of an object can be looked up, and then the packfile can be asked directly for that object using this function. """ try: return self._offset_cache[offset] except KeyError: pass assert isinstance(offset, long) or isinstance(offset, int),\ 'offset was %r' % offset assert offset >= self._header_size self._file.seek(offset) unpacked, _ = unpack_object(self._file.read) return (unpacked.pack_type_num, unpacked._obj()) class DeltaChainIterator(object): """Abstract iterator over pack data based on delta chains. Each object in the pack is guaranteed to be inflated exactly once, regardless of how many objects reference it as a delta base. As a result, memory usage is proportional to the length of the longest delta chain. Subclasses can override _result to define the result type of the iterator. 
By default, results are UnpackedObjects with the following members set: * offset * obj_type_num * obj_chunks * pack_type_num * delta_base (for delta types) * comp_chunks (if _include_comp is True) * decomp_chunks * decomp_len * crc32 (if _compute_crc32 is True) """ _compute_crc32 = False _include_comp = False def __init__(self, file_obj, resolve_ext_ref=None): self._file = file_obj self._resolve_ext_ref = resolve_ext_ref self._pending_ofs = defaultdict(list) self._pending_ref = defaultdict(list) self._full_ofs = [] self._shas = {} self._ext_refs = [] @classmethod def for_pack_data(cls, pack_data, resolve_ext_ref=None): walker = cls(None, resolve_ext_ref=resolve_ext_ref) walker.set_pack_data(pack_data) for unpacked in pack_data._iter_unpacked(): walker.record(unpacked) return walker def record(self, unpacked): type_num = unpacked.pack_type_num offset = unpacked.offset if type_num == OFS_DELTA: base_offset = offset - unpacked.delta_base self._pending_ofs[base_offset].append(offset) elif type_num == REF_DELTA: self._pending_ref[unpacked.delta_base].append(offset) else: self._full_ofs.append((offset, type_num)) def set_pack_data(self, pack_data): self._file = pack_data._file def _walk_all_chains(self): for offset, type_num in self._full_ofs: for result in self._follow_chain(offset, type_num, None): yield result for result in self._walk_ref_chains(): yield result assert not self._pending_ofs def _ensure_no_pending(self): if self._pending_ref: raise KeyError([sha_to_hex(s) for s in self._pending_ref]) def _walk_ref_chains(self): if not self._resolve_ext_ref: self._ensure_no_pending() return for base_sha, pending in sorted(self._pending_ref.iteritems()): try: type_num, chunks = self._resolve_ext_ref(base_sha) except KeyError: # Not an external ref, but may depend on one. Either it will get # popped via a _follow_chain call, or we will raise an error # below. continue self._ext_refs.append(base_sha) self._pending_ref.pop(base_sha) for new_offset in pending: for result in self._follow_chain(new_offset, type_num, chunks): yield result self._ensure_no_pending() def _result(self, unpacked): return unpacked def _resolve_object(self, offset, obj_type_num, base_chunks): self._file.seek(offset) unpacked, _ = unpack_object( self._file.read, include_comp=self._include_comp, compute_crc32=self._compute_crc32) unpacked.offset = offset if base_chunks is None: assert unpacked.pack_type_num == obj_type_num else: assert unpacked.pack_type_num in DELTA_TYPES unpacked.obj_type_num = obj_type_num unpacked.obj_chunks = apply_delta(base_chunks, unpacked.decomp_chunks) return unpacked def _follow_chain(self, offset, obj_type_num, base_chunks): # Unlike PackData.get_object_at, there is no need to cache offsets as # this approach by design inflates each object exactly once. 
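# A toy model of the bookkeeping DeltaChainIterator.record() sets up: full
# objects seed the walk, OFS_DELTA entries wait on a base *offset* and
# REF_DELTA entries wait on a base *SHA*; _follow_chain() then pops waiters as
# their bases inflate, so no base is ever decompressed twice. Sketch only.
def _demo_chain_order(full_offsets, pending_ofs):
    order, stack = [], list(full_offsets)
    while stack:
        offset = stack.pop()
        order.append(offset)
        stack.extend(pending_ofs.pop(offset, []))
    return order

# Offset 12 is a full object; 40 and 80 are ofs-deltas against 12 and 40.
assert _demo_chain_order([12], {12: [40], 40: [80]}) == [12, 40, 80]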
unpacked = self._resolve_object(offset, obj_type_num, base_chunks) yield self._result(unpacked) pending = chain(self._pending_ofs.pop(unpacked.offset, []), self._pending_ref.pop(unpacked.sha(), [])) for new_offset in pending: for new_result in self._follow_chain( new_offset, unpacked.obj_type_num, unpacked.obj_chunks): yield new_result def __iter__(self): return self._walk_all_chains() def ext_refs(self): return self._ext_refs class PackIndexer(DeltaChainIterator): """Delta chain iterator that yields index entries.""" _compute_crc32 = True def _result(self, unpacked): return unpacked.sha(), unpacked.offset, unpacked.crc32 class PackInflater(DeltaChainIterator): """Delta chain iterator that yields ShaFile objects.""" def _result(self, unpacked): return unpacked.sha_file() class SHA1Reader(object): """Wrapper around a file-like object that remembers the SHA1 of its data.""" def __init__(self, f): self.f = f self.sha1 = sha1('') def read(self, num=None): data = self.f.read(num) self.sha1.update(data) return data def check_sha(self): stored = self.f.read(20) if stored != self.sha1.digest(): raise ChecksumMismatch(self.sha1.hexdigest(), sha_to_hex(stored)) def close(self): return self.f.close() def tell(self): return self.f.tell() class SHA1Writer(object): """Wrapper around a file-like object that remembers the SHA1 of its data.""" def __init__(self, f): self.f = f self.length = 0 self.sha1 = sha1('') def write(self, data): self.sha1.update(data) self.f.write(data) self.length += len(data) def write_sha(self): sha = self.sha1.digest() assert len(sha) == 20 self.f.write(sha) self.length += len(sha) return sha def close(self): sha = self.write_sha() self.f.close() return sha def offset(self): return self.length def tell(self): return self.f.tell() def pack_object_header(type_num, delta_base, size): """Create a pack object header for the given object info. :param type_num: Numeric type of the object. :param delta_base: Delta base offset or ref, or None for whole objects. :param size: Uncompressed object size. :return: A header for a packed object. """ header = '' c = (type_num << 4) | (size & 15) size >>= 4 while size: header += (chr(c | 0x80)) c = size & 0x7f size >>= 7 header += chr(c) if type_num == OFS_DELTA: ret = [delta_base & 0x7f] delta_base >>= 7 while delta_base: delta_base -= 1 ret.insert(0, 0x80 | (delta_base & 0x7f)) delta_base >>= 7 header += ''.join([chr(x) for x in ret]) elif type_num == REF_DELTA: assert len(delta_base) == 20 header += delta_base return header def write_pack_object(f, type, object, sha=None): """Write pack object to a file. :param f: File to write to :param type: Numeric type of the object :param object: Object to write :return: Tuple with offset at which the object was written, and crc32 """ if type in DELTA_TYPES: delta_base, object = object else: delta_base = None header = pack_object_header(type, delta_base, len(object)) comp_data = zlib.compress(object) crc32 = 0 for data in (header, comp_data): f.write(data) if sha is not None: sha.update(data) crc32 = binascii.crc32(data, crc32) return crc32 & 0xffffffff def write_pack(filename, objects, num_objects=None): """Write a new pack data file. :param filename: Path to the new pack file (without .pack extension) :param objects: Iterable of (object, path) tuples to write. 
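# Two concrete header encodings from pack_object_header() defined above: a
# 10-byte blob fits type and size in a single byte, while a size of 300 needs
# one continuation byte (low 4 bits in the first byte, 7 bits per byte after).
assert pack_object_header(3, None, 10) == chr((3 << 4) | 10)
assert pack_object_header(3, None, 300) == '\xbc\x12'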
Should provide __len__ :return: Tuple with checksum of pack file and index file """ if num_objects is not None: warnings.warn('num_objects argument to write_pack is deprecated', DeprecationWarning) f = GitFile(filename + '.pack', 'wb') try: entries, data_sum = write_pack_objects(f, objects, num_objects=num_objects) finally: f.close() entries = [(k, v[0], v[1]) for (k, v) in entries.iteritems()] entries.sort() f = GitFile(filename + '.idx', 'wb') try: return data_sum, write_pack_index_v2(f, entries, data_sum) finally: f.close() def write_pack_header(f, num_objects): """Write a pack header for the given number of objects.""" f.write('PACK') # Pack header f.write(struct.pack('>L', 2)) # Pack version f.write(struct.pack('>L', num_objects)) # Number of objects in pack def deltify_pack_objects(objects, window=10): """Generate deltas for pack objects. :param objects: Objects to deltify :param window: Window size :return: Iterator over type_num, object id, delta_base, content delta_base is None for full text entries """ # Build a list of objects ordered by the magic Linus heuristic # This helps us find good objects to diff against us magic = [] for obj, path in objects: magic.append((obj.type_num, path, -obj.raw_length(), obj)) magic.sort() possible_bases = deque() for type_num, path, neg_length, o in magic: raw = o.as_raw_string() winner = raw winner_base = None for base in possible_bases: if base.type_num != type_num: continue delta = create_delta(base.as_raw_string(), raw) if len(delta) < len(winner): winner_base = base.sha().digest() winner = delta yield type_num, o.sha().digest(), winner_base, winner possible_bases.appendleft(o) while len(possible_bases) > window: possible_bases.pop() def write_pack_objects(f, objects, window=10, num_objects=None): """Write a new pack data file. :param f: File to write to :param objects: Iterable of (object, path) tuples to write. Should provide __len__ :param window: Sliding window size for searching for deltas; currently unimplemented :param num_objects: Number of objects (do not use, deprecated) :return: Dict mapping id -> (offset, crc32 checksum), pack checksum """ if num_objects is None: num_objects = len(objects) # FIXME: pack_contents = deltify_pack_objects(objects, window) pack_contents = ( (o.type_num, o.sha().digest(), None, o.as_raw_string()) for (o, path) in objects) return write_pack_data(f, num_objects, pack_contents) def write_pack_data(f, num_records, records): """Write a new pack data file. :param f: File to write to :param num_records: Number of records :param records: Iterator over type_num, object_id, delta_base, raw :return: Dict mapping id -> (offset, crc32 checksum), pack checksum """ # Write the pack entries = {} f = SHA1Writer(f) write_pack_header(f, num_records) for type_num, object_id, delta_base, raw in records: if delta_base is not None: try: base_offset, base_crc32 = entries[delta_base] except KeyError: type_num = REF_DELTA raw = (delta_base, raw) else: type_num = OFS_DELTA raw = (base_offset, raw) offset = f.offset() crc32 = write_pack_object(f, type_num, raw) entries[object_id] = (offset, crc32) return entries, f.write_sha() def write_pack_index_v1(f, entries, pack_checksum): """Write a new pack index file. :param f: A file-like object to write to :param entries: List of tuples with object name (sha), offset_in_pack, and crc32_checksum. :param pack_checksum: Checksum of the pack file. 
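# A usage sketch for write_pack() defined above: given loose objects it writes
# basename.pack plus basename.idx in one call. Blob and the path below are
# illustrative, not part of this module:
#
#   from dulwich.objects import Blob
#   blob = Blob.from_string('hello world')
#   data_sum, idx_sum = write_pack('/tmp/pack-demo', [(blob, None)])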
:return: The SHA of the written index file """ f = SHA1Writer(f) fan_out_table = defaultdict(lambda: 0) for (name, offset, entry_checksum) in entries: fan_out_table[ord(name[0])] += 1 # Fan-out table for i in range(0x100): f.write(struct.pack('>L', fan_out_table[i])) fan_out_table[i+1] += fan_out_table[i] for (name, offset, entry_checksum) in entries: if not (offset <= 0xffffffff): raise TypeError("pack format 1 only supports offsets < 2Gb") f.write(struct.pack('>L20s', offset, name)) assert len(pack_checksum) == 20 f.write(pack_checksum) return f.write_sha() def create_delta(base_buf, target_buf): """Use python difflib to work out how to transform base_buf to target_buf. :param base_buf: Base buffer :param target_buf: Target buffer """ assert isinstance(base_buf, str) assert isinstance(target_buf, str) out_buf = '' # write delta header def encode_size(size): ret = '' c = size & 0x7f size >>= 7 while size: ret += chr(c | 0x80) c = size & 0x7f size >>= 7 ret += chr(c) return ret out_buf += encode_size(len(base_buf)) out_buf += encode_size(len(target_buf)) # write out delta opcodes seq = difflib.SequenceMatcher(a=base_buf, b=target_buf) for opcode, i1, i2, j1, j2 in seq.get_opcodes(): # Git patch opcodes don't care about deletes! #if opcode == 'replace' or opcode == 'delete': # pass if opcode == 'equal': # If they are equal, unpacker will use data from base_buf # Write out an opcode that says what range to use scratch = '' op = 0x80 o = i1 for i in range(4): if o & 0xff << i*8: scratch += chr((o >> i*8) & 0xff) op |= 1 << i s = i2 - i1 for i in range(2): if s & 0xff << i*8: scratch += chr((s >> i*8) & 0xff) op |= 1 << (4+i) out_buf += chr(op) out_buf += scratch if opcode == 'replace' or opcode == 'insert': # If we are replacing a range or adding one, then we just # output it to the stream (prefixed by its size) s = j2 - j1 o = j1 while s > 127: out_buf += chr(127) out_buf += target_buf[o:o+127] s -= 127 o += 127 out_buf += chr(s) out_buf += target_buf[o:o+s] return out_buf def apply_delta(src_buf, delta): """Based on the similar function in git's patch-delta.c. :param src_buf: Source buffer :param delta: Delta instructions """ if type(src_buf) != str: src_buf = ''.join(src_buf) if type(delta) != str: delta = ''.join(delta) out = [] index = 0 delta_length = len(delta) def get_delta_header_size(delta, index): size = 0 i = 0 while delta: cmd = ord(delta[index]) index += 1 size |= (cmd & ~0x80) << i i += 7 if not cmd & 0x80: break return size, index src_size, index = get_delta_header_size(delta, index) dest_size, index = get_delta_header_size(delta, index) assert src_size == len(src_buf), '%d vs %d' % (src_size, len(src_buf)) while index < delta_length: cmd = ord(delta[index]) index += 1 if cmd & 0x80: cp_off = 0 for i in range(4): if cmd & (1 << i): x = ord(delta[index]) index += 1 cp_off |= x << (i * 8) cp_size = 0 for i in range(3): if cmd & (1 << (4+i)): x = ord(delta[index]) index += 1 cp_size |= x << (i * 8) if cp_size == 0: cp_size = 0x10000 if (cp_off + cp_size < cp_size or cp_off + cp_size > src_size or cp_size > dest_size): break out.append(src_buf[cp_off:cp_off+cp_size]) elif cmd != 0: out.append(delta[index:index+cmd]) index += cmd else: raise ApplyDeltaError('Invalid opcode 0') if index != delta_length: raise ApplyDeltaError('delta not empty: %r' % delta[index:]) if dest_size != chunks_length(out): raise ApplyDeltaError('dest size incorrect') return out def write_pack_index_v2(f, entries, pack_checksum): """Write a new pack index file. 
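# A round-trip check for the delta codec defined above: apply_delta() must
# rebuild the target from the base plus create_delta()'s opcodes (copy ranges
# for 'equal' spans, literal inserts for new bytes).
_base = 'the quick brown fox'
_target = 'the quick red fox jumps'
assert ''.join(apply_delta(_base, create_delta(_base, _target))) == _target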
:param f: File-like object to write to :param entries: List of tuples with object name (sha), offset_in_pack, and crc32_checksum. :param pack_checksum: Checksum of the pack file. :return: The SHA of the index file written """ f = SHA1Writer(f) f.write('\377tOc') # Magic! f.write(struct.pack('>L', 2)) fan_out_table = defaultdict(lambda: 0) for (name, offset, entry_checksum) in entries: fan_out_table[ord(name[0])] += 1 # Fan-out table largetable = [] for i in range(0x100): f.write(struct.pack('>L', fan_out_table[i])) fan_out_table[i+1] += fan_out_table[i] for (name, offset, entry_checksum) in entries: f.write(name) for (name, offset, entry_checksum) in entries: f.write(struct.pack('>L', entry_checksum)) for (name, offset, entry_checksum) in entries: if offset < 2**31: f.write(struct.pack('>L', offset)) else: f.write(struct.pack('>L', 2**31 + len(largetable))) largetable.append(offset) for offset in largetable: f.write(struct.pack('>Q', offset)) assert len(pack_checksum) == 20 f.write(pack_checksum) return f.write_sha() class Pack(object): """A Git pack object.""" def __init__(self, basename, resolve_ext_ref=None): self._basename = basename self._data = None self._idx = None self._idx_path = self._basename + '.idx' self._data_path = self._basename + '.pack' self._data_load = lambda: PackData(self._data_path) self._idx_load = lambda: load_pack_index(self._idx_path) self.resolve_ext_ref = resolve_ext_ref @classmethod def from_lazy_objects(self, data_fn, idx_fn): """Create a new pack object from callables to load pack data and index objects.""" ret = Pack('') ret._data_load = data_fn ret._idx_load = idx_fn return ret @classmethod def from_objects(self, data, idx): """Create a new pack object from pack data and index objects.""" ret = Pack('') ret._data_load = lambda: data ret._idx_load = lambda: idx return ret def name(self): """The SHA over the SHAs of the objects in this pack.""" return self.index.objects_sha1() @property def data(self): """The pack data object being used.""" if self._data is None: self._data = self._data_load() self._data.pack = self self.check_length_and_checksum() return self._data @property def index(self): """The index being used. :note: This may be an in-memory index """ if self._idx is None: self._idx = self._idx_load() return self._idx def close(self): if self._data is not None: self._data.close() if self._idx is not None: self._idx.close() def __eq__(self, other): return type(self) == type(other) and self.index == other.index def __len__(self): """Number of entries in this pack.""" return len(self.index) def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self._basename) def __iter__(self): """Iterate over all the sha1s of the objects in this pack.""" return iter(self.index) def check_length_and_checksum(self): """Sanity check the length and checksum of the pack index and data.""" assert len(self.index) == len(self.data) idx_stored_checksum = self.index.get_pack_checksum() data_stored_checksum = self.data.get_stored_checksum() if idx_stored_checksum != data_stored_checksum: raise ChecksumMismatch(sha_to_hex(idx_stored_checksum), sha_to_hex(data_stored_checksum)) def check(self): """Check the integrity of this pack. 
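# A sketch of the large-offset encoding used by write_pack_index_v2() above:
# offsets >= 2**31 do not fit the 4-byte slot, so the slot stores 2**31 + i
# ("see entry i of the trailing 8-byte table") and the real offset goes in
# largetable -- the exact inverse of PackIndex2._unpack_offset().
def _demo_encode_index_offset(offset, largetable):
    if offset < 2 ** 31:
        return offset
    largetable.append(offset)
    return 2 ** 31 + len(largetable) - 1

_lt = []
assert _demo_encode_index_offset(42, _lt) == 42
assert _demo_encode_index_offset(2 ** 33, _lt) == 2 ** 31 and _lt == [2 ** 33]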
:raise ChecksumMismatch: if a checksum for the index or data is wrong """ self.index.check() self.data.check() for obj in self.iterobjects(): obj.check() # TODO: object connectivity checks def get_stored_checksum(self): return self.data.get_stored_checksum() def __contains__(self, sha1): """Check whether this pack contains a particular SHA1.""" try: self.index.object_index(sha1) return True except KeyError: return False def get_raw(self, sha1): offset = self.index.object_index(sha1) obj_type, obj = self.data.get_object_at(offset) type_num, chunks = self.data.resolve_object(offset, obj_type, obj) return type_num, ''.join(chunks) def __getitem__(self, sha1): """Retrieve the specified SHA1.""" type, uncomp = self.get_raw(sha1) return ShaFile.from_raw_string(type, uncomp, sha=sha1) def iterobjects(self): """Iterate over the objects in this pack.""" return iter(PackInflater.for_pack_data( self.data, resolve_ext_ref=self.resolve_ext_ref)) def pack_tuples(self): """Provide an iterable for use with write_pack_objects. :return: Object that can iterate over (object, path) tuples and provides __len__ """ class PackTupleIterable(object): def __init__(self, pack): self.pack = pack def __len__(self): return len(self.pack) def __iter__(self): return ((o, None) for o in self.pack.iterobjects()) return PackTupleIterable(self) def keep(self, msg=None): """Add a .keep file for the pack, preventing git from garbage collecting it. :param msg: A message written inside the .keep file; can be used later to determine whether or not a .keep file is obsolete. :return: The path of the .keep file, as a string. """ keepfile_name = '%s.keep' % self._basename keepfile = GitFile(keepfile_name, 'wb') try: if msg: keepfile.write(msg) keepfile.write('\n') finally: keepfile.close() return keepfile_name try: from dulwich._pack import apply_delta, bisect_find_sha except ImportError: pass diff --git a/dulwich/server.py b/dulwich/server.py index fed451f2..b6d61556 100644 --- a/dulwich/server.py +++ b/dulwich/server.py @@ -1,939 +1,939 @@ # server.py -- Implementation of the server side git protocols # Copyright (C) 2008 John Carr # Copyright (C) 2011-2012 Jelmer Vernooij # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 # or (at your option) any later version of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. """Git smart network protocol server implementation.
For more detailed implementation on the network protocol, see the Documentation/technical directory in the cgit distribution, and in particular: * Documentation/technical/protocol-capabilities.txt * Documentation/technical/pack-protocol.txt Currently supported capabilities: * include-tag * thin-pack * multi_ack_detailed * multi_ack * side-band-64k * ofs-delta * no-progress * report-status * delete-refs Known capabilities that are not supported: * shallow (http://pad.lv/909524) """ import collections import os import socket import SocketServer import sys import zlib from dulwich.errors import ( ApplyDeltaError, ChecksumMismatch, GitProtocolError, NotGitRepository, UnexpectedCommandError, ObjectFormatException, ) from dulwich import log_utils from dulwich.objects import ( hex_to_sha, Commit, ) from dulwich.pack import ( write_pack_objects, ) from dulwich.protocol import ( BufferedPktLineWriter, MULTI_ACK, MULTI_ACK_DETAILED, Protocol, ProtocolFile, ReceivableProtocol, SINGLE_ACK, TCP_GIT_PORT, ZERO_SHA, ack_type, extract_capabilities, extract_want_line_capabilities, ) from dulwich.refs import ( write_info_refs, ) from dulwich.repo import ( Repo, ) logger = log_utils.getLogger(__name__) class Backend(object): """A backend for the Git smart server implementation.""" def open_repository(self, path): """Open the repository at a path. :param path: Path to the repository :raise NotGitRepository: no git repository was found at path :return: Instance of BackendRepo """ raise NotImplementedError(self.open_repository) class BackendRepo(object): """Repository abstraction used by the Git server. The methods required here are a subset of those provided by dulwich.repo.Repo. """ object_store = None refs = None def get_refs(self): """ Get all the refs in the repository :return: dict of name -> sha """ raise NotImplementedError def get_peeled(self, name): """Return the cached peeled value of a ref, if available. :param name: Name of the ref to peel :return: The peeled value of the ref. If the ref is known not point to a tag, this will be the SHA the ref refers to. If no cached information about a tag is available, this method may return None, but it should attempt to peel the tag if possible. """ return None def fetch_objects(self, determine_wants, graph_walker, progress, get_tagged=None): """ Yield the objects required for a list of commits. :param progress: is a callback to send progress messages to the client :param get_tagged: Function that returns a dict of pointed-to sha -> tag sha for including tags. 
""" raise NotImplementedError class DictBackend(Backend): """Trivial backend that looks up Git repositories in a dictionary.""" def __init__(self, repos): self.repos = repos def open_repository(self, path): logger.debug('Opening repository at %s', path) try: return self.repos[path] except KeyError: raise NotGitRepository( "No git repository was found at %(path)s" % dict(path=path) ) class FileSystemBackend(Backend): """Simple backend that looks up Git repositories in the local file system.""" def open_repository(self, path): logger.debug('opening repository at %s', path) return Repo(path) class Handler(object): """Smart protocol command handler base class.""" def __init__(self, backend, proto, http_req=None): self.backend = backend self.proto = proto self.http_req = http_req self._client_capabilities = None @classmethod def capability_line(cls): return " ".join(cls.capabilities()) @classmethod def capabilities(cls): raise NotImplementedError(cls.capabilities) @classmethod def innocuous_capabilities(cls): return ("include-tag", "thin-pack", "no-progress", "ofs-delta") @classmethod def required_capabilities(cls): """Return a list of capabilities that we require the client to have.""" return [] def set_client_capabilities(self, caps): allowable_caps = set(self.innocuous_capabilities()) allowable_caps.update(self.capabilities()) for cap in caps: if cap not in allowable_caps: raise GitProtocolError('Client asked for capability %s that ' 'was not advertised.' % cap) for cap in self.required_capabilities(): if cap not in caps: raise GitProtocolError('Client does not support required ' 'capability %s.' % cap) self._client_capabilities = set(caps) logger.info('Client capabilities: %s', caps) def has_capability(self, cap): if self._client_capabilities is None: raise GitProtocolError('Server attempted to access capability %s ' 'before asking client' % cap) return cap in self._client_capabilities class UploadPackHandler(Handler): """Protocol handler for uploading a pack to the server.""" def __init__(self, backend, args, proto, http_req=None, advertise_refs=False): Handler.__init__(self, backend, proto, http_req=http_req) self.repo = backend.open_repository(args[0]) self._graph_walker = None self.advertise_refs = advertise_refs @classmethod def capabilities(cls): return ("multi_ack_detailed", "multi_ack", "side-band-64k", "thin-pack", "ofs-delta", "no-progress", "include-tag", "shallow") @classmethod def required_capabilities(cls): return ("side-band-64k", "thin-pack", "ofs-delta") def progress(self, message): if self.has_capability("no-progress"): return self.proto.write_sideband(2, message) def get_tagged(self, refs=None, repo=None): """Get a dict of peeled values of tags to their original tag shas. :param refs: dict of refname -> sha of possible tags; defaults to all of the backend's refs. :param repo: optional Repo instance for getting peeled refs; defaults to the backend's repo, if available :return: dict of peeled_sha -> tag_sha, where tag_sha is the sha of a tag whose peeled value is peeled_sha. """ if not self.has_capability("include-tag"): return {} if refs is None: refs = self.repo.get_refs() if repo is None: repo = getattr(self.repo, "repo", None) if repo is None: # Bail if we don't have a Repo available; this is ok since # clients must be able to handle if the server doesn't include # all relevant tags. 
                # TODO: fix behavior when missing
                return {}
        tagged = {}
        for name, sha in refs.iteritems():
            peeled_sha = repo.get_peeled(name)
            if peeled_sha != sha:
                tagged[peeled_sha] = sha
        return tagged

    def handle(self):
        write = lambda x: self.proto.write_sideband(1, x)

        graph_walker = ProtocolGraphWalker(self, self.repo.object_store,
                                           self.repo.get_peeled)
        objects_iter = self.repo.fetch_objects(
            graph_walker.determine_wants, graph_walker, self.progress,
            get_tagged=self.get_tagged)

        # Did the process short-circuit (e.g. in a stateless RPC call)? Note
        # that the client still expects a 0-object pack in most cases.
        if len(objects_iter) == 0:
            return

        self.progress("dul-daemon says what\n")
        self.progress("counting objects: %d, done.\n" % len(objects_iter))
        write_pack_objects(ProtocolFile(None, write), objects_iter)
        self.progress("how was that, then?\n")
        # we are done
        self.proto.write("0000")


def _split_proto_line(line, allowed):
    """Split a line read from the wire.

    :param line: The line read from the wire.
    :param allowed: An iterable of command names that should be allowed.
        Command names not listed below as possible return values will be
        ignored. If None, any commands from the possible return values are
        allowed.
    :return: a tuple having one of the following forms:
        ('want', obj_id)
        ('have', obj_id)
        ('done', None)
        (None, None)  (for a flush-pkt)

    :raise UnexpectedCommandError: if the line cannot be parsed into one of
        the allowed return values.
    """
    if not line:
        fields = [None]
    else:
        fields = line.rstrip('\n').split(' ', 1)
    command = fields[0]
    if allowed is not None and command not in allowed:
        raise UnexpectedCommandError(command)
    try:
        if len(fields) == 1 and command in ('done', None):
            return (command, None)
        elif len(fields) == 2:
            if command in ('want', 'have', 'shallow', 'unshallow'):
                hex_to_sha(fields[1])
                return tuple(fields)
            elif command == 'deepen':
                return command, int(fields[1])
    except (TypeError, AssertionError), e:
        raise GitProtocolError(e)
    raise GitProtocolError('Received invalid line from client: %s' % line)


def _find_shallow(store, heads, depth):
    """Find shallow commits according to a given depth.

    :param store: An ObjectStore for looking up objects.
    :param heads: Iterable of head SHAs to start walking from.
    :param depth: The depth of ancestors to include.
    :return: A tuple of (shallow, not_shallow), sets of SHAs that should be
        considered shallow and unshallow according to the arguments. Note that
        these sets may overlap if a commit is reachable along multiple paths.
    """
    parents = {}

    def get_parents(sha):
        result = parents.get(sha, None)
        if not result:
            result = store[sha].parents
            parents[sha] = result
        return result

    todo = []  # stack of (sha, depth)
    for head_sha in heads:
        obj = store.peel_sha(head_sha)
        if isinstance(obj, Commit):
            todo.append((obj.id, 0))

    not_shallow = set()
    shallow = set()
    while todo:
        sha, cur_depth = todo.pop()
        if cur_depth < depth:
            not_shallow.add(sha)
            new_depth = cur_depth + 1
            todo.extend((p, new_depth) for p in get_parents(sha))
        else:
            shallow.add(sha)

    return shallow, not_shallow


class ProtocolGraphWalker(object):
    """A graph walker that knows the git protocol.

    As a graph walker, this class implements ack(), next(), and reset(). It
    also contains some base methods for interacting with the wire and walking
    the commit tree.

    The work of determining which acks to send is passed on to the
    implementation instance stored in _impl. The reason for this is that we do
    not know at object creation time what ack level the protocol requires.
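
    On the wire, a typical negotiation handled by this class looks roughly
    like the following (an illustrative transcript; pkt-line framing and
    most capabilities are omitted, and the SHAs are placeholders):

        C: want 91fe38cad0978a0a845e44df08a31bbdd92a9b06 multi_ack_detailed
        C: [flush-pkt]
        C: have 0e2ce7d22ce3fa9125b4fb05bd3e8f2e908b4ec8
        S: ACK 0e2ce7d22ce3fa9125b4fb05bd3e8f2e908b4ec8 common
        C: done
        S: ACK 0e2ce7d22ce3fa9125b4fb05bd3e8f2e908b4ec8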
    A call to set_ack_level() is required to set up the implementation,
    before any calls to next() or ack() are made.
    """

    def __init__(self, handler, object_store, get_peeled):
        self.handler = handler
        self.store = object_store
        self.get_peeled = get_peeled
        self.proto = handler.proto
        self.http_req = handler.http_req
        self.advertise_refs = handler.advertise_refs
        self._wants = []
        self.shallow = set()
        self.client_shallow = set()
        self.unshallow = set()
        self._cached = False
        self._cache = []
        self._cache_index = 0
        self._impl = None

    def determine_wants(self, heads):
        """Determine the wants for a set of heads.

        The given heads are advertised to the client, who then specifies
        which refs they want using 'want' lines. This portion of the protocol
        is the same regardless of ack type, and in fact is used to set the
        ack type of the ProtocolGraphWalker.

        If the client has the 'shallow' capability, this method also reads
        and responds to the 'shallow' and 'deepen' lines from the client.
        These are not part of the wants per se, but they set up necessary
        state for walking the graph. Additionally, later code depends on
        this method consuming everything up to the first 'have' line.

        :param heads: a dict of refname->SHA1 to advertise
        :return: a list of SHA1s requested by the client
        """
        if not heads:
            # The repo is empty, so short-circuit the whole process.
            self.proto.write_pkt_line(None)
            return []
        values = set(heads.itervalues())
        if self.advertise_refs or not self.http_req:
            for i, (ref, sha) in enumerate(sorted(heads.iteritems())):
                line = "%s %s" % (sha, ref)
                if not i:
                    line = "%s\x00%s" % (line, self.handler.capability_line())
                self.proto.write_pkt_line("%s\n" % line)
                peeled_sha = self.get_peeled(ref)
                if peeled_sha != sha:
                    self.proto.write_pkt_line('%s %s^{}\n' %
                                              (peeled_sha, ref))

            # The advertisement is done; send a flush-pkt.
            self.proto.write_pkt_line(None)
            if self.advertise_refs:
                return []

        # The client will now send us a stream of 'want' lines.
        want = self.proto.read_pkt_line()
        if not want:
            return []
        line, caps = extract_want_line_capabilities(want)
        self.handler.set_client_capabilities(caps)
        self.set_ack_type(ack_type(caps))
        allowed = ('want', 'shallow', 'deepen', None)
        command, sha = _split_proto_line(line, allowed)

        want_revs = []
        while command == 'want':
            if sha not in values:
                raise GitProtocolError(
                    'Client wants invalid object %s' % sha)
            want_revs.append(sha)
            command, sha = self.read_proto_line(allowed)
        self.set_wants(want_revs)

        if command in ('shallow', 'deepen'):
            self.unread_proto_line(command, sha)
            self._handle_shallow_request(want_revs)

        if self.http_req and self.proto.eof():
            # The client may close the socket at this point, expecting a
            # flush-pkt from the server. We might be ready to send a
            # packfile at this point, so we need to explicitly short-circuit
            # in this case.
            return []

        return want_revs

    def unread_proto_line(self, command, value):
        self.proto.unread_pkt_line('%s %s' % (command, value))

    def ack(self, have_ref):
        return self._impl.ack(have_ref)

    def reset(self):
        self._cached = True
        self._cache_index = 0

    def next(self):
        if not self._cached:
            if not self._impl and self.http_req:
                return None
            return self._impl.next()
        self._cache_index += 1
        if self._cache_index > len(self._cache):
            return None
        return self._cache[self._cache_index]

    def read_proto_line(self, allowed):
        """Read a line from the wire.

        :param allowed: An iterable of command names that should be allowed.
        :return: A tuple of (command, value); see _split_proto_line.
        :raise UnexpectedCommandError: If an error occurred reading the
            line.
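
        For example (with hypothetical wire data): the pkt-line
        'have 91fe38cad0978a0a845e44df08a31bbdd92a9b06\n' read with
        allowed=('have', 'done', None) yields
        ('have', '91fe38cad0978a0a845e44df08a31bbdd92a9b06'), while a
        flush-pkt yields (None, None).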
""" return _split_proto_line(self.proto.read_pkt_line(), allowed) def _handle_shallow_request(self, wants): while True: command, val = self.read_proto_line(('deepen', 'shallow')) if command == 'deepen': depth = val break self.client_shallow.add(val) self.read_proto_line((None,)) # consume client's flush-pkt shallow, not_shallow = _find_shallow(self.store, wants, depth) # Update self.shallow instead of reassigning it since we passed a # reference to it before this method was called. self.shallow.update(shallow - not_shallow) new_shallow = self.shallow - self.client_shallow unshallow = self.unshallow = not_shallow & self.client_shallow for sha in sorted(new_shallow): self.proto.write_pkt_line('shallow %s' % sha) for sha in sorted(unshallow): self.proto.write_pkt_line('unshallow %s' % sha) self.proto.write_pkt_line(None) def send_ack(self, sha, ack_type=''): if ack_type: ack_type = ' %s' % ack_type self.proto.write_pkt_line('ACK %s%s\n' % (sha, ack_type)) def send_nak(self): self.proto.write_pkt_line('NAK\n') def set_wants(self, wants): self._wants = wants def _is_satisfied(self, haves, want, earliest): """Check whether a want is satisfied by a set of haves. A want, typically a branch tip, is "satisfied" only if there exists a path back from that want to one of the haves. :param haves: A set of commits we know the client has. :param want: The want to check satisfaction for. :param earliest: A timestamp beyond which the search for haves will be terminated, presumably because we're searching too far down the wrong branch. """ o = self.store[want] pending = collections.deque([o]) while pending: commit = pending.popleft() if commit.id in haves: return True if commit.type_name != "commit": # non-commit wants are assumed to be satisfied continue for parent in commit.parents: parent_obj = self.store[parent] # TODO: handle parents with later commit times than children if parent_obj.commit_time >= earliest: pending.append(parent_obj) return False def all_wants_satisfied(self, haves): """Check whether all the current wants are satisfied by a set of haves. :param haves: A set of commits we know the client has. :note: Wants are specified with set_wants rather than passed in since in the current interface they are determined outside this class. 
""" haves = set(haves) earliest = min([self.store[h].commit_time for h in haves]) for want in self._wants: if not self._is_satisfied(haves, want, earliest): return False return True def set_ack_type(self, ack_type): impl_classes = { MULTI_ACK: MultiAckGraphWalkerImpl, MULTI_ACK_DETAILED: MultiAckDetailedGraphWalkerImpl, SINGLE_ACK: SingleAckGraphWalkerImpl, } self._impl = impl_classes[ack_type](self) _GRAPH_WALKER_COMMANDS = ('have', 'done', None) class SingleAckGraphWalkerImpl(object): """Graph walker implementation that speaks the single-ack protocol.""" def __init__(self, walker): self.walker = walker self._sent_ack = False def ack(self, have_ref): if not self._sent_ack: self.walker.send_ack(have_ref) self._sent_ack = True def next(self): command, sha = self.walker.read_proto_line(_GRAPH_WALKER_COMMANDS) if command in (None, 'done'): if not self._sent_ack: self.walker.send_nak() return None elif command == 'have': return sha class MultiAckGraphWalkerImpl(object): """Graph walker implementation that speaks the multi-ack protocol.""" def __init__(self, walker): self.walker = walker self._found_base = False self._common = [] def ack(self, have_ref): self._common.append(have_ref) if not self._found_base: self.walker.send_ack(have_ref, 'continue') if self.walker.all_wants_satisfied(self._common): self._found_base = True # else we blind ack within next def next(self): while True: command, sha = self.walker.read_proto_line(_GRAPH_WALKER_COMMANDS) if command is None: self.walker.send_nak() # in multi-ack mode, a flush-pkt indicates the client wants to # flush but more have lines are still coming continue elif command == 'done': # don't nak unless no common commits were found, even if not # everything is satisfied if self._common: self.walker.send_ack(self._common[-1]) else: self.walker.send_nak() return None elif command == 'have': if self._found_base: # blind ack self.walker.send_ack(sha, 'continue') return sha class MultiAckDetailedGraphWalkerImpl(object): """Graph walker implementation speaking the multi-ack-detailed protocol.""" def __init__(self, walker): self.walker = walker self._found_base = False self._common = [] def ack(self, have_ref): self._common.append(have_ref) if not self._found_base: self.walker.send_ack(have_ref, 'common') if self.walker.all_wants_satisfied(self._common): self._found_base = True self.walker.send_ack(have_ref, 'ready') # else we blind ack within next def next(self): while True: command, sha = self.walker.read_proto_line(_GRAPH_WALKER_COMMANDS) if command is None: self.walker.send_nak() if self.walker.http_req: return None continue elif command == 'done': # don't nak unless no common commits were found, even if not # everything is satisfied if self._common: self.walker.send_ack(self._common[-1]) else: self.walker.send_nak() return None elif command == 'have': if self._found_base: # blind ack; can happen if the client has more requests # inflight self.walker.send_ack(sha, 'ready') return sha class ReceivePackHandler(Handler): """Protocol handler for downloading a pack from the client.""" def __init__(self, backend, args, proto, http_req=None, advertise_refs=False): Handler.__init__(self, backend, proto, http_req=http_req) self.repo = backend.open_repository(args[0]) self.advertise_refs = advertise_refs @classmethod def capabilities(cls): return ("report-status", "delete-refs", "side-band-64k") def _apply_pack(self, refs): all_exceptions = (IOError, OSError, ChecksumMismatch, ApplyDeltaError, AssertionError, socket.error, zlib.error, ObjectFormatException) status 
= [] will_send_pack = False for command in refs: if command[1] != ZERO_SHA: will_send_pack = True if will_send_pack: # TODO: more informative error messages than just the exception string try: recv = getattr(self.proto, "recv", None) p = self.repo.object_store.add_thin_pack(self.proto.read, recv) status.append(('unpack', 'ok')) except all_exceptions, e: status.append(('unpack', str(e).replace('\n', ''))) # The pack may still have been moved in, but it may contain broken # objects. We trust a later GC to clean it up. else: # The git protocol want to find a status entry related to unpack process # even if no pack data has been sent. status.append(('unpack', 'ok')) for oldsha, sha, ref in refs: ref_status = 'ok' try: if sha == ZERO_SHA: if not 'delete-refs' in self.capabilities(): raise GitProtocolError( 'Attempted to delete refs without delete-refs ' 'capability.') try: del self.repo.refs[ref] except all_exceptions: ref_status = 'failed to delete' else: try: self.repo.refs[ref] = sha except all_exceptions: ref_status = 'failed to write' except KeyError, e: ref_status = 'bad ref' status.append((ref, ref_status)) return status def _report_status(self, status): if self.has_capability('side-band-64k'): writer = BufferedPktLineWriter( lambda d: self.proto.write_sideband(1, d)) write = writer.write def flush(): writer.flush() self.proto.write_pkt_line(None) else: write = self.proto.write_pkt_line flush = lambda: None for name, msg in status: if name == 'unpack': write('unpack %s\n' % msg) elif msg == 'ok': write('ok %s\n' % name) else: write('ng %s %s\n' % (name, msg)) write(None) flush() def handle(self): refs = sorted(self.repo.get_refs().iteritems()) if self.advertise_refs or not self.http_req: if refs: self.proto.write_pkt_line( "%s %s\x00%s\n" % (refs[0][1], refs[0][0], self.capability_line())) for i in range(1, len(refs)): ref = refs[i] self.proto.write_pkt_line("%s %s\n" % (ref[1], ref[0])) else: self.proto.write_pkt_line("%s capabilities^{}\0%s" % ( ZERO_SHA, self.capability_line())) self.proto.write("0000") if self.advertise_refs: return client_refs = [] ref = self.proto.read_pkt_line() # if ref is none then client doesnt want to send us anything.. if ref is None: return ref, caps = extract_capabilities(ref) self.set_client_capabilities(caps) # client will now send us a list of (oldsha, newsha, ref) while ref: client_refs.append(ref.split()) ref = self.proto.read_pkt_line() # backend can now deal with this refs and read a pack using self.read status = self._apply_pack(client_refs) # when we have read all the pack from the client, send a status report # if the client asked for it if self.has_capability('report-status'): self._report_status(status) # Default handler classes for git services. 
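

# A minimal sketch of wiring these handlers up to serve a single repository
# over TCP (illustrative only; '/path/to/repo' is a placeholder):
#
#     from dulwich.repo import Repo
#     from dulwich.server import DictBackend, TCPGitServer
#
#     backend = DictBackend({'/': Repo('/path/to/repo')})
#     TCPGitServer(backend, 'localhost').serve_forever()
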
# Default handler classes for git services.
DEFAULT_HANDLERS = {
    'git-upload-pack': UploadPackHandler,
    'git-receive-pack': ReceivePackHandler,
    }


class TCPGitRequestHandler(SocketServer.StreamRequestHandler):

    def __init__(self, handlers, *args, **kwargs):
        self.handlers = handlers
        SocketServer.StreamRequestHandler.__init__(self, *args, **kwargs)

    def handle(self):
        proto = ReceivableProtocol(self.connection.recv, self.wfile.write)
        command, args = proto.read_cmd()
        logger.info('Handling %s request, args=%s', command, args)

        cls = self.handlers.get(command, None)
        if not callable(cls):
            raise GitProtocolError('Invalid service %s' % command)
        h = cls(self.server.backend, args, proto)
        h.handle()


class TCPGitServer(SocketServer.TCPServer):

    allow_reuse_address = True
    serve = SocketServer.TCPServer.serve_forever

    def _make_handler(self, *args, **kwargs):
        return TCPGitRequestHandler(self.handlers, *args, **kwargs)

    def __init__(self, backend, listen_addr, port=TCP_GIT_PORT,
                 handlers=None):
        self.handlers = dict(DEFAULT_HANDLERS)
        if handlers is not None:
            self.handlers.update(handlers)
        self.backend = backend
        logger.info('Listening for TCP connections on %s:%d',
                    listen_addr, port)
        SocketServer.TCPServer.__init__(self, (listen_addr, port),
                                        self._make_handler)

    def verify_request(self, request, client_address):
        logger.info('Handling request from %s', client_address)
        return True

    def handle_error(self, request, client_address):
        logger.exception('Exception happened during processing of request '
                         'from %s', client_address)


def main(argv=sys.argv):
    """Entry point for starting a TCP git server."""
    import optparse
    parser = optparse.OptionParser()
    parser.add_option("-b", "--backend", dest="backend",
                      help="Select backend to use.",
                      choices=["file"], default="file")
    options, args = parser.parse_args(argv)

    log_utils.default_logging_config()
    if options.backend == "file":
        if len(argv) > 1:
            gitdir = args[1]
        else:
            gitdir = '.'
        backend = DictBackend({'/': Repo(gitdir)})
    else:
        raise Exception("No such backend %s." % options.backend)

    server = TCPGitServer(backend, 'localhost')
    server.serve_forever()


def serve_command(handler_cls, argv=sys.argv, backend=None, inf=sys.stdin,
                  outf=sys.stdout):
    """Serve a single command.

    This is mostly useful for the implementation of commands used by e.g.
    git+ssh.

    :param handler_cls: `Handler` class to use for the request
    :param argv: execv-style command-line arguments. Defaults to sys.argv.
    :param backend: `Backend` to use
    :param inf: File-like object to read from, defaults to standard input.
    :param outf: File-like object to write to, defaults to standard output.
    :return: Exit code for use with sys.exit. 0 on success, 1 on failure.
    """
    if backend is None:
        backend = FileSystemBackend()

    def send_fn(data):
        outf.write(data)
        outf.flush()
    proto = Protocol(inf.read, send_fn)
    handler = handler_cls(backend, argv[1:], proto)
    # FIXME: Catch exceptions and write a single-line summary to outf.
    handler.handle()
    return 0


def generate_info_refs(repo):
    """Generate an info refs file."""
    refs = repo.get_refs()
    return write_info_refs(refs, repo.object_store)


def generate_objects_info_packs(repo):
    """Generate an index for packs."""
    for pack in repo.object_store.packs:
-        yield 'P pack-%s.pack\n' % pack.name()
+        yield 'P %s\n' % pack.data.filename


def update_server_info(repo):
    """Generate server info for dumb file access.

    This generates info/refs and objects/info/packs, similar to
    "git update-server-info".
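
    The generated files are plain text. Illustrative contents (the pack
    name and SHA are placeholders):

        objects/info/packs:
            P pack-5c8c9d3c35cf9a2d04d9e224a7b4f9e5c3c6e6e5.pack

        info/refs:
            91fe38cad0978a0a845e44df08a31bbdd92a9b06<TAB>refs/heads/master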
""" repo._put_named_file(os.path.join('info', 'refs'), "".join(generate_info_refs(repo))) repo._put_named_file(os.path.join('objects', 'info', 'packs'), "".join(generate_objects_info_packs(repo))) if __name__ == '__main__': main() diff --git a/dulwich/tests/test_web.py b/dulwich/tests/test_web.py index 105f508a..ae46665f 100644 --- a/dulwich/tests/test_web.py +++ b/dulwich/tests/test_web.py @@ -1,488 +1,490 @@ # test_web.py -- Tests for the git HTTP server # Copyright (C) 2010 Google, Inc. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 # or (at your option) any later version of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. """Tests for the Git HTTP server.""" from cStringIO import StringIO import gzip import re import os from dulwich.object_store import ( MemoryObjectStore, ) from dulwich.objects import ( Blob, Tag, ) from dulwich.repo import ( BaseRepo, MemoryRepo, ) from dulwich.server import ( DictBackend, ) from dulwich.tests import ( TestCase, ) from dulwich.web import ( HTTP_OK, HTTP_NOT_FOUND, HTTP_FORBIDDEN, HTTP_ERROR, GunzipFilter, send_file, get_text_file, get_loose_object, get_pack_file, get_idx_file, get_info_refs, get_info_packs, handle_service_request, _LengthLimitedFile, HTTPGitRequest, HTTPGitApplication, ) from dulwich.tests.utils import ( make_object, ) class TestHTTPGitRequest(HTTPGitRequest): """HTTPGitRequest with overridden methods to help test caching.""" def __init__(self, *args, **kwargs): HTTPGitRequest.__init__(self, *args, **kwargs) self.cached = None def nocache(self): self.cached = False def cache_forever(self): self.cached = True class WebTestCase(TestCase): """Base TestCase with useful instance vars and utility functions.""" _req_class = TestHTTPGitRequest def setUp(self): super(WebTestCase, self).setUp() self._environ = {} self._req = self._req_class(self._environ, self._start_response, handlers=self._handlers()) self._status = None self._headers = [] self._output = StringIO() def _start_response(self, status, headers): self._status = status self._headers = list(headers) return self._output.write def _handlers(self): return None def assertContentTypeEquals(self, expected): self.assertTrue(('Content-Type', expected) in self._headers) def _test_backend(objects, refs=None, named_files=None): if not refs: refs = {} if not named_files: named_files = {} repo = MemoryRepo.init_bare(objects, refs) for path, contents in named_files.iteritems(): repo._put_named_file(path, contents) return DictBackend({'/': repo}) class DumbHandlersTestCase(WebTestCase): def test_send_file_not_found(self): list(send_file(self._req, None, 'text/plain')) self.assertEqual(HTTP_NOT_FOUND, self._status) def test_send_file(self): f = StringIO('foobar') output = ''.join(send_file(self._req, f, 'some/thing')) self.assertEqual('foobar', output) self.assertEqual(HTTP_OK, self._status) self.assertContentTypeEquals('some/thing') self.assertTrue(f.closed) def test_send_file_buffered(self): bufsize = 10240 xs = 'x' * bufsize f = StringIO(2 * 
                     xs)
        self.assertEqual([xs, xs],
                         list(send_file(self._req, f, 'some/thing')))
        self.assertEqual(HTTP_OK, self._status)
        self.assertContentTypeEquals('some/thing')
        self.assertTrue(f.closed)

    def test_send_file_error(self):
        class TestFile(object):
            def __init__(self, exc_class):
                self.closed = False
                self._exc_class = exc_class

            def read(self, size=-1):
                raise self._exc_class()

            def close(self):
                self.closed = True

        f = TestFile(IOError)
        list(send_file(self._req, f, 'some/thing'))
        self.assertEqual(HTTP_ERROR, self._status)
        self.assertTrue(f.closed)
        self.assertFalse(self._req.cached)

        # non-IOErrors are reraised
        f = TestFile(AttributeError)
        self.assertRaises(AttributeError, list,
                          send_file(self._req, f, 'some/thing'))
        self.assertTrue(f.closed)
        self.assertFalse(self._req.cached)

    def test_get_text_file(self):
        backend = _test_backend([], named_files={'description': 'foo'})
        mat = re.search('.*', 'description')
        output = ''.join(get_text_file(self._req, backend, mat))
        self.assertEqual('foo', output)
        self.assertEqual(HTTP_OK, self._status)
        self.assertContentTypeEquals('text/plain')
        self.assertFalse(self._req.cached)

    def test_get_loose_object(self):
        blob = make_object(Blob, data='foo')
        backend = _test_backend([blob])
        mat = re.search('^(..)(.{38})$', blob.id)
        output = ''.join(get_loose_object(self._req, backend, mat))
        self.assertEqual(blob.as_legacy_object(), output)
        self.assertEqual(HTTP_OK, self._status)
        self.assertContentTypeEquals('application/x-git-loose-object')
        self.assertTrue(self._req.cached)

    def test_get_loose_object_missing(self):
        mat = re.search('^(..)(.{38})$', '1' * 40)
        list(get_loose_object(self._req, _test_backend([]), mat))
        self.assertEqual(HTTP_NOT_FOUND, self._status)

    def test_get_loose_object_error(self):
        blob = make_object(Blob, data='foo')
        backend = _test_backend([blob])
        mat = re.search('^(..)(.{38})$', blob.id)

        def as_legacy_object_error():
            raise IOError

        blob.as_legacy_object = as_legacy_object_error
        list(get_loose_object(self._req, backend, mat))
        self.assertEqual(HTTP_ERROR, self._status)

    def test_get_pack_file(self):
        pack_name = os.path.join('objects', 'pack',
                                 'pack-%s.pack' % ('1' * 40))
        backend = _test_backend([], named_files={pack_name: 'pack contents'})
        mat = re.search('.*', pack_name)
        output = ''.join(get_pack_file(self._req, backend, mat))
        self.assertEqual('pack contents', output)
        self.assertEqual(HTTP_OK, self._status)
        self.assertContentTypeEquals('application/x-git-packed-objects')
        self.assertTrue(self._req.cached)

    def test_get_idx_file(self):
        idx_name = os.path.join('objects', 'pack', 'pack-%s.idx' % ('1' * 40))
        backend = _test_backend([], named_files={idx_name: 'idx contents'})
        mat = re.search('.*', idx_name)
        output = ''.join(get_idx_file(self._req, backend, mat))
        self.assertEqual('idx contents', output)
        self.assertEqual(HTTP_OK, self._status)
        self.assertContentTypeEquals('application/x-git-packed-objects-toc')
        self.assertTrue(self._req.cached)

    def test_get_info_refs(self):
        self._environ['QUERY_STRING'] = ''

        blob1 = make_object(Blob, data='1')
        blob2 = make_object(Blob, data='2')
        blob3 = make_object(Blob, data='3')

        tag1 = make_object(Tag, name='tag-tag',
                           tagger='Test ',
                           tag_time=12345,
                           tag_timezone=0,
                           message='message',
                           object=(Blob, blob2.id))

        objects = [blob1, blob2, blob3, tag1]
        refs = {
            'HEAD': '000',
            'refs/heads/master': blob1.id,
            'refs/tags/tag-tag': tag1.id,
            'refs/tags/blob-tag': blob3.id,
        }
        backend = _test_backend(objects, refs=refs)

        mat = re.search('.*', '//info/refs')
        self.assertEqual(['%s\trefs/heads/master\n' % blob1.id,
                          '%s\trefs/tags/blob-tag\n' % blob3.id,
                          '%s\trefs/tags/tag-tag\n' % tag1.id,
                          '%s\trefs/tags/tag-tag^{}\n' % blob2.id],
                         list(get_info_refs(self._req, backend, mat)))
        self.assertEqual(HTTP_OK, self._status)
        self.assertContentTypeEquals('text/plain')
        self.assertFalse(self._req.cached)

    def test_get_info_packs(self):
-        class TestPack(object):
+        class TestPackData(object):
+
            def __init__(self, sha):
-                self._sha = sha
+                self.filename = "pack-%s.pack" % sha

-            def name(self):
-                return self._sha
+        class TestPack(object):
+            def __init__(self, sha):
+                self.data = TestPackData(sha)

        packs = [TestPack(str(i) * 40) for i in xrange(1, 4)]

        class TestObjectStore(MemoryObjectStore):
            # property must be overridden, can't be assigned
            @property
            def packs(self):
                return packs

        store = TestObjectStore()
        repo = BaseRepo(store, None)
        backend = DictBackend({'/': repo})
        mat = re.search('.*', '//info/packs')
        output = ''.join(get_info_packs(self._req, backend, mat))
        expected = 'P pack-%s.pack\n' * 3
        expected %= ('1' * 40, '2' * 40, '3' * 40)
        self.assertEqual(expected, output)
        self.assertEqual(HTTP_OK, self._status)
        self.assertContentTypeEquals('text/plain')
        self.assertFalse(self._req.cached)


class SmartHandlersTestCase(WebTestCase):

    class _TestUploadPackHandler(object):
        def __init__(self, backend, args, proto, http_req=None,
                     advertise_refs=False):
            self.args = args
            self.proto = proto
            self.http_req = http_req
            self.advertise_refs = advertise_refs

        def handle(self):
            self.proto.write('handled input: %s' % self.proto.recv(1024))

    def _make_handler(self, *args, **kwargs):
        self._handler = self._TestUploadPackHandler(*args, **kwargs)
        return self._handler

    def _handlers(self):
        return {'git-upload-pack': self._make_handler}

    def test_handle_service_request_unknown(self):
        mat = re.search('.*', '/git-evil-handler')
        list(handle_service_request(self._req, 'backend', mat))
        self.assertEqual(HTTP_FORBIDDEN, self._status)
        self.assertFalse(self._req.cached)

    def _run_handle_service_request(self, content_length=None):
        self._environ['wsgi.input'] = StringIO('foo')
        if content_length is not None:
            self._environ['CONTENT_LENGTH'] = content_length
        mat = re.search('.*', '/git-upload-pack')
        handler_output = ''.join(
            handle_service_request(self._req, 'backend', mat))
        write_output = self._output.getvalue()
        # Ensure all output was written via the write callback.
        self.assertEqual('', handler_output)
        self.assertEqual('handled input: foo', write_output)
        self.assertContentTypeEquals('application/x-git-upload-pack-result')
        self.assertFalse(self._handler.advertise_refs)
        self.assertTrue(self._handler.http_req)
        self.assertFalse(self._req.cached)

    def test_handle_service_request(self):
        self._run_handle_service_request()

    def test_handle_service_request_with_length(self):
        self._run_handle_service_request(content_length='3')

    def test_handle_service_request_empty_length(self):
        self._run_handle_service_request(content_length='')

    def test_get_info_refs_unknown(self):
        self._environ['QUERY_STRING'] = 'service=git-evil-handler'
        list(get_info_refs(self._req, 'backend', None))
        self.assertEqual(HTTP_FORBIDDEN, self._status)
        self.assertFalse(self._req.cached)

    def test_get_info_refs(self):
        self._environ['wsgi.input'] = StringIO('foo')
        self._environ['QUERY_STRING'] = 'service=git-upload-pack'
        mat = re.search('.*', '/git-upload-pack')
        handler_output = ''.join(get_info_refs(self._req, 'backend', mat))
        write_output = self._output.getvalue()
        self.assertEqual(('001e# service=git-upload-pack\n'
                          '0000'
                          # input is ignored by the handler
                          'handled input: '), write_output)
        # Ensure all output was written via the write callback.
        self.assertEqual('', handler_output)
        self.assertTrue(self._handler.advertise_refs)
        self.assertTrue(self._handler.http_req)
        self.assertFalse(self._req.cached)


class LengthLimitedFileTestCase(TestCase):
    def test_no_cutoff(self):
        f = _LengthLimitedFile(StringIO('foobar'), 1024)
        self.assertEqual('foobar', f.read())

    def test_cutoff(self):
        f = _LengthLimitedFile(StringIO('foobar'), 3)
        self.assertEqual('foo', f.read())
        self.assertEqual('', f.read())

    def test_multiple_reads(self):
        f = _LengthLimitedFile(StringIO('foobar'), 3)
        self.assertEqual('fo', f.read(2))
        self.assertEqual('o', f.read(2))
        self.assertEqual('', f.read())


class HTTPGitRequestTestCase(WebTestCase):

    # This class tests the contents of the actual cache headers
    _req_class = HTTPGitRequest

    def test_not_found(self):
        self._req.cache_forever()  # cache headers should be discarded
        message = 'Something not found'
        self.assertEqual(message, self._req.not_found(message))
        self.assertEqual(HTTP_NOT_FOUND, self._status)
        self.assertEqual(set([('Content-Type', 'text/plain')]),
                         set(self._headers))

    def test_forbidden(self):
        self._req.cache_forever()  # cache headers should be discarded
        message = 'Something not found'
        self.assertEqual(message, self._req.forbidden(message))
        self.assertEqual(HTTP_FORBIDDEN, self._status)
        self.assertEqual(set([('Content-Type', 'text/plain')]),
                         set(self._headers))

    def test_respond_ok(self):
        self._req.respond()
        self.assertEqual([], self._headers)
        self.assertEqual(HTTP_OK, self._status)

    def test_respond(self):
        self._req.nocache()
        self._req.respond(status=402, content_type='some/type',
                          headers=[('X-Foo', 'foo'), ('X-Bar', 'bar')])
        self.assertEqual(set([
            ('X-Foo', 'foo'), ('X-Bar', 'bar'),
            ('Content-Type', 'some/type'),
            ('Expires', 'Fri, 01 Jan 1980 00:00:00 GMT'),
            ('Pragma', 'no-cache'),
            ('Cache-Control', 'no-cache, max-age=0, must-revalidate'),
            ]), set(self._headers))
        self.assertEqual(402, self._status)


class HTTPGitApplicationTestCase(TestCase):

    def setUp(self):
        super(HTTPGitApplicationTestCase, self).setUp()
        self._app = HTTPGitApplication('backend')

        self._environ = {
            'PATH_INFO': '/foo',
            'REQUEST_METHOD': 'GET',
        }

    def _test_handler(self, req, backend, mat):
        # tests interface used by all handlers
        self.assertEqual(self._environ, req.environ)
        self.assertEqual('backend', backend)
        self.assertEqual('/foo', mat.group(0))
        return 'output'

    def _add_handler(self, app):
        req = self._environ['REQUEST_METHOD']
        app.services = {
            (req, re.compile('/foo$')): self._test_handler,
        }

    def test_call(self):
        self._add_handler(self._app)
        self.assertEqual('output', self._app(self._environ, None))

    def test_fallback_app(self):
        def test_app(environ, start_response):
            return 'output'

        app = HTTPGitApplication('backend', fallback_app=test_app)
        self.assertEqual('output', app(self._environ, None))


class GunzipTestCase(HTTPGitApplicationTestCase):
    """TestCase for testing the GunzipFilter, ensuring the wsgi.input is
    correctly decompressed and headers are corrected.
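
    For example (illustrative environ values): a POST that arrives with
    HTTP_CONTENT_ENCODING='gzip', a CONTENT_LENGTH for the compressed body
    and a gzip-compressed wsgi.input should reach the wrapped application
    with wsgi.input decompressed, CONTENT_LENGTH removed (the decompressed
    length is not known up front) and HTTP_CONTENT_ENCODING deleted.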
""" def setUp(self): super(GunzipTestCase, self).setUp() self._app = GunzipFilter(self._app) self._environ['HTTP_CONTENT_ENCODING'] = 'gzip' self._environ['REQUEST_METHOD'] = 'POST' def _get_zstream(self, text): zstream = StringIO() zfile = gzip.GzipFile(fileobj=zstream, mode='w') zfile.write(text) zfile.close() return zstream def test_call(self): self._add_handler(self._app.app) orig = self.__class__.__doc__ zstream = self._get_zstream(orig) zlength = zstream.tell() zstream.seek(0) self.assertLess(zlength, len(orig)) self.assertEqual(self._environ['HTTP_CONTENT_ENCODING'], 'gzip') self._environ['CONTENT_LENGTH'] = zlength self._environ['wsgi.input'] = zstream app_output = self._app(self._environ, None) buf = self._environ['wsgi.input'] self.assertIsNot(buf, zstream) buf.seek(0) self.assertEqual(orig, buf.read()) self.assertIs(None, self._environ.get('CONTENT_LENGTH')) self.assertNotIn('HTTP_CONTENT_ENCODING', self._environ)