diff --git a/swh/model/hypothesis_strategies.py b/swh/model/hypothesis_strategies.py
index 821343b..21e922e 100644
--- a/swh/model/hypothesis_strategies.py
+++ b/swh/model/hypothesis_strategies.py
@@ -1,461 +1,469 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
from hypothesis import assume
from hypothesis.extra.dateutil import timezones
from hypothesis.strategies import (
binary,
booleans,
builds,
characters,
composite,
datetimes,
dictionaries,
from_regex,
integers,
just,
+ lists,
none,
one_of,
sampled_from,
sets,
text,
tuples,
)
from .from_disk import DentryPerms
from .model import (
Person,
Timestamp,
TimestampWithTimezone,
Origin,
OriginVisit,
OriginVisitStatus,
Snapshot,
SnapshotBranch,
ObjectType,
TargetType,
Release,
Revision,
RevisionType,
BaseContent,
Directory,
DirectoryEntry,
Content,
SkippedContent,
)
from .identifiers import snapshot_identifier, identifier_to_bytes
pgsql_alphabet = characters(
blacklist_categories=("Cs",), blacklist_characters=["\u0000"]
) # postgresql does not like these
def optional(strategy):
return one_of(none(), strategy)
def pgsql_text():
return text(alphabet=pgsql_alphabet)
def sha1_git():
return binary(min_size=20, max_size=20)
def sha1():
return binary(min_size=20, max_size=20)
def aware_datetimes():
# datetimes in Software Heritage are not used for software artifacts
# (which may be much older than 2000), but only for objects like scheduler
# task runs and origin visits, which were created by Software Heritage
# itself, so they date from 2015 at the earliest.
# We're forbidding old datetimes, because until 1956, many timezones had seconds
# in their "UTC offsets" (see
# <https://en.wikipedia.org/wiki/Time_zone#Worldwide_time_zones>), which is not
# encodable in ISO8601; and we need our datetimes to be ISO8601-encodable in the
# RPC protocol
min_value = datetime.datetime(2000, 1, 1, 0, 0, 0)
return datetimes(min_value=min_value, timezones=timezones())
@composite
def urls(draw):
protocol = draw(sampled_from(["git", "http", "https", "deb"]))
domain = draw(from_regex(r"\A([a-z]([a-z0-9-]*)\.){1,3}[a-z0-9]+\Z"))
return "%s://%s" % (protocol, domain)
@composite
def persons_d(draw):
fullname = draw(binary())
email = draw(optional(binary()))
name = draw(optional(binary()))
assume(not (len(fullname) == 32 and email is None and name is None))
return dict(fullname=fullname, name=name, email=email)
def persons():
return persons_d().map(Person.from_dict)
def timestamps_d():
max_seconds = datetime.datetime.max.replace(
tzinfo=datetime.timezone.utc
).timestamp()
min_seconds = datetime.datetime.min.replace(
tzinfo=datetime.timezone.utc
).timestamp()
return builds(
dict,
seconds=integers(min_seconds, max_seconds),
microseconds=integers(0, 10 ** 6 - 1),  # Timestamp requires microseconds < 10**6
)
def timestamps():
return timestamps_d().map(Timestamp.from_dict)
@composite
def timestamps_with_timezone_d(
draw,
timestamp=timestamps_d(),
offset=integers(min_value=-14 * 60, max_value=14 * 60),
negative_utc=booleans(),
):
timestamp = draw(timestamp)
offset = draw(offset)
negative_utc = draw(negative_utc)
assume(not (negative_utc and offset))
return dict(timestamp=timestamp, offset=offset, negative_utc=negative_utc)
timestamps_with_timezone = timestamps_with_timezone_d().map(
TimestampWithTimezone.from_dict
)
def origins_d():
return builds(dict, url=urls())
def origins():
return origins_d().map(Origin.from_dict)
def origin_visits_d():
return builds(
dict,
visit=integers(1, 1000),
origin=urls(),
date=aware_datetimes(),
type=pgsql_text(),
)
def origin_visits():
return origin_visits_d().map(OriginVisit.from_dict)
def metadata_dicts():
return dictionaries(pgsql_text(), pgsql_text())
def origin_visit_statuses_d():
return builds(
dict,
visit=integers(1, 1000),
origin=urls(),
status=sampled_from(["created", "ongoing", "full", "partial"]),
date=aware_datetimes(),
snapshot=optional(sha1_git()),
metadata=optional(metadata_dicts()),
)
def origin_visit_statuses():
return origin_visit_statuses_d().map(OriginVisitStatus.from_dict)
@composite
def releases_d(draw):
target_type = sampled_from([x.value for x in ObjectType])
name = binary()
message = optional(binary())
synthetic = booleans()
target = sha1_git()
metadata = optional(revision_metadata())
return draw(
one_of(
builds(
dict,
name=name,
message=message,
synthetic=synthetic,
author=none(),
date=none(),
target=target,
target_type=target_type,
metadata=metadata,
),
builds(
dict,
name=name,
message=message,
synthetic=synthetic,
date=timestamps_with_timezone_d(),
author=persons_d(),
target=target,
target_type=target_type,
metadata=metadata,
),
)
)
def releases():
return releases_d().map(Release.from_dict)
revision_metadata = metadata_dicts
+def extra_headers():
+ return lists(
+ tuples(binary(min_size=0, max_size=50), binary(min_size=0, max_size=500))
+ ).map(tuple)
+
+
def revisions_d():
return builds(
dict,
message=optional(binary()),
synthetic=booleans(),
author=persons_d(),
committer=persons_d(),
date=timestamps_with_timezone_d(),
committer_date=timestamps_with_timezone_d(),
parents=tuples(sha1_git()),
directory=sha1_git(),
type=sampled_from([x.value for x in RevisionType]),
metadata=optional(revision_metadata()),
+ extra_headers=extra_headers(),
)
# TODO: metadata['extra_headers'] can have binary keys and values
def revisions():
return revisions_d().map(Revision.from_dict)
def directory_entries_d():
return builds(
dict,
name=binary(),
target=sha1_git(),
type=sampled_from(["file", "dir", "rev"]),
perms=sampled_from([perm.value for perm in DentryPerms]),
)
def directory_entries():
return directory_entries_d().map(DirectoryEntry)
def directories_d():
return builds(dict, entries=tuples(directory_entries_d()))
def directories():
return directories_d().map(Directory.from_dict)
def contents_d():
return one_of(present_contents_d(), skipped_contents_d())
def contents():
return one_of(present_contents(), skipped_contents())
def present_contents_d():
return builds(
dict,
data=binary(max_size=4096),
ctime=optional(aware_datetimes()),
status=one_of(just("visible"), just("hidden")),
)
def present_contents():
return present_contents_d().map(lambda d: Content.from_data(**d))
@composite
def skipped_contents_d(draw):
result = BaseContent._hash_data(draw(binary(max_size=4096)))
result.pop("data")
nullify_attrs = draw(
sets(sampled_from(["sha1", "sha1_git", "sha256", "blake2s256"]))
)
for k in nullify_attrs:
result[k] = None
result["reason"] = draw(pgsql_text())
result["status"] = "absent"
result["ctime"] = draw(optional(aware_datetimes()))
return result
def skipped_contents():
return skipped_contents_d().map(SkippedContent.from_dict)
def branch_names():
return binary(min_size=1)
def branch_targets_object_d():
return builds(
dict,
target=sha1_git(),
target_type=sampled_from(
[x.value for x in TargetType if x.value not in ("alias",)]
),
)
def branch_targets_alias_d():
return builds(
dict, target=sha1_git(), target_type=just("alias")
) # TargetType.ALIAS.value))
def branch_targets_d(*, only_objects=False):
if only_objects:
return branch_targets_object_d()
else:
return one_of(branch_targets_alias_d(), branch_targets_object_d())
def branch_targets(*, only_objects=False):
return builds(SnapshotBranch.from_dict, branch_targets_d(only_objects=only_objects))
@composite
def snapshots_d(draw, *, min_size=0, max_size=100, only_objects=False):
branches = draw(
dictionaries(
keys=branch_names(),
values=optional(branch_targets_d(only_objects=only_objects)),
min_size=min_size,
max_size=max_size,
)
)
if not only_objects:
# Make sure aliases point to actual branches
unresolved_aliases = {
branch: target["target"]
for branch, target in branches.items()
if (
target
and target["target_type"] == "alias"
and target["target"] not in branches
)
}
for alias_name, alias_target in unresolved_aliases.items():
# Override alias branch with one pointing to a real object
# if max_size constraint is reached
alias = alias_target if len(branches) < max_size else alias_name
branches[alias] = draw(branch_targets_d(only_objects=True))
# Ensure no cycles between aliases
while True:
try:
id_ = snapshot_identifier(
{
"branches": {
name: branch or None for (name, branch) in branches.items()
}
}
)
except ValueError as e:
for (source, target) in e.args[1]:
branches[source] = draw(branch_targets_d(only_objects=True))
else:
break
return dict(id=identifier_to_bytes(id_), branches=branches)
def snapshots(*, min_size=0, max_size=100, only_objects=False):
return snapshots_d(
min_size=min_size, max_size=max_size, only_objects=only_objects
).map(Snapshot.from_dict)
def objects(blacklist_types=("origin_visit_status",), split_content=False):
"""generates a random couple (type, obj)
which obj is an instance of the Model class corresponding to obj_type.
`blacklist_types` is a list of obj_type to exclude from the strategy.
If `split_content` is True, generates Content and SkippedContent under different
obj_type, resp. "content" and "skipped_content".
"""
strategies = [
("origin", origins),
("origin_visit", origin_visits),
("origin_visit_status", origin_visit_statuses),
("snapshot", snapshots),
("release", releases),
("revision", revisions),
("directory", directories),
]
if split_content:
strategies.append(("content", present_contents))
strategies.append(("skipped_content", skipped_contents))
else:
strategies.append(("content", contents))
args = [
obj_gen().map(lambda x, obj_type=obj_type: (obj_type, x))
for (obj_type, obj_gen) in strategies
if obj_type not in blacklist_types
]
return one_of(*args)
def object_dicts(blacklist_types=("origin_visit_status",), split_content=False):
"""generates a random couple (type, dict)
which dict is suitable for <ModelForType>.from_dict() factory methods.
`blacklist_types` is a list of obj_type to exclude from the strategy.
If `split_content` is True, generates Content and SkippedContent under different
obj_type, resp. "content" and "skipped_content".
"""
strategies = [
("origin", origins_d),
("origin_visit", origin_visits_d),
("origin_visit_status", origin_visit_statuses_d),
("snapshot", snapshots_d),
("release", releases_d),
("revision", revisions_d),
("directory", directories_d),
]
if split_content:
strategies.append(("content", present_contents_d))
strategies.append(("skipped_content", skipped_contents_d))
else:
strategies.append(("content", contents_d))
args = [
obj_gen().map(lambda x, obj_type=obj_type: (obj_type, x))
for (obj_type, obj_gen) in strategies
if obj_type not in blacklist_types
]
return one_of(*args)
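# Usage sketch, not part of this patch: how these strategies are typically
# consumed from a hypothesis test (the test names and bodies below are
# illustrative only).
from hypothesis import given

from swh.model.hypothesis_strategies import object_dicts, objects


@given(object_dicts())
def test_object_dicts_shape(obj):
    obj_type, d = obj
    assert isinstance(obj_type, str) and isinstance(d, dict)


@given(objects())
def test_objects_serialize(obj):
    obj_type, instance = obj
    assert isinstance(instance.to_dict(), dict)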
diff --git a/swh/model/identifiers.py b/swh/model/identifiers.py
index de2082d..3c0a46e 100644
--- a/swh/model/identifiers.py
+++ b/swh/model/identifiers.py
@@ -1,870 +1,861 @@
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import binascii
import datetime
import hashlib
from functools import lru_cache
from typing import Any, Dict, NamedTuple
from deprecated import deprecated
from .exceptions import ValidationError
from .fields.hashes import validate_sha1
from .hashutil import hash_git_data, hash_to_hex, MultiHash
ORIGIN = "origin"
SNAPSHOT = "snapshot"
REVISION = "revision"
RELEASE = "release"
DIRECTORY = "directory"
CONTENT = "content"
SWHID_NAMESPACE = "swh"
SWHID_VERSION = 1
SWHID_TYPES = ["ori", "snp", "rel", "rev", "dir", "cnt"]
SWHID_SEP = ":"
SWHID_CTXT_SEP = ";"
# deprecated variables
PID_NAMESPACE = SWHID_NAMESPACE
PID_VERSION = SWHID_VERSION
PID_TYPES = SWHID_TYPES
PID_SEP = SWHID_SEP
PID_CTXT_SEP = SWHID_CTXT_SEP
@lru_cache()
def identifier_to_bytes(identifier):
"""Convert a text identifier to bytes.
Args:
identifier: an identifier, either a 40-char hexadecimal string or a
bytes object of length 20
Returns:
The length 20 bytestring corresponding to the given identifier
Raises:
ValueError: if the identifier is of an unexpected type or length.
"""
if isinstance(identifier, bytes):
if len(identifier) != 20:
raise ValueError(
"Wrong length for bytes identifier %s, expected 20" % len(identifier)
)
return identifier
if isinstance(identifier, str):
if len(identifier) != 40:
raise ValueError(
"Wrong length for str identifier %s, expected 40" % len(identifier)
)
return bytes.fromhex(identifier)
raise ValueError(
"Wrong type for identifier %s, expected bytes or str"
% identifier.__class__.__name__
)
@lru_cache()
def identifier_to_str(identifier):
"""Convert an identifier to an hexadecimal string.
Args:
identifier: an identifier, either a 40-char hexadecimal string or a
bytes object of length 20
Returns:
The length 40 string corresponding to the given identifier, hex encoded
Raises:
ValueError: if the identifier is of an unexpected type or length.
"""
if isinstance(identifier, str):
if len(identifier) != 40:
raise ValueError(
"Wrong length for str identifier %s, expected 40" % len(identifier)
)
return identifier
if isinstance(identifier, bytes):
if len(identifier) != 20:
raise ValueError(
"Wrong length for bytes identifier %s, expected 20" % len(identifier)
)
return binascii.hexlify(identifier).decode()
raise ValueError(
"Wrong type for identifier %s, expected bytes or str"
% identifier.__class__.__name__
)
def content_identifier(content):
"""Return the intrinsic identifier for a content.
A content's identifier is the sha1, sha1_git and sha256 checksums of its
data.
Args:
content: a content conforming to the Software Heritage schema
Returns:
A dictionary with all the hashes for the data
Raises:
KeyError: if the content doesn't have a data member.
"""
return MultiHash.from_data(content["data"]).digest()
def directory_entry_sort_key(entry):
"""The sorting key for tree entries"""
if entry["type"] == "dir":
return entry["name"] + b"/"
else:
return entry["name"]
@lru_cache()
def _perms_to_bytes(perms):
"""Convert the perms value to its bytes representation"""
oc = oct(perms)[2:]
return oc.encode("ascii")
def escape_newlines(snippet):
"""Escape the newlines present in snippet according to git rules.
New lines in git manifests are escaped by indenting the next line by one
space.
"""
if b"\n" in snippet:
return b"\n ".join(snippet.split(b"\n"))
else:
return snippet
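# A quick illustration of the escaping rule above (sketch, not part of the
# patch): continuation lines are indented by one space.
assert escape_newlines(b"line one\nline two") == b"line one\n line two"
assert escape_newlines(b"no newline") == b"no newline"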
def directory_identifier(directory):
"""Return the intrinsic identifier for a directory.
A directory's identifier is the tree sha1 à la git of a directory listing,
using the following algorithm, which is equivalent to the git algorithm for
trees:
1. Entries of the directory are sorted using the name (or the name with '/'
appended for directory entries) as key, in bytes order.
2. For each entry of the directory, the following bytes are output:
- the octal representation of the permissions for the entry (stored in
the 'perms' member), which is a representation of the entry type:
- b'100644' (int 33188) for files
- b'100755' (int 33261) for executable files
- b'120000' (int 40960) for symbolic links
- b'40000' (int 16384) for directories
- b'160000' (int 57344) for references to revisions
- an ascii space (b'\x20')
- the entry's name (as raw bytes), stored in the 'name' member
- a null byte (b'\x00')
- the 20 byte long identifier of the object pointed at by the entry,
stored in the 'target' member:
- for files or executable files: their blob sha1_git
- for symbolic links: the blob sha1_git of a file containing the link
destination
- for directories: their intrinsic identifier
- for revisions: their intrinsic identifier
(Note that there is no separator between entries)
"""
components = []
for entry in sorted(directory["entries"], key=directory_entry_sort_key):
components.extend(
[
_perms_to_bytes(entry["perms"]),
b"\x20",
entry["name"],
b"\x00",
identifier_to_bytes(entry["target"]),
]
)
return identifier_to_str(hash_git_data(b"".join(components), "tree"))
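# Illustration (sketch, not part of the patch): since this reproduces git's
# tree hashing, the empty directory maps to git's well-known empty tree id,
# the same value used by DirectoryIdentifier in the test suite below.
assert (
    directory_identifier({"entries": []})
    == "4b825dc642cb6eb9a060e54bf8d69288fbee4904"
)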
def format_date(date):
"""Convert a date object into an UTC timestamp encoded as ascii bytes.
Git stores timestamps as an integer number of seconds since the UNIX epoch.
However, Software Heritage stores timestamps as an integer number of
microseconds (postgres type "timestamp with time zone").
Therefore, we print timestamps with no microseconds as integers, and
timestamps with microseconds as floating point values. We elide the
trailing zeroes from microsecond values, to "future-proof" our
representation if we ever need more precision in timestamps.
"""
if not isinstance(date, dict):
raise ValueError("format_date only supports dicts, %r received" % date)
seconds = date.get("seconds", 0)
microseconds = date.get("microseconds", 0)
if not microseconds:
return str(seconds).encode()
else:
float_value = "%d.%06d" % (seconds, microseconds)
return float_value.rstrip("0").encode()
@lru_cache()
def format_offset(offset, negative_utc=None):
"""Convert an integer number of minutes into an offset representation.
The offset representation is [+-]hhmm where:
- hh is the number of hours;
- mm is the number of minutes.
A null offset is represented as +0000.
"""
if offset < 0 or offset == 0 and negative_utc:
sign = "-"
else:
sign = "+"
hours = abs(offset) // 60
minutes = abs(offset) % 60
t = "%s%02d%02d" % (sign, hours, minutes)
return t.encode()
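# Examples mirroring the offsets used in the test suite below, plus the
# special-cased negative UTC:
assert format_offset(0) == b"+0000"
assert format_offset(-630) == b"-1030"
assert format_offset(800) == b"+1320"
assert format_offset(0, negative_utc=True) == b"-0000"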
def normalize_timestamp(time_representation):
"""Normalize a time representation for processing by Software Heritage
This function supports a numeric timestamp (representing a number of
seconds since the UNIX epoch, 1970-01-01 at 00:00 UTC), a
:obj:`datetime.datetime` object (with timezone information), or a
normalized Software Heritage time representation (idempotency).
Args:
time_representation: the representation of a timestamp
Returns:
dict: a normalized dictionary with three keys:
- timestamp: a dict with two optional keys:
- seconds: the integral number of seconds since the UNIX epoch
- microseconds: the integral number of microseconds
- offset: the timezone offset as a number of minutes relative to
UTC
- negative_utc: a boolean representing whether the offset is -0000
when offset = 0.
"""
if time_representation is None:
return None
negative_utc = False
if isinstance(time_representation, dict):
ts = time_representation["timestamp"]
if isinstance(ts, dict):
seconds = ts.get("seconds", 0)
microseconds = ts.get("microseconds", 0)
elif isinstance(ts, int):
seconds = ts
microseconds = 0
else:
raise ValueError(
"normalize_timestamp received non-integer timestamp member:" " %r" % ts
)
offset = time_representation["offset"]
if "negative_utc" in time_representation:
negative_utc = time_representation["negative_utc"]
if negative_utc is None:
negative_utc = False
elif isinstance(time_representation, datetime.datetime):
seconds = int(time_representation.timestamp())
microseconds = time_representation.microsecond
utcoffset = time_representation.utcoffset()
if utcoffset is None:
raise ValueError(
"normalize_timestamp received datetime without timezone: %s"
% time_representation
)
# utcoffset is a timedelta; convert it to an integer number of minutes
seconds_offset = utcoffset.total_seconds()
offset = int(seconds_offset) // 60
elif isinstance(time_representation, int):
seconds = time_representation
microseconds = 0
offset = 0
else:
raise ValueError(
"normalize_timestamp received non-integer timestamp:"
" %r" % time_representation
)
return {
"timestamp": {"seconds": seconds, "microseconds": microseconds,},
"offset": offset,
"negative_utc": negative_utc,
}
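# Sketch of the normalization (not part of the patch): a plain integer
# timestamp is interpreted as UTC seconds since the epoch.
assert normalize_timestamp(1448210036) == {
    "timestamp": {"seconds": 1448210036, "microseconds": 0},
    "offset": 0,
    "negative_utc": False,
}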
def format_author(author):
"""Format the specification of an author.
An author is either a byte string (passed unchanged), or a dict with three
keys, fullname, name and email.
If the fullname exists, return it; if it doesn't, we construct a fullname
using the following heuristics: if the name value is None, we return the
email in angle brackets, else, we return the name, a space, and the email
in angle brackets.
"""
if isinstance(author, bytes) or author is None:
return author
if "fullname" in author:
return author["fullname"]
ret = []
if author["name"] is not None:
ret.append(author["name"])
if author["email"] is not None:
ret.append(b"".join([b"<", author["email"], b">"]))
return b" ".join(ret)
def format_author_line(header, author, date_offset):
"""Format a an author line according to git standards.
An author line has three components:
- a header, describing the type of author (author, committer, tagger)
- a name and email, which is an arbitrary bytestring
- optionally, a timestamp with UTC offset specification
The author line is formatted thus::
`header` `name and email`[ `timestamp` `utc_offset`]
The timestamp is encoded as a (decimal) number of seconds since the UNIX
epoch (1970-01-01 at 00:00 UTC). As an extension to the git format, we
support fractional timestamps, using a dot as the separator for the decimal
part.
The utc offset is a number of minutes encoded as '[+-]HHMM'. Note some
tools can pass a negative offset corresponding to the UTC timezone
('-0000'), which is valid and is encoded as such.
For convenience, this function returns the whole line with its trailing
newline.
Args:
header: the header of the author line (one of 'author', 'committer',
'tagger')
author: an author specification (dict with two bytes values: name and
email, or byte value)
date_offset: a normalized date/time representation as returned by
:func:`normalize_timestamp`.
Returns:
the newline-terminated byte string containing the author line
"""
ret = [header.encode(), b" ", escape_newlines(format_author(author))]
date_offset = normalize_timestamp(date_offset)
if date_offset is not None:
date_f = format_date(date_offset["timestamp"])
offset_f = format_offset(date_offset["offset"], date_offset["negative_utc"])
ret.extend([b" ", date_f, b" ", offset_f])
ret.append(b"\n")
return b"".join(ret)
def revision_identifier(revision):
"""Return the intrinsic identifier for a revision.
The fields used for the revision identifier computation are:
- directory
- parents
- author
- author_date
- committer
- committer_date
- - metadata -> extra_headers
+ - extra_headers or metadata -> extra_headers
- message
A revision's identifier is the 'git'-checksum of a commit manifest
constructed as follows (newlines are a single ASCII newline character)::
tree <directory identifier>
[for each parent in parents]
parent <parent identifier>
[end for each parents]
author <author> <author_date>
committer <committer> <committer_date>
[for each key, value in extra_headers]
<key> <encoded value>
[end for each extra_headers]
<message>
The directory identifier is the ascii representation of its hexadecimal
encoding.
Author and committer are formatted with the :func:`format_author` function.
Dates and offsets are formatted with the :func:`format_date` and
:func:`format_offset` functions.
Extra headers are an ordered list of [key, value] pairs, where keys and
values are both byte strings.
Multiline extra header values are escaped by indenting the continuation
lines with one ascii space.
If the message is None, the manifest ends with the last header. Else, the
message is appended to the headers after an empty line.
The checksum of the full manifest is computed using the 'commit' git object
type.
"""
components = [
b"tree ",
identifier_to_str(revision["directory"]).encode(),
b"\n",
]
for parent in revision["parents"]:
if parent:
components.extend(
[b"parent ", identifier_to_str(parent).encode(), b"\n",]
)
components.extend(
[
format_author_line("author", revision["author"], revision["date"]),
format_author_line(
"committer", revision["committer"], revision["committer_date"]
),
]
)
# Handle extra headers
- metadata = revision.get("metadata")
- if not metadata:
- metadata = {}
+ metadata = revision.get("metadata") or {}
+ extra_headers = revision.get("extra_headers", ())
+ if not extra_headers and "extra_headers" in metadata:
+ extra_headers = metadata["extra_headers"]
- for key, value in metadata.get("extra_headers", []):
-
- # Integer values: decimal representation
- if isinstance(value, int):
- value = str(value).encode("utf-8")
-
- # Unicode string values: utf-8 encoding
- if isinstance(value, str):
- value = value.encode("utf-8")
-
- # encode the key to utf-8
- components.extend([key.encode("utf-8"), b" ", escape_newlines(value), b"\n"])
+ for key, value in extra_headers:
+ components.extend([key, b" ", escape_newlines(value), b"\n"])
if revision["message"] is not None:
components.extend([b"\n", revision["message"]])
commit_raw = b"".join(components)
return identifier_to_str(hash_git_data(commit_raw, "commit"))
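# Sketch of the behaviour this hunk introduces (all field values are made
# up): a revision carrying its headers in the new top-level "extra_headers"
# hashes identically to one still nesting them under metadata.
_base = {
    "directory": "85a74718d377195e1efd0843ba4f3260bad4fe07",
    "parents": [],
    "author": b"Jane Doe <jd@example.org>",
    "date": None,
    "committer": b"Jane Doe <jd@example.org>",
    "committer_date": None,
    "message": b"example\n",
}
assert revision_identifier(
    {**_base, "extra_headers": ((b"gpgsig", b"stub"),)}
) == revision_identifier(
    {**_base, "metadata": {"extra_headers": [(b"gpgsig", b"stub")]}}
)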
def target_type_to_git(target_type):
"""Convert a software heritage target type to a git object type"""
return {
"content": b"blob",
"directory": b"tree",
"revision": b"commit",
"release": b"tag",
"snapshot": b"refs",
}[target_type]
def release_identifier(release):
"""Return the intrinsic identifier for a release."""
components = [
b"object ",
identifier_to_str(release["target"]).encode(),
b"\n",
b"type ",
target_type_to_git(release["target_type"]),
b"\n",
b"tag ",
release["name"],
b"\n",
]
if "author" in release and release["author"]:
components.append(
format_author_line("tagger", release["author"], release["date"])
)
if release["message"] is not None:
components.extend([b"\n", release["message"]])
return identifier_to_str(hash_git_data(b"".join(components), "tag"))
def snapshot_identifier(snapshot, *, ignore_unresolved=False):
"""Return the intrinsic identifier for a snapshot.
Snapshots are a set of named branches, which are pointers to objects at any
level of the Software Heritage DAG.
As well as pointing to other objects in the Software Heritage DAG, branches
can also be *alias*es, in which case their target is the name of another
branch in the same snapshot, or *dangling*, in which case the target is
unknown (and represented by the ``None`` value).
A snapshot identifier is a salted sha1 (using the git hashing algorithm
with the ``snapshot`` object type) of a manifest following the algorithm:
1. Branches are sorted using the name as key, in bytes order.
2. For each branch, the following bytes are output:
- the type of the branch target:
- ``content``, ``directory``, ``revision``, ``release`` or ``snapshot``
for the corresponding entries in the DAG;
- ``alias`` for branches referencing another branch;
- ``dangling`` for dangling branches
- an ascii space (``\\x20``)
- the branch name (as raw bytes)
- a null byte (``\\x00``)
- the length of the target identifier, as an ascii-encoded decimal number
(``20`` for current intrinsic identifiers, ``0`` for dangling
branches, the length of the target branch name for branch aliases)
- a colon (``:``)
- the identifier of the target object pointed at by the branch,
stored in the 'target' member:
- for contents: their *sha1_git*
- for directories, revisions, releases or snapshots: their intrinsic
identifier
- for branch aliases, the name of the target branch (as raw bytes)
- for dangling branches, the empty string
Note that, akin to directory manifests, there is no separator between
entries. Because of symbolic branches, identifiers are of arbitrary
length but are length-encoded to avoid ambiguity.
Args:
snapshot (dict): the snapshot of which to compute the identifier. A
single entry is needed, ``'branches'``, which is itself a :class:`dict`
mapping each branch to its target
ignore_unresolved (bool): if `True`, ignore unresolved branch aliases.
Returns:
str: the intrinsic identifier for `snapshot`
"""
unresolved = []
lines = []
for name, target in sorted(snapshot["branches"].items()):
if not target:
target_type = b"dangling"
target_id = b""
elif target["target_type"] == "alias":
target_type = b"alias"
target_id = target["target"]
if target_id not in snapshot["branches"] or target_id == name:
unresolved.append((name, target_id))
else:
target_type = target["target_type"].encode()
target_id = identifier_to_bytes(target["target"])
lines.extend(
[
target_type,
b"\x20",
name,
b"\x00",
("%d:" % len(target_id)).encode(),
target_id,
]
)
if unresolved and not ignore_unresolved:
raise ValueError(
"Branch aliases unresolved: %s"
% ", ".join("%s -> %s" % x for x in unresolved),
unresolved,
)
return identifier_to_str(hash_git_data(b"".join(lines), "snapshot"))
def origin_identifier(origin):
"""Return the intrinsic identifier for an origin.
An origin's identifier is the sha1 checksum of the entire origin URL
"""
return hashlib.sha1(origin["url"].encode("utf-8")).hexdigest()
_object_type_map = {
ORIGIN: {"short_name": "ori", "key_id": "id"},
SNAPSHOT: {"short_name": "snp", "key_id": "id"},
RELEASE: {"short_name": "rel", "key_id": "id"},
REVISION: {"short_name": "rev", "key_id": "id"},
DIRECTORY: {"short_name": "dir", "key_id": "id"},
CONTENT: {"short_name": "cnt", "key_id": "sha1_git"},
}
_SWHID = NamedTuple(
"SWHID",
[
("namespace", str),
("scheme_version", int),
("object_type", str),
("object_id", str),
("metadata", Dict[str, Any]),
],
)
class SWHID(_SWHID):
"""
Named tuple holding the relevant info associated to a SoftWare Heritage
persistent IDentifier (SWHID)
Args:
namespace (str): the namespace of the identifier, defaults to ``swh``
scheme_version (int): the scheme version of the identifier,
defaults to 1
object_type (str): the type of object the identifier points to,
either ``content``, ``directory``, ``release``, ``revision`` or ``snapshot``
object_id (str): object's identifier
metadata (dict): optional dict filled with metadata related to
pointed object
Raises:
swh.model.exceptions.ValidationError: In case of invalid object type or id
Once created, it contains the following attributes:
Attributes:
namespace (str): the namespace of the identifier
scheme_version (int): the scheme version of the identifier
object_type (str): the type of object the identifier points to
object_id (str): hexadecimal representation of the object hash
metadata (dict): metadata related to the pointed object
To get the raw SWHID string from an instance of this named tuple,
use the :func:`str` function::
swhid = SWHID(
object_type='content',
object_id='8ff44f081d43176474b267de5451f2c2e88089d0'
)
swhid_str = str(swhid)
# 'swh:1:cnt:8ff44f081d43176474b267de5451f2c2e88089d0'
"""
__slots__ = ()
def __new__(
cls,
namespace: str = SWHID_NAMESPACE,
scheme_version: int = SWHID_VERSION,
object_type: str = "",
object_id: str = "",
metadata: Dict[str, Any] = {},
):
o = _object_type_map.get(object_type)
if not o:
raise ValidationError(
"Wrong input: Supported types are %s" % (list(_object_type_map.keys()))
)
if namespace != SWHID_NAMESPACE:
raise ValidationError(
"Wrong format: only supported namespace is '%s'" % SWHID_NAMESPACE
)
if scheme_version != SWHID_VERSION:
raise ValidationError(
"Wrong format: only supported version is %d" % SWHID_VERSION
)
# internal swh representation resolution
if isinstance(object_id, dict):
object_id = object_id[o["key_id"]]
validate_sha1(object_id) # can raise if invalid hash
object_id = hash_to_hex(object_id)
return super().__new__(
cls, namespace, scheme_version, object_type, object_id, metadata
)
def __str__(self) -> str:
o = _object_type_map.get(self.object_type)
assert o
swhid = SWHID_SEP.join(
[self.namespace, str(self.scheme_version), o["short_name"], self.object_id]
)
if self.metadata:
for k, v in self.metadata.items():
swhid += "%s%s=%s" % (SWHID_CTXT_SEP, k, v)
return swhid
@deprecated("Use swh.model.identifiers.SWHID instead")
class PersistentId(SWHID):
"""
Named tuple holding the relevant info associated to a SoftWare Heritage
persistent IDentifier.
.. deprecated:: 0.3.8
Use :class:`swh.model.identifiers.SWHID` instead
"""
def __new__(cls, *args, **kwargs):
return super(cls, PersistentId).__new__(cls, *args, **kwargs)
def swhid(
object_type: str,
object_id: str,
scheme_version: int = 1,
metadata: Dict[str, Any] = {},
) -> str:
"""Compute :ref:`persistent-identifiers`
Args:
object_type: object's type, either ``content``, ``directory``,
``release``, ``revision`` or ``snapshot``
object_id: object's identifier
scheme_version: SWHID scheme version, defaults to 1
metadata: metadata related to the pointed object
Raises:
swh.model.exceptions.ValidationError: In case of invalid object type or id
Returns:
the SWHID of the object
"""
swhid = SWHID(
scheme_version=scheme_version,
object_type=object_type,
object_id=object_id,
metadata=metadata,
)
return str(swhid)
@deprecated("Use swh.model.identifiers.swhid instead")
def persistent_identifier(*args, **kwargs) -> str:
"""Compute :ref:`persistent-identifiers`
.. deprecated:: 0.3.8
Use :func:`swh.model.identifiers.swhid` instead
"""
return swhid(*args, **kwargs)
def parse_swhid(swhid: str) -> SWHID:
"""Parse :ref:`persistent-identifiers`.
Args:
swhid (str): A persistent identifier
Raises:
swh.model.exceptions.ValidationError: in case of:
* missing mandatory values (4)
* invalid namespace supplied
* invalid version supplied
* invalid type supplied
* missing hash
* invalid hash identifier supplied
Returns:
a named tuple holding the parsing result
"""
# <swhid>;<contextual-information>
swhid_parts = swhid.split(SWHID_CTXT_SEP)
swhid_data = swhid_parts.pop(0).split(":")
if len(swhid_data) != 4:
raise ValidationError("Wrong format: There should be 4 mandatory values")
# Checking for parsing errors
_ns, _version, _type, _id = swhid_data
for otype, data in _object_type_map.items():
if _type == data["short_name"]:
_type = otype
break
if not _id:
raise ValidationError("Wrong format: Identifier should be present")
_metadata = {}
for part in swhid_parts:
try:
key, val = part.split("=")
_metadata[key] = val
except Exception:
msg = "Contextual data is badly formatted, form key=val expected"
raise ValidationError(msg)
return SWHID(_ns, int(_version), _type, _id, _metadata)
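# Sketch reusing the hash from the SWHID docstring above (the origin URL is
# illustrative): contextual metadata after ';' ends up in the metadata dict.
_parsed = parse_swhid(
    "swh:1:cnt:8ff44f081d43176474b267de5451f2c2e88089d0;origin=https://example.org"
)
assert _parsed.object_type == "content"
assert _parsed.metadata == {"origin": "https://example.org"}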
@deprecated("Use swh.model.identifiers.parse_swhid instead")
def parse_persistent_identifier(persistent_id: str) -> PersistentId:
"""Parse :ref:`persistent-identifiers`.
.. deprecated:: 0.3.8
Use :func:`swh.model.identifiers.parse_swhid` instead
"""
return PersistentId(**parse_swhid(persistent_id)._asdict())
diff --git a/swh/model/model.py b/swh/model/model.py
index f585051..c4f185f 100644
--- a/swh/model/model.py
+++ b/swh/model/model.py
@@ -1,672 +1,698 @@
# Copyright (C) 2018-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import datetime
from abc import ABCMeta, abstractmethod
+from copy import deepcopy
from enum import Enum
from hashlib import sha256
-from typing import Dict, Optional, Tuple, TypeVar, Union
+from typing import Dict, Iterable, Optional, Tuple, TypeVar, Union
from typing_extensions import Final
import attr
from attrs_strict import type_validator
import dateutil.parser
import iso8601
from .identifiers import (
normalize_timestamp,
directory_identifier,
revision_identifier,
release_identifier,
snapshot_identifier,
)
from .hashutil import DEFAULT_ALGORITHMS, hash_to_bytes, MultiHash
class MissingData(Exception):
"""Raised by `Content.with_data` when it has no way of fetching the
data (but not when fetching the data fails)."""
pass
SHA1_SIZE = 20
# TODO: Limit this to 20 bytes
Sha1Git = bytes
def dictify(value):
"Helper function used by BaseModel.to_dict()"
if isinstance(value, BaseModel):
return value.to_dict()
elif isinstance(value, Enum):
return value.value
elif isinstance(value, dict):
return {k: dictify(v) for k, v in value.items()}
elif isinstance(value, tuple):
return tuple(dictify(v) for v in value)
else:
return value
ModelType = TypeVar("ModelType", bound="BaseModel")
class BaseModel:
"""Base class for SWH model classes.
Provides serialization/deserialization to/from Python dictionaries,
that are suitable for JSON/msgpack-like formats."""
def to_dict(self):
"""Wrapper of `attr.asdict` that can be overridden by subclasses
that have special handling of some of the fields."""
return dictify(attr.asdict(self, recurse=False))
@classmethod
def from_dict(cls, d):
"""Takes a dictionary representing a tree of SWH objects, and
recursively builds the corresponding objects."""
return cls(**d)
def anonymize(self: ModelType) -> Optional[ModelType]:
"""Returns an anonymized version of the object, if needed.
If the object model does not need/support anonymization, returns None.
"""
return None
class HashableObject(metaclass=ABCMeta):
"""Mixin to automatically compute object identifier hash when
the associated model is instantiated."""
@staticmethod
@abstractmethod
def compute_hash(object_dict):
"""Derived model classes must implement this to compute
the object hash from its dict representation."""
pass
def __attrs_post_init__(self):
if not self.id:
obj_id = hash_to_bytes(self.compute_hash(self.to_dict()))
object.__setattr__(self, "id", obj_id)
@attr.s(frozen=True)
class Person(BaseModel):
"""Represents the author/committer of a revision or release."""
object_type: Final = "person"
fullname = attr.ib(type=bytes, validator=type_validator())
name = attr.ib(type=Optional[bytes], validator=type_validator())
email = attr.ib(type=Optional[bytes], validator=type_validator())
@classmethod
def from_fullname(cls, fullname: bytes):
"""Returns a Person object, by guessing the name and email from the
fullname, in the `name <email>` format.
The fullname is left unchanged."""
if fullname is None:
raise TypeError("fullname is None.")
name: Optional[bytes]
email: Optional[bytes]
try:
open_bracket = fullname.index(b"<")
except ValueError:
name = fullname
email = None
else:
raw_name = fullname[:open_bracket]
raw_email = fullname[open_bracket + 1 :]
if not raw_name:
name = None
else:
name = raw_name.strip()
try:
close_bracket = raw_email.rindex(b">")
except ValueError:
email = raw_email
else:
email = raw_email[:close_bracket]
return Person(name=name or None, email=email or None, fullname=fullname,)
def anonymize(self) -> "Person":
"""Returns an anonymized version of the Person object.
Anonymization is simply a Person whose fullname is the hash of the
original fullname, with unset name and email.
"""
return Person(fullname=sha256(self.fullname).digest(), name=None, email=None,)
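# Sketch of the bracket heuristics (the person is made up): the fullname is
# kept verbatim while name and email are guessed from it.
_person = Person.from_fullname(b"Jane Doe <jd@example.org>")
assert _person.fullname == b"Jane Doe <jd@example.org>"
assert _person.name == b"Jane Doe"
assert _person.email == b"jd@example.org"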
@attr.s(frozen=True)
class Timestamp(BaseModel):
"""Represents a naive timestamp from a VCS."""
object_type: Final = "timestamp"
seconds = attr.ib(type=int, validator=type_validator())
microseconds = attr.ib(type=int, validator=type_validator())
@seconds.validator
def check_seconds(self, attribute, value):
"""Check that seconds fit in a 64-bits signed integer."""
if not (-(2 ** 63) <= value < 2 ** 63):
raise ValueError("Seconds must be a signed 64-bits integer.")
@microseconds.validator
def check_microseconds(self, attribute, value):
"""Checks that microseconds are positive and < 1000000."""
if not (0 <= value < 10 ** 6):
raise ValueError("Microseconds must be in [0, 1000000[.")
@attr.s(frozen=True)
class TimestampWithTimezone(BaseModel):
"""Represents a TZ-aware timestamp from a VCS."""
object_type: Final = "timestamp_with_timezone"
timestamp = attr.ib(type=Timestamp, validator=type_validator())
offset = attr.ib(type=int, validator=type_validator())
negative_utc = attr.ib(type=bool, validator=type_validator())
@offset.validator
def check_offset(self, attribute, value):
"""Checks the offset is a 16-bits signed integer (in theory, it
should always be between -14 and +14 hours)."""
if not (-(2 ** 15) <= value < 2 ** 15):
# max 14 hours offset in theory, but you never know what
# you'll find in the wild...
raise ValueError("offset too large: %d minutes" % value)
@negative_utc.validator
def check_negative_utc(self, attribute, value):
if self.offset and value:
raise ValueError("negative_utc can only be True is offset=0")
@classmethod
def from_dict(cls, obj: Union[Dict, datetime.datetime, int]):
"""Builds a TimestampWithTimezone from any of the formats
accepted by :func:`swh.model.identifiers.normalize_timestamp`."""
# TODO: this accept way more types than just dicts; find a better
# name
d = normalize_timestamp(obj)
return cls(
timestamp=Timestamp.from_dict(d["timestamp"]),
offset=d["offset"],
negative_utc=d["negative_utc"],
)
@classmethod
def from_datetime(cls, dt: datetime.datetime):
return cls.from_dict(dt)
@classmethod
def from_iso8601(cls, s):
"""Builds a TimestampWithTimezone from an ISO8601-formatted string.
"""
dt = iso8601.parse_date(s)
tstz = cls.from_datetime(dt)
if dt.tzname() == "-00:00":
tstz = attr.evolve(tstz, negative_utc=True)
return tstz
@attr.s(frozen=True)
class Origin(BaseModel):
"""Represents a software source: a VCS and an URL."""
object_type: Final = "origin"
url = attr.ib(type=str, validator=type_validator())
@attr.s(frozen=True)
class OriginVisit(BaseModel):
"""Represents an origin visit with a given type at a given point in time, by a
SWH loader."""
object_type: Final = "origin_visit"
origin = attr.ib(type=str, validator=type_validator())
date = attr.ib(type=datetime.datetime, validator=type_validator())
type = attr.ib(type=str, validator=type_validator())
"""Should not be set before calling 'origin_visit_add()'."""
visit = attr.ib(type=Optional[int], validator=type_validator(), default=None)
def to_dict(self):
"""Serializes the date as a string and omits the visit id if it is
`None`."""
ov = super().to_dict()
if ov["visit"] is None:
del ov["visit"]
return ov
@attr.s(frozen=True)
class OriginVisitStatus(BaseModel):
"""Represents a visit update of an origin at a given point in time.
"""
object_type: Final = "origin_visit_status"
origin = attr.ib(type=str, validator=type_validator())
visit = attr.ib(type=int, validator=type_validator())
date = attr.ib(type=datetime.datetime, validator=type_validator())
status = attr.ib(
type=str,
validator=attr.validators.in_(["created", "ongoing", "full", "partial"]),
)
snapshot = attr.ib(type=Optional[Sha1Git], validator=type_validator())
metadata = attr.ib(
type=Optional[Dict[str, object]], validator=type_validator(), default=None
)
class TargetType(Enum):
"""The type of content pointed to by a snapshot branch. Usually a
revision or an alias."""
CONTENT = "content"
DIRECTORY = "directory"
REVISION = "revision"
RELEASE = "release"
SNAPSHOT = "snapshot"
ALIAS = "alias"
class ObjectType(Enum):
"""The type of content pointed to by a release. Usually a revision"""
CONTENT = "content"
DIRECTORY = "directory"
REVISION = "revision"
RELEASE = "release"
SNAPSHOT = "snapshot"
@attr.s(frozen=True)
class SnapshotBranch(BaseModel):
"""Represents one of the branches of a snapshot."""
object_type: Final = "snapshot_branch"
target = attr.ib(type=bytes, validator=type_validator())
target_type = attr.ib(type=TargetType, validator=type_validator())
@target.validator
def check_target(self, attribute, value):
"""Checks the target type is not an alias, checks the target is a
valid sha1_git."""
if self.target_type != TargetType.ALIAS and self.target is not None:
if len(value) != 20:
raise ValueError("Wrong length for bytes identifier: %d" % len(value))
@classmethod
def from_dict(cls, d):
return cls(target=d["target"], target_type=TargetType(d["target_type"]))
@attr.s(frozen=True)
class Snapshot(BaseModel, HashableObject):
"""Represents the full state of an origin at a given point in time."""
object_type: Final = "snapshot"
branches = attr.ib(
type=Dict[bytes, Optional[SnapshotBranch]], validator=type_validator()
)
id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"")
@staticmethod
def compute_hash(object_dict):
return snapshot_identifier(object_dict)
@classmethod
def from_dict(cls, d):
d = d.copy()
return cls(
branches={
name: SnapshotBranch.from_dict(branch) if branch else None
for (name, branch) in d.pop("branches").items()
},
**d,
)
@attr.s(frozen=True)
class Release(BaseModel, HashableObject):
object_type: Final = "release"
name = attr.ib(type=bytes, validator=type_validator())
message = attr.ib(type=Optional[bytes], validator=type_validator())
target = attr.ib(type=Optional[Sha1Git], validator=type_validator())
target_type = attr.ib(type=ObjectType, validator=type_validator())
synthetic = attr.ib(type=bool, validator=type_validator())
author = attr.ib(type=Optional[Person], validator=type_validator(), default=None)
date = attr.ib(
type=Optional[TimestampWithTimezone], validator=type_validator(), default=None
)
metadata = attr.ib(
type=Optional[Dict[str, object]], validator=type_validator(), default=None
)
id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"")
@staticmethod
def compute_hash(object_dict):
return release_identifier(object_dict)
@author.validator
def check_author(self, attribute, value):
"""If the author is `None`, checks the date is `None` too."""
if self.author is None and self.date is not None:
raise ValueError("release date must be None if author is None.")
def to_dict(self):
rel = super().to_dict()
if rel["metadata"] is None:
del rel["metadata"]
return rel
@classmethod
def from_dict(cls, d):
d = d.copy()
if d.get("author"):
d["author"] = Person.from_dict(d["author"])
if d.get("date"):
d["date"] = TimestampWithTimezone.from_dict(d["date"])
return cls(target_type=ObjectType(d.pop("target_type")), **d)
def anonymize(self) -> "Release":
"""Returns an anonymized version of the Release object.
Anonymization consists in replacing the author with an anonymized Person object.
"""
author = self.author and self.author.anonymize()
return attr.evolve(self, author=author)
class RevisionType(Enum):
GIT = "git"
TAR = "tar"
DSC = "dsc"
SUBVERSION = "svn"
MERCURIAL = "hg"
+def tuplify_extra_headers(value: Iterable) -> Tuple:
+ return tuple((k, v) for k, v in value)
+
+
@attr.s(frozen=True)
class Revision(BaseModel, HashableObject):
object_type: Final = "revision"
message = attr.ib(type=Optional[bytes], validator=type_validator())
author = attr.ib(type=Person, validator=type_validator())
committer = attr.ib(type=Person, validator=type_validator())
date = attr.ib(type=Optional[TimestampWithTimezone], validator=type_validator())
committer_date = attr.ib(
type=Optional[TimestampWithTimezone], validator=type_validator()
)
type = attr.ib(type=RevisionType, validator=type_validator())
directory = attr.ib(type=Sha1Git, validator=type_validator())
synthetic = attr.ib(type=bool, validator=type_validator())
metadata = attr.ib(
type=Optional[Dict[str, object]], validator=type_validator(), default=None
)
parents = attr.ib(type=Tuple[Sha1Git, ...], validator=type_validator(), default=())
id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"")
+ extra_headers = attr.ib(
+ type=Tuple[Tuple[bytes, bytes], ...], # but it makes mypy sad
+ validator=type_validator(),
+ converter=tuplify_extra_headers, # type: ignore
+ default=(),
+ )
+
+ def __attrs_post_init__(self):
+ super().__attrs_post_init__()
+ # ensure metadata is a deep copy of whatever was given, and if needed
+ # extract extra_headers from there
+ if self.metadata:
+ metadata = deepcopy(self.metadata)
+ if not self.extra_headers and "extra_headers" in metadata:
+ object.__setattr__(
+ self,
+ "extra_headers",
+ tuplify_extra_headers(metadata.pop("extra_headers")),
+ )
+ attr.validate(self)
+ object.__setattr__(self, "metadata", metadata)
@staticmethod
def compute_hash(object_dict):
return revision_identifier(object_dict)
@classmethod
def from_dict(cls, d):
d = d.copy()
date = d.pop("date")
if date:
date = TimestampWithTimezone.from_dict(date)
committer_date = d.pop("committer_date")
if committer_date:
committer_date = TimestampWithTimezone.from_dict(committer_date)
return cls(
author=Person.from_dict(d.pop("author")),
committer=Person.from_dict(d.pop("committer")),
date=date,
committer_date=committer_date,
type=RevisionType(d.pop("type")),
parents=tuple(d.pop("parents")), # for BW compat
**d,
)
def anonymize(self) -> "Revision":
"""Returns an anonymized version of the Revision object.
Anonymization consists in replacing the author and committer with an anonymized
Person object.
"""
return attr.evolve(
self, author=self.author.anonymize(), committer=self.committer.anonymize()
)
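# Sketch of the migration behaviour added above (all values are made up):
# extra headers found under metadata are hoisted into the new attribute,
# and metadata is deep-copied along the way.
_rev = Revision(
    message=b"example\n",
    author=Person.from_fullname(b"Jane Doe <jd@example.org>"),
    committer=Person.from_fullname(b"Jane Doe <jd@example.org>"),
    date=None,
    committer_date=None,
    type=RevisionType.GIT,
    directory=b"\x00" * 20,
    synthetic=True,
    metadata={"extra_headers": [(b"gpgsig", b"stub")]},
)
assert _rev.extra_headers == ((b"gpgsig", b"stub"),)
assert "extra_headers" not in _rev.metadata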
@attr.s(frozen=True)
class DirectoryEntry(BaseModel):
object_type: Final = "directory_entry"
name = attr.ib(type=bytes, validator=type_validator())
type = attr.ib(type=str, validator=attr.validators.in_(["file", "dir", "rev"]))
target = attr.ib(type=Sha1Git, validator=type_validator())
perms = attr.ib(type=int, validator=type_validator())
"""Usually one of the values of `swh.model.from_disk.DentryPerms`."""
@attr.s(frozen=True)
class Directory(BaseModel, HashableObject):
object_type: Final = "directory"
entries = attr.ib(type=Tuple[DirectoryEntry, ...], validator=type_validator())
id = attr.ib(type=Sha1Git, validator=type_validator(), default=b"")
@staticmethod
def compute_hash(object_dict):
return directory_identifier(object_dict)
@classmethod
def from_dict(cls, d):
d = d.copy()
return cls(
entries=tuple(
DirectoryEntry.from_dict(entry) for entry in d.pop("entries")
),
**d,
)
@attr.s(frozen=True)
class BaseContent(BaseModel):
status = attr.ib(
type=str, validator=attr.validators.in_(["visible", "hidden", "absent"])
)
@staticmethod
def _hash_data(data: bytes):
"""Hash some data, returning most of the fields of a content object"""
d = MultiHash.from_data(data).digest()
d["data"] = data
d["length"] = len(data)
return d
@classmethod
def from_dict(cls, d, use_subclass=True):
if use_subclass:
# Chooses a subclass to instantiate instead.
if d["status"] == "absent":
return SkippedContent.from_dict(d)
else:
return Content.from_dict(d)
else:
return super().from_dict(d)
def get_hash(self, hash_name):
if hash_name not in DEFAULT_ALGORITHMS:
raise ValueError("{} is not a valid hash name.".format(hash_name))
return getattr(self, hash_name)
def hashes(self) -> Dict[str, bytes]:
"""Returns a dictionary {hash_name: hash_value}"""
return {algo: getattr(self, algo) for algo in DEFAULT_ALGORITHMS}
@attr.s(frozen=True)
class Content(BaseContent):
object_type: Final = "content"
sha1 = attr.ib(type=bytes, validator=type_validator())
sha1_git = attr.ib(type=Sha1Git, validator=type_validator())
sha256 = attr.ib(type=bytes, validator=type_validator())
blake2s256 = attr.ib(type=bytes, validator=type_validator())
length = attr.ib(type=int, validator=type_validator())
status = attr.ib(
type=str,
validator=attr.validators.in_(["visible", "hidden"]),
default="visible",
)
data = attr.ib(type=Optional[bytes], validator=type_validator(), default=None)
ctime = attr.ib(
type=Optional[datetime.datetime],
validator=type_validator(),
default=None,
eq=False,
)
@length.validator
def check_length(self, attribute, value):
"""Checks the length is positive."""
if value < 0:
raise ValueError("Length must be positive.")
def to_dict(self):
content = super().to_dict()
if content["data"] is None:
del content["data"]
return content
@classmethod
def from_data(cls, data, status="visible", ctime=None) -> "Content":
"""Generate a Content from a given `data` byte string.
This populates the Content with the hashes and length for the data
passed as argument, as well as the data itself.
"""
d = cls._hash_data(data)
d["status"] = status
d["ctime"] = ctime
return cls(**d)
@classmethod
def from_dict(cls, d):
if isinstance(d.get("ctime"), str):
d = d.copy()
d["ctime"] = dateutil.parser.parse(d["ctime"])
return super().from_dict(d, use_subclass=False)
def with_data(self) -> "Content":
"""Loads the `data` attribute; meaning that it is guaranteed not to
be None after this call.
This call is almost a no-op, but subclasses may overload this method
to lazy-load data (eg. from disk or objstorage)."""
if self.data is None:
raise MissingData("Content data is None.")
return self
@attr.s(frozen=True)
class SkippedContent(BaseContent):
object_type: Final = "skipped_content"
sha1 = attr.ib(type=Optional[bytes], validator=type_validator())
sha1_git = attr.ib(type=Optional[Sha1Git], validator=type_validator())
sha256 = attr.ib(type=Optional[bytes], validator=type_validator())
blake2s256 = attr.ib(type=Optional[bytes], validator=type_validator())
length = attr.ib(type=Optional[int], validator=type_validator())
status = attr.ib(type=str, validator=attr.validators.in_(["absent"]))
reason = attr.ib(type=Optional[str], validator=type_validator(), default=None)
origin = attr.ib(type=Optional[str], validator=type_validator(), default=None)
ctime = attr.ib(
type=Optional[datetime.datetime],
validator=type_validator(),
default=None,
eq=False,
)
@reason.validator
def check_reason(self, attribute, value):
"""Checks the reason is full if status != absent."""
assert self.reason == value
if value is None:
raise ValueError("Must provide a reason if content is absent.")
@length.validator
def check_length(self, attribute, value):
"""Checks the length is positive or -1."""
if value < -1:
raise ValueError("Length must be positive or -1.")
def to_dict(self):
content = super().to_dict()
if content["origin"] is None:
del content["origin"]
return content
@classmethod
def from_data(
cls, data: bytes, reason: str, ctime: Optional[datetime.datetime] = None
) -> "SkippedContent":
"""Generate a SkippedContent from a given `data` byte string.
This populates the SkippedContent with the hashes and length for the
data passed as argument.
You can use `attr.evolve` on such a generated content to nullify some
of its attributes, e.g. for tests.
"""
d = cls._hash_data(data)
del d["data"]
d["status"] = "absent"
d["reason"] = reason
d["ctime"] = ctime
return cls(**d)
@classmethod
def from_dict(cls, d):
d2 = d.copy()
if d2.pop("data", None) is not None:
raise ValueError('SkippedContent has no "data" attribute %r' % d)
return super().from_dict(d2, use_subclass=False)
diff --git a/swh/model/tests/test_identifiers.py b/swh/model/tests/test_identifiers.py
index 6edb26c..c03b9ef 100644
--- a/swh/model/tests/test_identifiers.py
+++ b/swh/model/tests/test_identifiers.py
@@ -1,1070 +1,1066 @@
# Copyright (C) 2015-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import binascii
import datetime
import pytest
import unittest
from swh.model import hashutil, identifiers
from swh.model.exceptions import ValidationError
from swh.model.hashutil import hash_to_bytes as _x
from swh.model.identifiers import (
CONTENT,
DIRECTORY,
RELEASE,
REVISION,
SNAPSHOT,
SWHID,
normalize_timestamp,
)
class UtilityFunctionsIdentifier(unittest.TestCase):
def setUp(self):
self.str_id = "c2e41aae41ac17bd4a650770d6ee77f62e52235b"
self.bytes_id = binascii.unhexlify(self.str_id)
self.bad_type_id = object()
def test_identifier_to_bytes(self):
for id in [self.str_id, self.bytes_id]:
self.assertEqual(identifiers.identifier_to_bytes(id), self.bytes_id)
# wrong length
with self.assertRaises(ValueError) as cm:
identifiers.identifier_to_bytes(id[:-2])
self.assertIn("length", str(cm.exception))
with self.assertRaises(ValueError) as cm:
identifiers.identifier_to_bytes(self.bad_type_id)
self.assertIn("type", str(cm.exception))
def test_identifier_to_str(self):
for id in [self.str_id, self.bytes_id]:
self.assertEqual(identifiers.identifier_to_str(id), self.str_id)
# wrong length
with self.assertRaises(ValueError) as cm:
identifiers.identifier_to_str(id[:-2])
self.assertIn("length", str(cm.exception))
with self.assertRaises(ValueError) as cm:
identifiers.identifier_to_str(self.bad_type_id)
self.assertIn("type", str(cm.exception))
class UtilityFunctionsDateOffset(unittest.TestCase):
def setUp(self):
self.dates = {
b"1448210036": {"seconds": 1448210036, "microseconds": 0,},
b"1448210036.002342": {"seconds": 1448210036, "microseconds": 2342,},
b"1448210036.12": {"seconds": 1448210036, "microseconds": 120000,},
}
self.broken_dates = [
1448210036.12,
]
self.offsets = {
0: b"+0000",
-630: b"-1030",
800: b"+1320",
}
def test_format_date(self):
for date_repr, date in self.dates.items():
self.assertEqual(identifiers.format_date(date), date_repr)
def test_format_date_fail(self):
for date in self.broken_dates:
with self.assertRaises(ValueError):
identifiers.format_date(date)
def test_format_offset(self):
for offset, res in self.offsets.items():
self.assertEqual(identifiers.format_offset(offset), res)
class ContentIdentifier(unittest.TestCase):
def setUp(self):
self.content = {
"status": "visible",
"length": 5,
"data": b"1984\n",
"ctime": datetime.datetime(
2015, 11, 22, 16, 33, 56, tzinfo=datetime.timezone.utc
),
}
self.content_id = hashutil.MultiHash.from_data(self.content["data"]).digest()
def test_content_identifier(self):
self.assertEqual(identifiers.content_identifier(self.content), self.content_id)
directory_example = {
"id": "d7ed3d2c31d608823be58b1cbe57605310615231",
"entries": [
{
"type": "file",
"perms": 33188,
"name": b"README",
"target": _x("37ec8ea2110c0b7a32fbb0e872f6e7debbf95e21"),
},
{
"type": "file",
"perms": 33188,
"name": b"Rakefile",
"target": _x("3bb0e8592a41ae3185ee32266c860714980dbed7"),
},
{
"type": "dir",
"perms": 16384,
"name": b"app",
"target": _x("61e6e867f5d7ba3b40540869bc050b0c4fed9e95"),
},
{
"type": "file",
"perms": 33188,
"name": b"1.megabyte",
"target": _x("7c2b2fbdd57d6765cdc9d84c2d7d333f11be7fb3"),
},
{
"type": "dir",
"perms": 16384,
"name": b"config",
"target": _x("591dfe784a2e9ccc63aaba1cb68a765734310d98"),
},
{
"type": "dir",
"perms": 16384,
"name": b"public",
"target": _x("9588bf4522c2b4648bfd1c61d175d1f88c1ad4a5"),
},
{
"type": "file",
"perms": 33188,
"name": b"development.sqlite3",
"target": _x("e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"),
},
{
"type": "dir",
"perms": 16384,
"name": b"doc",
"target": _x("154705c6aa1c8ead8c99c7915373e3c44012057f"),
},
{
"type": "dir",
"perms": 16384,
"name": b"db",
"target": _x("85f157bdc39356b7bc7de9d0099b4ced8b3b382c"),
},
{
"type": "dir",
"perms": 16384,
"name": b"log",
"target": _x("5e3d3941c51cce73352dff89c805a304ba96fffe"),
},
{
"type": "dir",
"perms": 16384,
"name": b"script",
"target": _x("1b278423caf176da3f3533592012502aa10f566c"),
},
{
"type": "dir",
"perms": 16384,
"name": b"test",
"target": _x("035f0437c080bfd8711670b3e8677e686c69c763"),
},
{
"type": "dir",
"perms": 16384,
"name": b"vendor",
"target": _x("7c0dc9ad978c1af3f9a4ce061e50f5918bd27138"),
},
{
"type": "rev",
"perms": 57344,
"name": b"will_paginate",
"target": _x("3d531e169db92a16a9a8974f0ae6edf52e52659e"),
},
# in git order, the dir named "order" should be between the files
# named "order." and "order0"
{
"type": "dir",
"perms": 16384,
"name": b"order",
"target": _x("62cdb7020ff920e5aa642c3d4066950dd1f01f4d"),
},
{
"type": "file",
"perms": 16384,
"name": b"order.",
"target": _x("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"),
},
{
"type": "file",
"perms": 16384,
"name": b"order0",
"target": _x("bbe960a25ea311d21d40669e93df2003ba9b90a2"),
},
],
}
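# A minimal sketch of the git tree-entry ordering that the "order", "order."
# and "order0" entries above exercise: entries sort by name bytes, but
# directories compare as if their name ended in "/", so b"order." (0x2e) <
# b"order/" (0x2f) < b"order0" (0x30). The helper name is hypothetical.
def _git_entry_sort_key(entry: dict) -> bytes:
    name = entry["name"]
    return name + b"/" if entry["type"] == "dir" else name

canonical_order = sorted(directory_example["entries"], key=_git_entry_sort_key)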
class DirectoryIdentifier(unittest.TestCase):
def setUp(self):
self.directory = directory_example
self.empty_directory = {
"id": "4b825dc642cb6eb9a060e54bf8d69288fbee4904",
"entries": [],
}
def test_dir_identifier(self):
self.assertEqual(
identifiers.directory_identifier(self.directory), self.directory["id"]
)
def test_dir_identifier_entry_order(self):
# Reverse order of entries, check the id is still the same.
directory = {"entries": reversed(self.directory["entries"])}
self.assertEqual(
identifiers.directory_identifier(directory), self.directory["id"]
)
def test_dir_identifier_empty_directory(self):
self.assertEqual(
identifiers.directory_identifier(self.empty_directory),
self.empty_directory["id"],
)
linus_tz = datetime.timezone(datetime.timedelta(minutes=-420))
revision_example = {
"id": "bc0195aad0daa2ad5b0d76cce22b167bc3435590",
"directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"),
"parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")],
"author": {
"name": b"Linus Torvalds",
"email": b"torvalds@linux-foundation.org",
"fullname": b"Linus Torvalds <torvalds@linux-foundation.org>",
},
"date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz),
"committer": {
"name": b"Linus Torvalds",
"email": b"torvalds@linux-foundation.org",
"fullname": b"Linus Torvalds <torvalds@linux-foundation.org>",
},
"committer_date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz),
"message": b"Linux 4.2-rc2\n",
"type": "git",
"synthetic": False,
}
class RevisionIdentifier(unittest.TestCase):
def setUp(self):
gpgsig = b"""\
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.13 (Darwin)
iQIcBAABAgAGBQJVJcYsAAoJEBiY3kIkQRNJVAUQAJ8/XQIfMqqC5oYeEFfHOPYZ
L7qy46bXHVBa9Qd8zAJ2Dou3IbI2ZoF6/Et89K/UggOycMlt5FKV/9toWyuZv4Po
L682wonoxX99qvVTHo6+wtnmYO7+G0f82h+qHMErxjP+I6gzRNBvRr+SfY7VlGdK
wikMKOMWC5smrScSHITnOq1Ews5pe3N7qDYMzK0XVZmgDoaem4RSWMJs4My/qVLN
e0CqYWq2A22GX7sXl6pjneJYQvcAXUX+CAzp24QnPSb+Q22Guj91TcxLFcHCTDdn
qgqMsEyMiisoglwrCbO+D+1xq9mjN9tNFWP66SQ48mrrHYTBV5sz9eJyDfroJaLP
CWgbDTgq6GzRMehHT3hXfYS5NNatjnhkNISXR7pnVP/obIi/vpWh5ll6Gd8q26z+
a/O41UzOaLTeNI365MWT4/cnXohVLRG7iVJbAbCxoQmEgsYMRc/pBAzWJtLfcB2G
jdTswYL6+MUdL8sB9pZ82D+BP/YAdHe69CyTu1lk9RT2pYtI/kkfjHubXBCYEJSG
+VGllBbYG6idQJpyrOYNRJyrDi9yvDJ2W+S0iQrlZrxzGBVGTB/y65S8C+2WTBcE
lf1Qb5GDsQrZWgD+jtWTywOYHtCBwyCKSAXxSARMbNPeak9WPlcW/Jmu+fUcMe2x
dg1KdHOa34shrKDaOVzW
=od6m
-----END PGP SIGNATURE-----"""
self.revision = revision_example
self.revision_none_metadata = {
"id": "bc0195aad0daa2ad5b0d76cce22b167bc3435590",
"directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"),
"parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")],
"author": {
"name": b"Linus Torvalds",
"email": b"torvalds@linux-foundation.org",
},
"date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz),
"committer": {
"name": b"Linus Torvalds",
"email": b"torvalds@linux-foundation.org",
},
"committer_date": datetime.datetime(
2015, 7, 12, 15, 10, 30, tzinfo=linus_tz
),
"message": b"Linux 4.2-rc2\n",
"metadata": None,
}
self.synthetic_revision = {
"id": b"\xb2\xa7\xe1&\x04\x92\xe3D\xfa\xb3\xcb\xf9\x1b\xc1<\x91"
b"\xe0T&\xfd",
"author": {
"name": b"Software Heritage",
"email": b"robot@softwareheritage.org",
},
"date": {
"timestamp": {"seconds": 1437047495},
"offset": 0,
"negative_utc": False,
},
"type": "tar",
"committer": {
"name": b"Software Heritage",
"email": b"robot@softwareheritage.org",
},
"committer_date": 1437047495,
"synthetic": True,
"parents": [None],
"message": b"synthetic revision message\n",
"directory": b"\xd1\x1f\x00\xa6\xa0\xfe\xa6\x05SA\xd2U\x84\xb5\xa9"
b"e\x16\xc0\xd2\xb8",
"metadata": {
"original_artifact": [
{
"archive_type": "tar",
"name": "gcc-5.2.0.tar.bz2",
"sha1_git": "39d281aff934d44b439730057e55b055e206a586",
"sha1": "fe3f5390949d47054b613edc36c557eb1d51c18e",
"sha256": "5f835b04b5f7dd4f4d2dc96190ec1621b8d89f"
"2dc6f638f9f8bc1b1014ba8cad",
}
]
},
}
# cat commit.txt | git hash-object -t commit --stdin
self.revision_with_extra_headers = {
"id": "010d34f384fa99d047cdd5e2f41e56e5c2feee45",
"directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"),
"parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")],
"author": {
"name": b"Linus Torvalds",
"email": b"torvalds@linux-foundation.org",
"fullname": b"Linus Torvalds <torvalds@linux-foundation.org>",
},
"date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz),
"committer": {
"name": b"Linus Torvalds",
"email": b"torvalds@linux-foundation.org",
"fullname": b"Linus Torvalds <torvalds@linux-foundation.org>",
},
"committer_date": datetime.datetime(
2015, 7, 12, 15, 10, 30, tzinfo=linus_tz
),
"message": b"Linux 4.2-rc2\n",
- "metadata": {
- "extra_headers": [
- ["svn-repo-uuid", "046f1af7-66c2-d61b-5410-ce57b7db7bff"],
- ["svn-revision", 10],
- ]
- },
+ "extra_headers": (
+ (b"svn-repo-uuid", b"046f1af7-66c2-d61b-5410-ce57b7db7bff"),
+ (b"svn-revision", b"10"),
+ ),
}
self.revision_with_gpgsig = {
"id": "44cc742a8ca17b9c279be4cc195a93a6ef7a320e",
"directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"),
"parents": [
_x("689664ae944b4692724f13b709a4e4de28b54e57"),
_x("c888305e1efbaa252d01b4e5e6b778f865a97514"),
],
"author": {
"name": b"Jiang Xin",
"email": b"worldhello.net@gmail.com",
"fullname": b"Jiang Xin <worldhello.net@gmail.com>",
},
"date": {"timestamp": 1428538899, "offset": 480,},
"committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",},
"committer_date": {"timestamp": 1428538899, "offset": 480,},
- "metadata": {"extra_headers": [["gpgsig", gpgsig],],},
+ "extra_headers": ((b"gpgsig", gpgsig),),
"message": b"""Merge branch 'master' of git://github.com/alexhenrie/git-po
* 'master' of git://github.com/alexhenrie/git-po:
l10n: ca.po: update translation
""",
}
self.revision_no_message = {
"id": "4cfc623c9238fa92c832beed000ce2d003fd8333",
"directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"),
"parents": [
_x("689664ae944b4692724f13b709a4e4de28b54e57"),
_x("c888305e1efbaa252d01b4e5e6b778f865a97514"),
],
"author": {
"name": b"Jiang Xin",
"email": b"worldhello.net@gmail.com",
"fullname": b"Jiang Xin <worldhello.net@gmail.com>",
},
"date": {"timestamp": 1428538899, "offset": 480,},
"committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",},
"committer_date": {"timestamp": 1428538899, "offset": 480,},
"message": None,
}
self.revision_empty_message = {
"id": "7442cd78bd3b4966921d6a7f7447417b7acb15eb",
"directory": _x("b134f9b7dc434f593c0bab696345548b37de0558"),
"parents": [
_x("689664ae944b4692724f13b709a4e4de28b54e57"),
_x("c888305e1efbaa252d01b4e5e6b778f865a97514"),
],
"author": {
"name": b"Jiang Xin",
"email": b"worldhello.net@gmail.com",
"fullname": b"Jiang Xin <worldhello.net@gmail.com>",
},
"date": {"timestamp": 1428538899, "offset": 480,},
"committer": {"name": b"Jiang Xin", "email": b"worldhello.net@gmail.com",},
"committer_date": {"timestamp": 1428538899, "offset": 480,},
"message": b"",
}
self.revision_only_fullname = {
"id": "010d34f384fa99d047cdd5e2f41e56e5c2feee45",
"directory": _x("85a74718d377195e1efd0843ba4f3260bad4fe07"),
"parents": [_x("01e2d0627a9a6edb24c37db45db5ecb31e9de808")],
"author": {"fullname": b"Linus Torvalds <torvalds@linux-foundation.org>",},
"date": datetime.datetime(2015, 7, 12, 15, 10, 30, tzinfo=linus_tz),
"committer": {
"fullname": b"Linus Torvalds <torvalds@linux-foundation.org>",
},
"committer_date": datetime.datetime(
2015, 7, 12, 15, 10, 30, tzinfo=linus_tz
),
"message": b"Linux 4.2-rc2\n",
- "metadata": {
- "extra_headers": [
- ["svn-repo-uuid", "046f1af7-66c2-d61b-5410-ce57b7db7bff"],
- ["svn-revision", 10],
- ]
- },
+ "extra_headers": (
+ (b"svn-repo-uuid", b"046f1af7-66c2-d61b-5410-ce57b7db7bff"),
+ (b"svn-revision", b"10"),
+ ),
}
def test_revision_identifier(self):
self.assertEqual(
identifiers.revision_identifier(self.revision),
identifiers.identifier_to_str(self.revision["id"]),
)
def test_revision_identifier_none_metadata(self):
self.assertEqual(
identifiers.revision_identifier(self.revision_none_metadata),
identifiers.identifier_to_str(self.revision_none_metadata["id"]),
)
def test_revision_identifier_synthetic(self):
self.assertEqual(
identifiers.revision_identifier(self.synthetic_revision),
identifiers.identifier_to_str(self.synthetic_revision["id"]),
)
def test_revision_identifier_with_extra_headers(self):
self.assertEqual(
identifiers.revision_identifier(self.revision_with_extra_headers),
identifiers.identifier_to_str(self.revision_with_extra_headers["id"]),
)
def test_revision_identifier_with_gpgsig(self):
self.assertEqual(
identifiers.revision_identifier(self.revision_with_gpgsig),
identifiers.identifier_to_str(self.revision_with_gpgsig["id"]),
)
def test_revision_identifier_no_message(self):
self.assertEqual(
identifiers.revision_identifier(self.revision_no_message),
identifiers.identifier_to_str(self.revision_no_message["id"]),
)
def test_revision_identifier_empty_message(self):
self.assertEqual(
identifiers.revision_identifier(self.revision_empty_message),
identifiers.identifier_to_str(self.revision_empty_message["id"]),
)
def test_revision_identifier_only_fullname(self):
self.assertEqual(
identifiers.revision_identifier(self.revision_only_fullname),
identifiers.identifier_to_str(self.revision_only_fullname["id"]),
)
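# A minimal sketch of git's object hashing, which the revision-identifier
# tests above reproduce for commits: SHA1 over "<type> <length>\0<body>",
# where the body is the serialized manifest (tree, parents, author,
# committer, any extra headers, then the message). The helper name is
# hypothetical.
import hashlib

def _git_object_id_sketch(obj_type: bytes, body: bytes) -> str:
    header = obj_type + b" " + str(len(body)).encode() + b"\x00"
    return hashlib.sha1(header + body).hexdigest()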
release_example = {
"id": "2b10839e32c4c476e9d94492756bb1a3e1ec4aa8",
"target": b't\x1b"R\xa5\xe1Ml`\xa9\x13\xc7z`\x99\xab\xe7:\x85J',
"target_type": "revision",
"name": b"v2.6.14",
"author": {
"name": b"Linus Torvalds",
"email": b"torvalds@g5.osdl.org",
"fullname": b"Linus Torvalds <torvalds@g5.osdl.org>",
},
"date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz),
"message": b"""\
Linux 2.6.14 release
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.1 (GNU/Linux)
iD8DBQBDYWq6F3YsRnbiHLsRAmaeAJ9RCez0y8rOBbhSv344h86l/VVcugCeIhO1
wdLOnvj91G4wxYqrvThthbE=
=7VeT
-----END PGP SIGNATURE-----
""",
"synthetic": False,
}
class ReleaseIdentifier(unittest.TestCase):
def setUp(self):
linus_tz = datetime.timezone(datetime.timedelta(minutes=-420))
self.release = release_example
self.release_no_author = {
"id": b"&y\x1a\x8b\xcf\x0em3\xf4:\xefv\x82\xbd\xb5U#mV\xde",
"target": "9ee1c939d1cb936b1f98e8d81aeffab57bae46ab",
"target_type": "revision",
"name": b"v2.6.12",
"message": b"""\
This is the final 2.6.12 release
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.2.4 (GNU/Linux)
iD8DBQBCsykyF3YsRnbiHLsRAvPNAJ482tCZwuxp/bJRz7Q98MHlN83TpACdHr37
o6X/3T+vm8K3bf3driRr34c=
=sBHn
-----END PGP SIGNATURE-----
""",
"synthetic": False,
}
self.release_no_message = {
"id": "b6f4f446715f7d9543ef54e41b62982f0db40045",
"target": "9ee1c939d1cb936b1f98e8d81aeffab57bae46ab",
"target_type": "revision",
"name": b"v2.6.12",
"author": {"name": b"Linus Torvalds", "email": b"torvalds@g5.osdl.org",},
"date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz),
"message": None,
}
self.release_empty_message = {
"id": "71a0aea72444d396575dc25ac37fec87ee3c6492",
"target": "9ee1c939d1cb936b1f98e8d81aeffab57bae46ab",
"target_type": "revision",
"name": b"v2.6.12",
"author": {"name": b"Linus Torvalds", "email": b"torvalds@g5.osdl.org",},
"date": datetime.datetime(2005, 10, 27, 17, 2, 33, tzinfo=linus_tz),
"message": b"",
}
self.release_negative_utc = {
"id": "97c8d2573a001f88e72d75f596cf86b12b82fd01",
"name": b"20081029",
"target": "54e9abca4c77421e2921f5f156c9fe4a9f7441c7",
"target_type": "revision",
"date": {
"timestamp": {"seconds": 1225281976},
"offset": 0,
"negative_utc": True,
},
"author": {
"name": b"Otavio Salvador",
"email": b"otavio@debian.org",
"id": 17640,
},
"synthetic": False,
"message": b"tagging version 20081029\n\nr56558\n",
}
self.release_newline_in_author = {
"author": {
"email": b"esycat@gmail.com",
"fullname": b"Eugene Janusov\n<esycat@gmail.com>",
"name": b"Eugene Janusov\n",
},
"date": {
"negative_utc": None,
"offset": 600,
"timestamp": {"microseconds": 0, "seconds": 1377480558,},
},
"id": b"\\\x98\xf5Y\xd04\x16-\xe2->\xbe\xb9T3\xe6\xf8\x88R1",
"message": b"Release of v0.3.2.",
"name": b"0.3.2",
"synthetic": False,
"target": (b"\xc0j\xa3\xd9;x\xa2\x86\\I5\x17" b"\x000\xf8\xc2\xd79o\xd3"),
"target_type": "revision",
}
self.release_snapshot_target = dict(self.release)
self.release_snapshot_target["target_type"] = "snapshot"
self.release_snapshot_target["id"] = "c29c3ddcc6769a04e54dd69d63a6fdcbc566f850"
def test_release_identifier(self):
self.assertEqual(
identifiers.release_identifier(self.release),
identifiers.identifier_to_str(self.release["id"]),
)
def test_release_identifier_no_author(self):
self.assertEqual(
identifiers.release_identifier(self.release_no_author),
identifiers.identifier_to_str(self.release_no_author["id"]),
)
def test_release_identifier_no_message(self):
self.assertEqual(
identifiers.release_identifier(self.release_no_message),
identifiers.identifier_to_str(self.release_no_message["id"]),
)
def test_release_identifier_empty_message(self):
self.assertEqual(
identifiers.release_identifier(self.release_empty_message),
identifiers.identifier_to_str(self.release_empty_message["id"]),
)
def test_release_identifier_negative_utc(self):
self.assertEqual(
identifiers.release_identifier(self.release_negative_utc),
identifiers.identifier_to_str(self.release_negative_utc["id"]),
)
def test_release_identifier_newline_in_author(self):
self.assertEqual(
identifiers.release_identifier(self.release_newline_in_author),
identifiers.identifier_to_str(self.release_newline_in_author["id"]),
)
def test_release_identifier_snapshot_target(self):
self.assertEqual(
identifiers.release_identifier(self.release_snapshot_target),
identifiers.identifier_to_str(self.release_snapshot_target["id"]),
)
snapshot_example = {
"id": _x("6e65b86363953b780d92b0a928f3e8fcdd10db36"),
"branches": {
b"directory": {
"target": _x("1bd0e65f7d2ff14ae994de17a1e7fe65111dcad8"),
"target_type": "directory",
},
b"content": {
"target": _x("fe95a46679d128ff167b7c55df5d02356c5a1ae1"),
"target_type": "content",
},
b"alias": {"target": b"revision", "target_type": "alias",},
b"revision": {
"target": _x("aafb16d69fd30ff58afdd69036a26047f3aebdc6"),
"target_type": "revision",
},
b"release": {
"target": _x("7045404f3d1c54e6473c71bbb716529fbad4be24"),
"target_type": "release",
},
b"snapshot": {
"target": _x("1a8893e6a86f444e8be8e7bda6cb34fb1735a00e"),
"target_type": "snapshot",
},
b"dangling": None,
},
}
class SnapshotIdentifier(unittest.TestCase):
def setUp(self):
super().setUp()
self.empty = {
"id": "1a8893e6a86f444e8be8e7bda6cb34fb1735a00e",
"branches": {},
}
self.dangling_branch = {
"id": "c84502e821eb21ed84e9fd3ec40973abc8b32353",
"branches": {b"HEAD": None,},
}
self.unresolved = {
"id": "84b4548ea486e4b0a7933fa541ff1503a0afe1e0",
"branches": {b"foo": {"target": b"bar", "target_type": "alias",},},
}
self.all_types = snapshot_example
def test_empty_snapshot(self):
self.assertEqual(
identifiers.snapshot_identifier(self.empty),
identifiers.identifier_to_str(self.empty["id"]),
)
def test_dangling_branch(self):
self.assertEqual(
identifiers.snapshot_identifier(self.dangling_branch),
identifiers.identifier_to_str(self.dangling_branch["id"]),
)
def test_unresolved(self):
with self.assertRaisesRegex(ValueError, "b'foo' -> b'bar'"):
identifiers.snapshot_identifier(self.unresolved)
def test_unresolved_force(self):
self.assertEqual(
identifiers.snapshot_identifier(self.unresolved, ignore_unresolved=True,),
identifiers.identifier_to_str(self.unresolved["id"]),
)
def test_all_types(self):
self.assertEqual(
identifiers.snapshot_identifier(self.all_types),
identifiers.identifier_to_str(self.all_types["id"]),
)
def test_swhid(self):
_snapshot_id = _x("c7c108084bc0bf3d81436bf980b46e98bd338453")
_release_id = "22ece559cc7cc2364edc5e5593d63ae8bd229f9f"
_revision_id = "309cf2674ee7a0749978cf8265ab91a60aea0f7d"
_directory_id = "d198bc9d7a6bcf6db04f476d29314f157507d505"
_content_id = "94a9ed024d3859793618152ea559a168bbcbb5e2"
_snapshot = {"id": _snapshot_id}
_release = {"id": _release_id}
_revision = {"id": _revision_id}
_directory = {"id": _directory_id}
_content = {"sha1_git": _content_id}
for full_type, _hash, expected_swhid, version, _meta in [
(
SNAPSHOT,
_snapshot_id,
"swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453",
None,
{},
),
(
RELEASE,
_release_id,
"swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f",
1,
{},
),
(
REVISION,
_revision_id,
"swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d",
None,
{},
),
(
DIRECTORY,
_directory_id,
"swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505",
None,
{},
),
(
CONTENT,
_content_id,
"swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2",
1,
{},
),
(
SNAPSHOT,
_snapshot,
"swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453",
None,
{},
),
(
RELEASE,
_release,
"swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f",
1,
{},
),
(
REVISION,
_revision,
"swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d",
None,
{},
),
(
DIRECTORY,
_directory,
"swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505",
None,
{},
),
(
CONTENT,
_content,
"swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2",
1,
{},
),
(
CONTENT,
_content,
"swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2;origin=1",
1,
{"origin": "1"},
),
]:
if version:
actual_value = identifiers.swhid(
full_type, _hash, version, metadata=_meta
)
else:
actual_value = identifiers.swhid(full_type, _hash, metadata=_meta)
self.assertEqual(actual_value, expected_swhid)
def test_swhid_wrong_input(self):
_snapshot_id = "notahash4bc0bf3d81436bf980b46e98bd338453"
_snapshot = {"id": _snapshot_id}
for _type, _hash in [
(SNAPSHOT, _snapshot_id),
(SNAPSHOT, _snapshot),
("foo", ""),
]:
with self.assertRaises(ValidationError):
identifiers.swhid(_type, _hash)
def test_parse_swhid(self):
for swhid, _type, _version, _hash in [
(
"swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2",
CONTENT,
1,
"94a9ed024d3859793618152ea559a168bbcbb5e2",
),
(
"swh:1:dir:d198bc9d7a6bcf6db04f476d29314f157507d505",
DIRECTORY,
1,
"d198bc9d7a6bcf6db04f476d29314f157507d505",
),
(
"swh:1:rev:309cf2674ee7a0749978cf8265ab91a60aea0f7d",
REVISION,
1,
"309cf2674ee7a0749978cf8265ab91a60aea0f7d",
),
(
"swh:1:rel:22ece559cc7cc2364edc5e5593d63ae8bd229f9f",
RELEASE,
1,
"22ece559cc7cc2364edc5e5593d63ae8bd229f9f",
),
(
"swh:1:snp:c7c108084bc0bf3d81436bf980b46e98bd338453",
SNAPSHOT,
1,
"c7c108084bc0bf3d81436bf980b46e98bd338453",
),
]:
expected_result = SWHID(
namespace="swh",
scheme_version=_version,
object_type=_type,
object_id=_hash,
metadata={},
)
actual_result = identifiers.parse_swhid(swhid)
self.assertEqual(actual_result, expected_result)
for swhid, _type, _version, _hash, _metadata in [
(
"swh:1:cnt:9c95815d9e9d91b8dae8e05d8bbc696fe19f796b;lines=1-18;origin=https://github.com/python/cpython", # noqa
CONTENT,
1,
"9c95815d9e9d91b8dae8e05d8bbc696fe19f796b",
{"lines": "1-18", "origin": "https://github.com/python/cpython"},
),
(
"swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;origin=deb://Debian/packages/linuxdoc-tools", # noqa
DIRECTORY,
1,
"0b6959356d30f1a4e9b7f6bca59b9a336464c03d",
{"origin": "deb://Debian/packages/linuxdoc-tools"},
),
]:
expected_result = SWHID(
namespace="swh",
scheme_version=_version,
object_type=_type,
object_id=_hash,
metadata=_metadata,
)
actual_result = identifiers.parse_swhid(swhid)
self.assertEqual(actual_result, expected_result)
def test_parse_swhid_parsing_error(self):
for swhid in [
("swh:1:cnt"),
("swh:1:"),
("swh:"),
("swh:1:cnt:"),
("foo:1:cnt:abc8bc9d7a6bcf6db04f476d29314f157507d505"),
("swh:2:dir:def8bc9d7a6bcf6db04f476d29314f157507d505"),
("swh:1:foo:fed8bc9d7a6bcf6db04f476d29314f157507d505"),
("swh:1:dir:0b6959356d30f1a4e9b7f6bca59b9a336464c03d;invalid;" "malformed"),
("swh:1:snp:gh6959356d30f1a4e9b7f6bca59b9a336464c03d"),
("swh:1:snp:foo"),
]:
with self.assertRaises(ValidationError):
identifiers.parse_swhid(swhid)
def test_persistentid_class_validation_error(self):
for _ns, _version, _type, _id in [
("foo", 1, CONTENT, "abc8bc9d7a6bcf6db04f476d29314f157507d505"),
("swh", 2, DIRECTORY, "def8bc9d7a6bcf6db04f476d29314f157507d505"),
("swh", 1, "foo", "fed8bc9d7a6bcf6db04f476d29314f157507d505"),
("swh", 1, SNAPSHOT, "gh6959356d30f1a4e9b7f6bca59b9a336464c03d"),
]:
with self.assertRaises(ValidationError):
SWHID(
namespace=_ns,
scheme_version=_version,
object_type=_type,
object_id=_id,
)
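# A minimal sketch of the SWHID grammar these tests exercise, assuming the
# swh:1:<type>:<40 hex digits> core with optional ";key=value" qualifiers;
# the regex is illustrative, not the validation used by parse_swhid.
import re

SWHID_RE = re.compile(r"^swh:1:(snp|rel|rev|dir|cnt):[0-9a-f]{40}(;[^;=]+=[^;]+)*$")
assert SWHID_RE.match("swh:1:cnt:94a9ed024d3859793618152ea559a168bbcbb5e2")
assert not SWHID_RE.match("swh:1:snp:foo")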
class OriginIdentifier(unittest.TestCase):
def setUp(self):
self.origin = {
"url": "https://github.com/torvalds/linux",
}
def test_origin_identifier(self):
self.assertEqual(
identifiers.origin_identifier(self.origin),
"b63a575fe3faab7692c9f38fb09d4bb45651bb0f",
)
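# Hedged sketch, stated as an assumption rather than read from this diff:
# the origin identifier above behaves like a plain SHA1 over the UTF-8 bytes
# of the origin URL.
import hashlib

url = "https://github.com/torvalds/linux"
print(hashlib.sha1(url.encode("utf-8")).hexdigest())  # expected: the value asserted above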
TS_DICTS = [
(
{"timestamp": 12345, "offset": 0},
{
"timestamp": {"seconds": 12345, "microseconds": 0},
"offset": 0,
"negative_utc": False,
},
),
(
{"timestamp": 12345, "offset": 0, "negative_utc": False},
{
"timestamp": {"seconds": 12345, "microseconds": 0},
"offset": 0,
"negative_utc": False,
},
),
(
{"timestamp": 12345, "offset": 0, "negative_utc": None},
{
"timestamp": {"seconds": 12345, "microseconds": 0},
"offset": 0,
"negative_utc": False,
},
),
(
{"timestamp": {"seconds": 12345}, "offset": 0, "negative_utc": None},
{
"timestamp": {"seconds": 12345, "microseconds": 0},
"offset": 0,
"negative_utc": False,
},
),
(
{
"timestamp": {"seconds": 12345, "microseconds": 0},
"offset": 0,
"negative_utc": None,
},
{
"timestamp": {"seconds": 12345, "microseconds": 0},
"offset": 0,
"negative_utc": False,
},
),
(
{
"timestamp": {"seconds": 12345, "microseconds": 100},
"offset": 0,
"negative_utc": None,
},
{
"timestamp": {"seconds": 12345, "microseconds": 100},
"offset": 0,
"negative_utc": False,
},
),
(
{"timestamp": 12345, "offset": 0, "negative_utc": True},
{
"timestamp": {"seconds": 12345, "microseconds": 0},
"offset": 0,
"negative_utc": True,
},
),
(
{"timestamp": 12345, "offset": 0, "negative_utc": None},
{
"timestamp": {"seconds": 12345, "microseconds": 0},
"offset": 0,
"negative_utc": False,
},
),
]
@pytest.mark.parametrize("dict_input,expected", TS_DICTS)
def test_normalize_timestamp_dict(dict_input, expected):
assert normalize_timestamp(dict_input) == expected
TS_DICTS_INVALID_TIMESTAMP = [
{"timestamp": 1.2, "offset": 0},
{"timestamp": "1", "offset": 0},
# these below should really also trigger a ValueError...
# {"timestamp": {"seconds": "1"}, "offset": 0},
# {"timestamp": {"seconds": 1.2}, "offset": 0},
# {"timestamp": {"seconds": 1.2}, "offset": 0},
]
@pytest.mark.parametrize("dict_input", TS_DICTS_INVALID_TIMESTAMP)
def test_normalize_timestamp_dict_invalid_timestamp(dict_input):
with pytest.raises(ValueError, match="non-integer timestamp"):
normalize_timestamp(dict_input)
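# A sketch of the normalization rule TS_DICTS encodes, inferred from the
# expected values above (hypothetical helper, not normalize_timestamp
# itself): bare integer timestamps gain microseconds=0, and a missing or
# None negative_utc defaults to False.
def _normalize_ts_sketch(d: dict) -> dict:
    ts = d["timestamp"]
    if isinstance(ts, int):
        ts = {"seconds": ts, "microseconds": 0}
    else:
        ts = {"seconds": ts["seconds"], "microseconds": ts.get("microseconds", 0)}
    return {"timestamp": ts, "offset": d["offset"], "negative_utc": bool(d.get("negative_utc"))}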
diff --git a/swh/model/tests/test_model.py b/swh/model/tests/test_model.py
index df94924..edfc829 100644
--- a/swh/model/tests/test_model.py
+++ b/swh/model/tests/test_model.py
@@ -1,492 +1,680 @@
# Copyright (C) 2019-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
import copy
import datetime
import attr
from attrs_strict import AttributeTypeError
from hypothesis import given
from hypothesis.strategies import binary
import pytest
from swh.model.model import (
BaseModel,
Content,
SkippedContent,
Directory,
Revision,
Release,
Snapshot,
Origin,
Timestamp,
TimestampWithTimezone,
MissingData,
Person,
)
from swh.model.hashutil import hash_to_bytes, MultiHash
import swh.model.hypothesis_strategies as strategies
from swh.model.identifiers import (
directory_identifier,
revision_identifier,
release_identifier,
snapshot_identifier,
)
from swh.model.tests.test_identifiers import (
directory_example,
revision_example,
release_example,
snapshot_example,
)
@given(strategies.objects())
def test_todict_inverse_fromdict(objtype_and_obj):
(obj_type, obj) = objtype_and_obj
if obj_type in ("origin", "origin_visit"):
return
obj_as_dict = obj.to_dict()
obj_as_dict_copy = copy.deepcopy(obj_as_dict)
# Check the composition of to_dict and from_dict is the identity
assert obj == type(obj).from_dict(obj_as_dict)
# Check from_dict() does not change the input dict
assert obj_as_dict == obj_as_dict_copy
# Check the composition of from_dict and to_dict is the identity
assert obj_as_dict == type(obj).from_dict(obj_as_dict).to_dict()
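# A concrete instance of the round-trip property above, using Origin as the
# simplest model; the URL value is illustrative.
from swh.model.model import Origin

origin = Origin(url="https://example.com/repo.git")
assert Origin.from_dict(origin.to_dict()) == origin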
# Anonymization
@given(strategies.objects())
def test_anonymization(objtype_and_obj):
(obj_type, obj) = objtype_and_obj
def check_person(p):
if p is not None:
assert p.name is None
assert p.email is None
assert len(p.fullname) == 32
anon_obj = obj.anonymize()
if obj_type == "person":
assert anon_obj is not None
check_person(anon_obj)
elif obj_type == "release":
assert anon_obj is not None
check_person(anon_obj.author)
elif obj_type == "revision":
assert anon_obj is not None
check_person(anon_obj.author)
check_person(anon_obj.committer)
else:
assert anon_obj is None
# Origin, OriginVisit
@given(strategies.origins())
def test_todict_origins(origin):
obj = origin.to_dict()
assert "type" not in obj
assert type(origin)(url=origin.url) == type(origin).from_dict(obj)
@given(strategies.origin_visits())
def test_todict_origin_visits(origin_visit):
obj = origin_visit.to_dict()
assert origin_visit == type(origin_visit).from_dict(obj)
@given(strategies.origin_visit_statuses())
def test_todict_origin_visit_statuses(origin_visit_status):
obj = origin_visit_status.to_dict()
assert origin_visit_status == type(origin_visit_status).from_dict(obj)
# Timestamp
@given(strategies.timestamps())
def test_timestamps_strategy(timestamp):
attr.validate(timestamp)
def test_timestamp_seconds():
attr.validate(Timestamp(seconds=0, microseconds=0))
with pytest.raises(AttributeTypeError):
Timestamp(seconds="0", microseconds=0)
attr.validate(Timestamp(seconds=2 ** 63 - 1, microseconds=0))
with pytest.raises(ValueError):
Timestamp(seconds=2 ** 63, microseconds=0)
attr.validate(Timestamp(seconds=-(2 ** 63), microseconds=0))
with pytest.raises(ValueError):
Timestamp(seconds=-(2 ** 63) - 1, microseconds=0)
def test_timestamp_microseconds():
attr.validate(Timestamp(seconds=0, microseconds=0))
with pytest.raises(AttributeTypeError):
Timestamp(seconds=0, microseconds="0")
attr.validate(Timestamp(seconds=0, microseconds=10 ** 6 - 1))
with pytest.raises(ValueError):
Timestamp(seconds=0, microseconds=10 ** 6)
with pytest.raises(ValueError):
Timestamp(seconds=0, microseconds=-1)
def test_timestamp_from_dict():
assert Timestamp.from_dict({"seconds": 10, "microseconds": 5})
with pytest.raises(AttributeTypeError):
Timestamp.from_dict({"seconds": "10", "microseconds": 5})
with pytest.raises(AttributeTypeError):
Timestamp.from_dict({"seconds": 10, "microseconds": "5"})
with pytest.raises(ValueError):
Timestamp.from_dict({"seconds": 0, "microseconds": -1})
Timestamp.from_dict({"seconds": 0, "microseconds": 10 ** 6 - 1})
with pytest.raises(ValueError):
Timestamp.from_dict({"seconds": 0, "microseconds": 10 ** 6})
# TimestampWithTimezone
def test_timestampwithtimezone():
ts = Timestamp(seconds=0, microseconds=0)
tstz = TimestampWithTimezone(timestamp=ts, offset=0, negative_utc=False)
attr.validate(tstz)
assert tstz.negative_utc is False
attr.validate(TimestampWithTimezone(timestamp=ts, offset=10, negative_utc=False))
attr.validate(TimestampWithTimezone(timestamp=ts, offset=-10, negative_utc=False))
tstz = TimestampWithTimezone(timestamp=ts, offset=0, negative_utc=True)
attr.validate(tstz)
assert tstz.negative_utc is True
with pytest.raises(AttributeTypeError):
TimestampWithTimezone(
timestamp=datetime.datetime.now(), offset=0, negative_utc=False
)
with pytest.raises(AttributeTypeError):
TimestampWithTimezone(timestamp=ts, offset="0", negative_utc=False)
with pytest.raises(AttributeTypeError):
TimestampWithTimezone(timestamp=ts, offset=1.0, negative_utc=False)
with pytest.raises(AttributeTypeError):
TimestampWithTimezone(timestamp=ts, offset=1, negative_utc=0)
with pytest.raises(ValueError):
TimestampWithTimezone(timestamp=ts, offset=1, negative_utc=True)
with pytest.raises(ValueError):
TimestampWithTimezone(timestamp=ts, offset=-1, negative_utc=True)
def test_timestampwithtimezone_from_datetime():
tz = datetime.timezone(datetime.timedelta(minutes=+60))
date = datetime.datetime(2020, 2, 27, 14, 39, 19, tzinfo=tz)
tstz = TimestampWithTimezone.from_datetime(date)
assert tstz == TimestampWithTimezone(
timestamp=Timestamp(seconds=1582810759, microseconds=0,),
offset=60,
negative_utc=False,
)
def test_timestampwithtimezone_from_iso8601():
date = "2020-02-27 14:39:19.123456+0100"
tstz = TimestampWithTimezone.from_iso8601(date)
assert tstz == TimestampWithTimezone(
timestamp=Timestamp(seconds=1582810759, microseconds=123456,),
offset=60,
negative_utc=False,
)
def test_timestampwithtimezone_from_iso8601_negative_utc():
date = "2020-02-27 13:39:19-0000"
tstz = TimestampWithTimezone.from_iso8601(date)
assert tstz == TimestampWithTimezone(
timestamp=Timestamp(seconds=1582810759, microseconds=0,),
offset=0,
negative_utc=True,
)
def test_person_from_fullname():
"""The author should have name, email and fullname filled.
"""
actual_person = Person.from_fullname(b"tony <ynot@dagobah>")
assert actual_person == Person(
fullname=b"tony <ynot@dagobah>", name=b"tony", email=b"ynot@dagobah",
)
def test_person_from_fullname_no_email():
"""The author and fullname should be the same as the input (author).
"""
actual_person = Person.from_fullname(b"tony")
assert actual_person == Person(fullname=b"tony", name=b"tony", email=None,)
def test_person_from_fullname_empty_person():
"""Empty person has only its fullname filled with the empty
byte-string.
"""
actual_person = Person.from_fullname(b"")
assert actual_person == Person(fullname=b"", name=None, email=None,)
def test_git_author_line_to_author():
# edge case out of the way
with pytest.raises(TypeError):
Person.from_fullname(None)
tests = {
b"a <b@c.com>": Person(name=b"a", email=b"b@c.com", fullname=b"a <b@c.com>",),
b"<foo@bar.com>": Person(
name=None, email=b"foo@bar.com", fullname=b"<foo@bar.com>",
),
b"malformed <email": Person(
name=b"malformed", email=b"email", fullname=b"malformed <email"
),
b'malformed <"<br"@ckets>': Person(
name=b"malformed",
email=b'"<br"@ckets',
fullname=b'malformed <"<br"@ckets>',
),
b"trailing <sp@c.e> ": Person(
name=b"trailing", email=b"sp@c.e", fullname=b"trailing <sp@c.e> ",
),
b"no<sp@c.e>": Person(name=b"no", email=b"sp@c.e", fullname=b"no<sp@c.e>",),
b" more <sp@c.es>": Person(
name=b"more", email=b"sp@c.es", fullname=b" more <sp@c.es>",
),
b" <>": Person(name=None, email=None, fullname=b" <>",),
}
for person in sorted(tests):
expected_person = tests[person]
assert expected_person == Person.from_fullname(person)
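# An approximate sketch of the parsing rule the table above pins down,
# inferred from the expected Person values (hypothetical helper, not the
# actual Person.from_fullname): the name is everything before the first "<",
# the email is what follows it, up to the last ">" if one is present.
def _parse_fullname_sketch(fullname: bytes):
    if b"<" not in fullname:
        return (fullname or None), None
    name, _, rest = fullname.partition(b"<")
    email = rest.rsplit(b">", 1)[0] if b">" in rest else rest
    return (name.strip() or None), (email or None)

assert _parse_fullname_sketch(b"trailing <sp@c.e> ") == (b"trailing", b"sp@c.e")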
# Content
def test_content_get_hash():
hashes = dict(sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux")
c = Content(length=42, status="visible", **hashes)
for (hash_name, hash_) in hashes.items():
assert c.get_hash(hash_name) == hash_
def test_content_hashes():
hashes = dict(sha1=b"foo", sha1_git=b"bar", sha256=b"baz", blake2s256=b"qux")
c = Content(length=42, status="visible", **hashes)
assert c.hashes() == hashes
def test_content_data():
c = Content(
length=42,
status="visible",
data=b"foo",
sha1=b"foo",
sha1_git=b"bar",
sha256=b"baz",
blake2s256=b"qux",
)
assert c.with_data() == c
def test_content_data_missing():
c = Content(
length=42,
status="visible",
sha1=b"foo",
sha1_git=b"bar",
sha256=b"baz",
blake2s256=b"qux",
)
with pytest.raises(MissingData):
c.with_data()
@given(strategies.present_contents_d())
def test_content_from_dict(content_d):
c = Content.from_data(**content_d)
assert c
assert c.ctime == content_d["ctime"]
content_d2 = c.to_dict()
c2 = Content.from_dict(content_d2)
assert c2.ctime == c.ctime
def test_content_from_dict_str_ctime():
# test with ctime as a string
n = datetime.datetime(2020, 5, 6, 12, 34)
content_d = {
"ctime": n.isoformat(),
"data": b"",
"length": 0,
"sha1": b"\x00",
"sha256": b"\x00",
"sha1_git": b"\x00",
"blake2s256": b"\x00",
}
c = Content.from_dict(content_d)
assert c.ctime == n
@given(binary(max_size=4096))
def test_content_from_data(data):
c = Content.from_data(data)
assert c.data == data
assert c.length == len(data)
assert c.status == "visible"
for key, value in MultiHash.from_data(data).digest().items():
assert getattr(c, key) == value
@given(binary(max_size=4096))
def test_hidden_content_from_data(data):
c = Content.from_data(data, status="hidden")
assert c.data == data
assert c.length == len(data)
assert c.status == "hidden"
for key, value in MultiHash.from_data(data).digest().items():
assert getattr(c, key) == value
# SkippedContent
@given(binary(max_size=4096))
def test_skipped_content_from_data(data):
c = SkippedContent.from_data(data, reason="reason")
assert c.reason == "reason"
assert c.length == len(data)
assert c.status == "absent"
for key, value in MultiHash.from_data(data).digest().items():
assert getattr(c, key) == value
@given(strategies.skipped_contents_d())
def test_skipped_content_origin_is_str(skipped_content_d):
assert SkippedContent.from_dict(skipped_content_d)
skipped_content_d["origin"] = "http://path/to/origin"
assert SkippedContent.from_dict(skipped_content_d)
skipped_content_d["origin"] = Origin(url="http://path/to/origin")
with pytest.raises(ValueError, match="origin"):
SkippedContent.from_dict(skipped_content_d)
+# Revision
+
+
+def test_revision_extra_headers_no_headers():
+ rev_dict = revision_example.copy()
+ rev_dict.pop("id")
+ rev = Revision.from_dict(rev_dict)
+ rev_dict = attr.asdict(rev, recurse=False)
+
+ rev_model = Revision(**rev_dict)
+ assert rev_model.metadata is None
+ assert rev_model.extra_headers == ()
+
+ rev_dict["metadata"] = {
+ "something": "somewhere",
+ "some other thing": "stranger",
+ }
+ rev_model = Revision(**rev_dict)
+ assert rev_model.metadata == rev_dict["metadata"]
+ assert rev_model.extra_headers == ()
+
+
+def test_revision_extra_headers_with_headers():
+ rev_dict = revision_example.copy()
+ rev_dict.pop("id")
+ rev = Revision.from_dict(rev_dict)
+ rev_dict = attr.asdict(rev, recurse=False)
+ rev_dict["metadata"] = {
+ "something": "somewhere",
+ "some other thing": "stranger",
+ }
+ extra_headers = (
+ (b"header1", b"value1"),
+ (b"header2", b"42"),
+ (b"header3", b"should I?\u0000"),
+ (b"header1", b"again"),
+ )
+
+ rev_dict["extra_headers"] = extra_headers
+ rev_model = Revision(**rev_dict)
+ assert "extra_headers" not in rev_model.metadata
+ assert rev_model.extra_headers == extra_headers
+
+
+def test_revision_extra_headers_in_metadata():
+ rev_dict = revision_example.copy()
+ rev_dict.pop("id")
+ rev = Revision.from_dict(rev_dict)
+ rev_dict = attr.asdict(rev, recurse=False)
+ rev_dict["metadata"] = {
+ "something": "somewhere",
+ "some other thing": "stranger",
+ }
+
+ extra_headers = (
+ (b"header1", b"value1"),
+ (b"header2", b"42"),
+ (b"header3", b"should I?\u0000"),
+ (b"header1", b"again"),
+ )
+
+ # check the bw-compat init hook does the job
+ # i.e. extra_headers are given in the metadata field
+ rev_dict["metadata"]["extra_headers"] = extra_headers
+ rev_model = Revision(**rev_dict)
+ assert "extra_headers" not in rev_model.metadata
+ assert rev_model.extra_headers == extra_headers
+
+
+def test_revision_extra_headers_as_lists():
+ rev_dict = revision_example.copy()
+ rev_dict.pop("id")
+ rev = Revision.from_dict(rev_dict)
+ rev_dict = attr.asdict(rev, recurse=False)
+ rev_dict["metadata"] = {}
+
+ extra_headers = (
+ (b"header1", b"value1"),
+ (b"header2", b"42"),
+ (b"header3", b"should I?\u0000"),
+ (b"header1", b"again"),
+ )
+
+ # check the Revision.extra_headers converter turns the lists into tuples
+ rev_dict["extra_headers"] = [list(x) for x in extra_headers]
+ rev_model = Revision(**rev_dict)
+ assert "extra_headers" not in rev_model.metadata
+ assert rev_model.extra_headers == extra_headers
+
+
+def test_revision_extra_headers_type_error():
+ rev_dict = revision_example.copy()
+ rev_dict.pop("id")
+ rev = Revision.from_dict(rev_dict)
+ orig_rev_dict = attr.asdict(rev, recurse=False)
+ orig_rev_dict["metadata"] = {
+ "something": "somewhere",
+ "some other thing": "stranger",
+ }
+ extra_headers = (
+ ("header1", b"value1"),
+ (b"header2", 42),
+ ("header1", "again"),
+ )
+ # check headers one at a time
+ # if given as extra_header
+ for extra_header in extra_headers:
+ rev_dict = copy.deepcopy(orig_rev_dict)
+ rev_dict["extra_headers"] = (extra_header,)
+ with pytest.raises(AttributeTypeError):
+ Revision(**rev_dict)
+ # if given as metadata
+ for extra_header in extra_headers:
+ rev_dict = copy.deepcopy(orig_rev_dict)
+ rev_dict["metadata"]["extra_headers"] = (extra_header,)
+ with pytest.raises(AttributeTypeError):
+ Revision(**rev_dict)
+
+
+def test_revision_extra_headers_from_dict():
+ rev_dict = revision_example.copy()
+ rev_dict.pop("id")
+ rev_model = Revision.from_dict(rev_dict)
+ assert rev_model.metadata is None
+ assert rev_model.extra_headers == ()
+
+ rev_dict["metadata"] = {
+ "something": "somewhere",
+ "some other thing": "stranger",
+ }
+ rev_model = Revision.from_dict(rev_dict)
+ assert rev_model.metadata == rev_dict["metadata"]
+ assert rev_model.extra_headers == ()
+
+ extra_headers = (
+ (b"header1", b"value1"),
+ (b"header2", b"42"),
+ (b"header3", b"should I?\nmaybe\x00\xff"),
+ (b"header1", b"again"),
+ )
+ rev_dict["extra_headers"] = extra_headers
+ rev_model = Revision.from_dict(rev_dict)
+ assert "extra_headers" not in rev_model.metadata
+ assert rev_model.extra_headers == extra_headers
+
+
+def test_revision_extra_headers_in_metadata_from_dict():
+ rev_dict = revision_example.copy()
+ rev_dict.pop("id")
+
+ rev_dict["metadata"] = {
+ "something": "somewhere",
+ "some other thing": "stranger",
+ }
+ extra_headers = (
+ (b"header1", b"value1"),
+ (b"header2", b"42"),
+ (b"header3", b"should I?\nmaybe\x00\xff"),
+ (b"header1", b"again"),
+ )
+ # check the bw-compat init hook does the job
+ rev_dict["metadata"]["extra_headers"] = extra_headers
+ rev_model = Revision.from_dict(rev_dict)
+ assert "extra_headers" not in rev_model.metadata
+ assert rev_model.extra_headers == extra_headers
+
+
+def test_revision_extra_headers_as_lists_from_dict():
+ rev_dict = revision_example.copy()
+ rev_dict.pop("id")
+ rev_model = Revision.from_dict(rev_dict)
+ rev_dict["metadata"] = {
+ "something": "somewhere",
+ "some other thing": "stranger",
+ }
+ extra_headers = (
+ (b"header1", b"value1"),
+ (b"header2", b"42"),
+ (b"header3", b"should I?\nmaybe\x00\xff"),
+ (b"header1", b"again"),
+ )
+ # check Revision.extra_headers converter does the job
+ rev_dict["extra_headers"] = [list(x) for x in extra_headers]
+ rev_model = Revision.from_dict(rev_dict)
+ assert "extra_headers" not in rev_model.metadata
+ assert rev_model.extra_headers == extra_headers
+
+
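# A sketch of the behavior the extra_headers tests above pin down, inferred
# from their assertions (hypothetical helper, not the actual attrs init
# hook): an "extra_headers" key found inside metadata is popped out into the
# extra_headers attribute, and header pairs given as lists become tuples.
def _upgrade_revision_dict_sketch(d: dict) -> dict:
    d = dict(d)
    metadata = dict(d.get("metadata") or {})
    if "extra_headers" in metadata:
        d["extra_headers"] = metadata.pop("extra_headers")
        d["metadata"] = metadata
    d["extra_headers"] = tuple(tuple(h) for h in d.get("extra_headers", ()))
    return d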
# ID computation
def test_directory_model_id_computation():
dir_dict = directory_example.copy()
del dir_dict["id"]
dir_id = hash_to_bytes(directory_identifier(dir_dict))
dir_model = Directory.from_dict(dir_dict)
assert dir_model.id == dir_id
def test_revision_model_id_computation():
rev_dict = revision_example.copy()
del rev_dict["id"]
rev_id = hash_to_bytes(revision_identifier(rev_dict))
rev_model = Revision.from_dict(rev_dict)
assert rev_model.id == rev_id
def test_revision_model_id_computation_with_no_date():
"""We can have revision with date to None
"""
rev_dict = revision_example.copy()
rev_dict["date"] = None
rev_dict["committer_date"] = None
del rev_dict["id"]
rev_id = hash_to_bytes(revision_identifier(rev_dict))
rev_model = Revision.from_dict(rev_dict)
assert rev_model.date is None
assert rev_model.committer_date is None
assert rev_model.id == rev_id
def test_release_model_id_computation():
rel_dict = release_example.copy()
del rel_dict["id"]
rel_id = hash_to_bytes(release_identifier(rel_dict))
rel_model = Release.from_dict(rel_dict)
assert isinstance(rel_model.date, TimestampWithTimezone)
assert rel_model.id == hash_to_bytes(rel_id)
def test_snapshot_model_id_computation():
snp_dict = snapshot_example.copy()
del snp_dict["id"]
snp_id = hash_to_bytes(snapshot_identifier(snp_dict))
snp_model = Snapshot.from_dict(snp_dict)
assert snp_model.id == snp_id
@given(strategies.objects(split_content=True))
def test_object_type(objtype_and_obj):
obj_type, obj = objtype_and_obj
assert obj_type == obj.object_type
def test_object_type_is_final():
object_types = set()
def check_final(cls):
if hasattr(cls, "object_type"):
assert cls.object_type not in object_types
object_types.add(cls.object_type)
if cls.__subclasses__():
assert not hasattr(cls, "object_type")
for subcls in cls.__subclasses__():
check_final(subcls)
check_final(BaseModel)