Changeset View
Standalone View
swh/lister/maven/lister.py
- This file was added.
# Copyright (C) 2021 The Software Heritage developers | ||||||||||||||||||
# See the AUTHORS file at the top-level directory of this distribution | ||||||||||||||||||
# License: GNU General Public License version 3, or any later version | ||||||||||||||||||
# See top-level LICENSE file for more information | ||||||||||||||||||
from collections import defaultdict | ||||||||||||||||||
import logging | ||||||||||||||||||
from os import remove | ||||||||||||||||||
import re | ||||||||||||||||||
from tempfile import NamedTemporaryFile | ||||||||||||||||||
from typing import Any, Dict, Iterator, Optional | ||||||||||||||||||
from urllib.parse import urljoin | ||||||||||||||||||
import requests | ||||||||||||||||||
from tenacity.before_sleep import before_sleep_log | ||||||||||||||||||
from urllib3.util import parse_url | ||||||||||||||||||
import xmltodict | ||||||||||||||||||
from swh.lister.utils import throttling_retry | ||||||||||||||||||
from swh.scheduler.interface import SchedulerInterface | ||||||||||||||||||
from swh.scheduler.model import ListedOrigin | ||||||||||||||||||
from .. import USER_AGENT | ||||||||||||||||||
from ..pattern import CredentialsType, StatelessLister | ||||||||||||||||||
logger = logging.getLogger(__name__) | ||||||||||||||||||
RepoPage = Dict[str, Any] | ||||||||||||||||||
class MavenLister(StatelessLister[RepoPage]):
    """List origins from a Maven repository.

    Maven Central provides artifacts for Java builds.
    It includes POM files and source archives, which we download to get
    the source code of artifacts and links to their scm repository.

    This lister yields origins of types: git/svn/hg or whatever the Artifacts
    use as repository type, plus maven types for the maven loader (tgz, jar)."""

    LISTER_NAME = "maven"

    def __init__(
        self,
        scheduler: SchedulerInterface,
        url: str,
        index_url: Optional[str] = None,
        instance: Optional[str] = None,
        credentials: CredentialsType = None,
    ):
        """Lister class for Maven repositories.

        Args:
            url: main URL of the Maven repository, i.e. url of the base index
                used to fetch maven artifacts. For Maven central use
                https://repo1.maven.org/maven2/
            index_url: the URL to download the exported text indexes from.
                Would typically be a local host running the export docker image.
                See README.md in this directory for more information.
            instance: Name of maven instance. Defaults to url's network location
                if unset.
        """
        # Base URL used to build artifact download URLs.
        self.BASE_URL = url
        # URL of the exported text index; checked non-None in get_pages().
        self.INDEX_URL = index_url
        if instance is None:
            instance = parse_url(url).host
        super().__init__(
            scheduler=scheduler,
            credentials=credentials,
            url=url,
            instance=instance,
        )
        self.session = requests.Session()
        self.session.headers.update(
            {"Accept": "application/json", "User-Agent": USER_AGENT}
        )
@throttling_retry(before_sleep=before_sleep_log(logger, logging.WARNING)) | ||||||||||||||||||
def page_request(self, url: str, params: Dict[str, Any]) -> requests.Response: | ||||||||||||||||||
logger.info("Fetching URL %s with params %s", url, params) | ||||||||||||||||||
response = self.session.get(url, params=params) | ||||||||||||||||||
if response.status_code != 200: | ||||||||||||||||||
logger.warning( | ||||||||||||||||||
"Unexpected HTTP status code %s on %s: %s", | ||||||||||||||||||
response.status_code, | ||||||||||||||||||
response.url, | ||||||||||||||||||
response.content, | ||||||||||||||||||
) | ||||||||||||||||||
response.raise_for_status() | ||||||||||||||||||
return response | ||||||||||||||||||
def get_pages(self) -> Iterator[RepoPage]: | ||||||||||||||||||
""" Retrieve and parse exported maven indexes to | ||||||||||||||||||
identify all pom files and src archives. | ||||||||||||||||||
""" | ||||||||||||||||||
# Example of returned RepoPage's: | ||||||||||||||||||
# [ | ||||||||||||||||||
# { | ||||||||||||||||||
# "type": "jar", | ||||||||||||||||||
# "url": "https://maven.xwiki.org/..-5.4.2-sources.jar", | ||||||||||||||||||
# "time": 1626109619335, | ||||||||||||||||||
# "gid": "org.xwiki.platform", | ||||||||||||||||||
# "aid": "xwiki-platform-wikistream-events-xwiki", | ||||||||||||||||||
# "version": "5.4.2" | ||||||||||||||||||
# }, | ||||||||||||||||||
# { | ||||||||||||||||||
# "type": "scm", | ||||||||||||||||||
# "url": "scm:git:git://github.com/openengsb/openengsb-framework.git", | ||||||||||||||||||
# "project": "openengsb-framework", | ||||||||||||||||||
# }, | ||||||||||||||||||
# ... | ||||||||||||||||||
# ] | ||||||||||||||||||
# Download the main text index file. | ||||||||||||||||||
logger.info("Downloading text index file..") | ||||||||||||||||||
text_file = NamedTemporaryFile(delete=False) | ||||||||||||||||||
assert self.INDEX_URL is not None | ||||||||||||||||||
response = requests.get(self.INDEX_URL, stream=True) | ||||||||||||||||||
for chunk in response.iter_content(chunk_size=1024): | ||||||||||||||||||
text_file.write(chunk) | ||||||||||||||||||
text_file.close() | ||||||||||||||||||
douarddaUnsubmitted Done Inline Actionswhy not use the context manager API of the NamedTemporaryFile here? douardda: why not use the context manager API of the `NamedTemporaryFile` here? | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsAs far as i can remember, because I wanted to stream it in order to reduce memory footprint: the download can be huge. borisbaldassari: As far as i can remember, because I wanted to stream it in order to reduce memory footprint… | ||||||||||||||||||
douarddaUnsubmitted Done Inline ActionsThe context manager is unrelated with loading the file in RAM or not. douardda: The context manager is unrelated with loading the file in RAM or not. | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsDone. borisbaldassari: Done. | ||||||||||||||||||
logger.debug(f"File is {text_file.name}") | ||||||||||||||||||
# Prepare regex's to parse index exports. | ||||||||||||||||||
re_val = re.compile(r"^\s{4}value ([^|]+)\|([^|]+)\|([^|]+)\|([^|]+)\|([^|]+)$") | ||||||||||||||||||
re_time = re.compile( | ||||||||||||||||||
r"^\s{4}value ([^|]+)\|([^|]+)\|([^|]+)\|([^|]+)\|([^|]+)" | ||||||||||||||||||
+ r"\|([^|]+)\|([^|]+)$" | ||||||||||||||||||
) | ||||||||||||||||||
douarddaUnsubmitted Done Inline ActionsPlease document a bit these regex. Also please prefer named match groups when possible (?P<name>...) which helps to "self-document" regexes. douardda: Please document a bit these regex. Also please prefer named match groups when possible `(? | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsYou're definitely right. Fixed, thanks. borisbaldassari: You're definitely right. Fixed, thanks. | ||||||||||||||||||
re_src = re.compile(r".*src.*") | ||||||||||||||||||
# Read the index text export and get URLs and SCMs. | ||||||||||||||||||
out_pom: Dict = defaultdict(dict) | ||||||||||||||||||
out_src: Dict = defaultdict(dict) | ||||||||||||||||||
with open(text_file.name, mode="rt") as file_txt: | ||||||||||||||||||
line = file_txt.readline() | ||||||||||||||||||
douarddaUnsubmitted Done Inline Actionsa python text file object is iterable, so one would prefer the form: for line in file_txt: [...] douardda: a python text file object is iterable, so one would prefer the form:
```
for line in… | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsYes, I'm shameful. borisbaldassari: Yes, I'm shameful.
The reason for this is there is a second readline later on in the loop, and… | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsFixed with refactoring (see below). borisbaldassari: Fixed with refactoring (see below). | ||||||||||||||||||
url_src = None | ||||||||||||||||||
while line != "": | ||||||||||||||||||
douarddaUnsubmitted Done Inline Actionswhile line: is enough here douardda: `while line:` is enough here | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsFixed, thanks. borisbaldassari: Fixed, thanks. | ||||||||||||||||||
m_val = re_val.match(line) | ||||||||||||||||||
if m_val is not None: | ||||||||||||||||||
(gid, aid, version, classifier, ext) = m_val.group(1, 2, 3, 4, 5) | ||||||||||||||||||
ext = ext.strip() | ||||||||||||||||||
douarddaUnsubmitted Done Inline Actionsthis could be handled by the regex itself douardda: this could be handled by the regex itself | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsYes, it could. But it seems to me that readability is better this way. borisbaldassari: Yes, it could. But it seems to me that readability is better this way.
As you say. Want me to… | ||||||||||||||||||
path = "/".join(gid.split(".")) | ||||||||||||||||||
if classifier == "NA" and ext == "pom": | ||||||||||||||||||
url_pom = urljoin( | ||||||||||||||||||
self.BASE_URL, | ||||||||||||||||||
path | ||||||||||||||||||
+ "/" | ||||||||||||||||||
+ aid | ||||||||||||||||||
+ "/" | ||||||||||||||||||
+ version | ||||||||||||||||||
+ "/" | ||||||||||||||||||
+ aid | ||||||||||||||||||
+ "-" | ||||||||||||||||||
+ version | ||||||||||||||||||
+ "." | ||||||||||||||||||
+ ext, | ||||||||||||||||||
) | ||||||||||||||||||
douarddaUnsubmitted Done Inline Actionswhy use urljoin while hand-building the URL by concatenation of strings with '/'? I mean urljoin does support multiple arguments, like: urljoin(self.BASE_URL, path, aid, version, f"{aid}-{version}.{ext}") douardda: why use `urljoin` while hand-building the URL by concatenation of strings with '/'?
I mean… | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsHum. It yields [1] when I try, and that's not what I've read [2]. [1] "TypeError: urljoin() takes from 2 to 3 positional arguments but 6 were given" Am I missing something? Note: I have fixed the ugliness of if by using f-strings, and it looks a lot better. borisbaldassari: Hum. It yields [1] when I try, and that's not what I've read [2].
[1] "TypeError: urljoin()… | ||||||||||||||||||
douarddaUnsubmitted Done Inline ActionsNo you are right, my mistake, I was assuming urljoin has a decent API, which is not the case. sorry. douardda: No you are right, my mistake, I was assuming urljoin has a decent API, which is not the case. | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsI did assume that too at some point, rings a bell. yeah. borisbaldassari: I did assume that too at some point, rings a bell. yeah. | ||||||||||||||||||
out_pom[url_pom] = aid | ||||||||||||||||||
if (classifier == "sources" or re_src.match(classifier)) and ( | ||||||||||||||||||
ext == "zip" or ext == "jar" | ||||||||||||||||||
douarddaUnsubmitted Done Inline Actionsand ext in ("zip", "jar") douardda: `and ext in ("zip", "jar")` | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline Actionsext in (a, b) => Far more elegant, of course. Fixed, thanks. uppercase => Yes, good point. As a matter of fact there is no uppercase extensions on maven central (just checked) but I'm not sure why (part of the maven convention, maybe?) and that can surely happen. borisbaldassari: ext in (a, b) => Far more elegant, of course. Fixed, thanks.
uppercase => Yes, good point. As… | ||||||||||||||||||
douarddaUnsubmitted Done Inline ActionsBTW, what about an upper case ext value? douardda: BTW, what about an upper case `ext` value? | ||||||||||||||||||
): | ||||||||||||||||||
url_src = urljoin( | ||||||||||||||||||
self.BASE_URL, | ||||||||||||||||||
path | ||||||||||||||||||
+ "/" | ||||||||||||||||||
+ aid | ||||||||||||||||||
+ "/" | ||||||||||||||||||
+ version | ||||||||||||||||||
+ "/" | ||||||||||||||||||
+ aid | ||||||||||||||||||
+ "-" | ||||||||||||||||||
+ version | ||||||||||||||||||
+ "-" | ||||||||||||||||||
+ classifier | ||||||||||||||||||
+ "." | ||||||||||||||||||
+ ext, | ||||||||||||||||||
) | ||||||||||||||||||
out_src[url_src]["g"] = gid | ||||||||||||||||||
out_src[url_src]["a"] = aid | ||||||||||||||||||
out_src[url_src]["v"] = version | ||||||||||||||||||
douarddaUnsubmitted Done Inline Actionsout_src[url_src] = {"g": gid, "a": aid, "v": version} Not sure out_src and out_pom really need to be defaultdict actually. douardda: ```
out_src[url_src] = {"g": gid, "a": aid, "v": version}
```
Not sure `out_src` and `out_pom`… | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsFor uniqueness of entries (some entries tend to appear a few times). Is there a way to do it better with Python? borisbaldassari: For uniqueness of entries (some entries tend to appear a few times). Is there a way to do it… | ||||||||||||||||||
douarddaUnsubmitted Done Inline ActionsI don't see how using defaultdict is related with this uniqueness question. douardda: I don't see how using defaultdict is related with this uniqueness question.
What does it bring… | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsOk, I get it. That's probably an old Perl habit to explicitly have hashes of hashes. borisbaldassari: Ok, I get it. That's probably an old Perl habit to explicitly have hashes of hashes.
Fixed to… | ||||||||||||||||||
else: | ||||||||||||||||||
m_time = re_time.match(line) | ||||||||||||||||||
if m_time is not None and url_src is not None: | ||||||||||||||||||
time = m_time.group(2) | ||||||||||||||||||
out_src[url_src]["t"] = int(time) | ||||||||||||||||||
url_src = None | ||||||||||||||||||
line = file_txt.readline() | ||||||||||||||||||
# Clean up the download afterwards (may be huge). | ||||||||||||||||||
remove(text_file.name) | ||||||||||||||||||
douarddaUnsubmitted Done Inline ActionsNow that I read this, why not do the processing on the flight? Why bother storing the file on disk then read it back line by line to do a bunch of regex? requests does provide a nice API for this: douardda: Now that I read this, why not do the processing on the flight? Why bother storing the file on… | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsShort answer: because I didn't know of iter_*lines*. Oh god. :-) borisbaldassari: Short answer: because I didn't know of iter_*lines*. Oh god. :-)
Thanks!! | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsThat's exactly why we need peers: to get out of our own train of thought. Ok, fixed as you proposed. The file is now parsed as it is downloaded, and that solved a few other points. When the design is broken, everything looks weird, right? Thanks a lot for the feedback! borisbaldassari: That's exactly why we need peers: to get out of our own train of thought.
Ok, fixed as you… | ||||||||||||||||||
logger.info(f"Found {len(out_pom)} poms and {len(out_src)} src items.") | ||||||||||||||||||
# Yield all src archives found. | ||||||||||||||||||
for src in out_src.keys(): | ||||||||||||||||||
douarddaUnsubmitted Done Inline Actionsno need for the .keys() here, iterating on a dict is iterating on its keys. for src, val in out_src.items(): ... yield { ... "time": val["t"], ... BTW, if you use proper keys in out_src[*] (i.e. "time" instead of "t" and so on) you can just use it as is here: yield { "type": "jar", "url": src, **val } douardda: no need for the `.keys()` here, iterating on a dict is iterating on its keys.
But more… | ||||||||||||||||||
logger.debug(f"* Yielding jar {src}.") | ||||||||||||||||||
yield { | ||||||||||||||||||
"type": "jar", | ||||||||||||||||||
"url": src, | ||||||||||||||||||
"time": out_src[src]["t"], | ||||||||||||||||||
"gid": out_src[src]["g"], | ||||||||||||||||||
"aid": out_src[src]["a"], | ||||||||||||||||||
"version": out_src[src]["v"], | ||||||||||||||||||
} | ||||||||||||||||||
# Now fetch pom files and scan them for scm info. | ||||||||||||||||||
logger.info("Fetching poms..") | ||||||||||||||||||
Done Inline Actionsthat case is missing from the example in the comment above vlorentz: that case is missing from the example in the comment above | ||||||||||||||||||
Done Inline ActionsThat's a very good point. This kind of metadata is useful for the jar loader, but not so much for the other types of scm loaders (scm_type, which could be about anything). Do we want to keep them? borisbaldassari: That's a very good point. This kind of metadata is useful for the jar loader, but not so much… | ||||||||||||||||||
out_pom_src = {} | ||||||||||||||||||
for pom in out_pom.keys(): | ||||||||||||||||||
text = self.page_request(pom, {}) | ||||||||||||||||||
try: | ||||||||||||||||||
project = xmltodict.parse(text.content.decode()) | ||||||||||||||||||
douarddaUnsubmitted Done Inline ActionsI'm always nervous when I see a bytes.decode() called on some content coming from The Workd™. Is there any change of getting some encoding error here? douardda: I'm always nervous when I see a `bytes.decode()` called on some content coming from The Workd™. | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsThanks for spotting that, it needs some consideration. Theoretically, no: we're decoding the content of a file downloaded from a local server (transport errors should be ok), which is output by clue asynchronously (i.e. file is not served if the process fails), so corruption or weird content is unlikely, but.. it's still clearly out of our control. So yes, we never know and you're definitely right. OTOH we can't go on without that data, and I have the feeling we should rather fail (throw an exception about decoding and end execution) than pass silently (adding errors='ignore' to decode could do that). Would you like to try & catch, and then throw a specific error? What would you recommend? => added errors='ignore' so the list will simply be empty. borisbaldassari: Thanks for spotting that, it needs some consideration.
Theoretically, no: we're decoding the… | ||||||||||||||||||
if "scm" in project["project"]: | ||||||||||||||||||
if "connection" in project["project"]["scm"]: | ||||||||||||||||||
scm = project["project"]["scm"]["connection"] | ||||||||||||||||||
gid = project["project"]["groupId"] | ||||||||||||||||||
aid = project["project"]["artifactId"] | ||||||||||||||||||
out_pom_src[scm] = f"{gid}.{aid}" | ||||||||||||||||||
else: | ||||||||||||||||||
logger.debug(f"No scm.connection in pom {pom}") | ||||||||||||||||||
else: | ||||||||||||||||||
logger.debug(f"No scm in pom {pom}") | ||||||||||||||||||
except xmltodict.expat.ExpatError as error: | ||||||||||||||||||
logger.info(f"Could not parse POM {pom} XML: {error}. Next.") | ||||||||||||||||||
# Yield all src archives found. | ||||||||||||||||||
for src in out_pom_src.keys(): | ||||||||||||||||||
douarddaUnsubmitted Done Inline Actionsuse items() on the dict: for src, project in out_pom.items(): yield {"type": "scm", "url": src, "project": project} BTW, why build the dict to yield its values just after building it? Why not yielding values directly from the for pom in out_pom loop? douardda: use `items()` on the dict:
```
for src, project in out_pom.items():
yield {"type"… | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsVery good point, moved it to the for pom in out_pom loop. Thanks! borisbaldassari: Very good point, moved it to the `for pom in out_pom` loop. Thanks! | ||||||||||||||||||
logger.debug(f"* Yielding scm {src}.") | ||||||||||||||||||
yield { | ||||||||||||||||||
"type": "scm", | ||||||||||||||||||
"url": src, | ||||||||||||||||||
"project": out_pom_src[src], | ||||||||||||||||||
} | ||||||||||||||||||
def get_origins_from_page(self, page: RepoPage) -> Iterator[ListedOrigin]: | ||||||||||||||||||
"""Convert a page of Maven repositories into a list of ListedOrigins. | ||||||||||||||||||
""" | ||||||||||||||||||
assert self.lister_obj.id is not None | ||||||||||||||||||
if page["type"] == "scm": | ||||||||||||||||||
# If origin is a scm url: detect scm type and yield. | ||||||||||||||||||
# Note that the official format is: | ||||||||||||||||||
# scm:git:git://github.com/openengsb/openengsb-framework.git | ||||||||||||||||||
# but many, many projects directly put the repo url, so we have to | ||||||||||||||||||
# detect the content to match it properly. | ||||||||||||||||||
re_scm = re.compile(r"^scm:([^:]+):(.*)$") | ||||||||||||||||||
m_scm = re_scm.match(page["url"]) | ||||||||||||||||||
douarddaUnsubmitted Done Inline Actionsno need to compile the regex if it's used only once. Just use the match function directly: m_scm = re.match(r"^scm:([^:]+):(.*)$", page["url"]) Also please prefer named group matching (https://docs.python.org/3/library/re.html#index-17) douardda: no need to compile the regex if it's used only once. Just use the `match` function directly… | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsRight, fixed, thank you! :-) borisbaldassari: Right, fixed, thank you! :-) | ||||||||||||||||||
if m_scm is not None: | ||||||||||||||||||
scm_type = m_scm.group(1) | ||||||||||||||||||
scm_url = m_scm.group(2) | ||||||||||||||||||
origin = ListedOrigin( | ||||||||||||||||||
lister_id=self.lister_obj.id, | ||||||||||||||||||
url=scm_url, # or page["url"], | ||||||||||||||||||
douarddaUnsubmitted Done Inline Actionswhat's the comment for? douardda: what's the comment for? | ||||||||||||||||||
borisbaldassariAuthorUnsubmitted Done Inline ActionsRemoved. borisbaldassari: Removed. | ||||||||||||||||||
visit_type=scm_type, | ||||||||||||||||||
) | ||||||||||||||||||
yield origin | ||||||||||||||||||
else: | ||||||||||||||||||
re_scm = re.compile(r".*\.git$") | ||||||||||||||||||
m_scm = re_scm.match(page["url"]) | ||||||||||||||||||
if m_scm is not None: | ||||||||||||||||||
origin = ListedOrigin( | ||||||||||||||||||
lister_id=self.lister_obj.id, url=page["url"], visit_type="git", | ||||||||||||||||||
) | ||||||||||||||||||
yield origin | ||||||||||||||||||
douarddaUnsubmitted Done Inline Actions
do you really need a regex to check for a '.git' at the end? I means page["url"].enndswith(".git") should do the trick here. douardda: do you really need a regex to check for a '.git' at the end? I means page["url"].enndswith(". | ||||||||||||||||||
else: | ||||||||||||||||||
# Origin is a source archive: | ||||||||||||||||||
origin = ListedOrigin( | ||||||||||||||||||
lister_id=self.lister_obj.id, | ||||||||||||||||||
url=page["url"], | ||||||||||||||||||
visit_type=page["type"], | ||||||||||||||||||
# last_update=parse_packaged_date(package_info), | ||||||||||||||||||
douarddaUnsubmitted Done Inline Actionswhy the commented line? douardda: why the commented line? | ||||||||||||||||||
extra_loader_arguments={ | ||||||||||||||||||
"artifacts": [ | ||||||||||||||||||
{ | ||||||||||||||||||
"time": page["time"], | ||||||||||||||||||
"gid": page["gid"], | ||||||||||||||||||
"aid": page["aid"], | ||||||||||||||||||
"version": page["version"], | ||||||||||||||||||
} | ||||||||||||||||||
] | ||||||||||||||||||
}, | ||||||||||||||||||
) | ||||||||||||||||||
yield origin |
Why is docker involved here? I see no other mention of a docker stuff anywhere in this diff.
I don't understand what this "index_url" is and how it is supposed to be used.