diff --git a/swh/web/assets/src/bundles/revision/diff-utils.js b/swh/web/assets/src/bundles/revision/diff-utils.js
index c827a949..3cc80f18 100644
--- a/swh/web/assets/src/bundles/revision/diff-utils.js
+++ b/swh/web/assets/src/bundles/revision/diff-utils.js
@@ -1,515 +1,515 @@
/**
* Copyright (C) 2018 The Software Heritage developers
* See the AUTHORS file at the top-level directory of this distribution
* License: GNU Affero General Public License version 3, or any later version
* See top-level LICENSE file for more information
*/
import 'waypoints/lib/jquery.waypoints';
import {staticAsset} from 'utils/functions';
// path to static spinner asset
let swhSpinnerSrc = staticAsset('img/swh-spinner.gif');
// the list of changed files in the revision
let changes = null;
// number of changed files in the revision
let nbChangedFiles = 0;
// to track the number of already computed file diffs
let nbDiffsComputed = 0;
// the "no newline at end of file" marker, as rendered by GitHub
let noNewLineMarker = ``;
// to track the total number of added lines in files diffs
let nbAdditions = 0;
// to track the total number of deleted lines in files diffs
let nbDeletions = 0;
// to track the already computed diffs by id
let computedDiffs = {};
// map a diff id to its computation url
let diffsUrls = {};
// to check if a DOM element is in the viewport
function isInViewport(elt) {
let elementTop = $(elt).offset().top;
let elementBottom = elementTop + $(elt).outerHeight();
let viewportTop = $(window).scrollTop();
let viewportBottom = viewportTop + $(window).height();
return elementBottom > viewportTop && elementTop < viewportBottom;
}
// to format the diff line numbers
function formatDiffLineNumbers(fromLine, toLine, maxNumberChars) {
let ret = '';
if (fromLine != null) {
for (let i = 0; i < (maxNumberChars - fromLine.length); ++i) {
ret += ' ';
}
ret += fromLine;
}
if (fromLine != null && toLine != null) {
ret += ' ';
}
if (toLine != null) {
for (let i = 0; i < (maxNumberChars - toLine.length); ++i) {
ret += ' ';
}
ret += toLine;
}
return ret;
}
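// a quick sketch of the formatting above, e.g. with maxNumberChars = 3:
//   formatDiffLineNumbers('3', '42', 3) -> '  3  42'
// (each number is left-padded to maxNumberChars, separated by a space)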
// to compute a diff and process it for display
export function computeDiff(diffUrl, diffId) {
// force diff computation?
let force = diffUrl.indexOf('force=true') !== -1;
// if no forced computation is requested and the diff is already computed, do nothing
if (!force && computedDiffs.hasOwnProperty(diffId)) {
return;
}
// mark diff computation as already requested
computedDiffs[diffId] = true;
$(`#${diffId}-loading`).css('visibility', 'visible');
// set spinner visible while requesting diff
$(`#${diffId}-loading`).css('display', 'block');
$(`#${diffId}-highlightjs`).css('display', 'none');
// request diff computation and process it
fetch(diffUrl)
.then(response => response.json())
.then(data => {
// increment number of computed diffs
++nbDiffsComputed;
// toggle the 'Compute all diffs' button if all diffs have been computed
if (nbDiffsComputed === changes.length) {
$('#swh-compute-all-diffs').addClass('active');
}
// Large diffs (> threshold) are not automatically computed,
// add a button to force its computation
if (data.diff_str.indexOf('Large diff') === 0) {
$(`#${diffId}`)[0].innerHTML = data.diff_str +
` `;
setDiffVisible(diffId);
} else if (data.diff_str.indexOf('@@') !== 0) {
$(`#${diffId}`).text(data.diff_str);
setDiffVisible(diffId);
} else {
// prepare code highlighting
- $(`.${diffId}`).removeClass('nohighlight-swh');
+ $(`.${diffId}`).removeClass('nohighlight');
$(`.${diffId}`).addClass(data.language);
// set unified diff text
$(`#${diffId}`).text(data.diff_str);
// code highlighting for unified diff
$(`#${diffId}`).each((i, block) => {
hljs.highlightBlock(block);
hljs.lineNumbersBlock(block);
});
// hljs.lineNumbersBlock is asynchronous so we have to postpone further
// processing by adding it at the end of the current js event queue
setTimeout(() => {
// process unified diff lines in order to generate side-by-side diffs text
// but also compute line numbers for unified and side-by-side diffs
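// a hunk header of the form '@@ -a,b +c,d @@' gives the start line (a)
// and number of lines (b) on the from side, and (c, d) on the to side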
let linesInfoRegExp = new RegExp(/^@@ -(\d+),(\d+) \+(\d+),(\d+) @@$/gm);
let baseFromLine = '';
let baseToLine = '';
let fromToLines = [];
let fromLines = [];
let toLines = [];
let maxNumberChars = 0;
let diffFromStr = '';
let diffToStr = '';
let linesOffset = 0;
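// linesOffset tracks how many more lines have been emitted on the
// from side than on the to side within the current hunk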
$(`#${diffId} .hljs-ln-numbers`).each((i, lnElt) => {
let lnText = lnElt.nextSibling.innerText;
let linesInfo = linesInfoRegExp.exec(lnText);
let fromLine = '';
let toLine = '';
// lines info parsed from the diff output
if (linesInfo) {
baseFromLine = parseInt(linesInfo[1]) - 1;
baseToLine = parseInt(linesInfo[3]) - 1;
linesOffset = 0;
diffFromStr += (lnText + '\n');
diffToStr += (lnText + '\n');
fromLines.push('');
toLines.push('');
// line removed in the from file
} else if (lnText.length > 0 && lnText[0] === '-') {
baseFromLine = baseFromLine + 1;
fromLine = baseFromLine.toString();
fromLines.push(fromLine);
++nbDeletions;
diffFromStr += (lnText + '\n');
++linesOffset;
// line added in the to file
} else if (lnText.length > 0 && lnText[0] === '+') {
baseToLine = baseToLine + 1;
toLine = baseToLine.toString();
toLines.push(toLine);
++nbAdditions;
diffToStr += (lnText + '\n');
--linesOffset;
// line present in both files
} else {
baseFromLine = baseFromLine + 1;
baseToLine = baseToLine + 1;
fromLine = baseFromLine.toString();
toLine = baseToLine.toString();
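// pad the side that emitted fewer lines with blanks so that both
// side-by-side views stay aligned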
for (let j = 0; j < Math.abs(linesOffset); ++j) {
if (linesOffset > 0) {
diffToStr += '\n';
toLines.push('');
} else {
diffFromStr += '\n';
fromLines.push('');
}
}
linesOffset = 0;
diffFromStr += (lnText + '\n');
diffToStr += (lnText + '\n');
toLines.push(toLine);
fromLines.push(fromLine);
}
if (!baseFromLine) {
fromLine = '';
}
if (!baseToLine) {
toLine = '';
}
fromToLines[i] = [fromLine, toLine];
maxNumberChars = Math.max(maxNumberChars, fromLine.length);
maxNumberChars = Math.max(maxNumberChars, toLine.length);
});
// set side-by-side diffs text
$(`#${diffId}-from`).text(diffFromStr);
$(`#${diffId}-to`).text(diffToStr);
// code highlighting for side-by-side diffs
$(`#${diffId}-from, #${diffId}-to`).each((i, block) => {
hljs.highlightBlock(block);
hljs.lineNumbersBlock(block);
});
// hljs.lineNumbersBlock is asynchronous so we have to postpone further
// processing by adding it at the end of the current js event queue
setTimeout(() => {
// diff highlighting for added/removed lines on top of code highlighting
$(`.${diffId} .hljs-ln-numbers`).each((i, lnElt) => {
let lnText = lnElt.nextSibling.innerText;
let linesInfo = linesInfoRegExp.exec(lnText);
if (linesInfo) {
$(lnElt).parent().addClass('swh-diff-lines-info');
let linesInfoText = $(lnElt).parent().find('.hljs-ln-code .hljs-ln-line').text();
$(lnElt).parent().find('.hljs-ln-code .hljs-ln-line').children().remove();
$(lnElt).parent().find('.hljs-ln-code .hljs-ln-line').text('');
$(lnElt).parent().find('.hljs-ln-code .hljs-ln-line').append(`${linesInfoText}`);
} else if (lnText.length > 0 && lnText[0] === '-') {
$(lnElt).parent().addClass('swh-diff-removed-line');
} else if (lnText.length > 0 && lnText[0] === '+') {
$(lnElt).parent().addClass('swh-diff-added-line');
}
});
// set line numbers for unified diff
$(`#${diffId} .hljs-ln-numbers`).each((i, lnElt) => {
$(lnElt).children().attr('data-line-number',
formatDiffLineNumbers(fromToLines[i][0], fromToLines[i][1],
maxNumberChars));
});
// set line numbers for the from side-by-side diff
$(`#${diffId}-from .hljs-ln-numbers`).each((i, lnElt) => {
$(lnElt).children().attr('data-line-number',
formatDiffLineNumbers(fromLines[i], null,
maxNumberChars));
});
// set line numbers for the to side-by-side diff
$(`#${diffId}-to .hljs-ln-numbers`).each((i, lnElt) => {
$(lnElt).children().attr('data-line-number',
formatDiffLineNumbers(null, toLines[i],
maxNumberChars));
});
// final processing steps:
// - remove the '+' and '-' at the beginning of the diff lines
// from code highlighting
// - add the "no newline at end of file" marker if needed
$(`.${diffId} .hljs-ln-line`).each((i, lnElt) => {
if (lnElt.firstChild) {
if (lnElt.firstChild.nodeName !== '#text') {
let lineText = lnElt.firstChild.innerHTML;
if (lineText[0] === '-' || lineText[0] === '+') {
lnElt.firstChild.innerHTML = lineText.substr(1);
let newTextNode = document.createTextNode(lineText[0]);
$(lnElt).prepend(newTextNode);
}
}
$(lnElt).contents().filter((i, elt) => {
return elt.nodeType === 3; // Node.TEXT_NODE
}).each((i, textNode) => {
let swhNoNewLineMarker = '[swh-no-nl-marker]';
if (textNode.textContent.indexOf(swhNoNewLineMarker) !== -1) {
textNode.textContent = textNode.textContent.replace(swhNoNewLineMarker, '');
$(lnElt).append($(noNewLineMarker));
}
});
}
});
// hide the diff mode switch button when diffs could not be generated
if (data.diff_str.indexOf('Diffs are not generated for non textual content') !== 0) {
$(`#panel_${diffId} .diff-styles`).css('visibility', 'visible');
}
setDiffVisible(diffId);
});
});
}
});
}
function setDiffVisible(diffId) {
// set the unified diff visible by default
$(`#${diffId}-loading`).css('display', 'none');
$(`#${diffId}-highlightjs`).css('display', 'block');
// update displayed counters
$('#swh-revision-lines-added').text(`${nbAdditions} additions`);
$('#swh-revision-lines-deleted').text(`${nbDeletions} deletions`);
$('#swh-nb-diffs-computed').text(nbDiffsComputed);
// refresh the waypoints triggering diffs computation as
// the DOM layout has been updated
Waypoint.refreshAll();
}
// to compute all visible diffs in the viewport
function computeVisibleDiffs() {
$('.swh-file-diff-panel').each((i, elt) => {
if (isInViewport(elt)) {
let diffId = elt.id.replace('panel_', '');
computeDiff(diffsUrls[diffId], diffId);
}
});
}
function genDiffPanel(diffData) {
let diffPanelTitle = diffData.path;
if (diffData.type === 'rename') {
diffPanelTitle = `${diffData.from_path} → ${diffData.to_path}`;
}
let diffPanelHtml =
`
`;
return diffPanelHtml;
}
// setup waypoints to request diffs computation on the fly while scrolling
function setupWaypoints() {
for (let i = 0; i < changes.length; ++i) {
let diffData = changes[i];
// create a waypoint that will trigger diff computation when
// the top of the diff panel hits the bottom of the viewport
$(`#panel_${diffData.id}`).waypoint({
handler: function() {
if (isInViewport(this.element)) {
let diffId = this.element.id.replace('panel_', '');
computeDiff(diffsUrls[diffId], diffId);
this.destroy();
}
},
offset: '100%'
});
// create a waypoint that will trigger diff computation when
// the bottom of the diff panel hits the top of the viewport
$(`#panel_${diffData.id}`).waypoint({
handler: function() {
if (isInViewport(this.element)) {
let diffId = this.element.id.replace('panel_', '');
computeDiff(diffsUrls[diffId], diffId);
this.destroy();
}
},
offset: function() {
return -$(this.element).height();
}
});
}
Waypoint.refreshAll();
}
// callback to switch from side-by-side diff to unified one
export function showUnifiedDiff(event, diffId) {
$(`#${diffId}-splitted-diff`).css('display', 'none');
$(`#${diffId}-unified-diff`).css('display', 'block');
}
// callback to switch from unified diff to side-by-side one
export function showSplittedDiff(event, diffId) {
$(`#${diffId}-unified-diff`).css('display', 'none');
$(`#${diffId}-splitted-diff`).css('display', 'block');
}
// callback when the user clicks on the 'Compute all diffs' button
export function computeAllDiffs(event) {
$(event.currentTarget).addClass('active');
for (let diffId in diffsUrls) {
if (diffsUrls.hasOwnProperty(diffId)) {
computeDiff(diffsUrls[diffId], diffId);
}
}
event.stopPropagation();
}
export async function initRevisionDiff(revisionMessageBody, diffRevisionUrl) {
await import(/* webpackChunkName: "highlightjs" */ 'utils/highlightjs');
// callback when the 'Changes' tab is activated
$(document).on('shown.bs.tab', 'a[data-toggle="tab"]', e => {
if (e.currentTarget.text.trim() === 'Changes') {
$('#readme-panel').css('display', 'none');
if (changes) {
return;
}
// request computation of revision file changes list
// when navigating to the 'Changes' tab and add diff panels
// to the DOM when receiving the result
fetch(diffRevisionUrl)
.then(response => response.json())
.then(data => {
changes = data.changes;
nbChangedFiles = data.total_nb_changes;
let changedFilesText = `${nbChangedFiles} changed file`;
if (nbChangedFiles !== 1) {
changedFilesText += 's';
}
$('#swh-revision-changed-files').text(changedFilesText);
$('#swh-total-nb-diffs').text(changes.length);
$('#swh-revision-changes-list pre')[0].innerHTML = data.changes_msg;
$('#swh-revision-changes-loading').css('display', 'none');
$('#swh-revision-changes-list pre').css('display', 'block');
$('#swh-compute-all-diffs').css('visibility', 'visible');
$('#swh-revision-changes-list').removeClass('in');
if (nbChangedFiles > changes.length) {
$('#swh-too-large-revision-diff').css('display', 'block');
$('#swh-nb-loaded-diffs').text(changes.length);
}
for (let i = 0; i < changes.length; ++i) {
let diffData = changes[i];
diffsUrls[diffData.id] = diffData.diff_url;
$('#swh-revision-diffs').append(genDiffPanel(diffData));
}
setupWaypoints();
computeVisibleDiffs();
});
} else if (e.currentTarget.text.trim() === 'Files') {
$('#readme-panel').css('display', 'block');
}
});
$(document).ready(() => {
if (revisionMessageBody.length > 0) {
$('#swh-revision-message').addClass('in');
} else {
$('#swh-collapse-revision-message').attr('data-toggle', '');
}
let $root = $('html, body');
// callback when the user requests to scroll on a specific diff or back to top
$('#swh-revision-changes-list a[href^="#"], #back-to-top a[href^="#"]').click(e => {
let href = $.attr(e.currentTarget, 'href');
// disable waypoints while scrolling as we do not want to
// launch computation of diffs the user is not interested in
// (file changes list can be large)
Waypoint.disableAll();
$root.animate(
{
scrollTop: $(href).offset().top
},
{
duration: 500,
complete: () => {
window.location.hash = href;
// enable waypoints back after scrolling
Waypoint.enableAll();
// compute diffs visible in the viewport
computeVisibleDiffs();
}
});
return false;
});
});
}
diff --git a/swh/web/assets/src/bundles/webapp/code-highlighting.js b/swh/web/assets/src/bundles/webapp/code-highlighting.js
index 1bbf88de..c43951df 100644
--- a/swh/web/assets/src/bundles/webapp/code-highlighting.js
+++ b/swh/web/assets/src/bundles/webapp/code-highlighting.js
@@ -1,121 +1,113 @@
/**
* Copyright (C) 2018 The Software Heritage developers
* See the AUTHORS file at the top-level directory of this distribution
* License: GNU Affero General Public License version 3, or any later version
* See top-level LICENSE file for more information
*/
export async function highlightCode(showLineNumbers = true) {
await import(/* webpackChunkName: "highlightjs" */ 'utils/highlightjs');
- // empty hljs language definition
- function noHighlight(hljs) {
- return {};
- }
- // just a trick to get line numbers working when no highlight
- // has to be performed
- hljs.registerLanguage('nohighlight-swh', noHighlight);
-
// keep track of the first highlighted line
let firstHighlightedLine = null;
// highlighting color
let lineHighlightColor = 'rgb(193, 255, 193)';
// function to highlight a line
function highlightLine(i) {
let lineTd = $(`.swh-content div[data-line-number="${i}"]`).parent().parent();
lineTd.css('background-color', lineHighlightColor);
return lineTd;
}
function removeHash() {
history.replaceState('', document.title, window.location.pathname + window.location.search);
}
// function to reset highlighting
function resetHighlightedLines() {
firstHighlightedLine = null;
$('.swh-content tr').css('background-color', 'inherit');
}
function scrollToLine(lineDomElt) {
if ($(lineDomElt).closest('.swh-content').length > 0) {
$('html, body').animate({
scrollTop: $(lineDomElt).offset().top - 70
}, 500);
}
}
// function to highlight lines based on a url fragment
// in the form '#Lx' or '#Lx-Ly'
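// for instance '#L10' highlights line 10 while '#L10-L42'
// highlights lines 10 to 42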
function parseUrlFragmentForLinesToHighlight() {
let lines = [];
let linesRegexp = new RegExp(/L(\d+)/g);
let line = linesRegexp.exec(window.location.hash);
while (line) {
lines.push(parseInt(line[1]));
line = linesRegexp.exec(window.location.hash);
}
resetHighlightedLines();
if (lines.length === 1) {
firstHighlightedLine = parseInt(lines[0]);
scrollToLine(highlightLine(lines[0]));
} else if (lines[0] < lines[lines.length - 1]) {
firstHighlightedLine = parseInt(lines[0]);
scrollToLine(highlightLine(lines[0]));
for (let i = lines[0] + 1; i <= lines[lines.length - 1]; ++i) {
highlightLine(i);
}
}
}
$(document).ready(() => {
// highlight code and add line numbers
$('code').each((i, block) => {
hljs.highlightBlock(block);
if (showLineNumbers) {
hljs.lineNumbersBlock(block);
}
});
if (!showLineNumbers) {
return;
}
// click handler to dynamically highlight line(s)
// when the user clicks on a line number (lines range
// can also be highlighted while holding the shift key)
$('body').click(evt => {
if (evt.target.classList.contains('hljs-ln-n')) {
let line = parseInt($(evt.target).data('line-number'));
if (evt.shiftKey && firstHighlightedLine && line > firstHighlightedLine) {
let firstLine = firstHighlightedLine;
resetHighlightedLines();
for (let i = firstLine; i <= line; ++i) {
highlightLine(i);
}
firstHighlightedLine = firstLine;
window.location.hash = `#L${firstLine}-L${line}`;
} else {
resetHighlightedLines();
highlightLine(line);
window.location.hash = `#L${line}`;
scrollToLine(evt.target);
}
} else if ($(evt.target).closest('.hljs').length) {
resetHighlightedLines();
removeHash();
}
});
// update lines highlighting when the url fragment changes
$(window).on('hashchange', () => parseUrlFragmentForLinesToHighlight());
// schedule lines highlighting if any as hljs.lineNumbersBlock() is async
setTimeout(() => {
parseUrlFragmentForLinesToHighlight();
});
});
}
diff --git a/swh/web/browse/utils.py b/swh/web/browse/utils.py
index b189e2c5..0b7e6d81 100644
--- a/swh/web/browse/utils.py
+++ b/swh/web/browse/utils.py
@@ -1,1122 +1,1122 @@
# Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
import base64
from collections import defaultdict
import magic
import math
import pypandoc
import stat
from django.core.cache import cache
from django.utils.safestring import mark_safe
from importlib import reload
from swh.web.common import highlightjs, service
from swh.web.common.exc import NotFoundExc, http_status_code_message
from swh.web.common.utils import (
reverse, format_utc_iso_date, parse_timestamp,
get_origin_visits, get_swh_persistent_id
)
from swh.web.config import get_config
def get_directory_entries(sha1_git):
"""Function that retrieves the content of a SWH directory
from the SWH archive.
The directory entries are first sorted in lexicographical order.
Sub-directories and regular files are then extracted.
Args:
sha1_git: sha1_git identifier of the directory
Returns:
A tuple whose first member corresponds to the sub-directories list
and second member the regular files list
Raises:
NotFoundExc if the directory is not found
"""
cache_entry_id = 'directory_entries_%s' % sha1_git
cache_entry = cache.get(cache_entry_id)
if cache_entry:
return cache_entry
entries = list(service.lookup_directory(sha1_git))
for e in entries:
e['perms'] = stat.filemode(e['perms'])
if e['type'] == 'rev':
# modify dir entry name to explicitly show it points
# to a revision
e['name'] = '%s @ %s' % (e['name'], e['target'][:7])
dirs = [e for e in entries if e['type'] in ('dir', 'rev')]
files = [e for e in entries if e['type'] == 'file']
dirs = sorted(dirs, key=lambda d: d['name'])
files = sorted(files, key=lambda f: f['name'])
cache.set(cache_entry_id, (dirs, files))
return dirs, files
def get_mimetype_and_encoding_for_content(content):
"""Function that returns the mime type and the encoding associated to
a content buffer using the magic module under the hood.
Args:
content (bytes): a content buffer
Returns:
A tuple (mimetype, encoding), for instance ('text/plain', 'us-ascii'),
associated to the provided content.
"""
while True:
try:
magic_result = magic.detect_from_content(content)
mime_type = magic_result.mime_type
encoding = magic_result.encoding
break
except Exception:
# workaround an issue with the magic module which can fail
# if detect_from_content is called multiple times in
# a short amount of time
reload(magic)
return mime_type, encoding
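# a minimal usage sketch (the output is hypothetical as it depends
# on the magic database available at runtime):
#   get_mimetype_and_encoding_for_content(b'#!/bin/sh\necho hi\n')
#   # -> ('text/x-shellscript', 'us-ascii')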
# maximum authorized content size in bytes for HTML display
# with code highlighting
content_display_max_size = get_config()['content_display_max_size']
def request_content(query_string, max_size=content_display_max_size,
raise_if_unavailable=True, reencode=True):
"""Function that retrieves a SWH content from the SWH archive.
Raw bytes content is first retrieved, then the content mime type.
If the mime type is not stored in the archive, it will be computed
using Python magic module.
Args:
query_string: a string of the form "[ALGO_HASH:]HASH" where
optional ALGO_HASH can be either *sha1*, *sha1_git*, *sha256*,
or *blake2s256* (default to *sha1*) and HASH the hexadecimal
representation of the hash value
max_size: the maximum size for a content to retrieve (default to 1MB,
no size limit if None)
Returns:
A tuple whose first member corresponds to the content raw bytes
and second member the content mime type
Raises:
NotFoundExc if the content is not found
"""
content_data = service.lookup_content(query_string)
filetype = None
language = None
license = None
# requests to the indexer db may fail so properly handle
# those cases in order to avoid content display errors
try:
filetype = service.lookup_content_filetype(query_string)
language = service.lookup_content_language(query_string)
license = service.lookup_content_license(query_string)
except Exception:
pass
mimetype = 'unknown'
encoding = 'unknown'
if filetype:
mimetype = filetype['mimetype']
encoding = filetype['encoding']
content_data['error_code'] = 200
content_data['error_message'] = ''
content_data['error_description'] = ''
if not max_size or content_data['length'] < max_size:
try:
content_raw = service.lookup_content_raw(query_string)
except Exception as e:
if raise_if_unavailable:
raise e
else:
content_data['raw_data'] = None
content_data['error_code'] = 404
content_data['error_description'] = \
'The bytes of the content are currently not available in the archive.' # noqa
content_data['error_message'] = \
http_status_code_message[content_data['error_code']]
else:
content_data['raw_data'] = content_raw['data']
if not filetype:
mimetype, encoding = \
get_mimetype_and_encoding_for_content(content_data['raw_data']) # noqa
# encode textual content to utf-8 if needed
if reencode and mimetype.startswith('text/'):
# probably a malformed UTF-8 content, re-encode it
# by replacing invalid chars with a substitution one
if encoding == 'unknown-8bit':
content_data['raw_data'] = \
content_data['raw_data'].decode('utf-8', 'replace')\
.encode('utf-8')
elif 'ascii' not in encoding and encoding not in ['utf-8', 'binary']: # noqa
content_data['raw_data'] = \
content_data['raw_data'].decode(encoding, 'replace')\
.encode('utf-8')
elif reencode and mimetype.startswith('application/octet-stream'):
# the file type detection may report a text content as binary
# so try to decode it for display
encodings = ['us-ascii']
encodings += ['iso-8859-%s' % i for i in range(1, 17)]
for encoding in encodings:
try:
content_data['raw_data'] = \
content_data['raw_data'].decode(encoding)\
.encode('utf-8')
except Exception:
pass
else:
# ensure display in content view
mimetype = 'text/plain'
break
else:
content_data['raw_data'] = None
content_data['mimetype'] = mimetype
content_data['encoding'] = encoding
if language:
content_data['language'] = language['lang']
else:
content_data['language'] = 'not detected'
if license:
content_data['licenses'] = ', '.join(license['facts'][0]['licenses'])
else:
content_data['licenses'] = 'not detected'
return content_data
_browsers_supported_image_mimes = set(['image/gif', 'image/png',
'image/jpeg', 'image/bmp',
'image/webp', 'image/svg',
'image/svg+xml'])
def prepare_content_for_display(content_data, mime_type, path):
"""Function that prepares a content for HTML display.
The function tries to associate a programming language to a
content in order to perform syntax highlighting client-side
using highlightjs. The language is determined using either
the content filename or its mime type.
If the mime type corresponds to an image format supported
by web browsers, the content will be encoded in base64
for displaying the image.
Args:
content_data (bytes): raw bytes of the content
mime_type (string): mime type of the content
path (string): path of the content including filename
Returns:
A dict containing the content bytes (possibly different from the one
provided as parameter if it is an image) under the key 'content_data'
and the corresponding highlightjs language class under the
key 'language'.
"""
language = highlightjs.get_hljs_language_from_filename(path)
if not language:
language = highlightjs.get_hljs_language_from_mime_type(mime_type)
if not language:
- language = 'nohighlight-swh'
+ language = 'nohighlight'
elif mime_type.startswith('application/'):
mime_type = mime_type.replace('application/', 'text/')
if mime_type.startswith('image/'):
if mime_type in _browsers_supported_image_mimes:
content_data = base64.b64encode(content_data)
else:
content_data = None
if mime_type.startswith('image/svg'):
mime_type = 'image/svg+xml'
return {'content_data': content_data,
'language': language,
'mimetype': mime_type}
def get_origin_visit(origin_info, visit_ts=None, visit_id=None,
snapshot_id=None):
"""Function that returns information about a SWH visit for
a given origin.
The visit is retrieved from a provided timestamp.
The closest visit to that timestamp is selected.
Args:
origin_info (dict): a dict filled with origin information
(id, url, type)
visit_ts (int or str): an ISO date string or Unix timestamp to parse
Returns:
A dict containing the visit info as described below::
{'origin': 2,
'date': '2017-10-08T11:54:25.582463+00:00',
'metadata': {},
'visit': 25,
'status': 'full'}
"""
visits = get_origin_visits(origin_info)
if not visits:
raise NotFoundExc('No SWH visit associated to origin with'
' type %s and url %s!' % (origin_info['type'],
origin_info['url']))
if snapshot_id:
visit = [v for v in visits if v['snapshot'] == snapshot_id]
if len(visit) == 0:
raise NotFoundExc(
'Visit for snapshot with id %s for origin with type %s'
' and url %s not found!' % (snapshot_id, origin_info['type'],
origin_info['url']))
return visit[0]
if visit_id:
visit = [v for v in visits if v['visit'] == int(visit_id)]
if len(visit) == 0:
raise NotFoundExc(
'Visit with id %s for origin with type %s'
' and url %s not found!' % (visit_id, origin_info['type'],
origin_info['url']))
return visit[0]
if not visit_ts:
# returns the latest full visit when no timestamp is provided
for v in reversed(visits):
if v['status'] == 'full':
return v
return visits[-1]
parsed_visit_ts = math.floor(parse_timestamp(visit_ts).timestamp())
visit_idx = None
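# scan the visits in chronological order and select the one whose
# date is closest to the requested timestamp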
for i, visit in enumerate(visits):
ts = math.floor(parse_timestamp(visit['date']).timestamp())
if i == 0 and parsed_visit_ts <= ts:
return visit
elif i == len(visits) - 1:
if parsed_visit_ts >= ts:
return visit
else:
next_ts = math.floor(
parse_timestamp(visits[i+1]['date']).timestamp())
if parsed_visit_ts >= ts and parsed_visit_ts < next_ts:
if (parsed_visit_ts - ts) < (next_ts - parsed_visit_ts):
visit_idx = i
break
else:
visit_idx = i+1
break
if visit_idx is not None:
visit = visits[visit_idx]
while visit_idx < len(visits) - 1 and \
visit['date'] == visits[visit_idx+1]['date']:
visit_idx = visit_idx + 1
visit = visits[visit_idx]
return visit
else:
raise NotFoundExc(
'Visit with timestamp %s for origin with type %s and url %s not found!' % # noqa
(visit_ts, origin_info['type'], origin_info['url']))
def get_snapshot_content(snapshot_id):
"""Returns the lists of branches and releases
associated to a swh snapshot.
That list is put in cache in order to speed up the navigation
in the swh-web/browse ui.
Args:
snapshot_id (str): hexadecimal representation of the snapshot
identifier
Returns:
A tuple with two members. The first one is a list of dict describing
the snapshot branches. The second one is a list of dict describing the
snapshot releases.
Raises:
NotFoundExc if the snapshot does not exist
"""
cache_entry_id = 'swh_snapshot_%s' % snapshot_id
cache_entry = cache.get(cache_entry_id)
if cache_entry:
return cache_entry['branches'], cache_entry['releases']
branches = {}
releases = {}
if snapshot_id:
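# reverse mappings from target ids to the branch names pointing to
# them, used below to batch the revision and release lookups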
revision_to_branch = defaultdict(set)
revision_to_release = defaultdict(set)
release_to_branch = defaultdict(set)
snapshot = service.lookup_snapshot(snapshot_id)
snapshot_branches = snapshot['branches']
for branch_name, target in snapshot_branches.items():
if not target:
# FIXME: display branches with an unknown target anyway
continue
target_id = target['target']
target_type = target['target_type']
if target_type == 'revision':
branches[branch_name] = {
'name': branch_name,
'revision': target_id,
}
revision_to_branch[target_id].add(branch_name)
elif target_type == 'release':
release_to_branch[target_id].add(branch_name)
# FIXME: handle pointers to other object types
# FIXME: handle branch aliases
releases_info = service.lookup_release_multiple(
release_to_branch.keys()
)
for release in releases_info:
branches_to_update = release_to_branch[release['id']]
for branch in branches_to_update:
releases[branch] = {
'name': release['name'],
'date': format_utc_iso_date(release['date']),
'id': release['id'],
'message': release['message'],
'target_type': release['target_type'],
'target': release['target'],
}
if release['target_type'] == 'revision':
revision_to_release[release['target']].update(
branches_to_update
)
revisions = service.lookup_revision_multiple(
set(revision_to_branch.keys()) | set(revision_to_release.keys())
)
for revision in revisions:
revision_data = {
'directory': revision['directory'],
'date': format_utc_iso_date(revision['date']),
'message': revision['message'],
}
for branch in revision_to_branch[revision['id']]:
branches[branch].update(revision_data)
for release in revision_to_release[revision['id']]:
releases[release]['directory'] = revision['directory']
ret_branches = list(sorted(branches.values(), key=lambda b: b['name']))
ret_releases = list(sorted(releases.values(), key=lambda b: b['name']))
cache.set(cache_entry_id, {
'branches': ret_branches,
'releases': ret_releases,
})
return ret_branches, ret_releases
def get_origin_visit_snapshot(origin_info, visit_ts=None, visit_id=None,
snapshot_id=None):
"""Returns the lists of branches and releases
associated to a swh origin for a given visit.
The visit is expressed by a timestamp; the closest visit to that
timestamp will be used.
If no visit parameter is provided, it returns the list of branches
found for the latest visit.
That list is put in cache in order to speed up the navigation
in the swh-web/browse ui.
Args:
origin_info (dict): a dict filled with origin information
(id, url, type)
visit_ts (int or str): an ISO date string or Unix timestamp to parse
visit_id (int): optional visit id for disambiguation in case
several visits have the same timestamp
Returns:
A tuple with two members. The first one is a list of dict describing
the origin branches for the given visit.
The second one is a list of dict describing the origin releases
for the given visit.
Raises:
NotFoundExc if the origin or its visit are not found
"""
visit_info = get_origin_visit(origin_info, visit_ts, visit_id, snapshot_id)
return get_snapshot_content(visit_info['snapshot'])
def gen_link(url, link_text=None, link_attrs={}):
"""
Utility function for generating an HTML link to insert
in Django templates.
Args:
url (str): a url
link_text (str): optional text for the produced link,
if not provided the url will be used
link_attrs (dict): optional attributes (e.g. class)
to add to the link
Returns:
An HTML link in the form '<a href="url">link_text</a>'
"""
attrs = ' '
for k, v in link_attrs.items():
attrs += '%s="%s" ' % (k, v)
if not link_text:
link_text = url
link = '<a%shref="%s">%s</a>' % (attrs, url, link_text)
return mark_safe(link)
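# an illustrative call (hypothetical values), assuming the anchor
# template above:
#   gen_link('https://example.org', 'example', {'class': 'ext'})
#   # -> '<a class="ext" href="https://example.org">example</a>'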
def gen_person_link(person_id, person_name, snapshot_context=None,
link_attrs={}):
"""
Utility function for generating a link to a SWH person HTML view
to insert in Django templates.
Args:
person_id (int): a SWH person id
person_name (str): the associated person name
link_attrs (dict): optional attributes (e.g. class)
to add to the link
Returns:
An HTML link in the form '<a href="person_view_url">person_name</a>'
"""
query_params = None
if snapshot_context and snapshot_context['origin_info']:
origin_info = snapshot_context['origin_info']
query_params = {'origin_type': origin_info['type'],
'origin': origin_info['url']}
if 'timestamp' in snapshot_context['url_args']:
query_params['timestamp'] = \
snapshot_context['url_args']['timestamp']
if 'visit_id' in snapshot_context['query_params']:
query_params['visit_id'] = \
snapshot_context['query_params']['visit_id']
elif snapshot_context:
query_params = {'snapshot_id': snapshot_context['snapshot_id']}
person_url = reverse('browse-person', kwargs={'person_id': person_id},
query_params=query_params)
return gen_link(person_url, person_name or 'None', link_attrs)
def gen_revision_link(revision_id, shorten_id=False, snapshot_context=None,
link_text=None, link_attrs={}):
"""
Utility function for generating a link to a SWH revision HTML view
to insert in Django templates.
Args:
revision_id (str): a SWH revision id
shorten_id (boolean): whether to shorten the revision id to 7
characters for the link text
snapshot_context (dict): if provided, generate snapshot-dependent
browsing link
link_attrs (dict): optional attributes (e.g. class)
to add to the link
Returns:
An HTML link in the form '<a href="revision_view_url">revision_id</a>'
"""
query_params = None
if snapshot_context and snapshot_context['origin_info']:
origin_info = snapshot_context['origin_info']
origin_type = snapshot_context['origin_type']
query_params = {'origin_type': origin_type,
'origin': origin_info['url']}
if 'timestamp' in snapshot_context['url_args']:
query_params['timestamp'] = \
snapshot_context['url_args']['timestamp']
if 'visit_id' in snapshot_context['query_params']:
query_params['visit_id'] = \
snapshot_context['query_params']['visit_id']
elif snapshot_context:
query_params = {'snapshot_id': snapshot_context['snapshot_id']}
revision_url = reverse('browse-revision',
kwargs={'sha1_git': revision_id},
query_params=query_params)
if shorten_id:
return gen_link(revision_url, revision_id[:7], link_attrs)
else:
if not link_text:
link_text = revision_id
return gen_link(revision_url, link_text, link_attrs)
def gen_origin_link(origin_info, link_attrs={}):
"""
Utility function for generating a link to a SWH origin HTML view
to insert in Django templates.
Args:
origin_info (dict): a dict filled with origin information
(id, type, url)
link_attrs (dict): optional attributes (e.g. class)
to add to the link
Returns:
An HTML link in the form '<a href="origin_view_url">Origin: origin_url</a>'
""" # noqa
origin_browse_url = reverse('browse-origin',
kwargs={'origin_type': origin_info['type'],
'origin_url': origin_info['url']})
return gen_link(origin_browse_url,
'Origin: ' + origin_info['url'], link_attrs)
def gen_directory_link(sha1_git, link_text=None, link_attrs={}):
"""
Utility function for generating a link to a SWH directory HTML view
to insert in Django templates.
Args:
sha1_git (str): directory identifier
link_text (str): optional text for the generated link
(the generated url will be used by default)
link_attrs (dict): optional attributes (e.g. class)
to add to the link
Returns:
An HTML link in the form '<a href="directory_view_url">link_text</a>'
"""
directory_url = reverse('browse-directory',
kwargs={'sha1_git': sha1_git})
if not link_text:
link_text = directory_url
return gen_link(directory_url, link_text, link_attrs)
def gen_snapshot_link(snapshot_id, link_text=None, link_attrs={}):
"""
Utility function for generating a link to a SWH snapshot HTML view
to insert in Django templates.
Args:
snapshot_id (str): snapshot identifier
link_text (str): optional text for the generated link
(the generated url will be used by default)
link_attrs (dict): optional attributes (e.g. class)
to add to the link
Returns:
An HTML link in the form '<a href="snapshot_view_url">link_text</a>'
"""
snapshot_url = reverse('browse-snapshot',
kwargs={'snapshot_id': snapshot_id})
if not link_text:
link_text = snapshot_url
return gen_link(snapshot_url, link_text, link_attrs)
def gen_snapshot_directory_link(snapshot_context, revision_id=None,
link_text=None, link_attrs={}):
"""
Utility function for generating a link to a SWH directory HTML view
in the context of a snapshot to insert in Django templates.
Args:
snapshot_context (dict): the snapshot information
revision_id (str): optional revision identifier in order
to use the associated directory
link_text (str): optional text to use for the generated link
link_attrs (dict): optional attributes (e.g. class)
to add to the link
Returns:
An HTML link in the form
'<a href="origin_directory_view_url">origin_directory_view_url</a>'
"""
query_params = {'revision': revision_id}
if snapshot_context['origin_info']:
origin_info = snapshot_context['origin_info']
url_args = {'origin_type': origin_info['type'],
'origin_url': origin_info['url']}
if 'timestamp' in snapshot_context['url_args']:
url_args['timestamp'] = \
snapshot_context['url_args']['timestamp']
if 'visit_id' in snapshot_context['query_params']:
query_params['visit_id'] = \
snapshot_context['query_params']['visit_id']
directory_url = reverse('browse-origin-directory',
kwargs=url_args,
query_params=query_params)
else:
url_args = {'snapshot_id': snapshot_context['snapshot_id']}
directory_url = reverse('browse-snapshot-directory',
kwargs=url_args,
query_params=query_params)
if not link_text:
link_text = directory_url
return gen_link(directory_url, link_text, link_attrs)
def gen_content_link(sha1_git, link_text=None, link_attrs={}):
"""
Utility function for generating a link to a SWH content HTML view
to insert in Django templates.
Args:
sha1_git (str): content identifier
link_text (str): optional text for the generated link
(the generated url will be used by default)
link_attrs (dict): optional attributes (e.g. class)
to add to the link
Returns:
An HTML link in the form '<a href="content_view_url">link_text</a>'
"""
content_url = reverse('browse-content',
kwargs={'query_string': 'sha1_git:' + sha1_git})
if not link_text:
link_text = content_url
return gen_link(content_url, link_text, link_attrs)
def get_revision_log_url(revision_id, snapshot_context=None):
"""
Utility function for getting the URL for a SWH revision log HTML view
(possibly in the context of an origin).
Args:
revision_id (str): revision identifier the history heads to
snapshot_context (dict): if provided, generate snapshot-dependent
browsing link
Returns:
The SWH revision log view URL
"""
query_params = {'revision': revision_id}
if snapshot_context and snapshot_context['origin_info']:
origin_info = snapshot_context['origin_info']
url_args = {'origin_type': origin_info['type'],
'origin_url': origin_info['url']}
if 'timestamp' in snapshot_context['url_args']:
url_args['timestamp'] = \
snapshot_context['url_args']['timestamp']
if 'visit_id' in snapshot_context['query_params']:
query_params['visit_id'] = \
snapshot_context['query_params']['visit_id']
revision_log_url = reverse('browse-origin-log',
kwargs=url_args,
query_params=query_params)
elif snapshot_context:
url_args = {'snapshot_id': snapshot_context['snapshot_id']}
revision_log_url = reverse('browse-snapshot-log',
kwargs=url_args,
query_params=query_params)
else:
revision_log_url = reverse('browse-revision-log',
kwargs={'sha1_git': revision_id})
return revision_log_url
def gen_revision_log_link(revision_id, snapshot_context=None, link_text=None,
link_attrs={}):
"""
Utility function for generating a link to a SWH revision log HTML view
(possibly in the context of an origin) to insert in Django templates.
Args:
revision_id (str): revision identifier the history heads to
snapshot_context (dict): if provided, generate snapshot-dependent
browsing link
link_text (str): optional text to use for the generated link
link_attrs (dict): optional attributes (e.g. class)
to add to the link
Returns:
An HTML link in the form
'<a href="revision_log_view_url">link_text</a>'
"""
revision_log_url = get_revision_log_url(revision_id, snapshot_context)
if not link_text:
link_text = revision_log_url
return gen_link(revision_log_url, link_text, link_attrs)
def _format_log_entries(revision_log, per_page, snapshot_context=None):
revision_log_data = []
for i, log in enumerate(revision_log):
if i == per_page:
break
author_name = 'None'
author_link = 'None'
if log['author']:
author_name = log['author']['name'] or log['author']['fullname']
author_link = gen_person_link(log['author']['id'], author_name,
snapshot_context)
revision_log_data.append(
{'author': author_link,
'revision': gen_revision_link(log['id'], True, snapshot_context),
'message': log['message'],
'date': format_utc_iso_date(log['date']),
'directory': log['directory']})
return revision_log_data
def prepare_revision_log_for_display(revision_log, per_page, revs_breadcrumb,
snapshot_context=None):
"""
Utility function that processes raw revision log data for HTML display.
Its purpose is to:
* add links to relevant SWH browse views
* format date in human readable format
* truncate the message log
It also computes the data needed to generate the links for navigating back
and forth in the history log.
Args:
revision_log (list): raw revision log as returned by the SWH web api
per_page (int): number of log entries per page
revs_breadcrumb (str): breadcrumbs of revisions navigated so far,
in the form 'rev1[/rev2/../revN]'. Each revision corresponds to
the first one displayed in the HTML view for history log.
snapshot_context (dict): if provided, generate snapshot-dependent
browsing link
"""
current_rev = revision_log[0]['id']
next_rev = None
prev_rev = None
next_revs_breadcrumb = None
prev_revs_breadcrumb = None
if len(revision_log) == per_page + 1:
prev_rev = revision_log[-1]['id']
prev_rev_bc = current_rev
if snapshot_context:
prev_rev_bc = prev_rev
if revs_breadcrumb:
revs = revs_breadcrumb.split('/')
next_rev = revs[-1]
if len(revs) > 1:
next_revs_breadcrumb = '/'.join(revs[:-1])
if len(revision_log) == per_page + 1:
prev_revs_breadcrumb = revs_breadcrumb + '/' + prev_rev_bc
else:
prev_revs_breadcrumb = prev_rev_bc
return {'revision_log_data': _format_log_entries(revision_log, per_page,
snapshot_context),
'prev_rev': prev_rev,
'prev_revs_breadcrumb': prev_revs_breadcrumb,
'next_rev': next_rev,
'next_revs_breadcrumb': next_revs_breadcrumb}
# list of origin types that can be found in the swh archive
# TODO: retrieve it dynamically in an efficient way instead
# of hardcoding it
_swh_origin_types = ['git', 'svn', 'deb', 'hg', 'ftp', 'deposit', 'pypi']
def get_origin_info(origin_url, origin_type=None):
"""
Get info about a SWH origin.
Its main purpose is to automatically find an origin type
when it is not provided as a parameter.
Args:
origin_url (str): complete url of a SWH origin
origin_type (str): optional origin type
Returns:
A dict with the following entries:
* type: the origin type
* url: the origin url
* id: the SWH internal id of the origin
"""
if origin_type:
return service.lookup_origin({'type': origin_type,
'url': origin_url})
else:
for origin_type in _swh_origin_types:
try:
origin_info = service.lookup_origin({'type': origin_type,
'url': origin_url})
return origin_info
except Exception:
pass
raise NotFoundExc('Origin with url %s not found!' % origin_url)
def get_snapshot_context(snapshot_id=None, origin_type=None, origin_url=None,
timestamp=None, visit_id=None):
"""
Utility function to compute relevant information when navigating
the SWH archive in a snapshot context. The snapshot is either
referenced by its id or it will be retrieved from an origin visit.
Args:
snapshot_id (str): hexadecimal representation of a snapshot identifier,
all other parameters will be ignored if it is provided
origin_type (str): the origin type (git, svn, deposit, ...)
origin_url (str): the origin_url (e.g. https://github.com/(user)/(repo)/)
timestamp (str): a datetime string for retrieving the closest
SWH visit of the origin
visit_id (int): optional visit id for disambiguation in case
of several visits with the same timestamp
Returns:
A dict with the following entries:
* origin_info: dict containing origin information
* visit_info: dict containing SWH visit information
* branches: the list of branches for the origin found
during the visit
* releases: the list of releases for the origin found
during the visit
* origin_browse_url: the url to browse the origin
* origin_branches_url: the url to browse the origin branches
* origin_releases_url: the url to browse the origin releases
* origin_visit_url: the url to browse the snapshot of the origin
found during the visit
* url_args: dict containing url arguments to use when browsing in
the context of the origin and its visit
""" # noqa
origin_info = None
visit_info = None
url_args = None
query_params = {}
branches = []
releases = []
browse_url = None
visit_url = None
branches_url = None
releases_url = None
swh_type = 'snapshot'
if origin_url:
swh_type = 'origin'
origin_info = get_origin_info(origin_url, origin_type)
visit_info = get_origin_visit(origin_info, timestamp, visit_id,
snapshot_id)
visit_info['fmt_date'] = format_utc_iso_date(visit_info['date'])
snapshot_id = visit_info['snapshot']
# the provided timestamp is not necessarily equal to the one
# of the retrieved visit, so get the exact one in order
# to use it in the urls generated below
if timestamp:
timestamp = visit_info['date']
branches, releases = \
get_origin_visit_snapshot(origin_info, timestamp, visit_id,
snapshot_id)
url_args = {'origin_type': origin_type,
'origin_url': origin_info['url']}
query_params = {'visit_id': visit_id}
browse_url = reverse('browse-origin-visits',
kwargs=url_args)
if timestamp:
url_args['timestamp'] = format_utc_iso_date(timestamp,
'%Y-%m-%dT%H:%M:%S')
visit_url = reverse('browse-origin-directory',
kwargs=url_args,
query_params=query_params)
visit_info['url'] = visit_url
branches_url = reverse('browse-origin-branches',
kwargs=url_args,
query_params=query_params)
releases_url = reverse('browse-origin-releases',
kwargs=url_args,
query_params=query_params)
elif snapshot_id:
branches, releases = get_snapshot_content(snapshot_id)
url_args = {'snapshot_id': snapshot_id}
browse_url = reverse('browse-snapshot',
kwargs=url_args)
branches_url = reverse('browse-snapshot-branches',
kwargs=url_args)
releases_url = reverse('browse-snapshot-releases',
kwargs=url_args)
releases = list(reversed(releases))
return {
'swh_type': swh_type,
'snapshot_id': snapshot_id,
'origin_info': origin_info,
# keep track if the origin type was provided as url argument
'origin_type': origin_type,
'visit_info': visit_info,
'branches': branches,
'releases': releases,
'branch': None,
'release': None,
'browse_url': browse_url,
'branches_url': branches_url,
'releases_url': releases_url,
'url_args': url_args,
'query_params': query_params
}
# list of common readme names ordered by preference
# (lower indices have higher priority)
_common_readme_names = [
"readme.markdown",
"readme.md",
"readme.rst",
"readme.txt",
"readme"
]
def get_readme_to_display(readmes):
"""
Process a list of readme files found in a directory
in order to find the most adequate one to display.
Args:
readmes: a dict where keys are readme file names and values
are readme sha1s
Returns:
A tuple (readme_name, readme_url, readme_html)
"""
readme_name = None
readme_url = None
readme_sha1 = None
readme_html = None
lc_readmes = {k.lower(): {'orig_name': k, 'sha1': v}
for k, v in readmes.items()}
# look for readme names according to the preference order
# defined by the _common_readme_names list
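# e.g. given {'README.rst': sha1_a, 'readme.txt': sha1_b} (hypothetical
# values), 'README.rst' is selected as '.rst' precedes '.txt' in that list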
for common_readme_name in _common_readme_names:
if common_readme_name in lc_readmes:
readme_name = lc_readmes[common_readme_name]['orig_name']
readme_sha1 = lc_readmes[common_readme_name]['sha1']
readme_url = reverse('browse-content-raw',
kwargs={'query_string': readme_sha1})
break
# otherwise pick the first readme-like file if any
if not readme_name and len(readmes.items()) > 0:
readme_name = next(iter(readmes))
readme_sha1 = readmes[readme_name]
readme_url = reverse('browse-content-raw',
kwargs={'query_string': readme_sha1})
# convert rst README to html server side as there is
# no viable solution to perform that task client side
if readme_name and readme_name.endswith('.rst'):
cache_entry_id = 'readme_%s' % readme_sha1
cache_entry = cache.get(cache_entry_id)
if cache_entry:
readme_html = cache_entry
else:
try:
rst_doc = request_content(readme_sha1)
readme_html = pypandoc.convert_text(rst_doc['raw_data'],
'html', format='rst')
cache.set(cache_entry_id, readme_html)
except Exception:
readme_html = 'Readme bytes are not available'
return readme_name, readme_url, readme_html
def get_swh_persistent_ids(swh_objects, snapshot_context=None):
"""
Returns a list of dict containing info related to persistent
identifiers of swh objects.
Args:
swh_objects (list): a list of dict with the following keys:
* type: swh object type (content/directory/release/revision/snapshot)
* id: swh object id
snapshot_context (dict): optional parameter describing the snapshot in which
the object has been found
Returns:
list: a list of dict with the following keys:
* object_type: the swh object type (content/directory/release/revision/snapshot)
* object_icon: the swh object icon to use in HTML views
* swh_id: the computed swh object persistent identifier
* swh_id_url: the url resolving the persistent identifier
* show_options: boolean indicating if the persistent id options must
be displayed in persistent ids HTML view
""" # noqa
swh_ids = []
for swh_object in swh_objects:
swh_id = get_swh_persistent_id(swh_object['type'], swh_object['id'])
show_options = swh_object['type'] == 'content' or \
(snapshot_context and snapshot_context['origin_info'] is not None)
object_icon = mark_safe('')
if swh_object['type'] == 'directory':
object_icon = mark_safe('')
elif swh_object['type'] == 'release':
object_icon = mark_safe('')
elif swh_object['type'] == 'revision':
object_icon = mark_safe('') # noqa
elif swh_object['type'] == 'snapshot':
object_icon = mark_safe('')
swh_ids.append({
'object_type': swh_object['type'],
'object_icon': object_icon,
'swh_id': swh_id,
'swh_id_url': reverse('browse-swh-id',
kwargs={'swh_id': swh_id}),
'show_options': show_options
})
return swh_ids
diff --git a/swh/web/browse/views/content.py b/swh/web/browse/views/content.py
index 8db804ec..f5b1622f 100644
--- a/swh/web/browse/views/content.py
+++ b/swh/web/browse/views/content.py
@@ -1,294 +1,294 @@
# Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
import difflib
import json
from distutils.util import strtobool
from django.http import HttpResponse
from django.utils.safestring import mark_safe
from django.shortcuts import render
from django.template.defaultfilters import filesizeformat
from swh.model.hashutil import hash_to_hex
from swh.web.common import query
from swh.web.common.utils import (
reverse, gen_path_info
)
from swh.web.common.exc import NotFoundExc, handle_view_exception
from swh.web.browse.utils import (
request_content, prepare_content_for_display,
content_display_max_size, get_snapshot_context,
get_swh_persistent_ids, gen_link
)
from swh.web.browse.browseurls import browse_route
@browse_route(r'content/(?P<query_string>.+)/raw/',
view_name='browse-content-raw')
def content_raw(request, query_string):
"""Django view that produces a raw display of a SWH content identified
by its hash value.
The url that points to it is :http:get:`/browse/content/[(algo_hash):](hash)/raw/`
""" # noqa
try:
algo, checksum = query.parse_hash(query_string)
checksum = hash_to_hex(checksum)
content_data = request_content(query_string, max_size=None,
reencode=False)
except Exception as exc:
return handle_view_exception(request, exc)
filename = request.GET.get('filename', None)
if not filename:
filename = '%s_%s' % (algo, checksum)
if content_data['mimetype'].startswith('text/') or \
content_data['mimetype'] == 'inode/x-empty':
response = HttpResponse(content_data['raw_data'],
content_type="text/plain")
response['Content-disposition'] = 'filename=%s' % filename
else:
response = HttpResponse(content_data['raw_data'],
content_type='application/octet-stream')
response['Content-disposition'] = 'attachment; filename=%s' % filename
return response
_auto_diff_size_limit = 20000
@browse_route(r'content/(?P<from_query_string>.*)/diff/(?P<to_query_string>.*)', # noqa
view_name='diff-contents')
def _contents_diff(request, from_query_string, to_query_string):
"""
Browse endpoint used to compute unified diffs between two contents.
Diffs are generated only if the two contents are textual.
By default, diffs whose size is greater than 20 kB will
not be generated. To force the generation of large diffs,
the 'force' boolean query parameter must be used.
Args:
request: input django http request
from_query_string: a string of the form "[ALGO_HASH:]HASH" where
optional ALGO_HASH can be either *sha1*, *sha1_git*, *sha256*,
or *blake2s256* (default to *sha1*) and HASH the hexadecimal
representation of the hash value identifying the first content
to_query_string: same as above for identifying the second content
Returns:
A JSON object containing the unified diff.
"""
diff_data = {}
content_from = None
content_to = None
content_from_size = 0
content_to_size = 0
content_from_lines = []
content_to_lines = []
force = request.GET.get('force', 'false')
path = request.GET.get('path', None)
- language = 'nohighlight-swh'
+ language = 'nohighlight'
force = bool(strtobool(force))
if from_query_string == to_query_string:
diff_str = 'File renamed without changes'
else:
try:
text_diff = True
if from_query_string:
content_from = \
request_content(from_query_string, max_size=None)
content_from_display_data = \
prepare_content_for_display(content_from['raw_data'],
content_from['mimetype'], path)
language = content_from_display_data['language']
content_from_size = content_from['length']
if not (content_from['mimetype'].startswith('text/') or
content_from['mimetype'] == 'inode/x-empty'):
text_diff = False
if text_diff and to_query_string:
content_to = request_content(to_query_string, max_size=None)
content_to_display_data = prepare_content_for_display(
content_to['raw_data'], content_to['mimetype'], path)
language = content_to_display_data['language']
content_to_size = content_to['length']
if not (content_to['mimetype'].startswith('text/') or
content_to['mimetype'] == 'inode/x-empty'):
text_diff = False
diff_size = abs(content_to_size - content_from_size)
if not text_diff:
diff_str = 'Diffs are not generated for non textual content'
- language = 'nohighlight-swh'
+ language = 'nohighlight'
elif not force and diff_size > _auto_diff_size_limit:
diff_str = 'Large diffs are not automatically computed'
- language = 'nohighlight-swh'
+ language = 'nohighlight'
else:
if content_from:
content_from_lines = \
content_from['raw_data'].decode('utf-8')\
.splitlines(True)
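# difflib does not emit a '\ No newline at end of file' marker, so
# tag the last line with a sentinel that the client-side javascript
# replaces with a visual marker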
if content_from_lines and \
content_from_lines[-1][-1] != '\n':
content_from_lines[-1] += '[swh-no-nl-marker]\n'
if content_to:
content_to_lines = content_to['raw_data'].decode('utf-8')\
.splitlines(True)
if content_to_lines and content_to_lines[-1][-1] != '\n':
content_to_lines[-1] += '[swh-no-nl-marker]\n'
diff_lines = difflib.unified_diff(content_from_lines,
content_to_lines)
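# drop the two file header lines ('---' / '+++') emitted by difflib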
diff_str = ''.join(list(diff_lines)[2:])
except Exception as e:
diff_str = str(e)
diff_data['diff_str'] = diff_str
diff_data['language'] = language
diff_data_json = json.dumps(diff_data, separators=(',', ': '))
return HttpResponse(diff_data_json, content_type='application/json')
@browse_route(r'content/(?P<query_string>.+)/',
view_name='browse-content')
def content_display(request, query_string):
"""Django view that produces an HTML display of a SWH content identified
by its hash value.
The url that points to it is :http:get:`/browse/content/[(algo_hash):](hash)/`
""" # noqa
try:
algo, checksum = query.parse_hash(query_string)
checksum = hash_to_hex(checksum)
content_data = request_content(query_string,
raise_if_unavailable=False)
origin_type = request.GET.get('origin_type', None)
origin_url = request.GET.get('origin_url', None)
if not origin_url:
origin_url = request.GET.get('origin', None)
snapshot_context = None
if origin_url:
try:
snapshot_context = get_snapshot_context(None, origin_type,
origin_url)
except Exception:
raw_cnt_url = reverse('browse-content',
kwargs={'query_string': query_string})
error_message = \
('The Software Heritage archive has a content '
'with the hash you provided but the origin '
'mentioned in your request appears broken: %s. '
'Please check the URL and try again.\n\n'
'Nevertheless, you can still browse the content '
'without origin information: %s'
% (gen_link(origin_url), gen_link(raw_cnt_url)))
raise NotFoundExc(error_message)
if snapshot_context:
snapshot_context['visit_info'] = None
except Exception as exc:
return handle_view_exception(request, exc)
path = request.GET.get('path', None)
content = None
language = None
mimetype = None
if content_data['raw_data'] is not None:
content_display_data = prepare_content_for_display(
content_data['raw_data'], content_data['mimetype'], path)
content = content_display_data['content_data']
language = content_display_data['language']
mimetype = content_display_data['mimetype']
root_dir = None
filename = None
path_info = None
breadcrumbs = []
if path:
split_path = path.split('/')
root_dir = split_path[0]
filename = split_path[-1]
path = path.replace(root_dir + '/', '')
path = path[:-len(filename)]
path_info = gen_path_info(path)
breadcrumbs.append({'name': root_dir[:7],
'url': reverse('browse-directory',
kwargs={'sha1_git': root_dir})})
for pi in path_info:
breadcrumbs.append({'name': pi['name'],
'url': reverse('browse-directory',
kwargs={'sha1_git': root_dir,
'path': pi['path']})})
breadcrumbs.append({'name': filename,
'url': None})
query_params = None
if filename:
query_params = {'filename': filename}
content_raw_url = reverse('browse-content-raw',
kwargs={'query_string': query_string},
query_params=query_params)
content_metadata = {
'sha1 checksum': content_data['checksums']['sha1'],
'sha1_git checksum': content_data['checksums']['sha1_git'],
'sha256 checksum': content_data['checksums']['sha256'],
'blake2s256 checksum': content_data['checksums']['blake2s256'],
'mime type': content_data['mimetype'],
'encoding': content_data['encoding'],
'size': filesizeformat(content_data['length']),
'language': content_data['language'],
'licenses': content_data['licenses']
}
if filename:
content_metadata['filename'] = filename
sha1_git = content_data['checksums']['sha1_git']
swh_ids = get_swh_persistent_ids([{'type': 'content',
'id': sha1_git}])
heading = 'Content - %s' % sha1_git
if breadcrumbs:
content_path = '/'.join([bc['name'] for bc in breadcrumbs])
heading += ' - %s' % content_path
return render(request, 'browse/content.html',
{'heading': heading,
'swh_object_name': 'Content',
'swh_object_metadata': content_metadata,
'content': content,
'content_size': content_data['length'],
'max_content_size': content_display_max_size,
'mimetype': mimetype,
'language': language,
'breadcrumbs': breadcrumbs,
'top_right_link': content_raw_url,
'top_right_link_text': mark_safe(
                   '<i class="fa fa-file-text fa-fw" aria-hidden="true"></i>'
                   'Raw File'),
'snapshot_context': snapshot_context,
'vault_cooking': None,
'show_actions_menu': True,
'swh_ids': swh_ids,
'error_code': content_data['error_code'],
'error_message': content_data['error_message'],
'error_description': content_data['error_description']},
status=content_data['error_code'])
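Note: the breadcrumbs construction in content_display assumes gen_path_info expands the intermediate path into one entry per directory level. A rough sketch of the expected behavior (the path value is hypothetical and the output is an assumption, not verified here):

    from swh.web.common.utils import gen_path_info

    path = 'root_sha1/sub/dir/file.txt'        # hypothetical browse path
    split_path = path.split('/')
    root_dir = split_path[0]                   # 'root_sha1'
    filename = split_path[-1]                  # 'file.txt'
    path = path.replace(root_dir + '/', '')    # 'sub/dir/file.txt'
    path = path[:-len(filename)]               # 'sub/dir/'
    print(gen_path_info(path))
    # expected: [{'name': 'sub', 'path': 'sub'},
    #            {'name': 'dir', 'path': 'sub/dir'}]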
diff --git a/swh/web/common/highlightjs.py b/swh/web/common/highlightjs.py
index feb6941c..a7301bdc 100644
--- a/swh/web/common/highlightjs.py
+++ b/swh/web/common/highlightjs.py
@@ -1,301 +1,301 @@
# Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from pygments.lexers import (
get_all_lexers,
get_lexer_for_filename
)
# set of language ids that can be highlighted
# by highlight.js library
_hljs_languages = set([
'1c', 'abnf', 'accesslog', 'actionscript',
'ada', 'apache', 'applescript', 'arduino',
'armasm', 'asciidoc', 'aspectj', 'autohotkey',
'autoit', 'avrasm', 'awk', 'axapta', 'bash',
'basic', 'bnf', 'brainfuck', 'cal', 'capnproto',
'ceylon', 'clean', 'clojure', 'clojure-repl',
'cmake', 'coffeescript', 'coq', 'cos', 'cpp',
'crmsh', 'crystal', 'cs', 'csp', 'css', 'dart',
'delphi', 'diff', 'django', 'd', 'dns', 'dockerfile',
'dos', 'dsconfig', 'dts', 'dust', 'ebnf', 'elixir',
'elm', 'erb', 'erlang', 'erlang-repl', 'excel',
'fix', 'flix', 'fortran', 'fsharp', 'gams', 'gauss',
'gcode', 'gherkin', 'glsl', 'go', 'golo', 'gradle',
'groovy', 'haml', 'handlebars', 'haskell', 'haxe',
'hsp', 'htmlbars', 'http', 'hy', 'inform7',
'ini', 'irpf90', 'java', 'javascript', 'jboss-cli',
'json', 'julia', 'julia-repl', 'kotlin', 'lasso',
'ldif', 'leaf', 'less', 'lisp', 'livecodeserver',
'livescript', 'llvm', 'lsl', 'lua', 'makefile',
'markdown', 'mathematica', 'matlab', 'maxima',
'mel', 'mercury', 'mipsasm', 'mizar', 'mojolicious',
'monkey', 'moonscript', 'n1ql', 'nginx', 'nimrod',
'nix', 'nsis', 'objectivec', 'ocaml', 'openscad',
'oxygene', 'parser3', 'perl', 'pf', 'php', 'pony',
'powershell', 'processing', 'profile', 'prolog',
'protobuf', 'puppet', 'purebasic', 'python', 'q',
'qml', 'rib', 'r', 'roboconf', 'routeros', 'rsl',
'ruby', 'ruleslanguage', 'rust', 'scala', 'scheme',
'scilab', 'scss', 'shell', 'smali', 'smalltalk',
'sml', 'sqf', 'sql', 'stan', 'stata', 'step21',
'stylus', 'subunit', 'swift', 'taggerscript',
'tap', 'tcl', 'tex', 'thrift', 'tp', 'twig',
'typescript', 'vala', 'vbnet', 'vbscript-html',
'vbscript', 'verilog', 'vhdl', 'vim', 'x86asm',
'xl', 'xml', 'xquery', 'yaml', 'zephir',
])
# language aliases defined in highlight.js
_hljs_languages_aliases = {
'ado': 'stata',
'adoc': 'asciidoc',
'ahk': 'autohotkey',
'apacheconf': 'apache',
'arm': 'armasm',
'as': 'actionscript',
'atom': 'xml',
'bat': 'dos',
'bf': 'brainfuck',
'bind': 'dns',
'c': 'cpp',
'c++': 'cpp',
'capnp': 'capnproto',
'cc': 'cpp',
'clean': 'clean',
'clj': 'clojure',
'cls': 'cos',
'cmake.in': 'cmake',
'cmd': 'dos',
'coffee': 'coffeescript',
'console': 'shell',
'cos': 'cos',
'cr': 'crystal',
'craftcms': 'twig',
'crm': 'crmsh',
'csharp': 'cs',
'cson': 'coffeescript',
'dcl': 'clean',
'desktop': 'ini',
'dfm': 'delphi',
'do': 'stata',
'docker': 'dockerfile',
'dpr': 'delphi',
'dst': 'dust',
'el': 'lisp',
'erl': 'erlang',
'f90': 'fortran',
'f95': 'fortran',
'feature': 'gherkin',
'freepascal': 'delphi',
'fs': 'fsharp',
'gemspec': 'ruby',
'gms': 'gams',
'golang': 'go',
'graph': 'roboconf',
'gss': 'gauss',
'gyp': 'python',
'h': 'cpp',
'h++': 'cpp',
'hbs': 'handlebars',
'hpp': 'cpp',
'hs': 'haskell',
'html': 'xml',
'html.handlebars': 'handlebars',
'html.hbs': 'handlebars',
'https': 'http',
'hx': 'haxe',
'hylang': 'hy',
'i7': 'inform7',
'iced': 'coffeescript',
'icl': 'clean',
'instances': 'roboconf',
'ipynb': 'json',
'irb': 'ruby',
'jinja': 'django',
'js': 'javascript',
'jsp': 'java',
'jsx': 'javascript',
'k': 'q',
'kdb': 'q',
'lassoscript': 'lasso',
'lazarus': 'delphi',
'lfm': 'delphi',
'lpr': 'delphi',
'ls': 'livescript',
'm': 'objectivec',
'mak': 'makefile',
'md': 'markdown',
'mikrotik': 'routeros',
'mips': 'mipsasm',
'mk': 'makefile',
'mkd': 'markdown',
'mkdown': 'markdown',
'markdown': 'markdown',
'ml': 'ocaml',
'mm': 'objectivec',
'mma': 'mathematica',
'moo': 'mercury',
'moon': 'moonscript',
'nc': 'gcode',
'nginxconf': 'nginx',
'nim': 'nimrod',
'nixos': 'nix',
'obj-c': 'objectivec',
'objc': 'objectivec',
'osascript': 'applescript',
'p21': 'step21',
'pas': 'delphi',
'pascal': 'delphi',
'patch': 'diff',
'pb': 'purebasic',
'pbi': 'purebasic',
'pcmk': 'crmsh',
'pf.conf': 'pf',
'php3': 'php',
'php4': 'php',
'php5': 'php',
'php6': 'php',
'pl': 'perl',
'plist': 'xml',
'pm': 'perl',
'podspec': 'ruby',
'pp': 'puppet',
'ps': 'powershell',
'py': 'python',
'qrc': 'xml',
'qs': 'javascript',
'qt': 'qml',
'rb': 'ruby',
'routeros': 'routeros',
'rs': 'rust',
- 'rst': 'nohighlight-swh',
+ 'rst': 'nohighlight',
'rss': 'xml',
'ru': 'ruby',
'scad': 'openscad',
'sci': 'scilab',
'scpt': 'applescript',
'sh': 'bash',
'smali': 'smali',
'sqf': 'sqf',
'st': 'smalltalk',
'step': 'step21',
'stp': 'step21',
'styl': 'stylus',
'sv': 'verilog',
'svh': 'verilog',
'tao': 'xl',
'thor': 'ruby',
'tk': 'tcl',
'toml': 'ini',
'ui': 'xml',
'v': 'verilog',
'vb': 'vbnet',
'vbs': 'vbscript',
'wildfly-cli': 'jboss-cli',
'xhtml': 'xml',
'xjb': 'xml',
'xls': 'excel',
'xlsx': 'excel',
'xpath': 'xquery',
'xq': 'xquery',
'xsd': 'xml',
'xsl': 'xml',
'yaml': 'yaml',
'yml': 'yaml',
'zep': 'zephir',
'zone': 'dns',
'zsh': 'bash'
}
# dictionary mapping pygment lexers to hljs languages
_pygments_lexer_to_hljs_language = {}
# dictionary mapping mime types to hljs languages
_mime_type_to_hljs_language = {
'text/x-c': 'cpp',
'text/x-c++': 'cpp',
'text/x-msdos-batch': 'dos',
'text/x-lisp': 'lisp',
'text/x-shellscript': 'bash',
}
# function to fill the above dictionaries
def _init_pygments_to_hljs_map():
if len(_pygments_lexer_to_hljs_language) == 0:
for lexer in get_all_lexers():
lexer_name = lexer[0]
lang_aliases = lexer[1]
lang_mime_types = lexer[3]
lang = None
for lang_alias in lang_aliases:
if lang_alias in _hljs_languages:
lang = lang_alias
_pygments_lexer_to_hljs_language[lexer_name] = lang_alias
break
if lang:
for lang_mime_type in lang_mime_types:
_mime_type_to_hljs_language[lang_mime_type] = lang
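Note: the lazy initialization above relies on the 4-tuples yielded by Pygments' get_all_lexers(): (name, aliases, filename patterns, mime types). For example (exact values vary across Pygments versions):

    from pygments.lexers import get_all_lexers

    lexer = next(lex for lex in get_all_lexers() if lex[0] == 'Python')
    print(lexer[1])  # aliases, e.g. ('python', 'py', ...)
    print(lexer[3])  # mime types, e.g. ('text/x-python', ...)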
def get_hljs_language_from_filename(filename):
"""Function that tries to associate a language supported by highlight.js
from a filename.
Args:
filename: input filename
Returns:
highlight.js language id or None if no correspondence has been found
"""
_init_pygments_to_hljs_map()
if filename:
exts = filename.lower().split('.')
# check if file extension matches an hljs language
# also handle .ext.in cases
for ext in reversed(exts[-2:]):
if ext in _hljs_languages:
return ext
if ext in _hljs_languages_aliases:
return _hljs_languages_aliases[ext]
# otherwise use Pygments language database
lexer = None
    # try to find a Pygments lexer
try:
lexer = get_lexer_for_filename(filename)
except Exception:
pass
# if there is a correspondence between the lexer and an hljs
# language, return it
if lexer and lexer.name in _pygments_lexer_to_hljs_language:
return _pygments_lexer_to_hljs_language[lexer.name]
# otherwise, try to find a match between the file extensions
# associated to the lexer and the hljs language aliases
if lexer:
exts = [ext.replace('*.', '') for ext in lexer.filenames]
for ext in exts:
if ext in _hljs_languages_aliases:
return _hljs_languages_aliases[ext]
return None
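A quick usage sketch of the lookup order implemented above, assuming the tables in this module (expected results shown as comments, not verified here):

    from swh.web.common.highlightjs import get_hljs_language_from_filename

    print(get_hljs_language_from_filename('diff-utils.js'))  # 'javascript' ('js' alias)
    print(get_hljs_language_from_filename('README.rst'))     # 'nohighlight' after this patch
    print(get_hljs_language_from_filename('unknown.zzz'))    # no match anywhere: None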
def get_hljs_language_from_mime_type(mime_type):
"""Function that tries to associate a language supported by highlight.js
from a mime type.
Args:
mime_type: input mime type
Returns:
highlight.js language id or None if no correspondence has been found
"""
_init_pygments_to_hljs_map()
if mime_type and mime_type in _mime_type_to_hljs_language:
return _mime_type_to_hljs_language[mime_type]
return None
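Similarly for the mime type lookup; the first result comes from the static table above, the second from the Pygments-derived map (expected results, not verified here):

    from swh.web.common.highlightjs import get_hljs_language_from_mime_type

    print(get_hljs_language_from_mime_type('text/x-c++'))       # 'cpp'
    print(get_hljs_language_from_mime_type('text/x-python'))    # 'python'
    print(get_hljs_language_from_mime_type('application/pdf'))  # None expected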
diff --git a/swh/web/tests/browse/views/test_content.py b/swh/web/tests/browse/views/test_content.py
index c6823f05..a4d0cbf6 100644
--- a/swh/web/tests/browse/views/test_content.py
+++ b/swh/web/tests/browse/views/test_content.py
@@ -1,315 +1,315 @@
# Copyright (C) 2017-2018 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
import base64
from unittest.mock import patch
from django.utils.html import escape
from django.utils.encoding import DjangoUnicodeDecodeError
from swh.web.browse.utils import get_mimetype_and_encoding_for_content
from swh.web.common.exc import NotFoundExc
from swh.web.common.utils import reverse, get_swh_persistent_id
from swh.web.common.utils import gen_path_info
from swh.web.tests.testcase import SWHWebTestCase
from .data.content_test_data import (
stub_content_text_data,
stub_content_text_path_with_root_dir,
stub_content_bin_data,
stub_content_bin_filename,
stub_content_text_no_highlight_data,
non_utf8_encoded_content_data,
non_utf8_encoded_content,
non_utf8_encoding,
stub_content_too_large_data
)
class SwhBrowseContentTest(SWHWebTestCase):
@patch('swh.web.browse.views.content.request_content')
def test_content_view_text(self, mock_request_content):
mock_request_content.return_value = stub_content_text_data
sha1_git = stub_content_text_data['checksums']['sha1_git']
url = reverse('browse-content',
kwargs={'query_string': stub_content_text_data['checksums']['sha1']}) # noqa
url_raw = reverse('browse-content-raw',
kwargs={'query_string': stub_content_text_data['checksums']['sha1']}) # noqa
resp = self.client.get(url)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed('browse/content.html')
        self.assertContains(resp, '<code class="cpp">')
self.assertContains(resp, escape(stub_content_text_data['raw_data']))
self.assertContains(resp, url_raw)
swh_cnt_id = get_swh_persistent_id('content', sha1_git)
swh_cnt_id_url = reverse('browse-swh-id',
kwargs={'swh_id': swh_cnt_id})
self.assertContains(resp, swh_cnt_id)
self.assertContains(resp, swh_cnt_id_url)
@patch('swh.web.browse.views.content.request_content')
def test_content_view_text_no_highlight(self, mock_request_content):
mock_request_content.return_value = stub_content_text_no_highlight_data
sha1_git = stub_content_text_no_highlight_data['checksums']['sha1_git']
url = reverse('browse-content',
kwargs={'query_string': stub_content_text_no_highlight_data['checksums']['sha1']}) # noqa
url_raw = reverse('browse-content-raw',
kwargs={'query_string': stub_content_text_no_highlight_data['checksums']['sha1']}) # noqa
resp = self.client.get(url)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed('browse/content.html')
-        self.assertContains(resp, '<code class="nohighlight-swh">')
+        self.assertContains(resp, '<code class="nohighlight">')
self.assertContains(resp, escape(stub_content_text_no_highlight_data['raw_data'])) # noqa
self.assertContains(resp, url_raw)
swh_cnt_id = get_swh_persistent_id('content', sha1_git)
swh_cnt_id_url = reverse('browse-swh-id',
kwargs={'swh_id': swh_cnt_id})
self.assertContains(resp, swh_cnt_id)
self.assertContains(resp, swh_cnt_id_url)
@patch('swh.web.browse.utils.service')
def test_content_view_no_utf8_text(self, mock_service):
mock_service.lookup_content.return_value = \
non_utf8_encoded_content_data
mock_service.lookup_content_raw.return_value = \
{'data': non_utf8_encoded_content}
mock_service.lookup_content_filetype.return_value = None
mock_service.lookup_content_language.return_value = None
mock_service.lookup_content_license.return_value = None
sha1_git = non_utf8_encoded_content_data['checksums']['sha1_git']
url = reverse('browse-content',
kwargs={'query_string': non_utf8_encoded_content_data['checksums']['sha1']}) # noqa
try:
resp = self.client.get(url)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed('browse/content.html')
swh_cnt_id = get_swh_persistent_id('content', sha1_git)
swh_cnt_id_url = reverse('browse-swh-id',
kwargs={'swh_id': swh_cnt_id})
self.assertContains(resp, swh_cnt_id_url)
self.assertContains(resp, escape(non_utf8_encoded_content.decode(non_utf8_encoding).encode('utf-8'))) # noqa
except DjangoUnicodeDecodeError:
self.fail('Textual content is not encoded in utf-8')
@patch('swh.web.browse.views.content.request_content')
def test_content_view_image(self, mock_request_content):
mime_type = 'image/png'
mock_request_content.return_value = stub_content_bin_data
url = reverse('browse-content',
kwargs={'query_string': stub_content_bin_data['checksums']['sha1']}) # noqa
url_raw = reverse('browse-content-raw',
kwargs={'query_string': stub_content_bin_data['checksums']['sha1']}) # noqa
resp = self.client.get(url)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed('browse/content.html')
png_encoded = base64.b64encode(stub_content_bin_data['raw_data']) \
.decode('utf-8')
        self.assertContains(resp, '<img src="data:%s;base64,%s"/>'
                            % (mime_type, png_encoded))
self.assertContains(resp, url_raw)
@patch('swh.web.browse.views.content.request_content')
def test_content_view_with_path(self, mock_request_content):
mock_request_content.return_value = stub_content_text_data
url = reverse('browse-content',
kwargs={'query_string': stub_content_text_data['checksums']['sha1']}, # noqa
query_params={'path': stub_content_text_path_with_root_dir}) # noqa
resp = self.client.get(url)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed('browse/content.html')
        self.assertContains(resp, '<code class="cpp">')
self.assertContains(resp, escape(stub_content_text_data['raw_data']))
split_path = stub_content_text_path_with_root_dir.split('/')
root_dir_sha1 = split_path[0]
filename = split_path[-1]
path = stub_content_text_path_with_root_dir \
.replace(root_dir_sha1 + '/', '') \
.replace(filename, '')
path_info = gen_path_info(path)
root_dir_url = reverse('browse-directory',
kwargs={'sha1_git': root_dir_sha1})
        self.assertContains(resp, '<li class="swh-path">',
                            count=len(path_info)+1)
        self.assertContains(resp, '<a href="' + root_dir_url + '">' +
                            root_dir_sha1[:7] + '</a>')
for p in path_info:
dir_url = reverse('browse-directory',
kwargs={'sha1_git': root_dir_sha1,
'path': p['path']})
            self.assertContains(resp, '<a href="' + dir_url + '">' +
                                p['name'] + '</a>')
self.assertContains(resp, '