D6150: cassandra: Remove stat_counters.
File: D6150.id22330.diff (9 KB)
diff --git a/swh/storage/cassandra/cql.py b/swh/storage/cassandra/cql.py
--- a/swh/storage/cassandra/cql.py
+++ b/swh/storage/cassandra/cql.py
@@ -283,17 +283,7 @@
def _execute_with_retries(self, statement, args) -> ResultSet:
return self._session.execute(statement, args, timeout=1000.0)
- @_prepared_statement(
- "UPDATE object_count SET count = count + ? "
- "WHERE partition_key = 0 AND object_type = ?"
- )
- def _increment_counter(
- self, object_type: str, nb: int, *, statement: PreparedStatement
- ) -> None:
- self._execute_with_retries(statement, [nb, object_type])
-
def _add_one(self, statement, obj: BaseRow) -> None:
- self._increment_counter(obj.TABLE, 1)
self._execute_with_retries(statement, dataclasses.astuple(obj))
_T = TypeVar("_T", bound=BaseRow)
@@ -328,7 +318,6 @@
"""Returned currified by content_add_prepare, to be called when the
content row should be added to the primary table."""
self._execute_with_retries(statement, None)
- self._increment_counter("content", 1)
@_prepared_insert_statement(ContentRow)
def content_add_prepare(
@@ -482,7 +471,6 @@
"""Returned currified by skipped_content_add_prepare, to be called
when the content row should be added to the primary table."""
self._execute_with_retries(statement, None)
- self._increment_counter("skipped_content", 1)
@_prepared_insert_statement(SkippedContentRow)
def skipped_content_add_prepare(
@@ -1219,7 +1207,6 @@
"""Returned currified by extid_add_prepare, to be called when the
extid row should be added to the primary table."""
self._execute_with_retries(statement, None)
- self._increment_counter("extid", 1)
@_prepared_insert_statement(ExtIDRow)
def extid_add_prepare(
@@ -1328,10 +1315,11 @@
# Miscellaneous
##########################
+ def stat_counters(self) -> Iterable[ObjectCountRow]:
+ raise NotImplementedError(
+ "stat_counters is not implemented by the Cassandra backend"
+ )
+
@_prepared_statement("SELECT uuid() FROM revision LIMIT 1;")
def check_read(self, *, statement):
self._execute_with_retries(statement, [])
-
- @_prepared_select_statement(ObjectCountRow, "WHERE partition_key=0")
- def stat_counters(self, *, statement) -> Iterable[ObjectCountRow]:
- return map(ObjectCountRow.from_dict, self._execute_with_retries(statement, []))
diff --git a/swh/storage/cassandra/schema.py b/swh/storage/cassandra/schema.py
--- a/swh/storage/cassandra/schema.py
+++ b/swh/storage/cassandra/schema.py
@@ -267,13 +267,6 @@
PRIMARY KEY ((id))
);""",
"""
-CREATE TABLE IF NOT EXISTS object_count (
- partition_key smallint, -- Constant, must always be 0
- object_type ascii,
- count counter,
- PRIMARY KEY ((partition_key), object_type)
-);""",
- """
CREATE TABLE IF NOT EXISTS extid (
extid_type ascii,
extid blob,
@@ -319,7 +312,6 @@
"origin_visit",
"origin",
"raw_extrinsic_metadata",
- "object_count",
"origin_visit_status",
"metadata_authority",
"metadata_fetcher",
diff --git a/swh/storage/tests/storage_tests.py b/swh/storage/tests/storage_tests.py
--- a/swh/storage/tests/storage_tests.py
+++ b/swh/storage/tests/storage_tests.py
@@ -38,8 +38,10 @@
TargetType,
)
from swh.storage import get_storage
+from swh.storage.cassandra.storage import CassandraStorage
from swh.storage.common import origin_url_to_sha1 as sha1
from swh.storage.exc import HashCollision, StorageArgumentException
+from swh.storage.in_memory import InMemoryStorage
from swh.storage.interface import ListOrder, PagedResult, StorageInterface
from swh.storage.tests.conftest import function_scoped_fixture_check
from swh.storage.utils import (
@@ -187,8 +189,11 @@
assert obj.ctime <= insertion_end_time
assert obj == expected_cont
- swh_storage.refresh_stat_counters()
- assert swh_storage.stat_counters()["content"] == 1
+ if isinstance(swh_storage, InMemoryStorage) or not isinstance(
+ swh_storage, CassandraStorage
+ ):
+ swh_storage.refresh_stat_counters()
+ assert swh_storage.stat_counters()["content"] == 1
def test_content_add_from_lazy_content(self, swh_storage, sample_data):
cont = sample_data.content
@@ -221,8 +226,11 @@
assert obj.ctime <= insertion_end_time
assert attr.evolve(obj, ctime=None).to_dict() == expected_cont.to_dict()
- swh_storage.refresh_stat_counters()
- assert swh_storage.stat_counters()["content"] == 1
+ if isinstance(swh_storage, InMemoryStorage) or not isinstance(
+ swh_storage, CassandraStorage
+ ):
+ swh_storage.refresh_stat_counters()
+ assert swh_storage.stat_counters()["content"] == 1
def test_content_get_data_missing(self, swh_storage, sample_data):
cont, cont2 = sample_data.contents[:2]
@@ -705,8 +713,11 @@
after_missing = list(swh_storage.directory_missing([directory.id]))
assert after_missing == []
- swh_storage.refresh_stat_counters()
- assert swh_storage.stat_counters()["directory"] == 1
+ if isinstance(swh_storage, InMemoryStorage) or not isinstance(
+ swh_storage, CassandraStorage
+ ):
+ swh_storage.refresh_stat_counters()
+ assert swh_storage.stat_counters()["directory"] == 1
def test_directory_add_twice(self, swh_storage, sample_data):
directory = sample_data.directories[1]
@@ -975,8 +986,11 @@
actual_result = swh_storage.revision_add([revision])
assert actual_result == {"revision:add": 0}
- swh_storage.refresh_stat_counters()
- assert swh_storage.stat_counters()["revision"] == 1
+ if isinstance(swh_storage, InMemoryStorage) or not isinstance(
+ swh_storage, CassandraStorage
+ ):
+ swh_storage.refresh_stat_counters()
+ assert swh_storage.stat_counters()["revision"] == 1
def test_revision_add_twice(self, swh_storage, sample_data):
revision, revision2 = sample_data.revisions[:2]
@@ -1376,8 +1390,11 @@
actual_result = swh_storage.release_add([release, release2])
assert actual_result == {"release:add": 0}
- swh_storage.refresh_stat_counters()
- assert swh_storage.stat_counters()["release"] == 2
+ if isinstance(swh_storage, InMemoryStorage) or not isinstance(
+ swh_storage, CassandraStorage
+ ):
+ swh_storage.refresh_stat_counters()
+ assert swh_storage.stat_counters()["release"] == 2
def test_release_add_no_author_date(self, swh_storage, sample_data):
full_release = sample_data.release
@@ -1482,8 +1499,11 @@
[("origin", origin) for origin in origins]
)
- swh_storage.refresh_stat_counters()
- assert swh_storage.stat_counters()["origin"] == len(origins)
+ if isinstance(swh_storage, InMemoryStorage) or not isinstance(
+ swh_storage, CassandraStorage
+ ):
+ swh_storage.refresh_stat_counters()
+ assert swh_storage.stat_counters()["origin"] == len(origins)
def test_origin_add_twice(self, swh_storage, sample_data):
origin, origin2 = sample_data.origins[:2]
@@ -1923,11 +1943,13 @@
]
)
- swh_storage.refresh_stat_counters()
-
- stats = swh_storage.stat_counters()
- assert stats["origin"] == len(origins)
- assert stats["origin_visit"] == len(origins) * len(visits)
+ if isinstance(swh_storage, InMemoryStorage) or not isinstance(
+ swh_storage, CassandraStorage
+ ):
+ swh_storage.refresh_stat_counters()
+ stats = swh_storage.stat_counters()
+ assert stats["origin"] == len(origins)
+ assert stats["origin_visit"] == len(origins) * len(visits)
random_ovs = swh_storage.origin_visit_status_get_random(visit_type)
assert random_ovs
@@ -3124,8 +3146,11 @@
"next_branch": None,
}
- swh_storage.refresh_stat_counters()
- assert swh_storage.stat_counters()["snapshot"] == 2
+ if isinstance(swh_storage, InMemoryStorage) or not isinstance(
+ swh_storage, CassandraStorage
+ ):
+ swh_storage.refresh_stat_counters()
+ assert swh_storage.stat_counters()["snapshot"] == 2
def test_snapshot_add_many_incremental(self, swh_storage, sample_data):
snapshot, _, complete_snapshot = sample_data.snapshots[:3]
@@ -3625,6 +3650,10 @@
assert list(missing_snapshots) == [missing_snapshot.id]
def test_stat_counters(self, swh_storage, sample_data):
+ if isinstance(swh_storage, CassandraStorage) and not isinstance(
+ swh_storage, InMemoryStorage
+ ):
+ pytest.skip("Cassandra backend does not support stat counters")
origin = sample_data.origin
snapshot = sample_data.snapshot
revision = sample_data.revision
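
For context, here is a minimal sketch (not part of the patch) of how calling code might cope with stat_counters being removed from the Cassandra backend. It follows the same isinstance pattern the updated tests use; the helper name safe_stat_counters is hypothetical, and the imports mirror the ones added to storage_tests.py above.

    from swh.storage.cassandra.storage import CassandraStorage
    from swh.storage.in_memory import InMemoryStorage


    def safe_stat_counters(storage):
        # The isinstance guards in storage_tests.py treat InMemoryStorage as a
        # special case of CassandraStorage, so it is checked first here as well.
        if isinstance(storage, CassandraStorage) and not isinstance(
            storage, InMemoryStorage
        ):
            # The Cassandra backend now raises NotImplementedError for
            # stat_counters, so signal "no counters available" instead.
            return None
        storage.refresh_stat_counters()
        return storage.stat_counters()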