server: use index-based paging (#123)

rr-
2017-02-09 00:48:06 +01:00
parent ba7ca0cd87
commit fdad08e176
34 changed files with 222 additions and 193 deletions
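
For reference, the old page/pageSize request parameters map onto the new offset/limit parameters via the same arithmetic the old Executor.execute() performed internally. The sketch below is illustrative only and is not part of this commit (the helper name page_to_offset is made up):

    from typing import Tuple

    def page_to_offset(page: int, page_size: int) -> Tuple[int, int]:
        # Same formula the removed code used:
        #   .offset(max(page - 1, 0) * page_size).limit(page_size)
        offset = max(page - 1, 0) * page_size
        limit = page_size
        return offset, limit

    # e.g. the old page=3 with pageSize=100 becomes offset=200, limit=100
    assert page_to_offset(3, 100) == (200, 100)

Clients that previously sent page/pageSize now pass the starting index directly, and responses echo 'offset' and 'limit' in place of 'page' and 'pageSize'.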

View File

@@ -78,8 +78,8 @@ class Executor:
def execute(
self,
query_text: str,
page: int,
page_size: int
offset: int,
limit: int
) -> Tuple[int, List[model.Base]]:
search_query = self.parser.parse(query_text)
self.config.on_search_query_parsed(search_query)
@@ -89,7 +89,7 @@ class Executor:
if token.name == 'random':
disable_eager_loads = True
key = (id(self.config), hash(search_query), page, page_size)
key = (id(self.config), hash(search_query), offset, limit)
if cache.has(key):
return cache.get(key)
@@ -97,8 +97,8 @@ class Executor:
filter_query = filter_query.options(sa.orm.lazyload('*'))
filter_query = self._prepare_db_query(filter_query, search_query, True)
entities = filter_query \
.offset(max(page - 1, 0) * page_size) \
.limit(page_size) \
.offset(offset) \
.limit(limit) \
.all()
count_query = self.config.create_count_query(disable_eager_loads)
@@ -120,14 +120,13 @@ class Executor:
serializer: Callable[[model.Base], rest.Response]
) -> rest.Response:
query = ctx.get_param_as_string('query', default='')
page = ctx.get_param_as_int('page', default=1, min=1)
page_size = ctx.get_param_as_int(
'pageSize', default=100, min=1, max=100)
count, entities = self.execute(query, page, page_size)
offset = ctx.get_param_as_int('offset', default=0, min=0)
limit = ctx.get_param_as_int('limit', default=100, min=1, max=100)
count, entities = self.execute(query, offset, limit)
return {
'query': query,
'page': page,
'pageSize': page_size,
'offset': offset,
'limit': limit,
'total': count,
'results': [serializer(entity) for entity in entities],
}

View File

@@ -23,12 +23,12 @@ def test_retrieving_multiple(user_factory, comment_factory, context_factory):
comments.serialize_comment.return_value = 'serialized comment'
result = api.comment_api.get_comments(
context_factory(
params={'query': '', 'page': 1},
params={'query': '', 'offset': 0},
user=user_factory(rank=model.User.RANK_REGULAR)))
assert result == {
'query': '',
'page': 1,
'pageSize': 100,
'offset': 0,
'limit': 100,
'total': 2,
'results': ['serialized comment', 'serialized comment'],
}
@@ -39,7 +39,7 @@ def test_trying_to_retrieve_multiple_without_privileges(
with pytest.raises(errors.AuthError):
api.comment_api.get_comments(
context_factory(
params={'query': '', 'page': 1},
params={'query': '', 'offset': 0},
user=user_factory(rank=model.User.RANK_ANONYMOUS)))

View File

@@ -24,12 +24,12 @@ def test_retrieving_multiple(user_factory, post_factory, context_factory):
posts.serialize_post.return_value = 'serialized post'
result = api.post_api.get_posts(
context_factory(
params={'query': '', 'page': 1},
params={'query': '', 'offset': 0},
user=user_factory(rank=model.User.RANK_REGULAR)))
assert result == {
'query': '',
'page': 1,
'pageSize': 100,
'offset': 0,
'limit': 100,
'total': 2,
'results': ['serialized post', 'serialized post'],
}
@@ -48,12 +48,12 @@ def test_using_special_tokens(user_factory, post_factory, context_factory):
'serialized post %d' % post.post_id
result = api.post_api.get_posts(
context_factory(
params={'query': 'special:fav', 'page': 1},
params={'query': 'special:fav', 'offset': 0},
user=auth_user))
assert result == {
'query': 'special:fav',
'page': 1,
'pageSize': 100,
'offset': 0,
'limit': 100,
'total': 1,
'results': ['serialized post 1'],
}
@@ -67,7 +67,7 @@ def test_trying_to_use_special_tokens_without_logging_in(
with pytest.raises(errors.SearchError):
api.post_api.get_posts(
context_factory(
params={'query': 'special:fav', 'page': 1},
params={'query': 'special:fav', 'offset': 0},
user=user_factory(rank=model.User.RANK_ANONYMOUS)))
@@ -76,7 +76,7 @@ def test_trying_to_retrieve_multiple_without_privileges(
with pytest.raises(errors.AuthError):
api.post_api.get_posts(
context_factory(
params={'query': '', 'page': 1},
params={'query': '', 'offset': 0},
user=user_factory(rank=model.User.RANK_ANONYMOUS)))

View File

@@ -28,11 +28,11 @@ def test_retrieving_multiple(user_factory, context_factory):
db.session.flush()
result = api.snapshot_api.get_snapshots(
context_factory(
params={'query': '', 'page': 1},
params={'query': '', 'offset': 0},
user=user_factory(rank=model.User.RANK_REGULAR)))
assert result['query'] == ''
assert result['page'] == 1
assert result['pageSize'] == 100
assert result['offset'] == 0
assert result['limit'] == 100
assert result['total'] == 2
assert len(result['results']) == 2
@@ -42,5 +42,5 @@ def test_trying_to_retrieve_multiple_without_privileges(
with pytest.raises(errors.AuthError):
api.snapshot_api.get_snapshots(
context_factory(
params={'query': '', 'page': 1},
params={'query': '', 'offset': 0},
user=user_factory(rank=model.User.RANK_ANONYMOUS)))

View File

@@ -23,12 +23,12 @@ def test_retrieving_multiple(user_factory, tag_factory, context_factory):
tags.serialize_tag.return_value = 'serialized tag'
result = api.tag_api.get_tags(
context_factory(
params={'query': '', 'page': 1},
params={'query': '', 'offset': 0},
user=user_factory(rank=model.User.RANK_REGULAR)))
assert result == {
'query': '',
'page': 1,
'pageSize': 100,
'offset': 0,
'limit': 100,
'total': 2,
'results': ['serialized tag', 'serialized tag'],
}
@@ -39,7 +39,7 @@ def test_trying_to_retrieve_multiple_without_privileges(
with pytest.raises(errors.AuthError):
api.tag_api.get_tags(
context_factory(
params={'query': '', 'page': 1},
params={'query': '', 'offset': 0},
user=user_factory(rank=model.User.RANK_ANONYMOUS)))

View File

@@ -28,8 +28,8 @@ def test_retrieving_multiple(user_factory, context_factory):
user=user_factory(rank=model.User.RANK_REGULAR)))
assert result == {
'query': '',
'page': 1,
'pageSize': 100,
'offset': 0,
'limit': 100,
'total': 2,
'results': ['serialized user', 'serialized user'],
}

View File

@@ -13,7 +13,7 @@ def executor():
def verify_unpaged(executor):
def verify(input, expected_comment_text):
actual_count, actual_comments = executor.execute(
input, page=1, page_size=100)
input, offset=0, limit=100)
actual_comment_text = [c.text for c in actual_comments]
assert actual_count == len(expected_comment_text)
assert actual_comment_text == expected_comment_text

View File

@@ -65,7 +65,7 @@ def auth_executor(executor, user_factory):
def verify_unpaged(executor):
def verify(input, expected_post_ids, test_order=False):
actual_count, actual_posts = executor.execute(
input, page=1, page_size=100)
input, offset=0, limit=100)
actual_post_ids = list([p.post_id for p in actual_posts])
if not test_order:
actual_post_ids = sorted(actual_post_ids)
@@ -381,7 +381,7 @@ def test_filter_by_safety(
def test_filter_by_invalid_type(executor):
with pytest.raises(errors.SearchError):
executor.execute('type:invalid', page=1, page_size=100)
executor.execute('type:invalid', offset=0, limit=100)
@pytest.mark.parametrize('input,expected_post_ids', [
@@ -458,7 +458,7 @@ def test_filter_by_image_size(
def test_filter_by_invalid_aspect_ratio(executor):
with pytest.raises(errors.SearchError):
executor.execute('image-ar:1:1:1', page=1, page_size=100)
executor.execute('image-ar:1:1:1', offset=0, limit=100)
@pytest.mark.parametrize('input,expected_post_ids', [
@@ -706,7 +706,7 @@ def test_own_disliked(
])
def test_someones_score(executor, input):
with pytest.raises(errors.SearchError):
executor.execute(input, page=1, page_size=100)
executor.execute(input, offset=0, limit=100)
def test_own_fav(

View File

@@ -13,7 +13,7 @@ def executor():
def verify_unpaged(executor):
def verify(input, expected_tag_names):
actual_count, actual_tags = executor.execute(
input, page=1, page_size=100)
input, offset=0, limit=100)
actual_tag_names = [u.names[0].name for u in actual_tags]
assert actual_count == len(expected_tag_names)
assert actual_tag_names == expected_tag_names
@@ -183,7 +183,7 @@ def test_filter_by_post_count(
])
def test_filter_by_invalid_input(executor, input):
with pytest.raises(errors.SearchError):
executor.execute(input, page=1, page_size=100)
executor.execute(input, offset=0, limit=100)
@pytest.mark.parametrize('input,expected_tag_names', [

View File

@@ -13,7 +13,7 @@ def executor():
def verify_unpaged(executor):
def verify(input, expected_user_names):
actual_count, actual_users = executor.execute(
input, page=1, page_size=100)
input, offset=0, limit=100)
actual_user_names = [u.name for u in actual_users]
assert actual_count == len(expected_user_names)
assert actual_user_names == expected_user_names
@@ -135,21 +135,23 @@ def test_combining_tokens(
@pytest.mark.parametrize(
'page,page_size,expected_total_count,expected_user_names', [
(1, 1, 2, ['u1']),
(2, 1, 2, ['u2']),
(3, 1, 2, []),
'offset,limit,expected_total_count,expected_user_names', [
(0, 1, 2, ['u1']),
(1, 1, 2, ['u2']),
(2, 1, 2, []),
(-1, 1, 2, ['u1']),
(0, 2, 2, ['u1', 'u2']),
(3, 1, 2, []),
(0, 0, 2, []),
])
def test_paging(
executor, user_factory, page, page_size,
executor, user_factory, offset, limit,
expected_total_count, expected_user_names):
db.session.add(user_factory(name='u1'))
db.session.add(user_factory(name='u2'))
db.session.flush()
actual_count, actual_users = executor.execute(
'', page=page, page_size=page_size)
'', offset=offset, limit=limit)
actual_user_names = [u.name for u in actual_users]
assert actual_count == expected_total_count
assert actual_user_names == expected_user_names
@@ -222,7 +224,7 @@ def test_random_sort(executor, user_factory):
db.session.add_all([user3, user1, user2])
db.session.flush()
actual_count, actual_users = executor.execute(
'sort:random', page=1, page_size=100)
'sort:random', offset=0, limit=100)
actual_user_names = [u.name for u in actual_users]
assert actual_count == 3
assert len(actual_user_names) == 3
@@ -251,4 +253,4 @@ def test_random_sort(executor, user_factory):
])
def test_bad_tokens(executor, input, expected_error):
with pytest.raises(expected_error):
executor.execute(input, page=1, page_size=100)
executor.execute(input, offset=0, limit=100)