summary | refs | log | tree | commit | diff
path: root/src
diff options (context, whitespace handling, mode)
authorJohn Christopher Anderson <jchris@apache.org>2009-01-04 02:28:08 +0000
committerJohn Christopher Anderson <jchris@apache.org>2009-01-04 02:28:08 +0000
commit14d8a23c9b5bd69099b4bd2c3ca6c3eb0441a0b3 (patch)
tree42e4446fecdcaf379119c4d71d6156b9167ddf65 /src
parente27fb8eaa628128f0ec8f1797805e92ec39ec6cb (diff)
change count to limit in view query params
git-svn-id: https://svn.apache.org/repos/asf/couchdb/trunk@731159 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'src')
-rw-r--r--src/couchdb/couch_db.hrl4
-rw-r--r--src/couchdb/couch_httpd_db.erl8
-rw-r--r--src/couchdb/couch_httpd_view.erl68
-rw-r--r--src/couchdb/couch_rep.erl2
4 files changed, 41 insertions, 41 deletions
diff --git a/src/couchdb/couch_db.hrl b/src/couchdb/couch_db.hrl
index eb7bd9a1..22fad2ef 100644
--- a/src/couchdb/couch_db.hrl
+++ b/src/couchdb/couch_db.hrl
@@ -139,8 +139,8 @@
-record(view_query_args, {
start_key = nil,
end_key = {},
- count = 10000000000, % a huge huge default number. Picked so we don't have
- % to do different logic for when there is no count
+ limit = 10000000000, % a huge huge default number. Picked so we don't have
+ % to do different logic for when there is no limit
% limit
update = true,
direction = fwd,
diff --git a/src/couchdb/couch_httpd_db.erl b/src/couchdb/couch_httpd_db.erl
index eb41801f..84a0a812 100644
--- a/src/couchdb/couch_httpd_db.erl
+++ b/src/couchdb/couch_httpd_db.erl
@@ -173,7 +173,7 @@ db_req(#httpd{path_parts=[_,<<"_all_docs">>]}=Req, _Db) ->
db_req(#httpd{method='GET',path_parts=[_,<<"_all_docs_by_seq">>]}=Req, Db) ->
#view_query_args{
start_key = StartKey,
- count = Count,
+ limit = Limit,
skip = SkipCount,
direction = Dir
} = QueryArgs = couch_httpd_view:parse_view_query(Req),
@@ -215,7 +215,7 @@ db_req(#httpd{method='GET',path_parts=[_,<<"_all_docs_by_seq">>]}=Req, Db) ->
end
},
FoldlFun({{UpdateSeq, Id}, Json}, Offset, Acc)
- end, {Count, SkipCount, undefined, []}),
+ end, {Limit, SkipCount, undefined, []}),
couch_httpd_view:finish_view_fold(Req, TotalRowCount, {ok, FoldResult});
db_req(#httpd{path_parts=[_,<<"_all_docs_by_seq">>]}=Req, _Db) ->
@@ -259,7 +259,7 @@ all_docs_view(Req, Db, Keys) ->
#view_query_args{
start_key = StartKey,
start_docid = StartDocId,
- count = Count,
+ limit = Limit,
skip = SkipCount,
direction = Dir
} = QueryArgs = couch_httpd_view:parse_view_query(Req, Keys),
@@ -268,7 +268,7 @@ all_docs_view(Req, Db, Keys) ->
StartId = if is_binary(StartKey) -> StartKey;
true -> StartDocId
end,
- FoldAccInit = {Count, SkipCount, undefined, []},
+ FoldAccInit = {Limit, SkipCount, undefined, []},
case Keys of
nil ->
diff --git a/src/couchdb/couch_httpd_view.erl b/src/couchdb/couch_httpd_view.erl
index 3a25bd42..80e06908 100644
--- a/src/couchdb/couch_httpd_view.erl
+++ b/src/couchdb/couch_httpd_view.erl
@@ -84,7 +84,7 @@ handle_slow_view_req(Req, _Db) ->
output_map_view(Req, View, Db, QueryArgs, nil) ->
#view_query_args{
- count = Count,
+ limit = Limit,
direction = Dir,
skip = SkipCount,
start_key = StartKey,
@@ -94,19 +94,19 @@ output_map_view(Req, View, Db, QueryArgs, nil) ->
Start = {StartKey, StartDocId},
FoldlFun = make_view_fold_fun(Req, QueryArgs, Db, RowCount,
fun couch_view:reduce_to_count/1),
- FoldAccInit = {Count, SkipCount, undefined, []},
+ FoldAccInit = {Limit, SkipCount, undefined, []},
FoldResult = couch_view:fold(View, Start, Dir, FoldlFun, FoldAccInit),
finish_view_fold(Req, RowCount, FoldResult);
output_map_view(Req, View, Db, QueryArgs, Keys) ->
#view_query_args{
- count = Count,
+ limit = Limit,
direction = Dir,
skip = SkipCount,
start_docid = StartDocId
} = QueryArgs,
{ok, RowCount} = couch_view:get_row_count(View),
- FoldAccInit = {Count, SkipCount, undefined, []},
+ FoldAccInit = {Limit, SkipCount, undefined, []},
FoldResult = lists:foldl(
fun(Key, {ok, FoldAcc}) ->
Start = {Key, StartDocId},
@@ -123,7 +123,7 @@ output_reduce_view(Req, View, QueryArgs, nil) ->
#view_query_args{
start_key = StartKey,
end_key = EndKey,
- count = Count,
+ limit = Limit,
skip = Skip,
direction = Dir,
start_docid = StartDocId,
@@ -134,13 +134,13 @@ output_reduce_view(Req, View, QueryArgs, nil) ->
{ok, GroupRowsFun, RespFun} = make_reduce_fold_funs(Resp, GroupLevel),
send_chunk(Resp, "{\"rows\":["),
{ok, _} = couch_view:fold_reduce(View, Dir, {StartKey, StartDocId},
- {EndKey, EndDocId}, GroupRowsFun, RespFun, {"", Skip, Count}),
+ {EndKey, EndDocId}, GroupRowsFun, RespFun, {"", Skip, Limit}),
send_chunk(Resp, "]}"),
end_json_response(Resp);
output_reduce_view(Req, View, QueryArgs, Keys) ->
#view_query_args{
- count = Count,
+ limit = Limit,
skip = Skip,
direction = Dir,
start_docid = StartDocId,
@@ -154,7 +154,7 @@ output_reduce_view(Req, View, QueryArgs, Keys) ->
fun(Key, AccSeparator) ->
{ok, {NewAcc, _, _}} = couch_view:fold_reduce(View, Dir, {Key, StartDocId},
{Key, EndDocId}, GroupRowsFun, RespFun,
- {AccSeparator, Skip, Count}),
+ {AccSeparator, Skip, Limit}),
NewAcc % Switch to comma
end,
"", Keys), % Start with no comma
@@ -171,25 +171,25 @@ make_reduce_fold_funs(Resp, GroupLevel) ->
({Key1,_}, {Key2,_}) ->
Key1 == Key2
end,
- RespFun = fun(_Key, _Red, {AccSeparator,AccSkip,AccCount}) when AccSkip > 0 ->
- {ok, {AccSeparator,AccSkip-1,AccCount}};
- (_Key, _Red, {AccSeparator,0,AccCount}) when AccCount == 0 ->
- {stop, {AccSeparator,0,AccCount}};
- (_Key, Red, {AccSeparator,0,AccCount}) when GroupLevel == 0 ->
+ RespFun = fun(_Key, _Red, {AccSeparator,AccSkip,AccLimit}) when AccSkip > 0 ->
+ {ok, {AccSeparator,AccSkip-1,AccLimit}};
+ (_Key, _Red, {AccSeparator,0,AccLimit}) when AccLimit == 0 ->
+ {stop, {AccSeparator,0,AccLimit}};
+ (_Key, Red, {AccSeparator,0,AccLimit}) when GroupLevel == 0 ->
Json = ?JSON_ENCODE({[{key, null}, {value, Red}]}),
send_chunk(Resp, AccSeparator ++ Json),
- {ok, {",",0,AccCount-1}};
- (Key, Red, {AccSeparator,0,AccCount})
+ {ok, {",",0,AccLimit-1}};
+ (Key, Red, {AccSeparator,0,AccLimit})
when is_integer(GroupLevel)
andalso is_list(Key) ->
Json = ?JSON_ENCODE(
{[{key, lists:sublist(Key, GroupLevel)},{value, Red}]}),
send_chunk(Resp, AccSeparator ++ Json),
- {ok, {",",0,AccCount-1}};
- (Key, Red, {AccSeparator,0,AccCount}) ->
+ {ok, {",",0,AccLimit-1}};
+ (Key, Red, {AccSeparator,0,AccLimit}) ->
Json = ?JSON_ENCODE({[{key, Key}, {value, Red}]}),
send_chunk(Resp, AccSeparator ++ Json),
- {ok, {",",0,AccCount-1}}
+ {ok, {",",0,AccLimit-1}}
end,
{ok, GroupRowsFun, RespFun}.
@@ -241,17 +241,17 @@ parse_view_query(Req, Keys, IsReduce) ->
Msg = io_lib:format("Query parameter \"~s\" not compatible with multi key mode.", [Key]),
throw({query_parse_error, Msg})
end;
- {"count", Value} ->
+ {"limit", Value} ->
case (catch list_to_integer(Value)) of
- Count when is_integer(Count) ->
- if Count < 0 ->
- Msg = io_lib:format("Count must be a positive integer: count=~s", [Value]),
+ Limit when is_integer(Limit) ->
+ if Limit < 0 ->
+ Msg = io_lib:format("Limit must be a positive integer: limit=~s", [Value]),
throw({query_parse_error, Msg});
true ->
- Args#view_query_args{count=Count}
+ Args#view_query_args{limit=Limit}
end;
_Error ->
- Msg = io_lib:format("Bad URL query value, number expected: count=~s", [Value]),
+ Msg = io_lib:format("Bad URL query value, number expected: limit=~s", [Value]),
throw({query_parse_error, Msg})
end;
{"update", "false"} ->
@@ -276,8 +276,8 @@ parse_view_query(Req, Keys, IsReduce) ->
Args;
{"skip", Value} ->
case (catch list_to_integer(Value)) of
- Count when is_integer(Count) ->
- Args#view_query_args{skip=Count};
+ Limit when is_integer(Limit) ->
+ Args#view_query_args{skip=Limit};
_Error ->
Msg = lists:flatten(io_lib:format(
"Bad URL query value, number expected: skip=~s", [Value])),
@@ -381,17 +381,17 @@ make_view_fold_fun(Req, QueryArgs, Db, TotalViewCount, ReduceCountFun) ->
end,
fun({{Key, DocId}, Value}, OffsetReds,
- {AccCount, AccSkip, Resp, AccRevRows}) ->
+ {AccLimit, AccSkip, Resp, AccRevRows}) ->
PassedEnd = PassedEndFun(Key, DocId),
- case {PassedEnd, AccCount, AccSkip, Resp} of
+ case {PassedEnd, AccLimit, AccSkip, Resp} of
{true, _, _, _} ->
% The stop key has been passed, stop looping.
- {stop, {AccCount, AccSkip, Resp, AccRevRows}};
+ {stop, {AccLimit, AccSkip, Resp, AccRevRows}};
{_, 0, _, _} ->
- % we've done "count" rows, stop foldling
+ % we've done "limit" rows, stop foldling
{stop, {0, 0, Resp, AccRevRows}};
{_, _, AccSkip, _} when AccSkip > 0 ->
- {ok, {AccCount, AccSkip - 1, Resp, AccRevRows}};
+ {ok, {AccLimit, AccSkip - 1, Resp, AccRevRows}};
{_, _, _, undefined} ->
{ok, Resp2} = start_json_response(Req, 200),
Offset = ReduceCountFun(OffsetReds),
@@ -399,11 +399,11 @@ make_view_fold_fun(Req, QueryArgs, Db, TotalViewCount, ReduceCountFun) ->
[TotalViewCount, Offset]),
JsonObj = view_row_obj(Db, {{Key, DocId}, Value}, IncludeDocs),
send_chunk(Resp2, JsonBegin ++ ?JSON_ENCODE(JsonObj)),
- {ok, {AccCount - 1, 0, Resp2, AccRevRows}};
- {_, AccCount, _, Resp} when (AccCount > 0) ->
+ {ok, {AccLimit - 1, 0, Resp2, AccRevRows}};
+ {_, AccLimit, _, Resp} when (AccLimit > 0) ->
JsonObj = view_row_obj(Db, {{Key, DocId}, Value}, IncludeDocs),
send_chunk(Resp, ",\r\n" ++ ?JSON_ENCODE(JsonObj)),
- {ok, {AccCount - 1, 0, Resp, AccRevRows}}
+ {ok, {AccLimit - 1, 0, Resp, AccRevRows}}
end
end.
diff --git a/src/couchdb/couch_rep.erl b/src/couchdb/couch_rep.erl
index 99cca65d..881525f0 100644
--- a/src/couchdb/couch_rep.erl
+++ b/src/couchdb/couch_rep.erl
@@ -277,7 +277,7 @@ close_db(Db)->
couch_db:close(Db).
get_doc_info_list(#http_db{uri=DbUrl, headers=Headers}, StartSeq) ->
- Url = DbUrl ++ "_all_docs_by_seq?count=100&startkey="
+ Url = DbUrl ++ "_all_docs_by_seq?limit=100&startkey="
++ integer_to_list(StartSeq),
{Results} = do_http_request(Url, get, Headers),
lists:map(fun({RowInfoList}) ->