1 /* -*- MODE: C++; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
3 * Copyright 2010 Couchbase, Inc
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 // Usage: (to run just a single test case)
19 // make engine_tests EP_TEST_NUM=3
27 #include <condition_variable>
38 #include <unordered_map>
39 #include <unordered_set>
43 #include "ep_test_apis.h"
45 #include "ep_testsuite_common.h"
47 #include <libcouchstore/couch_db.h>
48 #include <memcached/engine.h>
49 #include <memcached/engine_testapp.h>
50 #include <platform/cb_malloc.h>
51 #include <platform/dirutils.h>
52 #include <JSON_checker.h>
53 #include <memcached/types.h>
54 #include <string_utilities.h>
55 #include <xattr/blob.h>
56 #include <xattr/utils.h>
59 /* /usr/include/netinet/in.h defines macros from ntohs() to _bswap_nn to
60 * optimize the conversion functions, but the prototypes generate warnings
61 * from gcc. The conversion methods isn't the bottleneck for my app, so
62 * just remove the warnings by undef'ing the optimization ..
70 // ptr_fun don't like the extern "C" thing for unlock cookie.. cast it
72 typedef void (*UNLOCK_COOKIE_T)(const void *cookie);
74 #define IMMEDIATE_MEM_STATS ";mem_merge_count_threshold=1"
75 #define MULTI_DISPATCHER_CONFIG \
76 "ht_size=129;ht_locks=3;chk_remover_stime=1;chk_period=60"
// Convenience constructor for the per-thread test context: binds the engine
// handle pair and an optional test-specific integer payload (default 0).
// NOTE(review): the enclosing struct declaration is not visible in this chunk.
80 ThreadData(ENGINE_HANDLE *eh, ENGINE_HANDLE_V1 *ehv1,
81 int e=0) : h(eh), h1(ehv1), extra(e) {}
// Bucket flavor under test: EP (persistent) vs Ephemeral (memory-only).
// Used by check_observe_seqno, where persisted-seqno semantics differ.
87 enum class BucketType { EP, Ephemeral };
// Validates the binary payload of an OBSERVE_SEQNO response (held in the
// global last_body). Expected layout (network byte order):
//   [0]      format type
//   [1..2]   vbucket id
//   [3..10]  vbucket uuid
//   [11..18] last persisted seqno
//   [19..26] current seqno
//   [27..34] failover vbuuid  (failover format only)
//   [35..42] failover seqno   (failover format only)
// NOTE(review): this listing has gaps — e.g. the declaration of recv_vb_id,
// the `case BucketType::EP:` label and the break statements are not visible.
89 static void check_observe_seqno(bool failover,
90 BucketType bucket_type,
94 uint64_t last_persisted_seqno,
95 uint64_t current_seqno,
96 uint64_t failover_vbuuid = 0,
97 uint64_t failover_seqno = 0) {
98 uint8_t recv_format_type;
100 uint64_t recv_vb_uuid;
101 uint64_t recv_last_persisted_seqno;
102 uint64_t recv_current_seqno;
103 uint64_t recv_failover_vbuuid;
104 uint64_t recv_failover_seqno;
// Decode each fixed-offset field and compare against the expected value.
106 memcpy(&recv_format_type, last_body.data(), sizeof(uint8_t));
107 checkeq(format_type, recv_format_type, "Wrong format type in result");
108 memcpy(&recv_vb_id, last_body.data() + 1, sizeof(uint16_t));
109 checkeq(vb_id, ntohs(recv_vb_id), "Wrong vbucket id in result");
110 memcpy(&recv_vb_uuid, last_body.data() + 3, sizeof(uint64_t));
111 checkeq(vb_uuid, ntohll(recv_vb_uuid), "Wrong vbucket uuid in result");
112 memcpy(&recv_last_persisted_seqno, last_body.data() + 11, sizeof(uint64_t));
// Persisted-seqno expectation depends on the bucket flavor.
114 switch (bucket_type) {
116 // Should get the "real" persisted seqno:
117 checkeq(last_persisted_seqno,
118 ntohll(recv_last_persisted_seqno),
119 "Wrong persisted seqno in result (EP)");
121 case BucketType::Ephemeral:
122 // For ephemeral, this should always be zero, as there is no
125 ntohll(recv_last_persisted_seqno),
126 "Wrong persisted seqno in result (Ephemeral)");
130 memcpy(&recv_current_seqno, last_body.data() + 19, sizeof(uint64_t));
131 checkeq(current_seqno, ntohll(recv_current_seqno), "Wrong current seqno in result");
// Failover fields — presumably only decoded when `failover` is true; the
// guarding `if` is not visible in this listing (TODO confirm).
134 memcpy(&recv_failover_vbuuid, last_body.data() + 27, sizeof(uint64_t));
135 checkeq(failover_vbuuid, ntohll(recv_failover_vbuuid),
136 "Wrong failover uuid in result");
137 memcpy(&recv_failover_seqno, last_body.data() + 35, sizeof(uint64_t));
138 checkeq(failover_seqno, ntohll(recv_failover_seqno),
139 "Wrong failover seqno in result");
// Stores a key, evicts it, then REPLACEs it. Under full eviction the replace
// must trigger a background fetch to find the existing item; the test
// compares ep_bg_fetched before/after the replace.
// NOTE(review): the declaration of `i` (item*) and the adjustment of
// numBgFetched inside the full_eviction branch are not visible in this chunk.
143 static enum test_result test_replace_with_eviction(ENGINE_HANDLE *h,
144 ENGINE_HANDLE_V1 *h1) {
146 checkeq(ENGINE_SUCCESS,
147 store(h, h1, NULL, OPERATION_SET,"key", "somevalue", &i),
148 "Failed to set value.");
149 h1->release(h, NULL, i);
150 wait_for_flusher_to_settle(h, h1);
151 evict_key(h, h1, "key");
// Snapshot bg-fetch count before the replace so the delta can be checked.
152 int numBgFetched = get_int_stat(h, h1, "ep_bg_fetched");
154 checkeq(ENGINE_SUCCESS,
155 store(h, h1, NULL, OPERATION_REPLACE,"key", "somevalue1", &i),
156 "Failed to replace existing value.");
158 checkeq(ENGINE_SUCCESS,
159 h1->get_stats(h, NULL, NULL, 0, add_stats),
160 "Failed to get stats.");
161 std::string eviction_policy = vals.find("ep_item_eviction_policy")->second;
162 if (eviction_policy == "full_eviction") {
166 checkeq(numBgFetched,
167 get_int_stat(h, h1, "ep_bg_fetched"),
168 "Bg fetched value didn't match");
170 h1->release(h, NULL, i);
171 check_key_value(h, h1, "key", "somevalue1", 10);
// Shared helper: issues mutation `op` against vbucket 1 (which is not owned
// by this node) and expects ENGINE_NOT_MY_VBUCKET plus a bump of the
// ep_num_not_my_vbuckets stat.
// NOTE(review): declarations of `i` and `cas` and the final return are not
// visible in this chunk.
175 static enum test_result test_wrong_vb_mutation(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1,
176 ENGINE_STORE_OPERATION op) {
178 int numNotMyVBucket = get_int_stat(h, h1, "ep_num_not_my_vbuckets");
180 if (op == OPERATION_ADD) {
181 // Add operation with cas != 0 doesn't make sense
184 checkeq(ENGINE_NOT_MY_VBUCKET,
185 store(h, h1, NULL, op, "key", "somevalue", &i, cas, 1),
186 "Expected not_my_vbucket");
187 h1->release(h, NULL, i);
188 wait_for_stat_change(h, h1, "ep_num_not_my_vbuckets", numNotMyVBucket);
// Shared helper: mutating a vbucket in `pending` state should block, so with
// ewouldblock-handling disabled on the cookie the store must return
// ENGINE_EWOULDBLOCK.
192 static enum test_result test_pending_vb_mutation(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1,
193 ENGINE_STORE_OPERATION op) {
194 const void *cookie = testHarness.create_cookie();
// Disable automatic EWOULDBLOCK retries so the raw status is observable.
195 testHarness.set_ewouldblock_handling(cookie, false);
197 check(set_vbucket_state(h, h1, 1, vbucket_state_pending),
198 "Failed to set vbucket state.");
199 check(verify_vbucket_state(h, h1, 1, vbucket_state_pending),
200 "Bucket state was not set to pending.");
202 if (op == OPERATION_ADD) {
203 // Add operation with cas != 0 doesn't make sense..
206 checkeq(ENGINE_EWOULDBLOCK,
207 store(h, h1, cookie, op, "key", "somevalue", &i, cas, 1),
208 "Expected ewouldblock");
209 h1->release(h, NULL, i);
210 testHarness.destroy_cookie(cookie);
// Shared helper: mutating a `replica` vbucket from a normal client must fail
// with ENGINE_NOT_MY_VBUCKET and bump ep_num_not_my_vbuckets.
214 static enum test_result test_replica_vb_mutation(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1,
215 ENGINE_STORE_OPERATION op) {
217 check(set_vbucket_state(h, h1, 1, vbucket_state_replica),
218 "Failed to set vbucket state.");
219 check(verify_vbucket_state(h, h1, 1, vbucket_state_replica),
220 "Bucket state was not set to replica.");
221 int numNotMyVBucket = get_int_stat(h, h1, "ep_num_not_my_vbuckets");
224 if (op == OPERATION_ADD) {
225 // performing add with a CAS != 0 doesn't make sense...
228 checkeq(ENGINE_NOT_MY_VBUCKET,
229 store(h, h1, NULL, op, "key", "somevalue", &i, cas, 1),
230 "Expected not my vbucket");
231 wait_for_stat_change(h, h1, "ep_num_not_my_vbuckets", numNotMyVBucket);
232 h1->release(h, NULL, i);
237 // ----------------------------------------------------------------------
238 // The actual tests are below.
239 // ----------------------------------------------------------------------
// Helper: loads `numItems2Load` keys with persistence stopped, restarts the
// engine (forced or graceful per `shutdownForce`), and returns curr_items
// after warmup so callers can assert how many items survived.
// NOTE(review): several lines are missing from this listing (early return for
// non-warmup buckets, keys.push_back, the checkeq closing for curr_items).
242 static int checkCurrItemsAfterShutdown(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1,
243 int numItems2Load, bool shutdownForce) {
244 if (!isWarmupEnabled(h, h1)) {
// Generate the deterministic key names to load.
248 std::vector<std::string> keys;
249 for (int index = 0; index < numItems2Load; ++index) {
251 s << "keys_2_load-" << index;
252 std::string key(s.str());
// Nothing should be persisted or resident before the load begins.
256 checkeq(0, get_int_stat(h, h1, "ep_total_persisted"),
257 "Expected ep_total_persisted equals 0");
258 checkeq(0, get_int_stat(h, h1, "curr_items"),
259 "Expected curr_items equals 0");
261 // stop flusher before loading new items
262 protocol_binary_request_header *pkt = createPacket(PROTOCOL_BINARY_CMD_STOP_PERSISTENCE);
263 checkeq(ENGINE_SUCCESS,
264 h1->unknown_command(h, NULL, pkt, add_response, testHarness.doc_namespace),
265 "CMD_STOP_PERSISTENCE failed!");
266 checkeq(PROTOCOL_BINARY_RESPONSE_SUCCESS,
268 "Failed to stop persistence!");
// Load all items while persistence is stopped.
271 std::vector<std::string>::iterator itr;
272 for (itr = keys.begin(); itr != keys.end(); ++itr) {
274 checkeq(ENGINE_SUCCESS,
275 store(h, h1, NULL, OPERATION_SET, itr->c_str(), "oracle", &i, 0, 0),
276 "Failed to store a value");
277 h1->release(h, NULL, i);
// With the flusher stopped nothing should have been persisted, but all
// items must be resident in memory.
280 checkeq(0, get_int_stat(h, h1, "ep_total_persisted"),
281 "Incorrect ep_total_persisted, expected 0");
282 std::stringstream ss;
283 ss << "Incorrect curr_items, expected " << numItems2Load;
284 std::string errmsg(ss.str());
285 checkeq(numItems2Load, get_int_stat(h, h1, "curr_items"),
288 // resume flusher before shutdown + warmup
289 pkt = createPacket(PROTOCOL_BINARY_CMD_START_PERSISTENCE);
290 checkeq(ENGINE_SUCCESS, h1->unknown_command(h, NULL, pkt, add_response, testHarness.doc_namespace),
291 "CMD_START_PERSISTENCE failed!");
292 checkeq(PROTOCOL_BINARY_RESPONSE_SUCCESS, last_status.load(),
293 "Failed to start persistence!");
296 // shutdown engine force and restart
297 testHarness.reload_engine(&h, &h1,
298 testHarness.engine_path,
299 testHarness.get_current_testcase()->cfg,
300 true, shutdownForce);
301 wait_for_warmup_complete(h, h1);
302 return get_int_stat(h, h1, "curr_items");
// With a forced shutdown the flusher may not finish, so after warmup the
// item count can be anywhere up to the number loaded.
305 static enum test_result test_flush_shutdown_force(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
306 if (!isWarmupEnabled(h, h1)) {
310 int numItems2load = 3000;
311 bool shutdownForce = true;
312 int currItems = checkCurrItemsAfterShutdown(h, h1, numItems2load, shutdownForce);
313 check (currItems <= numItems2load,
314 "Number of curr items should be <= 3000, unless previous "
315 "shutdown force had to wait for the flusher");
// A graceful (non-forced) shutdown must wait for the flusher, so every loaded
// item must be present after warmup.
319 static enum test_result test_flush_shutdown_noforce(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
320 if (!isWarmupEnabled(h, h1)) {
324 int numItems2load = 3000;
325 bool shutdownForce = false;
326 int currItems = checkCurrItemsAfterShutdown(h, h1, numItems2load, shutdownForce);
327 check (currItems == numItems2load,
328 "Number of curr items should be equal to 3000, unless previous "
329 "shutdown did not wait for the flusher");
// Verifies flush semantics across restarts: a persisted key must disappear
// after flush (even after further restarts), while keys written post-flush
// survive as normal.
333 static enum test_result test_flush_restart(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
334 if (!isWarmupEnabled(h, h1)) {
339 // First try to delete something we know to not be there.
340 checkeq(ENGINE_KEY_ENOENT, del(h, h1, "key", 0, 0),
341 "Failed to fail initial delete.");
342 checkeq(ENGINE_SUCCESS,
343 store(h, h1, NULL, OPERATION_SET, "key", "somevalue", &i),
345 h1->release(h, NULL, i);
346 check_key_value(h, h1, "key", "somevalue", 9);
348 // Restart once to ensure written to disk.
349 testHarness.reload_engine(&h, &h1,
350 testHarness.engine_path,
351 testHarness.get_current_testcase()->cfg,
353 wait_for_warmup_complete(h, h1);
355 // Read value from disk.
356 check_key_value(h, h1, "key", "somevalue", 9);
// Flush requires degraded mode to be enabled around the call.
359 set_degraded_mode(h, h1, NULL, true);
360 checkeq(ENGINE_SUCCESS, h1->flush(h, NULL),
362 set_degraded_mode(h, h1, NULL, false);
364 checkeq(ENGINE_SUCCESS,
365 store(h, h1, NULL, OPERATION_SET, "key2", "somevalue", &i),
366 "Failed post-flush set.");
367 h1->release(h, NULL, i);
368 check_key_value(h, h1, "key2", "somevalue", 9);
370 // Restart again, ensure written to disk.
371 testHarness.reload_engine(&h, &h1,
372 testHarness.engine_path,
373 testHarness.get_current_testcase()->cfg,
375 wait_for_warmup_complete(h, h1);
377 checkeq(ENGINE_SUCCESS,
378 store(h, h1, NULL, OPERATION_SET, "key3", "somevalue", &i),
379 "Failed post-flush, post-restart set.");
380 h1->release(h, NULL, i);
381 check_key_value(h, h1, "key3", "somevalue", 9);
383 // Read value again, should not be there.
384 checkeq(ENGINE_KEY_ENOENT, verify_key(h, h1, "key"),
385 "Expected missing key");
// Verifies that the last snapshot range is persisted correctly through a
// vb-state change to replica, a forced vb-state persist, and an engine
// restart: both snap_start and snap_end must equal the final high seqno.
389 static enum test_result test_shutdown_snapshot_range(ENGINE_HANDLE *h,
390 ENGINE_HANDLE_V1 *h1) {
391 if (!isWarmupEnabled(h, h1)) {
395 const int num_items = 100;
396 for (int j = 0; j < num_items; ++j) {
398 std::stringstream ss;
400 checkeq(ENGINE_SUCCESS,
401 store(h, h1, NULL, OPERATION_SET, ss.str().c_str(), "data", &i),
402 "Failed to store a value");
403 h1->release(h, NULL, i);
406 wait_for_flusher_to_settle(h, h1);
407 int end = get_int_stat(h, h1, "vb_0:high_seqno", "vbucket-seqno");
409 /* change vb state to replica before restarting (as it happens in graceful
411 check(set_vbucket_state(h, h1, 0, vbucket_state_replica),
412 "Failed set vbucket 0 to replica state.");
414 /* trigger persist vb state task */
415 check(set_param(h, h1, protocol_binary_engine_param_flush,
416 "vb_state_persist_run", "0"),
417 "Failed to trigger vb state persist");
419 /* restart the engine */
420 testHarness.reload_engine(&h, &h1,
421 testHarness.engine_path,
422 testHarness.get_current_testcase()->cfg,
424 wait_for_warmup_complete(h, h1);
426 /* Check if snapshot range is persisted correctly */
427 checkeq(end, get_int_stat(h, h1, "vb_0:last_persisted_snap_start",
429 "Wrong snapshot start persisted");
430 checkeq(end, get_int_stat(h, h1, "vb_0:last_persisted_snap_end",
432 "Wrong snapshot end persisted");
// Multi-vbucket variant of the flush/restart test: stores into vb 0 and vb 2,
// flushes, restarts, and expects both the key and vbucket 2 itself to be gone.
437 static enum test_result test_flush_multiv_restart(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
438 if (!isWarmupEnabled(h, h1)) {
443 check(set_vbucket_state(h, h1, 2, vbucket_state_active),
444 "Failed to set vbucket state.");
445 checkeq(ENGINE_SUCCESS,
446 store(h, h1, NULL, OPERATION_SET, "key", "somevalue", &i),
448 h1->release(h, NULL, i);
449 checkeq(ENGINE_SUCCESS,
450 store(h, h1, NULL, OPERATION_SET, "key2", "somevalue", &i, 0, 2),
451 "Failed set in vb2.");
452 h1->release(h, NULL, i);
454 // Restart once to ensure written to disk.
455 testHarness.reload_engine(&h, &h1,
456 testHarness.engine_path,
457 testHarness.get_current_testcase()->cfg,
459 wait_for_warmup_complete(h, h1);
461 // Read value from disk.
462 check_key_value(h, h1, "key", "somevalue", 9);
// Flush must be wrapped in degraded mode.
465 set_degraded_mode(h, h1, NULL, true);
466 checkeq(ENGINE_SUCCESS, h1->flush(h, NULL),
468 set_degraded_mode(h, h1, NULL, false);
470 // Restart again, ensure written to disk.
471 testHarness.reload_engine(&h, &h1,
472 testHarness.engine_path,
473 testHarness.get_current_testcase()->cfg,
475 wait_for_warmup_complete(h, h1);
477 // Read value again, should not be there.
478 checkeq(ENGINE_KEY_ENOENT, verify_key(h, h1, "key"), "Expected missing key");
479 check(verify_vbucket_missing(h, h1, 2), "Bucket 2 came back.");
// Basic warmup test: a stored key/value must survive an engine restart.
483 static enum test_result test_restart(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
484 if (!isWarmupEnabled(h, h1)) {
489 static const char val[] = "somevalue";
490 ENGINE_ERROR_CODE ret = store(h, h1, NULL, OPERATION_SET, "key", val, &i);
491 checkeq(ENGINE_SUCCESS, ret, "Failed set.");
492 h1->release(h, NULL, i);
494 testHarness.reload_engine(&h, &h1,
495 testHarness.engine_path,
496 testHarness.get_current_testcase()->cfg,
498 wait_for_warmup_complete(h, h1);
499 check_key_value(h, h1, "key", val, strlen(val));
// Verifies TAP session stats survive a restart: after reconnecting the same
// named TAP client, backfill_completed should report "true" (i.e. no fresh
// backfill is expected on restart).
503 static enum test_result test_restart_session_stats(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
504 const void* cookie = createTapConn(h, h1, "tap_client_thread");
505 testHarness.unlock_cookie(cookie);
506 testHarness.destroy_cookie(cookie);
508 testHarness.reload_engine(&h, &h1,
509 testHarness.engine_path,
510 testHarness.get_current_testcase()->cfg,
512 wait_for_warmup_complete(h, h1);
513 cookie = createTapConn(h, h1, "tap_client_thread");
515 checkeq(ENGINE_SUCCESS, h1->get_stats(h, NULL, "tap", 3, add_stats),
516 "Failed to get stats.");
517 std::string val = vals["eq_tapq:tap_client_thread:backfill_completed"];
518 checkeq(0, strcmp(val.c_str(), "true"), "Don't expect the backfill upon restart");
519 testHarness.unlock_cookie(cookie);
520 testHarness.destroy_cookie(cookie);
// Stores keys containing multi-byte UTF-8 text in several scripts (Chinese,
// Korean, Russian, Japanese, Devanagari, Portuguese, Arabic), verifies each
// value, and — when warmup is enabled — restarts the engine and re-verifies
// all of them to prove non-ASCII keys persist correctly.
524 static enum test_result test_specialKeys(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
526 ENGINE_ERROR_CODE ret;
528 // Simplified Chinese "Couchbase"
529 static const char key0[] = "沙发数据库";
530 static const char val0[] = "some Chinese value";
531 check((ret = store(h, h1, NULL, OPERATION_SET, key0, val0, &i)) == ENGINE_SUCCESS,
532 "Failed set Chinese key");
533 check_key_value(h, h1, key0, val0, strlen(val0));
534 h1->release(h, NULL, i);
535 // Traditional Chinese "Couchbase"
536 static const char key1[] = "沙發數據庫";
537 static const char val1[] = "some Traditional Chinese value";
538 check((ret = store(h, h1, NULL, OPERATION_SET, key1, val1, &i)) == ENGINE_SUCCESS,
539 "Failed set Traditional Chinese key");
540 h1->release(h, NULL, i);
541 // Korean "couch potato"
542 static const char key2[] = "쇼파감자";
543 static const char val2[] = "some Korean value";
544 check((ret = store(h, h1, NULL, OPERATION_SET, key2, val2, &i)) == ENGINE_SUCCESS,
545 "Failed set Korean key");
546 h1->release(h, NULL, i);
547 // Russian "couch potato"
548 static const char key3[] = "лодырь, лентяй";
549 static const char val3[] = "some Russian value";
550 check((ret = store(h, h1, NULL, OPERATION_SET, key3, val3, &i)) == ENGINE_SUCCESS,
551 "Failed set Russian key");
552 h1->release(h, NULL, i);
553 // Japanese "couch potato"
554 static const char key4[] = "カウチポテト";
555 static const char val4[] = "some Japanese value";
556 check((ret = store(h, h1, NULL, OPERATION_SET, key4, val4, &i)) == ENGINE_SUCCESS,
557 "Failed set Japanese key");
558 h1->release(h, NULL, i);
559 // Indian char key, and no idea what it is
560 static const char key5[] = "हरियानवी";
561 static const char val5[] = "some Indian value";
562 check((ret = store(h, h1, NULL, OPERATION_SET, key5, val5, &i)) == ENGINE_SUCCESS,
563 "Failed set Indian key");
564 h1->release(h, NULL, i);
565 // Portuguese translation "couch potato"
566 static const char key6[] = "sedentário";
567 static const char val6[] = "some Portuguese value";
568 check((ret = store(h, h1, NULL, OPERATION_SET, key6, val6, &i)) == ENGINE_SUCCESS,
569 "Failed set Portuguese key");
570 h1->release(h, NULL, i);
571 // Arabic translation "couch potato"
572 static const char key7[] = "الحافلةالبطاطة";
573 static const char val7[] = "some Arabic value";
574 check((ret = store(h, h1, NULL, OPERATION_SET, key7, val7, &i)) == ENGINE_SUCCESS,
575 "Failed set Arabic key");
576 h1->release(h, NULL, i);
578 if (isWarmupEnabled(h, h1)) {
579 // Check that after warmup the keys are still present.
580 testHarness.reload_engine(&h, &h1,
581 testHarness.engine_path,
582 testHarness.get_current_testcase()->cfg,
584 wait_for_warmup_complete(h, h1);
585 check_key_value(h, h1, key0, val0, strlen(val0));
586 check_key_value(h, h1, key1, val1, strlen(val1));
587 check_key_value(h, h1, key2, val2, strlen(val2));
588 check_key_value(h, h1, key3, val3, strlen(val3));
589 check_key_value(h, h1, key4, val4, strlen(val4));
590 check_key_value(h, h1, key5, val5, strlen(val5));
591 check_key_value(h, h1, key6, val6, strlen(val6));
592 check_key_value(h, h1, key7, val7, strlen(val7));
// Stores keys containing raw binary bytes (values above 0x7F, UTF-8 and
// UTF-16 byte-order marks), verifies them, and — when warmup is enabled —
// restarts the engine and re-verifies, proving binary keys persist intact.
597 static enum test_result test_binKeys(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
599 ENGINE_ERROR_CODE ret;
601 // binary key with char values beyond 0x7F
602 static const char key0[] = "\xe0\xed\xf1\x6f\x7f\xf8\xfa";
603 static const char val0[] = "some value val8";
604 check((ret = store(h, h1, NULL, OPERATION_SET, key0, val0, &i)) == ENGINE_SUCCESS,
605 "Failed set binary key0");
606 check_key_value(h, h1, key0, val0, strlen(val0));
607 h1->release(h, NULL, i);
608 // binary keys with char values beyond 0x7F
609 static const char key1[] = "\xf1\xfd\xfe\xff\xf0\xf8\xef";
610 static const char val1[] = "some value val9";
611 check((ret = store(h, h1, NULL, OPERATION_SET, key1, val1, &i)) == ENGINE_SUCCESS,
612 "Failed set binary key1");
613 check_key_value(h, h1, key1, val1, strlen(val1));
614 h1->release(h, NULL, i);
615 // binary keys with special utf-8 BOM (Byte Order Mark) values 0xBB 0xBF 0xEF
616 static const char key2[] = "\xff\xfe\xbb\xbf\xef";
617 static const char val2[] = "some utf-8 bom value";
618 check((ret = store(h, h1, NULL, OPERATION_SET, key2, val2, &i)) == ENGINE_SUCCESS,
619 "Failed set binary utf-8 bom key");
620 check_key_value(h, h1, key2, val2, strlen(val2));
621 h1->release(h, NULL, i);
622 // binary keys with special utf-16BE BOM values "U+FEFF"
623 static const char key3[] = "U+\xfe\xff\xefU+\xff\xfe";
624 static const char val3[] = "some utf-16 bom value";
625 check((ret = store(h, h1, NULL, OPERATION_SET, key3, val3, &i)) == ENGINE_SUCCESS,
626 "Failed set binary utf-16 bom key");
627 check_key_value(h, h1, key3, val3, strlen(val3));
628 h1->release(h, NULL, i);
630 if (isWarmupEnabled(h, h1)) {
// After restart all binary keys must still be readable.
631 testHarness.reload_engine(&h, &h1,
632 testHarness.engine_path,
633 testHarness.get_current_testcase()->cfg,
635 wait_for_warmup_complete(h, h1);
636 check_key_value(h, h1, key0, val0, strlen(val0));
637 check_key_value(h, h1, key1, val1, strlen(val1));
638 check_key_value(h, h1, key2, val2, strlen(val2));
639 check_key_value(h, h1, key3, val3, strlen(val3));
// Stores a value containing an embedded NUL byte (sizeof > strlen) and
// verifies the full binary payload survives a restart/warmup.
644 static enum test_result test_restart_bin_val(ENGINE_HANDLE *h,
645 ENGINE_HANDLE_V1 *h1) {
646 if (!isWarmupEnabled(h, h1)) {
650 char binaryData[] = "abcdefg\0gfedcba";
// Sanity: the embedded NUL means strlen < sizeof for this buffer.
651 cb_assert(sizeof(binaryData) != strlen(binaryData));
654 checkeq(ENGINE_SUCCESS,
655 storeCasVb11(h, h1, NULL, OPERATION_SET, "key",
656 binaryData, sizeof(binaryData), 82758, &i, 0, 0),
658 h1->release(h, NULL, i);
660 testHarness.reload_engine(&h, &h1,
661 testHarness.engine_path,
662 testHarness.get_current_testcase()->cfg,
664 wait_for_warmup_complete(h, h1);
666 check_key_value(h, h1, "key", binaryData, sizeof(binaryData));
// A GET against an unowned vbucket (1) must return NOT_MY_VBUCKET and bump
// the ep_num_not_my_vbuckets stat.
670 static enum test_result test_wrong_vb_get(ENGINE_HANDLE *h,
671 ENGINE_HANDLE_V1 *h1) {
672 int numNotMyVBucket = get_int_stat(h, h1, "ep_num_not_my_vbuckets");
673 checkeq(ENGINE_NOT_MY_VBUCKET, verify_key(h, h1, "key", 1),
674 "Expected wrong bucket.");
675 wait_for_stat_change(h, h1, "ep_num_not_my_vbuckets", numNotMyVBucket);
// A GET against a vbucket in `pending` state should block; with ewouldblock
// handling disabled, the engine must return ENGINE_EWOULDBLOCK.
679 static enum test_result test_vb_get_pending(ENGINE_HANDLE *h,
680 ENGINE_HANDLE_V1 *h1) {
681 check(set_vbucket_state(h, h1, 1, vbucket_state_pending),
682 "Failed to set vbucket state.");
683 const void *cookie = testHarness.create_cookie();
// Disable automatic retry so the raw EWOULDBLOCK status is observable.
684 testHarness.set_ewouldblock_handling(cookie, false);
687 checkeq(ENGINE_EWOULDBLOCK,
688 get(h, h1, cookie, &i, "key", 1),
689 "Expected woodblock.");
690 h1->release(h, NULL, i);
692 testHarness.destroy_cookie(cookie);
// A plain GET against a `replica` vbucket must return NOT_MY_VBUCKET and
// bump the ep_num_not_my_vbuckets stat.
696 static enum test_result test_vb_get_replica(ENGINE_HANDLE *h,
697 ENGINE_HANDLE_V1 *h1) {
698 check(set_vbucket_state(h, h1, 1, vbucket_state_replica),
699 "Failed to set vbucket state.");
700 int numNotMyVBucket = get_int_stat(h, h1, "ep_num_not_my_vbuckets");
701 checkeq(ENGINE_NOT_MY_VBUCKET,
702 verify_key(h, h1, "key", 1),
703 "Expected not my bucket.");
704 wait_for_stat_change(h, h1, "ep_num_not_my_vbuckets", numNotMyVBucket);
// Thin wrapper: wrong-vbucket SET variant of test_wrong_vb_mutation.
708 static enum test_result test_wrong_vb_set(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
709 return test_wrong_vb_mutation(h, h1, OPERATION_SET);
// Thin wrapper: wrong-vbucket CAS variant of test_wrong_vb_mutation.
712 static enum test_result test_wrong_vb_cas(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
713 return test_wrong_vb_mutation(h, h1, OPERATION_CAS);
// Thin wrapper: wrong-vbucket ADD variant of test_wrong_vb_mutation.
716 static enum test_result test_wrong_vb_add(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
717 return test_wrong_vb_mutation(h, h1, OPERATION_ADD);
// Thin wrapper: wrong-vbucket REPLACE variant of test_wrong_vb_mutation.
720 static enum test_result test_wrong_vb_replace(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
721 return test_wrong_vb_mutation(h, h1, OPERATION_REPLACE);
// A DELETE against an unowned vbucket (1) must return NOT_MY_VBUCKET and
// bump the ep_num_not_my_vbuckets stat.
724 static enum test_result test_wrong_vb_del(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
725 int numNotMyVBucket = get_int_stat(h, h1, "ep_num_not_my_vbuckets");
726 checkeq(ENGINE_NOT_MY_VBUCKET, del(h, h1, "key", 0, 1),
727 "Expected wrong bucket.");
728 wait_for_stat_change(h, h1, "ep_num_not_my_vbuckets", numNotMyVBucket);
// NOTE(review): the platform #ifdef lines selecting gmtime vs gmtime_r, the
// timeStr buffer declaration and the return statement are not visible here.
732 /* Returns a string in the format "%Y-%m-%d %H:%M:%S" of the specified
735 std::string make_time_string(std::chrono::system_clock::time_point time_point) {
736 time_t tt = std::chrono::system_clock::to_time_t(time_point);
738 // Windows' gmtime() is already thread-safe.
739 struct tm* split = gmtime(&tt);
741 struct tm local_storage;
// POSIX path: gmtime_r with caller-provided storage for thread safety.
742 struct tm* split = gmtime_r(&tt, &local_storage);
745 strftime(timeStr, 20, "%Y-%m-%d %H:%M:%S", split);
// Exercises the expiry-pager configuration knobs: sleep time updates,
// enable/disable, behavior across warmup, the initial-run-time parameter, and
// that updating exp_pager_stime reschedules the task to "now + stime" (checked
// by bracketing the reported task time between two locally computed bounds).
749 static enum test_result test_expiry_pager_settings(ENGINE_HANDLE *h,
750 ENGINE_HANDLE_V1 *h1) {
// Pager starts disabled with the default 3600s sleep time.
752 cb_assert(!get_bool_stat(h, h1, "ep_exp_pager_enabled"));
753 checkeq(3600, get_int_stat(h, h1, "ep_exp_pager_stime"),
754 "Expiry pager sleep time not expected");
755 set_param(h, h1, protocol_binary_engine_param_flush,
756 "exp_pager_stime", "1");
757 checkeq(1, get_int_stat(h, h1, "ep_exp_pager_stime"),
758 "Expiry pager sleep time not updated");
759 cb_assert(!get_bool_stat(h, h1, "ep_exp_pager_enabled"));
// Changing stime alone must not start the pager.
761 checkeq(0, get_int_stat(h, h1, "ep_num_expiry_pager_runs"),
762 "Expiry pager run count is not zero");
764 set_param(h, h1, protocol_binary_engine_param_flush,
765 "exp_pager_enabled", "true");
766 checkeq(1, get_int_stat(h, h1, "ep_exp_pager_stime"),
767 "Expiry pager sleep time not updated");
// Once enabled, the 1s sleep time means a run should happen quickly.
768 wait_for_stat_to_be_gte(h, h1, "ep_num_expiry_pager_runs", 1);
771 testHarness.reload_engine(&h, &h1,
772 testHarness.engine_path,
773 testHarness.get_current_testcase()->cfg,
775 wait_for_warmup_complete(h, h1);
// The enabled flag is not persisted: pager is off again after warmup.
776 cb_assert(!get_bool_stat(h, h1, "ep_exp_pager_enabled"));
778 // Enable expiry pager again
779 set_param(h, h1, protocol_binary_engine_param_flush,
780 "exp_pager_enabled", "true");
782 checkeq(get_int_stat(h, h1, "ep_exp_pager_initial_run_time"), -1,
783 "Task time should be disable upon warmup");
786 // Update exp_pager_initial_run_time and ensure the update is successful
787 set_param(h, h1, protocol_binary_engine_param_flush,
788 "exp_pager_initial_run_time", "3");
789 std::string expected_time = "03:00";
790 std::string str = get_str_stat(h, h1, "ep_expiry_pager_task_time");
// The stat string carries a timestamp; characters [11,16) are "HH:MM".
791 err_msg.assign("Updated time incorrect, expect: " +
792 expected_time + ", actual: " + str.substr(11, 5));
793 checkeq(0, str.substr(11, 5).compare(expected_time), err_msg.c_str());
795 // Update exp_pager_stime by 30 minutes and ensure that the update is successful
796 const std::chrono::minutes update_by{30};
797 std::string targetTaskTime1{make_time_string(std::chrono::system_clock::now() +
800 set_param(h, h1, protocol_binary_engine_param_flush, "exp_pager_stime",
801 std::to_string(update_by.count() * 60).c_str());
802 str = get_str_stat(h, h1, "ep_expiry_pager_task_time");
804 std::string targetTaskTime2{make_time_string(std::chrono::system_clock::now() +
807 // ep_expiry_pager_task_time should fall within the range of
808 // targetTaskTime1 and targetTaskTime2
809 err_msg.assign("Unexpected task time range, expect: " +
810 targetTaskTime1 + " <= " + str + " <= " + targetTaskTime2);
811 check(targetTaskTime1 <= str, err_msg.c_str());
812 check(str <= targetTaskTime2, err_msg.c_str());
// Stores a document with user + system extended attributes and a 10s TTL,
// lets it expire, then verifies that: the deleted item is still fetchable via
// AliveOrDeleted, its rev seqno was bumped by the expiry, and only the system
// xattr ("_sync") survives expiration while user xattrs are stripped.
817 static enum test_result test_expiry_with_xattr(ENGINE_HANDLE* h,
818 ENGINE_HANDLE_V1* h1) {
819 const char* key = "test_expiry";
820 cb::xattr::Blob blob;
// Build a mix of user ("user", "meta") and system ("_sync") xattrs.
823 blob.set(to_const_byte_buffer("user"),
824 to_const_byte_buffer("{\"author\":\"bubba\"}"));
825 blob.set(to_const_byte_buffer("_sync"),
826 to_const_byte_buffer("{\"cas\":\"0xdeadbeefcafefeed\"}"));
827 blob.set(to_const_byte_buffer("meta"),
828 to_const_byte_buffer("{\"content-type\":\"text\"}"));
830 auto xattr_value = blob.finalize();
832 //Now, append user data to the xattrs and store the data
833 std::string value_data("test_expiry_value");
834 std::vector<char> data;
835 std::copy(xattr_value.buf, xattr_value.buf + xattr_value.len,
836 std::back_inserter(data));
837 std::copy(value_data.c_str(), value_data.c_str() + value_data.length(),
838 std::back_inserter(data));
840 const void* cookie = testHarness.create_cookie();
// Store with the XATTR datatype flag and a 10 second expiry.
843 checkeq(ENGINE_SUCCESS,
844 storeCasVb11(h, h1, cookie, OPERATION_SET, key,
845 reinterpret_cast<char*>(data.data()),
846 data.size(), 9258, &itm, 0, 0, 10,
847 PROTOCOL_BINARY_DATATYPE_XATTR),
848 "Failed to store xattr document");
849 h1->release(h, nullptr, itm);
851 if (isPersistentBucket(h, h1)) {
852 wait_for_flusher_to_settle(h, h1);
// Jump past the 10s TTL so the item is logically expired.
855 testHarness.time_travel(11);
858 get_meta(h, h1, "test_expiry", true, GetMetaVersion::V2, cookie),
859 "Get meta command failed");
860 auto prev_revseqno = last_meta.revSeqno;
862 checkeq(static_cast<uint8_t>(PROTOCOL_BINARY_DATATYPE_XATTR),
863 last_datatype.load(), "Datatype is not XATTR");
// The expired (deleted) item must still be retrievable when asking for
// alive-or-deleted documents.
865 checkeq(ENGINE_SUCCESS,
866 get(h, h1, cookie, &itm, key, 0,
867 DocStateFilter::AliveOrDeleted),
868 "Unable to get a deleted item");
871 get_meta(h, h1, "test_expiry", false, GetMetaVersion::V1, cookie),
872 "Get meta command failed");
// Expiry counts as a mutation: rev seqno advances exactly once.
874 checkeq(last_meta.revSeqno, prev_revseqno + 1,
875 "rev seqno must have incremented by 1");
877 /* Retrieve the item info and create a new blob out of the data */
879 checkeq(true, h1->get_item_info(h, cookie, itm, &info),
880 "Unable to retrieve item info");
882 cb::byte_buffer value_buf{static_cast<uint8_t*>(info.value[0].iov_base),
883 info.value[0].iov_len};
885 cb::xattr::Blob new_blob(value_buf);
887 /* Only system extended attributes need to be present at this point.
888 * Thus, check the blob length with the system size.
890 const auto systemsize = new_blob.finalize().len;
892 checkeq(systemsize, new_blob.get_system_size(),
893 "The size of the blob doesn't match the size of system attributes");
895 const std::string& cas_str{"{\"cas\":\"0xdeadbeefcafefeed\"}"};
896 const std::string& sync_str = to_string(blob.get(to_const_byte_buffer("_sync")));
898 checkeq(cas_str, sync_str , "system xattr is invalid");
900 h1->release(h, nullptr, itm);
901 testHarness.destroy_cookie(cookie);
// Stores an item with a 2s TTL, time-travels past it, and verifies the
// on-access expiry path: GET returns ENOENT, ep_expired_access and
// vb_active_expired each go to 1 while ep_expired_pager stays 0; finally
// overwrites the expired-but-unpurged key and checks curr_items is still 1.
906 static enum test_result test_expiry(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
907 const char *key = "test_expiry";
908 const char *data = "some test data here.";
912 ENGINE_ERROR_CODE rv;
// Allocate with a 2 second expiry.
913 rv = allocate(h, h1, NULL, &it, key, strlen(data), 0, 2,
914 PROTOCOL_BINARY_RAW_BYTES, 0);
915 checkeq(ENGINE_SUCCESS, rv, "Allocation failed.");
918 if (!h1->get_item_info(h, NULL, it, &info)) {
921 memcpy(info.value[0].iov_base, data, strlen(data));
924 rv = h1->store(h, NULL, it, &cas, OPERATION_SET, DocumentState::Alive);
925 checkeq(ENGINE_SUCCESS, rv, "Set failed.");
926 check_key_value(h, h1, key, data, strlen(data));
927 h1->release(h, NULL, it);
// Jump past the TTL so the next access observes expiry.
929 testHarness.time_travel(5);
930 checkeq(ENGINE_KEY_ENOENT,
931 get(h, h1, NULL, &it, key, 0),
932 "Item didn't expire");
934 int expired_access = get_int_stat(h, h1, "ep_expired_access");
935 int expired_pager = get_int_stat(h, h1, "ep_expired_pager");
936 int active_expired = get_int_stat(h, h1, "vb_active_expired");
937 checkeq(0, expired_pager, "Expected zero expired item by pager");
938 checkeq(1, expired_access, "Expected an expired item on access");
939 checkeq(1, active_expired, "Expected an expired active item");
940 checkeq(ENGINE_SUCCESS, store(h, h1, NULL, OPERATION_SET, key, data, &it),
942 h1->release(h, NULL, it);
944 // When run under full eviction, the total item stats are set from the
945 // flusher. So we need to wait for it to finish before checking the
946 // total number of items.
947 wait_for_flusher_to_settle(h, h1);
949 std::stringstream ss;
950 ss << "curr_items stat should be still 1 after ";
951 ss << "overwriting the key that was expired, but not purged yet";
952 checkeq(1, get_int_stat(h, h1, "curr_items"), ss.str().c_str());
// Stores an item with a 2s TTL, expires it via access, then restarts the
// engine and verifies warmup does not load the expired item back
// (ep_warmup_value_count must be 0).
957 static enum test_result test_expiry_loader(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
958 if (!isWarmupEnabled(h, h1)) {
961 const char *key = "test_expiry_loader";
962 const char *data = "some test data here.";
966 ENGINE_ERROR_CODE rv;
// Allocate with a 2 second expiry.
967 rv = allocate(h, h1, NULL, &it, key, strlen(data), 0, 2,
968 PROTOCOL_BINARY_RAW_BYTES, 0);
969 checkeq(ENGINE_SUCCESS, rv, "Allocation failed.");
972 if (!h1->get_item_info(h, NULL, it, &info)) {
975 memcpy(info.value[0].iov_base, data, strlen(data));
978 rv = h1->store(h, NULL, it, &cas, OPERATION_SET, DocumentState::Alive);
979 checkeq(ENGINE_SUCCESS, rv, "Set failed.");
980 check_key_value(h, h1, key, data, strlen(data));
981 h1->release(h, NULL, it);
// Jump past the TTL, then access to trigger on-access expiry.
983 testHarness.time_travel(3);
985 checkeq(ENGINE_KEY_ENOENT,
986 get(h, h1, NULL, &it, key, 0),
987 "Item didn't expire");
989 // Restart the engine to ensure the above expired item is not loaded
990 testHarness.reload_engine(&h, &h1,
991 testHarness.engine_path,
992 testHarness.get_current_testcase()->cfg,
994 wait_for_warmup_complete(h, h1);
995 cb_assert(0 == get_int_stat(h, h1, "ep_warmup_value_count", "warmup"));
// With the expiry pager disabled, stores 50 items with a 10s TTL, time-travels
// past expiry, and runs vbucket compaction; all 50 expirations must be
// attributed to the compactor (ep_expired_compactor == 50).
1000 static enum test_result test_expiration_on_compaction(ENGINE_HANDLE *h,
1001 ENGINE_HANDLE_V1 *h1) {
// Disable the pager so compaction is the only expiry mechanism in play.
1002 if (get_bool_stat(h, h1, "ep_exp_pager_enabled")) {
1003 set_param(h, h1, protocol_binary_engine_param_flush,
1004 "exp_pager_enabled", "false");
1007 checkeq(1, get_int_stat(h, h1, "vb_0:persistence:num_visits",
1008 "checkpoint"), "Cursor moved before item load");
1010 for (int i = 0; i < 50; i++) {
1012 std::stringstream ss;
// Each item is stored with a 10 second expiry.
1014 checkeq(ENGINE_SUCCESS,
1015 store(h, h1, NULL, OPERATION_SET, ss.str().c_str(),
1016 "somevalue", &itm, 0, 0, 10,
1017 PROTOCOL_BINARY_RAW_BYTES),
1019 h1->release(h, NULL, itm);
1022 wait_for_flusher_to_settle(h, h1);
1023 checkeq(50, get_int_stat(h, h1, "curr_items"),
1024 "Unexpected number of items on database");
1025 check(1 < get_int_stat(h, h1, "vb_0:persistence:num_visits", "checkpoint"),
1026 "Cursor not moved even after flusher runs");
// Jump past the 10s TTL before compacting.
1028 testHarness.time_travel(15);
1030 // Compaction on VBucket
1031 compact_db(h, h1, 0, 0, 0, 0, 0);
1032 wait_for_stat_to_be(h, h1, "ep_pending_compactions", 0);
1034 checkeq(50, get_int_stat(h, h1, "ep_expired_compactor"),
1035 "Unexpected expirations by compactor");
// Verifies that after a restart the expiry pager is re-enabled by warmup and
// expires an item whose TTL elapsed while the pager was disabled. Also
// documents why we must wait for the flusher before checking curr_items
// (expiry is asynchronous; see the long comment below).
// NOTE(review): excerpt has gaps (it/info/cas declarations, early-return
// body, trailing return are in missing lines).
1040 static enum test_result test_expiration_on_warmup(ENGINE_HANDLE *h,
1041                                                   ENGINE_HANDLE_V1 *h1) {
1042     if (!isWarmupEnabled(h, h1)) {
// Turn the pager off so the item survives in memory until restart.
1046     set_param(h, h1, protocol_binary_engine_param_flush,
1047               "exp_pager_enabled", "false");
1048     int pager_runs = get_int_stat(h, h1, "ep_num_expiry_pager_runs");
1050     const char *key = "KEY";
1051     const char *data = "VALUE";
1055     ENGINE_ERROR_CODE rv;
// Allocate with a 10s TTL.
1056     rv = allocate(h, h1, NULL, &it, key, strlen(data), 0, 10,
1057                   PROTOCOL_BINARY_RAW_BYTES, 0);
1058     checkeq(ENGINE_SUCCESS, rv, "Allocation failed.");
1061     if (!h1->get_item_info(h, NULL, it, &info)) {
1064     memcpy(info.value[0].iov_base, data, strlen(data));
1067     rv = h1->store(h, NULL, it, &cas, OPERATION_SET, DocumentState::Alive);
1068     checkeq(ENGINE_SUCCESS, rv, "Set failed.");
1069     check_key_value(h, h1, key, data, strlen(data));
1070     h1->release(h, NULL, it);
1071     wait_for_flusher_to_settle(h, h1);
1073     checkeq(1, get_int_stat(h, h1, "curr_items"), "Failed store item");
// Pass the TTL; pager is disabled, so the item must remain un-expired.
1074     testHarness.time_travel(15);
1076     checkeq(pager_runs, get_int_stat(h, h1, "ep_num_expiry_pager_runs"),
1077             "Expiry pager shouldn't have run during this time");
1079     // Restart the engine to ensure the above item is expired
1080     testHarness.reload_engine(&h, &h1,
1081                               testHarness.engine_path,
1082                               testHarness.get_current_testcase()->cfg,
1084     wait_for_warmup_complete(h, h1);
1085     check(get_bool_stat(h, h1, "ep_exp_pager_enabled"),
1086           "Expiry pager should be enabled on warmup");
1088     // Wait for the expiry pager to run and expire our item.
1089     wait_for_stat_to_be_gte(h, h1, "ep_expired_pager", 1, nullptr, /*secs*/10);
1091     // Note: previously we checked that curr_items was zero here (immediately
1092     // after waiting for ep_expired_pager == 1), however we cannot assume that
1093     // - items are actually expired asynchronously.
1094     // See EPStore::deleteExpiredItem - for non-temporary, expired items we
1095     // call processSoftDelete (soft-marking the item as deleted in the
1096     // hashtable), and then call queueDirty to queue a deletion, and then
1097     // increment the expired stat. Only when that delete is actually persisted
1098     // and the deleted callback is invoked -
1099     // PeristenceCallback::callback(int&) - is curr_items finally decremented.
1100     // Therefore we need to wait for the flusher to settle (i.e. delete
1101     // callback to be called) for the curr_items stat to be accurate.
1102     wait_for_flusher_to_settle(h, h1);
1104     checkeq(0, get_int_stat(h, h1, "curr_items"),
1105             "The item should have been expired.");
// Regression test for MB-3454: re-ADDing a key whose previous (expired)
// incarnation is still on disk must not produce warmup duplicates. After
// restart, exactly the one live value is loaded (ep_warmup_value_count == 1)
// and ep_warmup_dups == 0.
// NOTE(review): excerpt has gaps (it/info/cas declarations, trailing return
// are in missing lines).
1111 static enum test_result test_bug3454(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1112     if (!isWarmupEnabled(h, h1)) {
1116     const char *key = "test_expiry_duplicate_warmup";
1117     const char *data = "some test data here.";
1121     ENGINE_ERROR_CODE rv;
// First incarnation: 5s TTL.
1122     rv = allocate(h, h1, NULL, &it, key, strlen(data), 0, 5,
1123                   PROTOCOL_BINARY_RAW_BYTES, 0);
1124     checkeq(ENGINE_SUCCESS, rv, "Allocation failed.");
1127     if (!h1->get_item_info(h, NULL, it, &info)) {
1130     memcpy(info.value[0].iov_base, data, strlen(data));
1133     rv = h1->store(h, NULL, it, &cas, OPERATION_SET, DocumentState::Alive);
1134     checkeq(ENGINE_SUCCESS, rv, "Set failed.");
1135     check_key_value(h, h1, key, data, strlen(data));
1136     h1->release(h, NULL, it);
1137     wait_for_flusher_to_settle(h, h1);
1139     // Advance the ep_engine time by 10 sec for the above item to be expired.
1140     testHarness.time_travel(10);
1141     checkeq(ENGINE_KEY_ENOENT,
1142             get(h, h1, NULL, &it, key, 0),
1143             "Item didn't expire");
// Second incarnation: same key, no TTL (exptime=0).
1145     rv = allocate(h, h1, NULL, &it, key, strlen(data), 0, 0,
1146                   PROTOCOL_BINARY_RAW_BYTES, 0);
1147     checkeq(ENGINE_SUCCESS, rv, "Allocation failed.");
1149     if (!h1->get_item_info(h, NULL, it, &info)) {
1152     memcpy(info.value[0].iov_base, data, strlen(data));
1155     // Add a new item with the same key.
1156     rv = h1->store(h, NULL, it, &cas, OPERATION_ADD, DocumentState::Alive);
1157     checkeq(ENGINE_SUCCESS, rv, "Add failed.");
1158     check_key_value(h, h1, key, data, strlen(data));
1159     h1->release(h, NULL, it);
1160     wait_for_flusher_to_settle(h, h1);
1162     checkeq(ENGINE_SUCCESS,
1163             get(h, h1, NULL, &it, key, 0),
1164             "Item shouldn't expire");
1165     h1->release(h, NULL, it);
1167     // Restart the engine to ensure the above unexpired new item is loaded
1168     testHarness.reload_engine(&h, &h1,
1169                               testHarness.engine_path,
1170                               testHarness.get_current_testcase()->cfg,
1172     wait_for_warmup_complete(h, h1);
1173     cb_assert(1 == get_int_stat(h, h1, "ep_warmup_value_count", "warmup"));
1174     cb_assert(0 == get_int_stat(h, h1, "ep_warmup_dups", "warmup"));
// Regression test for MB-3522: overwrite a persistent key with a short-TTL
// value, let the expiry pager run, then restart — warmup must not report
// duplicate items (ep_warmup_dups == 0).
// NOTE(review): excerpt has gaps (it/info/cas declarations, trailing return
// are in missing lines).
1179 static enum test_result test_bug3522(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1180     if (!isWarmupEnabled(h, h1)) {
1184     const char *key = "test_expiry_no_items_warmup";
1185     const char *data = "some test data here.";
1189     ENGINE_ERROR_CODE rv;
// First store: no TTL.
1190     rv = allocate(h, h1, NULL, &it, key, strlen(data), 0, 0,
1191                   PROTOCOL_BINARY_RAW_BYTES, 0);
1192     checkeq(ENGINE_SUCCESS, rv, "Allocation failed.");
1195     if (!h1->get_item_info(h, NULL, it, &info)) {
1198     memcpy(info.value[0].iov_base, data, strlen(data));
1201     rv = h1->store(h, NULL, it, &cas, OPERATION_SET, DocumentState::Alive);
1202     checkeq(ENGINE_SUCCESS, rv, "Set failed.");
1203     check_key_value(h, h1, key, data, strlen(data));
1204     h1->release(h, NULL, it);
1205     wait_for_flusher_to_settle(h, h1);
1207     // Add a new item with the same key and 2 sec of expiration.
1208     const char *new_data = "new data here.";
1209     rv = allocate(h, h1, NULL, &it, key, strlen(new_data), 0, 2,
1210                   PROTOCOL_BINARY_RAW_BYTES, 0);
1211     checkeq(ENGINE_SUCCESS, rv, "Allocation failed.");
1213     if (!h1->get_item_info(h, NULL, it, &info)) {
1216     memcpy(info.value[0].iov_base, new_data, strlen(new_data));
// Snapshot pager-run count so we can wait for at least one more run below.
1218     int pager_runs = get_int_stat(h, h1, "ep_num_expiry_pager_runs");
1220     rv = h1->store(h, NULL, it, &cas, OPERATION_SET, DocumentState::Alive);
1221     checkeq(ENGINE_SUCCESS, rv, "Set failed.");
1222     check_key_value(h, h1, key, new_data, strlen(new_data));
1223     h1->release(h, NULL, it);
1224     testHarness.time_travel(3);
1225     wait_for_stat_change(h, h1, "ep_num_expiry_pager_runs", pager_runs);
1226     wait_for_flusher_to_settle(h, h1);
1228     // Restart the engine.
1229     testHarness.reload_engine(&h, &h1,
1230                               testHarness.engine_path,
1231                               testHarness.get_current_testcase()->cfg,
1233     wait_for_warmup_complete(h, h1);
1234     // TODO: modify this for a better test case
1235     cb_assert(0 == get_int_stat(h, h1, "ep_warmup_dups", "warmup"));
// GET_REPLICA against an *active* vbucket must be rejected with
// NOT_MY_VBUCKET (the engine call itself still returns SUCCESS; the status
// is carried in the response). NOTE(review): trailing return/brace are in
// missing lines of this excerpt.
1240 static enum test_result test_get_replica_active_state(ENGINE_HANDLE *h,
1241                                                       ENGINE_HANDLE_V1 *h1) {
1242     protocol_binary_request_header *pkt;
1243     pkt = prepare_get_replica(h, h1, vbucket_state_active);
1244     checkeq(ENGINE_SUCCESS,
1245             h1->unknown_command(h, NULL, pkt, add_response, testHarness.doc_namespace),
1246             "Get Replica Failed");
1247     checkeq(PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET, last_status.load(),
1248             "Expected PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET response.");
// GET_REPLICA against a *pending* vbucket should block; with ewouldblock
// handling disabled on the cookie, the call must surface ENGINE_EWOULDBLOCK.
1254 static enum test_result test_get_replica_pending_state(ENGINE_HANDLE *h,
1255                                                        ENGINE_HANDLE_V1 *h1) {
1256     protocol_binary_request_header *pkt;
1258     const void *cookie = testHarness.create_cookie();
1259     testHarness.set_ewouldblock_handling(cookie, false);
1260     pkt = prepare_get_replica(h, h1, vbucket_state_pending);
1261     checkeq(ENGINE_EWOULDBLOCK,
1262             h1->unknown_command(h, cookie, pkt, add_response, testHarness.doc_namespace),
1263             "Should have returned error for pending state");
1264     testHarness.destroy_cookie(cookie);
// GET_REPLICA against a *dead* vbucket must respond NOT_MY_VBUCKET.
1269 static enum test_result test_get_replica_dead_state(ENGINE_HANDLE *h,
1270                                                     ENGINE_HANDLE_V1 *h1) {
1271     protocol_binary_request_header *pkt;
1272     pkt = prepare_get_replica(h, h1, vbucket_state_dead);
1273     checkeq(ENGINE_SUCCESS,
1274             h1->unknown_command(h, NULL, pkt, add_response, testHarness.doc_namespace),
1275             "Get Replica Failed");
1276     checkeq(PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET, last_status.load(),
1277             "Expected PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET response.");
// Happy path: GET_REPLICA against a replica vbucket succeeds and returns the
// value ("replicadata") seeded by prepare_get_replica.
1283 static enum test_result test_get_replica(ENGINE_HANDLE *h,
1284                                          ENGINE_HANDLE_V1 *h1) {
1285     protocol_binary_request_header *pkt;
1286     pkt = prepare_get_replica(h, h1, vbucket_state_replica);
1287     checkeq(ENGINE_SUCCESS,
1288             h1->unknown_command(h, NULL, pkt, add_response, testHarness.doc_namespace),
1289             "Get Replica Failed");
1290     checkeq(PROTOCOL_BINARY_RESPONSE_SUCCESS, last_status.load(),
1291             "Expected PROTOCOL_BINARY_RESPONSE_SUCCESS response.");
1292     checkeq(std::string("replicadata"), last_body,
1293             "Should have returned identical value");
// GET_REPLICA must succeed even when the item is non-resident: persist a
// key, evict it, flip the vbucket to replica, then read it back via
// get_replica (forcing a background fetch from disk).
// NOTE(review): item declaration and trailing return are in missing lines.
1299 static enum test_result test_get_replica_non_resident(ENGINE_HANDLE *h,
1300                                                       ENGINE_HANDLE_V1 *h1) {
1303     checkeq(ENGINE_SUCCESS,
1304             store(h, h1, NULL, OPERATION_SET, "key", "value", &i, 0, 0),
1306     h1->release(h, NULL, i);
1307     wait_for_flusher_to_settle(h, h1);
1308     wait_for_stat_to_be(h, h1, "ep_total_persisted", 1);
1310     evict_key(h, h1, "key", 0, "Ejected.");
1311     check(set_vbucket_state(h, h1, 0, vbucket_state_replica),
1312           "Failed to set vbucket to replica");
1314     get_replica(h, h1, "key", 0);
1315     checkeq(PROTOCOL_BINARY_RESPONSE_SUCCESS, last_status.load(),
1316             "Expected success");
// GET_REPLICA with an invalid key/vbucket combination (makeinvalidkey=true)
// must respond NOT_MY_VBUCKET.
1321 static enum test_result test_get_replica_invalid_key(ENGINE_HANDLE *h,
1322                                                      ENGINE_HANDLE_V1 *h1) {
1323     protocol_binary_request_header *pkt;
1324     bool makeinvalidkey = true;
1325     pkt = prepare_get_replica(h, h1, vbucket_state_replica, makeinvalidkey);
1326     checkeq(ENGINE_SUCCESS,
1327             h1->unknown_command(h, NULL, pkt, add_response, testHarness.doc_namespace),
1328             "Get Replica Failed");
1329     checkeq(PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET, last_status.load(),
1330             "Expected PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET response.");
// DELETE against a *pending* vbucket must return EWOULDBLOCK when the
// cookie has ewouldblock handling disabled.
// NOTE(review): the assertion message "Expected woodblock." is a typo for
// "ewouldblock" — fix when editing the full file.
1335 static enum test_result test_vb_del_pending(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1336     const void *cookie = testHarness.create_cookie();
1337     testHarness.set_ewouldblock_handling(cookie, false);
1338     check(set_vbucket_state(h, h1, 1, vbucket_state_pending),
1339           "Failed to set vbucket state.");
1340     checkeq(ENGINE_EWOULDBLOCK, del(h, h1, "key", 0, 1, cookie),
1341             "Expected woodblock.");
1342     testHarness.destroy_cookie(cookie);
// DELETE against a *replica* vbucket must fail with NOT_MY_VBUCKET and bump
// the ep_num_not_my_vbuckets counter.
1346 static enum test_result test_vb_del_replica(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1347     check(set_vbucket_state(h, h1, 1, vbucket_state_replica),
1348           "Failed to set vbucket state.");
1349     int numNotMyVBucket = get_int_stat(h, h1, "ep_num_not_my_vbuckets");
1350     checkeq(ENGINE_NOT_MY_VBUCKET, del(h, h1, "key", 0, 1),
1351             "Expected not my vbucket.");
1352     wait_for_stat_change(h, h1, "ep_num_not_my_vbuckets", numNotMyVBucket);
// Getting the state of a never-created vbucket (1) must report it missing.
1356 static enum test_result test_vbucket_get_miss(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1357     return verify_vbucket_missing(h, h1, 1) ? SUCCESS : FAIL;
// Default vbucket 0 must report state "active".
1360 static enum test_result test_vbucket_get(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1361     return verify_vbucket_state(h, h1, 0, vbucket_state_active) ? SUCCESS : FAIL;
// Creating vbucket 1: verify it's missing, set it active, verify the state
// took effect. NOTE(review): the early-FAIL returns inside the if-bodies are
// in missing lines of this excerpt.
1364 static enum test_result test_vbucket_create(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1365     if (!verify_vbucket_missing(h, h1, 1)) {
1366         fprintf(stderr, "vbucket wasn't missing.\n");
1370     if (!set_vbucket_state(h, h1, 1, vbucket_state_active)) {
1371         fprintf(stderr, "set state failed.\n");
1375     return verify_vbucket_state(h, h1, 1, vbucket_state_active) ? SUCCESS : FAIL;
// Regression: requesting tap-vbtakeover stats immediately after vbucket
// creation (before any flush) must not crash and must report 0 on-disk
// deletes. NOTE(review): the expected-value argument of checkeq is in a
// missing line of this excerpt.
1378 static enum test_result test_takeover_stats_race_with_vb_create_TAP(
1379         ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1380     check(set_vbucket_state(h, h1, 1, vbucket_state_active),
1381           "Failed to set vbucket state information");
1384             get_int_stat(h, h1, "on_disk_deletes", "tap-vbtakeover 1"),
1385             "Invalid number of on-disk deletes");
// Same as the TAP variant above, but via the dcp-vbtakeover stat group.
// NOTE(review): the expected-value argument of checkeq is in a missing line.
1390 static enum test_result test_takeover_stats_race_with_vb_create_DCP(
1391         ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1392     check(set_vbucket_state(h, h1, 1, vbucket_state_active),
1393           "Failed to set vbucket state information");
1396             get_int_stat(h, h1, "on_disk_deletes", "dcp-vbtakeover 1"),
1397             "Invalid number of on-disk deletes");
// Store then delete a key, wait for persistence, and verify the
// dcp-vbtakeover "on_disk_deletes" stat reflects the persisted delete.
// NOTE(review): the expected-value argument of the final checkeq is in a
// missing line of this excerpt.
1402 static enum test_result test_takeover_stats_num_persisted_deletes(
1403         ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1405     std::string key("key");
1406     checkeq(ENGINE_SUCCESS,
1407             store(h, h1, NULL, OPERATION_SET, key.c_str(), "data", nullptr),
1408             "Failed to store an item");
1410     /* delete the item */
1411     checkeq(ENGINE_SUCCESS, del(h, h1, key.c_str(), 0, 0),
1412             "Failed to delete the item");
1414     /* wait for persistence */
1415     wait_for_flusher_to_settle(h, h1);
1417     /* check if persisted deletes stats is got correctly */
1419             get_int_stat(h, h1, "on_disk_deletes", "dcp-vbtakeover 0"),
1420             "Invalid number of on-disk deletes");
// End-to-end compaction test: store an expiring key ("Carss", touched to an
// 11s TTL) and a non-expiring key ("trees"), time-travel past the TTL, then
// compact with purge parameters (purge_before_ts=2, purge_before_seq=3,
// drop_deletes=1). After compaction the expired key is gone, the live key
// survives, and purge_seqno advances to 4.
// NOTE(review): itm declaration and the dummy-store trailing args are in
// missing lines of this excerpt.
1425 static enum test_result test_vbucket_compact(ENGINE_HANDLE *h,
1426                                              ENGINE_HANDLE_V1 *h1) {
1427     const char *key = "Carss";
1428     const char *value = "pollute";
1429     if (!verify_vbucket_missing(h, h1, 0)) {
1430         fprintf(stderr, "vbucket wasn't missing.\n");
1434     if (!set_vbucket_state(h, h1, 0, vbucket_state_active)) {
1435         fprintf(stderr, "set state failed.\n");
1439     check(verify_vbucket_state(h, h1, 0, vbucket_state_active),
1440           "VBucket state not active");
1442     // Set two keys - one to be expired and other to remain...
1444     checkeq(ENGINE_SUCCESS,
1445             store(h, h1, NULL, OPERATION_SET, key, value, &itm),
1447     h1->release(h, NULL, itm);
1449     check_key_value(h, h1, key, value, strlen(value));
1451     // Set a non-expiring key...
1452     checkeq(ENGINE_SUCCESS,
1453             store(h, h1, NULL, OPERATION_SET, "trees", "cleanse", &itm),
1455     h1->release(h, NULL, itm);
1457     check_key_value(h, h1, "trees", "cleanse", strlen("cleanse"));
// Give "Carss" an 11s TTL, then move time past it.
1459     checkeq(ENGINE_SUCCESS, touch(h, h1, key, 0, 11), "touch Carss");
1461     testHarness.time_travel(12);
1462     wait_for_flusher_to_settle(h, h1);
1464     // Store a dummy item since we do not purge the item with highest seqno
1465     checkeq(ENGINE_SUCCESS,
1466             store(h, h1, NULL, OPERATION_SET, "dummykey", "dummyvalue", &itm,
1469     h1->release(h, NULL, itm);
1471     wait_for_flusher_to_settle(h, h1);
1473     checkeq(0, get_int_stat(h, h1, "vb_0:purge_seqno", "vbucket-seqno"),
1474             "purge_seqno not found to be zero before compaction");
1476     // Compaction on VBucket
1477     compact_db(h, h1, 0, 0, 2, 3, 1);
1479     wait_for_stat_to_be(h, h1, "ep_pending_compactions", 0);
1481     // the key tree and its value should be intact...
1482     checkeq(ENGINE_SUCCESS, verify_key(h, h1, "trees"),
1483             "key trees should be found.");
1484     // the key Carrs should have disappeared...
1485     ENGINE_ERROR_CODE val = verify_key(h, h1, "Carss");
1486     checkeq(ENGINE_KEY_ENOENT, val, "Key Carss has not expired.");
1488     checkeq(4, get_int_stat(h, h1, "vb_0:purge_seqno", "vbucket-seqno"),
1489             "purge_seqno didn't match expected value");
// Verifies compaction_write_queue_cap starts at its default (the expected
// value of the first checkeq is in a missing line; presumably 10000 per the
// message) and can be raised to 100000 at runtime via set_param.
1494 static enum test_result test_compaction_config(ENGINE_HANDLE *h,
1495                                                ENGINE_HANDLE_V1 *h1) {
1498             get_int_stat(h, h1, "ep_compaction_write_queue_cap"),
1499             "Expected compaction queue cap to be 10000");
1500     set_param(h, h1, protocol_binary_engine_param_flush,
1501               "compaction_write_queue_cap", "100000");
1502     checkeq(100000, get_int_stat(h, h1, "ep_compaction_write_queue_cap"),
1503             "Expected compaction queue cap to be 100000");
// Per-thread arguments for compaction_thread: engine handles plus the
// vbucket/shard to compact. NOTE(review): the ENGINE_HANDLE* and vbid
// members are in missing lines of this excerpt.
1507 struct comp_thread_ctx {
1509     ENGINE_HANDLE_V1 *h1;
1511     uint16_t db_file_id;
// Thread entry point: runs one compaction for the ctx's vbucket/shard with
// no purge parameters (purge_before_ts=0, purge_before_seq=0, drop_deletes=0).
1515 static void compaction_thread(void *arg) {
1516     struct comp_thread_ctx *ctx = static_cast<comp_thread_ctx *>(arg);
1517     compact_db(ctx->h, ctx->h1, ctx->vbid, ctx->db_file_id, 0, 0, 0);
// Stress test: spread 20000 keys across 4 active vbuckets, then run 4
// concurrent compaction threads (one per vbucket, shard id = vbid %
// num_shards) and wait for all pending compactions to drain.
// NOTE(review): several lines are missing from this excerpt (key formatting
// into ss, count variable, thread-create/join result checks, trailing
// return).
1521 static enum test_result test_multiple_vb_compactions(ENGINE_HANDLE *h,
1522                                                      ENGINE_HANDLE_V1 *h1) {
1523     for (uint16_t i = 0; i < 4; ++i) {
1524         if (!set_vbucket_state(h, h1, i, vbucket_state_active)) {
1525             fprintf(stderr, "set state failed for vbucket %d.\n", i);
1528         check(verify_vbucket_state(h, h1, i, vbucket_state_active),
1529               "VBucket state not active");
1532     std::vector<std::string> keys;
1533     for (int j = 0; j < 20000; ++j) {
1534         std::stringstream ss;
1536         std::string key(ss.str());
1537         keys.push_back(key);
1541     std::vector<std::string>::iterator it;
1542     for (it = keys.begin(); it != keys.end(); ++it) {
// Round-robin keys over the 4 vbuckets.
1543         uint16_t vbid = count % 4;
1545         checkeq(ENGINE_SUCCESS,
1546                 store(h, h1, NULL, OPERATION_SET, it->c_str(), it->c_str(), &i, 0, vbid),
1547                 "Failed to store a value");
1548         h1->release(h, NULL, i);
1552     // Compact multiple vbuckets.
1553     const int n_threads = 4;
1554     cb_thread_t threads[n_threads];
1555     struct comp_thread_ctx ctx[n_threads];
1557     const int num_shards = get_int_stat(h, h1, "ep_workload:num_shards",
1560     for (int i = 0; i < n_threads; i++) {
1563         ctx[i].vbid = static_cast<uint16_t>(i);
1564         ctx[i].db_file_id = ctx[i].vbid % num_shards;
1565         int r = cb_create_thread(&threads[i], compaction_thread, &ctx[i], 0);
1569     for (int i = 0; i < n_threads; i++) {
1570         int r = cb_join_thread(threads[i]);
1574     wait_for_stat_to_be(h, h1, "ep_pending_compactions", 0);
// Like test_multiple_vb_compactions, but first skews the workload pattern:
// 10000 writes followed by two full read passes must flip
// ep_workload_pattern to "read_heavy" before the 4 concurrent compactions
// run. NOTE(review): several lines are missing from this excerpt (key
// formatting, count variable, store vbid arg, thread result checks,
// trailing return).
1579 static enum test_result
1580 test_multi_vb_compactions_with_workload(ENGINE_HANDLE *h,
1581                                         ENGINE_HANDLE_V1 *h1) {
1582     for (uint16_t i = 0; i < 4; ++i) {
1583         if (!set_vbucket_state(h, h1, i, vbucket_state_active)) {
1584             fprintf(stderr, "set state failed for vbucket %d.\n", i);
1587         check(verify_vbucket_state(h, h1, i, vbucket_state_active),
1588               "VBucket state not active");
1591     std::vector<std::string> keys;
1592     for (int j = 0; j < 10000; ++j) {
1593         std::stringstream ss;
1595         std::string key(ss.str());
1596         keys.push_back(key);
1600     std::vector<std::string>::iterator it;
1601     for (it = keys.begin(); it != keys.end(); ++it) {
1602         uint16_t vbid = count % 4;
1604         checkeq(ENGINE_SUCCESS,
1605                 store(h, h1, NULL, OPERATION_SET, it->c_str(), it->c_str(),
1607                 "Failed to store a value");
1608         h1->release(h, NULL, i);
1611     wait_for_flusher_to_settle(h, h1);
// Two read passes over every key to make the access pattern read-heavy.
1613     for (int i = 0; i < 2; ++i) {
1615         for (it = keys.begin(); it != keys.end(); ++it) {
1616             uint16_t vbid = count % 4;
1618             checkeq(ENGINE_SUCCESS,
1619                     get(h, h1, NULL, &i, it->c_str(), vbid),
1620                     "Unable to get stored item");
1621             h1->release(h, NULL, i);
1625     wait_for_stat_to_be(h, h1, "ep_workload_pattern", std::string{"read_heavy"});
1627     // Compact multiple vbuckets.
1628     const int n_threads = 4;
1629     cb_thread_t threads[n_threads];
1630     struct comp_thread_ctx ctx[n_threads];
1632     for (int i = 0; i < n_threads; i++) {
1635         ctx[i].vbid = static_cast<uint16_t>(i);
1636         int r = cb_create_thread(&threads[i], compaction_thread, &ctx[i], 0);
1640     for (int i = 0; i < n_threads; i++) {
1641         int r = cb_join_thread(threads[i]);
1645     wait_for_stat_to_be(h, h1, "ep_pending_compactions", 0);
// Shared helper for the sync/async vbucket-destroy tests. `value` carries
// optional delete flags (e.g. "async=0"). Deleting a non-existent vbucket
// (2) must fail NOT_MY_VBUCKET; deleting a dead vbucket (1) must succeed.
// NOTE(review): the message on the SUCCESS assertion ("Expected failure
// deleting non-existent bucket.") is a copy-paste error — it checks a
// successful delete. Fix when editing the full file.
1650 static enum test_result vbucket_destroy(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1,
1651                                         const char* value = NULL) {
1652     check(set_vbucket_state(h, h1, 1, vbucket_state_active),
1653           "Failed to set vbucket state.");
1655     vbucketDelete(h, h1, 2, value);
1656     checkeq(PROTOCOL_BINARY_RESPONSE_NOT_MY_VBUCKET,
1658             "Expected failure deleting non-existent bucket.");
// A vbucket must be dead before it can be deleted.
1660     check(set_vbucket_state(h, h1, 1, vbucket_state_dead),
1661           "Failed set set vbucket 1 state.");
1663     vbucketDelete(h, h1, 1, value);
1664     checkeq(PROTOCOL_BINARY_RESPONSE_SUCCESS, last_status.load(),
1665             "Expected failure deleting non-existent bucket.");
1667     check(verify_vbucket_missing(h, h1, 1),
1668           "vbucket 0 was not missing after deleting it.");
// Verifies memory accounting after destroying a populated vbucket: cache
// size, overhead and non-resident counts must return to their pre-creation
// values. NOTE(review): item declaration, store's vbid argument, loop braces
// and the last_status argument of one checkeq are in missing lines; the
// "Expected failure..." message on the SUCCESS check is a copy-paste error.
1673 static enum test_result test_vbucket_destroy_stats(ENGINE_HANDLE *h,
1674                                                    ENGINE_HANDLE_V1 *h1) {
// Baseline stats to compare against after the vbucket is deleted.
1676     int cacheSize = get_int_stat(h, h1, "ep_total_cache_size");
1677     int overhead = get_int_stat(h, h1, "ep_overhead");
1678     int nonResident = get_int_stat(h, h1, "ep_num_non_resident");
1680     check(set_vbucket_state(h, h1, 1, vbucket_state_active),
1681           "Failed to set vbucket state.");
1683     std::vector<std::string> keys;
1684     for (int j = 0; j < 2000; ++j) {
1685         std::stringstream ss;
1687         std::string key(ss.str());
1688         keys.push_back(key);
1691     int itemsRemoved = get_int_stat(h, h1, "ep_items_rm_from_checkpoints");
1692     std::vector<std::string>::iterator it;
1693     for (it = keys.begin(); it != keys.end(); ++it) {
1695         checkeq(ENGINE_SUCCESS,
1696                 store(h, h1, NULL, OPERATION_SET, it->c_str(), it->c_str(),
1698                 "Failed to store a value");
1699         h1->release(h, NULL, i);
1701     wait_for_flusher_to_settle(h, h1);
// 65s time-travel lets the checkpoint remover reclaim closed checkpoints.
1702     testHarness.time_travel(65);
1703     wait_for_stat_change(h, h1, "ep_items_rm_from_checkpoints", itemsRemoved);
1705     check(set_vbucket_state(h, h1, 1, vbucket_state_dead),
1706           "Failed set set vbucket 1 state.");
1708     int vbucketDel = get_int_stat(h, h1, "ep_vbucket_del");
1709     vbucketDelete(h, h1, 1);
1710     checkeq(PROTOCOL_BINARY_RESPONSE_SUCCESS,
1712             "Expected failure deleting non-existent bucket.");
1714     check(verify_vbucket_missing(h, h1, 1),
1715           "vbucket 1 was not missing after deleting it.");
1717     wait_for_stat_change(h, h1, "ep_vbucket_del", vbucketDel);
// Memory stats must drop back to the recorded baselines.
1719     wait_for_stat_to_be(h, h1, "ep_total_cache_size", cacheSize);
1720     wait_for_stat_to_be(h, h1, "ep_overhead", overhead);
1721     wait_for_stat_to_be(h, h1, "ep_num_non_resident", nonResident);
// Shared helper for destroy-then-restart tests: a destroyed vbucket must
// stay deleted across an engine restart (no resurrection of its data).
// NOTE(review): item declaration, early-return body and trailing return are
// in missing lines; the "Expected failure..." message on the SUCCESS check
// is a copy-paste error.
1726 static enum test_result vbucket_destroy_restart(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1,
1727                                                 const char* value = NULL) {
1728     if (!isWarmupEnabled(h, h1)) {
1732     check(set_vbucket_state(h, h1, 1, vbucket_state_active),
1733           "Failed to set vbucket state.");
1735     // Store a value so the restart will try to resurrect it.
1737     checkeq(ENGINE_SUCCESS,
1738             store(h, h1, NULL, OPERATION_SET, "key", "somevalue", &i, 0, 1),
1739             "Failed to set a value");
1740     check_key_value(h, h1, "key", "somevalue", 9, 1);
1741     h1->release(h, NULL, i);
1743     // Reload to get a flush forced.
1744     testHarness.reload_engine(&h, &h1,
1745                               testHarness.engine_path,
1746                               testHarness.get_current_testcase()->cfg,
1748     wait_for_warmup_complete(h, h1);
1750     check(verify_vbucket_state(h, h1, 1, vbucket_state_active),
1751           "Bucket state was what it was initially, after restart.");
1752     check(set_vbucket_state(h, h1, 1, vbucket_state_active),
1753           "Failed to set vbucket state.");
1754     check_key_value(h, h1, "key", "somevalue", 9, 1);
1756     check(set_vbucket_state(h, h1, 1, vbucket_state_dead),
1757           "Failed set set vbucket 1 state.");
1759     vbucketDelete(h, h1, 1, value);
1760     checkeq(PROTOCOL_BINARY_RESPONSE_SUCCESS, last_status.load(),
1761             "Expected failure deleting non-existent bucket.");
1763     check(verify_vbucket_missing(h, h1, 1),
1764           "vbucket 1 was not missing after deleting it.");
// Restart again: the deleted vbucket must not come back in any state.
1766     testHarness.reload_engine(&h, &h1,
1767                               testHarness.engine_path,
1768                               testHarness.get_current_testcase()->cfg,
1770     wait_for_warmup_complete(h, h1);
1772     if (verify_vbucket_state(h, h1, 1, vbucket_state_pending, true)) {
1773         std::cerr << "Bucket came up in pending state after delete." << std::endl;
1777     check(verify_vbucket_missing(h, h1, 1),
1778           "vbucket 1 was not missing after restart.");
// Async (default) variant of vbucket_destroy.
1783 static enum test_result test_async_vbucket_destroy(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1784     return vbucket_destroy(h, h1);
// Synchronous variant of vbucket_destroy (delete flag "async=0").
1787 static enum test_result test_sync_vbucket_destroy(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1788     return vbucket_destroy(h, h1, "async=0");
// Async variant of vbucket_destroy_restart.
1791 static enum test_result test_async_vbucket_destroy_restart(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1792     return vbucket_destroy_restart(h, h1);
// Synchronous variant of vbucket_destroy_restart ("async=0").
1795 static enum test_result test_sync_vbucket_destroy_restart(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1796     return vbucket_destroy_restart(h, h1, "async=0");
// SET against a pending vbucket (shared mutation-test helper).
1799 static enum test_result test_vb_set_pending(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1800     return test_pending_vb_mutation(h, h1, OPERATION_SET);
// ADD against a pending vbucket.
1803 static enum test_result test_vb_add_pending(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1804     return test_pending_vb_mutation(h, h1, OPERATION_ADD);
// CAS against a pending vbucket.
1807 static enum test_result test_vb_cas_pending(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1808     return test_pending_vb_mutation(h, h1, OPERATION_CAS);
// SET against a replica vbucket (shared mutation-test helper).
1811 static enum test_result test_vb_set_replica(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1812     return test_replica_vb_mutation(h, h1, OPERATION_SET);
// REPLACE against a replica vbucket.
1815 static enum test_result test_vb_replace_replica(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1816     return test_replica_vb_mutation(h, h1, OPERATION_REPLACE);
// REPLACE against a pending vbucket.
1819 static enum test_result test_vb_replace_pending(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1820     return test_pending_vb_mutation(h, h1, OPERATION_REPLACE);
// ADD against a replica vbucket.
1823 static enum test_result test_vb_add_replica(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1824     return test_replica_vb_mutation(h, h1, OPERATION_ADD);
// CAS against a replica vbucket.
1827 static enum test_result test_vb_cas_replica(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1828     return test_replica_vb_mutation(h, h1, OPERATION_CAS);
// Exercises the "vbucket-seqno [vb]" stat group: high_seqno /
// last_persisted_seqno for populated (vb 0) and empty (vb 1) vbuckets, uuid
// consistency with the failover table, stat-count, and error handling for
// invalid / non-numeric / trailing-space vbucket arguments.
// NOTE(review): num_keys declaration, key formatting into ss, and several
// checkeq expected-value lines are in missing lines of this excerpt.
1831 static enum test_result test_stats_seqno(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1832     check(set_vbucket_state(h, h1, 1, vbucket_state_active),
1833           "Failed to set vbucket state.");
1836     for (int ii = 0; ii < num_keys; ++ii) {
1837         std::stringstream ss;
1839         checkeq(ENGINE_SUCCESS,
1840                 store(h, h1, NULL, OPERATION_SET, ss.str().c_str(),
1841                       "value", NULL, 0, 0),
1842                 "Failed to store an item.");
1844     wait_for_flusher_to_settle(h, h1);
1846     checkeq(100, get_int_stat(h, h1, "vb_0:high_seqno", "vbucket-seqno"),
// last_persisted_seqno only exists for persistent buckets.
1849     if (isPersistentBucket(h, h1)) {
1851                 get_int_stat(h, h1, "vb_0:last_persisted_seqno", "vbucket-seqno"),
1852                 "Unexpected last_persisted_seqno");
1854     checkeq(0, get_int_stat(h, h1, "vb_1:high_seqno", "vbucket-seqno"),
1856     checkeq(0, get_int_stat(h, h1, "vb_1:high_seqno", "vbucket-seqno 1"),
1858     if (isPersistentBucket(h, h1)) {
1860                 get_int_stat(h, h1, "vb_1:last_persisted_seqno", "vbucket-seqno 1"),
1861                 "Invalid last_persisted_seqno");
1864     uint64_t vb_uuid = get_ull_stat(h, h1, "vb_1:0:id", "failovers");
1866     auto seqno_stats = get_all_stats(h, h1, "vbucket-seqno 1");
1867     checkeq(vb_uuid, uint64_t(std::stoull(seqno_stats.at("vb_1:uuid"))),
1870     checkeq(size_t(7), seqno_stats.size(), "Expected seven stats");
1872     // Check invalid vbucket
1873     checkeq(ENGINE_NOT_MY_VBUCKET,
1874             h1->get_stats(h, NULL, "vbucket-seqno 2", 15, add_stats),
1875             "Expected not my vbucket");
1877     // Check bad vbucket parameter (not numeric)
1878     checkeq(ENGINE_EINVAL,
1879             h1->get_stats(h, NULL, "vbucket-seqno tt2", 17, add_stats),
1880             "Expected invalid");
1882     // Check extra spaces at the end
1883     checkeq(ENGINE_EINVAL,
1884             h1->get_stats(h, NULL, "vbucket-seqno ", 17, add_stats),
1885             "Expected invalid");
// Exercises the "diskinfo" stat group: file/data sizes must be positive and
// consistent (file >= data), per-vbucket sizes appear under "diskinfo
// detail", and malformed sub-commands return EINVAL.
// NOTE(review): num_keys declaration and key formatting are in missing lines
// of this excerpt.
1890 static enum test_result test_stats_diskinfo(ENGINE_HANDLE *h,
1891                                             ENGINE_HANDLE_V1 *h1) {
1892     check(set_vbucket_state(h, h1, 1, vbucket_state_active),
1893           "Failed to set vbucket state.");
1896     for (int ii = 0; ii < num_keys; ++ii) {
1897         std::stringstream ss;
1899         checkeq(ENGINE_SUCCESS,
1900                 store(h, h1, NULL, OPERATION_SET, ss.str().c_str(),
1901                       "value", NULL, 0, 1),
1902                 "Failed to store an item.");
1904     wait_for_flusher_to_settle(h, h1);
1906     size_t file_size = get_int_stat(h, h1, "ep_db_file_size", "diskinfo");
1907     size_t data_size = get_int_stat(h, h1, "ep_db_data_size", "diskinfo");
1908     check(file_size > 0, "DB file size should be greater than 0");
1909     check(data_size > 0, "DB data size should be greater than 0");
1910     check(file_size >= data_size, "DB file size should be >= DB data size");
1911     check(get_int_stat(h, h1, "vb_1:data_size", "diskinfo detail") > 0,
1912           "VB 1 data size should be greater than 0");
// Malformed sub-commands: trailing space, truncated and over-long "detail".
1914     checkeq(ENGINE_EINVAL,
1915             h1->get_stats(h, NULL, "diskinfo ", 9, add_stats),
1916             "Expected invalid");
1918     checkeq(ENGINE_EINVAL,
1919             h1->get_stats(h, NULL, "diskinfo detai", 14, add_stats),
1920             "Expected invalid");
1922     checkeq(ENGINE_EINVAL,
1923             h1->get_stats(h, NULL, "diskinfo detaillll", 18, add_stats),
1924             "Expected invalid");
// The "uuid" stat group must return the uuid configured for the bucket
// (the test config sets it to "foobar").
1929 static enum test_result test_uuid_stats(ENGINE_HANDLE *h,
1930                                         ENGINE_HANDLE_V1 *h1)
1933     checkeq(ENGINE_SUCCESS,
1934             h1->get_stats(h, NULL, "uuid", 4, add_stats),
1935             "Failed to get stats.");
1936     check(vals["uuid"] == "foobar", "Incorrect uuid");
// Verifies per-vbucket op counters: after 3 creations ("key", "key1", and
// the re-create of "key1"), 1 update (overwrite of "key") and 1 delete, the
// vb_active_ops_create/update/delete stats must match.
// NOTE(review): item declaration and the expected-value arguments of the
// final three checkeq calls are in missing lines of this excerpt.
1940 static enum test_result test_item_stats(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1942     checkeq(ENGINE_SUCCESS,
1943             store(h, h1, NULL, OPERATION_SET, "key", "somevalue", &i, 0, 0),
1945     h1->release(h, NULL, i);
1946     wait_for_flusher_to_settle(h, h1);
// Overwrite "key" -> counts as an update.
1947     checkeq(ENGINE_SUCCESS,
1948             store(h, h1, NULL, OPERATION_SET, "key", "somevalueX", &i, 0, 0),
1950     h1->release(h, NULL, i);
1951     wait_for_flusher_to_settle(h, h1);
1952     checkeq(ENGINE_SUCCESS,
1953             store(h, h1, NULL, OPERATION_SET, "key1", "somevalueY", &i, 0, 0),
1955     h1->release(h, NULL, i);
1956     wait_for_flusher_to_settle(h, h1);
1958     check_key_value(h, h1, "key", "somevalueX", 10);
1959     check_key_value(h, h1, "key1", "somevalueY", 10);
1961     checkeq(ENGINE_SUCCESS, del(h, h1, "key1", 0, 0),
1962             "Failed remove with value.");
1963     wait_for_flusher_to_settle(h, h1);
// Re-create "key1" after the delete -> a third creation.
1965     checkeq(ENGINE_SUCCESS,
1966             store(h, h1, NULL, OPERATION_SET, "key1", "someothervalue", &i, 0, 0),
1968     h1->release(h, NULL, i);
1969     wait_for_flusher_to_settle(h, h1);
1971     check_key_value(h, h1, "key1", "someothervalue", 14);
1974             get_int_stat(h, h1, "vb_active_ops_create"),
1975             "Expected 3 creations");
1977             get_int_stat(h, h1, "vb_active_ops_update"),
1978             "Expected 1 updation");
1980             get_int_stat(h, h1, "vb_active_ops_delete"),
1981             "Expected 1 deletion");
// Smoke test for the default (NULL key) stat group: must return a sizeable
// set of stats including ep_version.
1986 static enum test_result test_stats(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1988     checkeq(ENGINE_SUCCESS,
1989             h1->get_stats(h, NULL, NULL, 0, add_stats),
1990             "Failed to get stats.");
1991     check(vals.size() > 10, "Kind of expected more stats than that.");
1992     check(vals.find("ep_version") != vals.end(), "Found no ep_version.");
// Verifies memory-accounting stats around persist/evict/reload of a single
// large value: mem_used exceeds the cache size by checkpoint overhead,
// eviction reduces mem_used without growing the cache, and reloading from
// disk restores mem_used/ep_value_size.
// NOTE(review): the value buffer's declaration (size) is in a missing line
// of this excerpt, as is the trailing return.
1997 static enum test_result test_mem_stats(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
1999     memset(value, 'b', sizeof(value));
// Terminate the buffer with "\r\n" (overwrites the last 3 bytes + NUL).
2000     strcpy(value + sizeof(value) - 4, "\r\n");
2001     int itemsRemoved = get_int_stat(h, h1, "ep_items_rm_from_checkpoints");
2002     wait_for_persisted_value(h, h1, "key", value);
2003     testHarness.time_travel(65);
2004     if (isPersistentBucket(h, h1)) {
2005         wait_for_stat_change(h, h1, "ep_items_rm_from_checkpoints", itemsRemoved);
2007     int mem_used = get_int_stat(h, h1, "mem_used");
2008     int cache_size = get_int_stat(h, h1, "ep_total_cache_size");
2009     int overhead = get_int_stat(h, h1, "ep_overhead");
2010     int value_size = get_int_stat(h, h1, "ep_value_size");
2011     check((mem_used - overhead) > cache_size,
2012           "ep_kv_size should be greater than the hashtable cache size due to the checkpoint overhead");
// Eviction path only applies to persistent buckets.
2014     if (isPersistentBucket(h, h1)) {
2015         evict_key(h, h1, "key", 0, "Ejected.");
2017         check(get_int_stat(h, h1, "ep_total_cache_size") <= cache_size,
2018               "Evict a value shouldn't increase the total cache size");
2019         check(get_int_stat(h, h1, "mem_used") < mem_used,
2020               "Expected mem_used to decrease when an item is evicted");
2022         check_key_value(h, h1, "key", value, strlen(value), 0); // Load an item from disk again.
2024         check(get_int_stat(h, h1, "mem_used") >= mem_used,
2025               "Expected mem_used to remain the same after an item is loaded from disk");
2026         check(get_int_stat(h, h1, "ep_value_size") == value_size,
2027               "Expected ep_value_size to remain the same after item is loaded from disk");
// Exercises the kvstore I/O counters (io_num_read/write, io_read/write_bytes):
// reset_stats zeroes them, a store bumps only the write side, and a bg-fetch
// after eviction bumps only the read side.
// NOTE(review): expected byte counts are hard-coded per backend (see TBDs).
2033 static enum test_result test_io_stats(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
2034     int exp_read_bytes = 4, exp_write_bytes;
2035     std::string backend = get_str_stat(h, h1, "ep_backend");
2036     if (backend == "forestdb") {
2037         exp_write_bytes = 35; /* TBD: Do not hard code the value */
2038     } else if (backend == "couchdb") {
2039         exp_write_bytes = 22; /* TBD: Do not hard code the value */
// After reset, all four rw_0 counters must be zero.
2044     h1->reset_stats(h, NULL);
2046     checkeq(0, get_int_stat(h, h1, "rw_0:io_num_read", "kvstore"),
2047             "Expected reset stats to set io_num_read to zero");
2048     checkeq(0, get_int_stat(h, h1, "rw_0:io_num_write", "kvstore"),
2049             "Expected reset stats to set io_num_write to zero");
2050     checkeq(0, get_int_stat(h, h1, "rw_0:io_read_bytes", "kvstore"),
2051             "Expected reset stats to set io_read_bytes to zero");
2052     checkeq(0, get_int_stat(h, h1, "rw_0:io_write_bytes", "kvstore"),
2053             "Expected reset stats to set io_write_bytes to zero");
// Persisting one value: write counters move, read counters stay at zero.
2055     wait_for_persisted_value(h, h1, "a", "b\r\n");
2056     checkeq(0, get_int_stat(h, h1, "rw_0:io_num_read", "kvstore"),
2057             "Expected storing one value to not change the read counter");
2058     checkeq(0, get_int_stat(h, h1, "rw_0:io_read_bytes", "kvstore"),
2059             "Expected storing one value to not change the read bytes");
2060     checkeq(1, get_int_stat(h, h1, "rw_0:io_num_write", "kvstore"),
2061             "Expected storing the key to update the write counter");
2062     checkeq(exp_write_bytes,
2063             get_int_stat(h, h1, "rw_0:io_write_bytes", "kvstore"),
2064             "Expected storing the key to update the write bytes");
// Evict then read back => forces a disk fetch.
2066     evict_key(h, h1, "a", 0, "Ejected.");
2068     check_key_value(h, h1, "a", "b\r\n", 3, 0);
2070     std::stringstream numReadStatStr;
2071     std::stringstream readBytesStatStr;
// couchstore services bg-fetches through a read-only ("ro_") store handle;
// forestdb uses the read-write ("rw_") handle for reads too.
2073     if (backend == "couchdb") {
2074         numReadStatStr << "ro_" << 0 << ":io_num_read";
2075         readBytesStatStr << "ro_" << 0 << ":io_read_bytes";
2076     } else if (backend == "forestdb") {
2077         numReadStatStr << "rw_" << 0 << ":io_num_read";
2078         readBytesStatStr << "rw_" << 0 << ":io_read_bytes";
2083     checkeq(1, get_int_stat(h, h1, numReadStatStr.str().c_str(), "kvstore"),
2084             "Expected reading the value back in to update the read counter");
2085     checkeq(exp_read_bytes,
2086             get_int_stat(h, h1, readBytesStatStr.str().c_str(), "kvstore"),
2087             "Expected reading the value back in to update the read bytes");
// Write-side counters must be unchanged by the read.
2088     checkeq(1, get_int_stat(h, h1, "rw_0:io_num_write", "kvstore"),
2089             "Expected reading the value back in to not update the write counter");
2090     checkeq(exp_write_bytes,
2091             get_int_stat(h, h1, "rw_0:io_write_bytes", "kvstore"),
2092             "Expected reading the value back in to not update the write bytes");
// Checks that the on-disk size stats (ep_db_data_size / ep_db_file_size and
// their per-vbucket equivalents) are non-zero and grow when a value is
// persisted.
2097 static enum test_result test_vb_file_stats(ENGINE_HANDLE *h,
2098                                            ENGINE_HANDLE_V1 *h1) {
2099     wait_for_flusher_to_settle(h, h1);
// Wait until the engine has published a non-zero data size baseline.
2100     wait_for_stat_change(h, h1, "ep_db_data_size", 0);
2102     int old_data_size = get_int_stat(h, h1, "ep_db_data_size");
2103     int old_file_size = get_int_stat(h, h1, "ep_db_file_size");
2104     check(old_file_size != 0, "Expected a non-zero value for ep_db_file_size");
2106     // Write a value and test ...
2107     wait_for_persisted_value(h, h1, "a", "b\r\n");
2108     check(get_int_stat(h, h1, "ep_db_data_size") > old_data_size,
2109           "Expected the DB data size to increase");
2110     check(get_int_stat(h, h1, "ep_db_file_size") > old_file_size,
2111           "Expected the DB file size to increase");
// Per-vbucket breakdown must also report non-zero sizes for vb 0.
2113     check(get_int_stat(h, h1, "vb_0:db_data_size", "vbucket-details 0") > 0,
2114           "Expected the vbucket DB data size to non-zero");
2115     check(get_int_stat(h, h1, "vb_0:db_file_size", "vbucket-details 0") > 0,
2116           "Expected the vbucket DB file size to non-zero");
// Verifies that per-vbucket file-size stats survive an engine restart:
// after warmup the reported sizes must be at least ~90% of the pre-restart
// values (some variance from compaction/rewrite is tolerated).
2120 static enum test_result test_vb_file_stats_after_warmup(ENGINE_HANDLE *h,
2121                                                         ENGINE_HANDLE_V1 *h1) {
// Warmup-less buckets (ephemeral) cannot run this test.
2122     if (!isWarmupEnabled(h, h1)) {
// Populate 100 items so the vbucket file has non-trivial size.
2127     for (int i = 0; i < 100; ++i) {
2128         std::stringstream key;
2130         checkeq(ENGINE_SUCCESS,
2131                 store(h, h1, NULL, OPERATION_SET, key.str().c_str(), "somevalue", &it),
2133         h1->release(h, NULL, it);
2135     wait_for_flusher_to_settle(h, h1);
2137     int fileSize = get_int_stat(h, h1, "vb_0:db_file_size", "vbucket-details 0");
2138     int spaceUsed = get_int_stat(h, h1, "vb_0:db_data_size", "vbucket-details 0");
2140     // Restart the engine.
2141     testHarness.reload_engine(&h, &h1,
2142                               testHarness.engine_path,
2143                               testHarness.get_current_testcase()->cfg,
2145     wait_for_warmup_complete(h, h1);
2147     int newFileSize = get_int_stat(h, h1, "vb_0:db_file_size", "vbucket-details 0");
2148     int newSpaceUsed = get_int_stat(h, h1, "vb_0:db_data_size", "vbucket-details 0");
// Allow up to 10% shrinkage across restart.
2150     check((float)newFileSize >= 0.9 * fileSize, "Unexpected fileSize for vbucket");
2151     check((float)newSpaceUsed >= 0.9 * spaceUsed, "Unexpected spaceUsed for vbucket");
// Checks background-fetch sampling stats: each evict + read cycle should add
// one bg-fetch sample, the histogram min/max keys should exist, and
// reset_stats should zero ep_bg_fetched.
2156 static enum test_result test_bg_stats(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
2157     h1->reset_stats(h, NULL);
2158     wait_for_persisted_value(h, h1, "a", "b\r\n");
2159     evict_key(h, h1, "a", 0, "Ejected.");
2160     testHarness.time_travel(43);
// Reading the evicted key forces one background fetch.
2161     check_key_value(h, h1, "a", "b\r\n", 3, 0);
2163     auto stats = get_all_stats(h, h1);
2164     checkeq(1, std::stoi(stats.at("ep_bg_num_samples")),
2165             "Expected one sample");
2167     const char* bg_keys[] = { "ep_bg_min_wait",
// All bg-fetch timing stats must be present in the stats map.
2173     for (const auto* key : bg_keys) {
2174         check(stats.find(key) != stats.end(),
2175               (std::string("Found no ") + key).c_str());
// Second evict/read cycle bumps the sample count to 2.
// NOTE(review): the failure message still says "one sample" — stale copy.
2178     evict_key(h, h1, "a", 0, "Ejected.");
2179     check_key_value(h, h1, "a", "b\r\n", 3, 0);
2180     check(get_int_stat(h, h1, "ep_bg_num_samples") == 2,
2181           "Expected one sample");
2183     h1->reset_stats(h, NULL);
2184     checkeq(0, get_int_stat(h, h1, "ep_bg_fetched"),
2185             "ep_bg_fetched is not reset to 0");
// Verifies that metadata-only background fetches are counted separately
// (ep_bg_meta_fetched) from full value fetches (ep_bg_fetched).
2189 static enum test_result test_bg_meta_stats(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
2191     h1->reset_stats(h, NULL);
2193     wait_for_persisted_value(h, h1, "k1", "v1");
2194     wait_for_persisted_value(h, h1, "k2", "v2");
// k1 becomes non-resident; k2 is deleted (its meta must be bg-fetched later).
2196     evict_key(h, h1, "k1", 0, "Ejected.");
2197     checkeq(ENGINE_SUCCESS,
2198             del(h, h1, "k2", 0, 0), "Failed remove with value.");
2199     wait_for_stat_to_be(h, h1, "curr_items", 1);
2201     checkeq(0, get_int_stat(h, h1, "ep_bg_fetched"), "Expected bg_fetched to be 0");
2202     checkeq(0, get_int_stat(h, h1, "ep_bg_meta_fetched"), "Expected bg_meta_fetched to be 0");
// getMeta on the deleted key only needs metadata -> bumps meta counter only.
2204     check(get_meta(h, h1, "k2"), "Get meta failed");
2205     checkeq(0, get_int_stat(h, h1, "ep_bg_fetched"), "Expected bg_fetched to be 0");
2206     checkeq(1, get_int_stat(h, h1, "ep_bg_meta_fetched"), "Expected bg_meta_fetched to be 1");
// A full GET on the evicted key needs the value -> bumps bg_fetched only.
2208     checkeq(ENGINE_SUCCESS, get(h, h1, NULL, &itm, "k1", 0), "Missing key");
2209     checkeq(1, get_int_stat(h, h1, "ep_bg_fetched"), "Expected bg_fetched to be 1");
2210     checkeq(1, get_int_stat(h, h1, "ep_bg_meta_fetched"), "Expected bg_meta_fetched to be 1");
2211     h1->release(h, NULL, itm);
2213     // store new key with some random metadata
2214     const size_t keylen = strlen("k3");
2215     ItemMetaData itemMeta;
2216     itemMeta.revSeqno = 10;
2217     itemMeta.cas = 0xdeadbeef;
2218     itemMeta.exptime = 0;
2219     itemMeta.flags = 0xdeadbeef;
2221     add_with_meta(h, h1, "k3", keylen, NULL, 0, 0, &itemMeta);
2222     checkeq(PROTOCOL_BINARY_RESPONSE_SUCCESS, last_status.load(), "Set meta failed");
// Second getMeta of k2: metadata is cached now, so no further bg fetch.
2224     check(get_meta(h, h1, "k2"), "Get meta failed");
2225     checkeq(1, get_int_stat(h, h1, "ep_bg_fetched"), "Expected bg_fetched to be 1");
2226     checkeq(1, get_int_stat(h, h1, "ep_bg_meta_fetched"),
2227             "Expected bg_meta_fetched to remain at 1");
// Exercises the "key <key> <vbid>" stats group: stores one key in vbucket 0
// and one in vbucket 1 and checks the per-key stat fields are returned for
// both.
2232 static enum test_result test_key_stats(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
2235     check(set_vbucket_state(h, h1, 1, vbucket_state_active), "Failed set vbucket 1 state.");
2237     // set (k1,v1) in vbucket 0
2238     checkeq(ENGINE_SUCCESS,
2239             store(h, h1, NULL, OPERATION_SET,"k1", "v1", &i, 0, 0),
2240             "Failed to store an item.");
2241     h1->release(h, NULL, i);
2242     // set (k2,v2) in vbucket 1
2243     checkeq(ENGINE_SUCCESS,
2244             store(h, h1, NULL, OPERATION_SET,"k2", "v2", &i, 0, 1),
2245             "Failed to store an item.");
2246     h1->release(h, NULL, i);
2248     const void *cookie = testHarness.create_cookie();
2250     // stat for key "k1" and vbucket "0"
2251     const char *statkey1 = "key k1 0";
2252     checkeq(ENGINE_SUCCESS,
2253             h1->get_stats(h, cookie, statkey1, strlen(statkey1), add_stats),
2254             "Failed to get stats.");
// Expected per-key stat fields populated into the global `vals` map.
2255     check(vals.find("key_is_dirty") != vals.end(), "Found no key_is_dirty");
2256     check(vals.find("key_exptime") != vals.end(), "Found no key_exptime");
2257     check(vals.find("key_flags") != vals.end(), "Found no key_flags");
2258     check(vals.find("key_cas") != vals.end(), "Found no key_cas");
2259     check(vals.find("key_vb_state") != vals.end(), "Found no key_vb_state");
2261     // stat for key "k2" and vbucket "1"
2262     const char *statkey2 = "key k2 1";
2263     checkeq(ENGINE_SUCCESS,
2264             h1->get_stats(h, cookie, statkey2, strlen(statkey2), add_stats),
2265             "Failed to get stats.");
2266     check(vals.find("key_is_dirty") != vals.end(), "Found no key_is_dirty");
2267     check(vals.find("key_exptime") != vals.end(), "Found no key_exptime");
2268     check(vals.find("key_flags") != vals.end(), "Found no key_flags");
2269     check(vals.find("key_cas") != vals.end(), "Found no key_cas");
2270     check(vals.find("key_vb_state") != vals.end(), "Found no key_vb_state");
2272     testHarness.destroy_cookie(cookie);
// Exercises the "vkey <key> <vbid>" stats group across vbuckets in every
// state (active, replica, pending, dead): each lookup must return the
// per-key fields plus "key_valid" (vkey verifies the key against disk).
2276 static enum test_result test_vkey_stats(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
2277     check(set_vbucket_state(h, h1, 1, vbucket_state_active), "Failed set vbucket 1 state.");
2278     check(set_vbucket_state(h, h1, 2, vbucket_state_active), "Failed set vbucket 2 state.");
2279     check(set_vbucket_state(h, h1, 3, vbucket_state_active), "Failed set vbucket 3 state.");
2280     check(set_vbucket_state(h, h1, 4, vbucket_state_active), "Failed set vbucket 4 state.");
// One persisted key per vbucket 0..4.
2282     wait_for_persisted_value(h, h1, "k1", "v1");
2283     wait_for_persisted_value(h, h1, "k2", "v2", 1);
2284     wait_for_persisted_value(h, h1, "k3", "v3", 2);
2285     wait_for_persisted_value(h, h1, "k4", "v4", 3);
2286     wait_for_persisted_value(h, h1, "k5", "v5", 4);
// Flip vbuckets 2-4 into non-active states after persisting.
2288     check(set_vbucket_state(h, h1, 2, vbucket_state_replica), "Failed to set VB2 state.");
2289     check(set_vbucket_state(h, h1, 3, vbucket_state_pending), "Failed to set VB3 state.");
2290     check(set_vbucket_state(h, h1, 4, vbucket_state_dead), "Failed to set VB4 state.");
2292     const void *cookie = testHarness.create_cookie();
2294     // stat for key "k1" and vbucket "0"
2295     const char *statkey1 = "vkey k1 0";
2296     checkeq(ENGINE_SUCCESS,
2297             h1->get_stats(h, cookie, statkey1, strlen(statkey1), add_stats),
2298             "Failed to get stats.");
2299     check(vals.find("key_is_dirty") != vals.end(), "Found no key_is_dirty");
2300     check(vals.find("key_exptime") != vals.end(), "Found no key_exptime");
2301     check(vals.find("key_flags") != vals.end(), "Found no key_flags");
2302     check(vals.find("key_cas") != vals.end(), "Found no key_cas");
2303     check(vals.find("key_vb_state") != vals.end(), "Found no key_vb_state");
2304     check(vals.find("key_valid") != vals.end(), "Found no key_valid");
2306     // stat for key "k2" and vbucket "1"
2307     const char *statkey2 = "vkey k2 1";
2308     checkeq(ENGINE_SUCCESS,
2309             h1->get_stats(h, cookie, statkey2, strlen(statkey2), add_stats),
2310             "Failed to get stats.");
2311     check(vals.find("key_is_dirty") != vals.end(), "Found no key_is_dirty");
2312     check(vals.find("key_exptime") != vals.end(), "Found no key_exptime");
2313     check(vals.find("key_flags") != vals.end(), "Found no key_flags");
2314     check(vals.find("key_cas") != vals.end(), "Found no key_cas");
2315     check(vals.find("key_vb_state") != vals.end(), "Found no key_vb_state");
2316     check(vals.find("key_valid") != vals.end(), "Found no key_valid");
2318     // stat for key "k3" and vbucket "2"
2319     const char *statkey3 = "vkey k3 2";
2320     checkeq(ENGINE_SUCCESS,
2321             h1->get_stats(h, cookie, statkey3, strlen(statkey3), add_stats),
2322             "Failed to get stats.");
2323     check(vals.find("key_is_dirty") != vals.end(), "Found no key_is_dirty");
2324     check(vals.find("key_exptime") != vals.end(), "Found no key_exptime");
2325     check(vals.find("key_flags") != vals.end(), "Found no key_flags");
2326     check(vals.find("key_cas") != vals.end(), "Found no key_cas");
2327     check(vals.find("key_vb_state") != vals.end(), "Found no key_vb_state");
2328     check(vals.find("key_valid") != vals.end(), "Found no key_valid");
2330     // stat for key "k4" and vbucket "3"
2331     const char *statkey4 = "vkey k4 3";
2332     checkeq(ENGINE_SUCCESS,
2333             h1->get_stats(h, cookie, statkey4, strlen(statkey4), add_stats),
2334             "Failed to get stats.");
2335     check(vals.find("key_is_dirty") != vals.end(), "Found no key_is_dirty");
2336     check(vals.find("key_exptime") != vals.end(), "Found no key_exptime");
2337     check(vals.find("key_flags") != vals.end(), "Found no key_flags");
2338     check(vals.find("key_cas") != vals.end(), "Found no key_cas");
2339     check(vals.find("key_vb_state") != vals.end(), "Found no key_vb_state");
2340     check(vals.find("key_valid") != vals.end(), "Found no key_valid");
2342     // stat for key "k5" and vbucket "4"
2343     const char *statkey5 = "vkey k5 4";
2344     checkeq(ENGINE_SUCCESS,
2345             h1->get_stats(h, cookie, statkey5, strlen(statkey5), add_stats),
2346             "Failed to get stats.");
2347     check(vals.find("key_is_dirty") != vals.end(), "Found no key_is_dirty");
2348     check(vals.find("key_exptime") != vals.end(), "Found no key_exptime");
2349     check(vals.find("key_flags") != vals.end(), "Found no key_flags");
2350     check(vals.find("key_cas") != vals.end(), "Found no key_cas");
2351     check(vals.find("key_vb_state") != vals.end(), "Found no key_vb_state");
2352     check(vals.find("key_valid") != vals.end(), "Found no key_valid");
2354     testHarness.destroy_cookie(cookie);
// Tests runtime configuration of the warmup thresholds and then verifies
// warmup behaviour after a restart with warmup_min_memory_threshold=0:
// value-only eviction loads all keys, full eviction loads none.
2358 static enum test_result test_warmup_conf(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
2359     if (!isWarmupEnabled(h, h1)) {
2363     checkeq(100, get_int_stat(h, h1, "ep_warmup_min_items_threshold"),
2364             "Incorrect initial warmup min items threshold.");
2365     checkeq(100, get_int_stat(h, h1, "ep_warmup_min_memory_threshold"),
2366             "Incorrect initial warmup min memory threshold.");
// Non-numeric values must be rejected.
// NOTE(review): both negative checks set "warmup_min_items_threshold" while
// the second message refers to memory — looks like a copy/paste slip; the
// memory-threshold rejection is presumably the intent. Verify upstream.
2368     check(!set_param(h, h1, protocol_binary_engine_param_flush,
2369                      "warmup_min_items_threshold", "a"),
2370           "Set warmup_min_items_threshold should have failed");
2371     check(!set_param(h, h1, protocol_binary_engine_param_flush,
2372                      "warmup_min_items_threshold", "a"),
2373           "Set warmup_min_memory_threshold should have failed");
2375     check(set_param(h, h1, protocol_binary_engine_param_flush,
2376                     "warmup_min_items_threshold", "80"),
2377           "Set warmup_min_items_threshold should have worked");
2378     check(set_param(h, h1, protocol_binary_engine_param_flush,
2379                     "warmup_min_memory_threshold", "80"),
2380           "Set warmup_min_memory_threshold should have worked");
2382     checkeq(80, get_int_stat(h, h1, "ep_warmup_min_items_threshold"),
2383             "Incorrect smaller warmup min items threshold.");
2384     checkeq(80, get_int_stat(h, h1, "ep_warmup_min_memory_threshold"),
2385             "Incorrect smaller warmup min memory threshold.");
// Load 100 items so warmup has something to count.
2388     for (int i = 0; i < 100; ++i) {
2389         std::stringstream key;
2391         checkeq(ENGINE_SUCCESS,
2392                 store(h, h1, NULL, OPERATION_SET, key.str().c_str(), "somevalue", &it),
2394         h1->release(h, NULL, it);
2397     // Restart the server.
2398     std::string config(testHarness.get_current_testcase()->cfg);
2399     config = config + "warmup_min_memory_threshold=0";
2400     testHarness.reload_engine(&h, &h1,
2401                               testHarness.engine_path,
2404     wait_for_warmup_complete(h, h1);
2406     const std::string eviction_policy = get_str_stat(h, h1, "ep_item_eviction_policy");
2407     if (eviction_policy == "value_only") {
// Value-eviction warmup always loads all keys (values gated by threshold).
2408         checkeq(100, get_int_stat(h, h1, "ep_warmup_key_count", "warmup"),
2409                 "Expected 100 keys loaded after warmup");
2410     } else { // Full eviction mode
2411         checkeq(0, get_int_stat(h, h1, "ep_warmup_key_count", "warmup"),
2412                 "Expected 0 keys loaded after warmup");
// memory threshold 0 => no values should have been loaded in either mode.
2415     checkeq(0, get_int_stat(h, h1, "ep_warmup_value_count", "warmup"),
2416             "Expected 0 values loaded after warmup");
// Tests runtime toggling of the bloom filter and updating its residency
// threshold via set_param.
2421 static enum test_result test_bloomfilter_conf(ENGINE_HANDLE *h,
2422                                               ENGINE_HANDLE_V1 *h1) {
// Ensure the filter is on before exercising the config knobs.
2424     if (get_bool_stat(h, h1, "ep_bfilter_enabled") == false) {
2425         check(set_param(h, h1, protocol_binary_engine_param_flush,
2426                         "bfilter_enabled", "true"),
2427               "Set bloomfilter_enabled should have worked");
2429     check(get_bool_stat(h, h1, "ep_bfilter_enabled"),
2430           "Bloom filter wasn't enabled");
// Default residency threshold is 0.1; float equality is safe here because
// the stat round-trips the exact configured value.
2432     check(get_float_stat(h, h1, "ep_bfilter_residency_threshold") == (float)0.1,
2433           "Incorrect initial bfilter_residency_threshold.");
2435     check(set_param(h, h1, protocol_binary_engine_param_flush,
2436                     "bfilter_enabled", "false"),
2437           "Set bloomfilter_enabled should have worked.");
2438     check(set_param(h, h1, protocol_binary_engine_param_flush,
2439                     "bfilter_residency_threshold", "0.15"),
2440           "Set bfilter_residency_threshold should have worked.");
2442     check(get_bool_stat(h, h1, "ep_bfilter_enabled") == false,
2443           "Bloom filter should have been disabled.");
2444     check(get_float_stat(h, h1, "ep_bfilter_residency_threshold") == (float)0.15,
2445           "Incorrect bfilter_residency_threshold.");
// End-to-end bloom filter behaviour test: store/evict/delete items, then
// verify how getMeta/get trigger (or avoid) background fetches depending on
// the eviction policy, before and after a compaction with drop_deletes.
2450 static enum test_result test_bloomfilters(ENGINE_HANDLE *h,
2451                                           ENGINE_HANDLE_V1 *h1) {
2453     if (get_bool_stat(h, h1, "ep_bfilter_enabled") == false) {
2454         check(set_param(h, h1, protocol_binary_engine_param_flush,
2455                         "bfilter_enabled", "true"),
2456               "Set bloomfilter_enabled should have worked");
2458     check(get_bool_stat(h, h1, "ep_bfilter_enabled"),
2459           "Bloom filter wasn't enabled");
2461     // Key is only present if bgOperations is non-zero.
2462     int num_read_attempts = get_int_stat_or_default(h, h1, 0,
2463                                                     "ep_bg_num_samples");
2465     // Ensure vbucket's bloom filter is enabled
2466     checkeq(std::string("ENABLED"),
2467             get_str_stat(h, h1, "vb_0:bloom_filter", "vbucket-details 0"),
2468             "Vbucket 0's bloom filter wasn't enabled upon setup!");
// Store 10 items (keys built from a stringstream per iteration).
2474     for (i = 0; i < 10; ++i) {
2475         std::stringstream key;
2477         checkeq(ENGINE_SUCCESS,
2478                 store(h, h1, NULL, OPERATION_SET, key.str().c_str(),
2481         h1->release(h, NULL, it);
2483     wait_for_flusher_to_settle(h, h1);
2485     // Evict all 10 items.
2486     for (i = 0; i < 10; ++i) {
2487         std::stringstream key;
2489         evict_key(h, h1, key.str().c_str(), 0, "Ejected.");
2491     wait_for_flusher_to_settle(h, h1);
2493     // Ensure 10 items are non-resident.
2494     cb_assert(10 == get_int_stat(h, h1, "ep_num_non_resident"));
2496     // Issue delete on first 5 items.
2497     for (i = 0; i < 5; ++i) {
2498         std::stringstream key;
2500         checkeq(ENGINE_SUCCESS,
2501                 del(h, h1, key.str().c_str(), 0, 0),
2502                 "Failed remove with value.");
2504     wait_for_flusher_to_settle(h, h1);
2506     // Ensure that there are 5 non-resident items
2507     cb_assert(5 == get_int_stat(h, h1, "ep_num_non_resident"));
2508     cb_assert(5 == get_int_stat(h, h1, "curr_items"));
2510     checkeq(ENGINE_SUCCESS,
2511             h1->get_stats(h, NULL, NULL, 0, add_stats),
2512             "Failed to get stats.");
2513     std::string eviction_policy = vals.find("ep_item_eviction_policy")->second;
// Used for decayingSleep while polling for compaction completion.
2515     useconds_t sleepTime = 128;
2517     if (eviction_policy == "value_only") { // VALUE-ONLY EVICTION MODE
2520                 get_int_stat(h, h1, "vb_0:bloom_filter_key_count",
2521                              "vbucket-details 0"),
2522                 "Unexpected no. of keys in bloom filter");
2524         checkeq(num_read_attempts,
2525                 get_int_stat_or_default(h, h1, 0, "ep_bg_num_samples"),
2526                 "Expected bgFetch attempts to remain unchanged");
2528         for (i = 0; i < 5; ++i) {
2529             std::stringstream key;
2531             check(get_meta(h, h1, key.str().c_str()), "Get meta failed");
2534         // GetMeta would cause bgFetches as bloomfilter contains
2535         // the deleted items.
2536         checkeq(num_read_attempts + 5,
2537                 get_int_stat(h, h1, "ep_bg_num_samples"),
2538                 "Expected bgFetch attempts to increase by five");
2540         // Run compaction, with drop_deletes
2541         compact_db(h, h1, 0, 0, 15, 15, 1);
2542         while (get_int_stat(h, h1, "ep_pending_compactions") != 0) {
2543             decayingSleep(&sleepTime);
// After compaction the deleted keys are purged from the filter, so
// getMeta no longer triggers additional bg fetches.
2546         for (i = 0; i < 5; ++i) {
2547             std::stringstream key;
2549             check(get_meta(h, h1, key.str().c_str()), "Get meta failed");
2551         checkeq(num_read_attempts + 5,
2552                 get_int_stat(h, h1, "ep_bg_num_samples"),
2553                 "Expected bgFetch attempts to stay as before");
2555     } else { // FULL EVICTION MODE
2558                 get_int_stat(h, h1, "vb_0:bloom_filter_key_count",
2559                              "vbucket-details 0"),
2560                 "Unexpected no. of keys in bloom filter");
2563         // Because of issuing deletes on non-resident items
2564         checkeq(num_read_attempts + 5,
2565                 get_int_stat(h, h1, "ep_bg_num_samples"),
2566                 "Expected bgFetch attempts to increase by five, after deletes");
2568         // Run compaction, with drop_deletes, to exclude deleted items
2569         // from bloomfilter.
2570         compact_db(h, h1, 0, 0, 15, 15, 1);
2571         while (get_int_stat(h, h1, "ep_pending_compactions") != 0) {
2572             decayingSleep(&sleepTime);
2575         for (i = 0; i < 5; i++) {
2576             std::stringstream key;
2578             checkeq(ENGINE_KEY_ENOENT,
2579                     get(h, h1, NULL, &it, key.str(), 0),
2580                     "Unable to get stored item");
2582         // + 6 because last delete is not purged by the compactor
2583         checkeq(num_read_attempts + 6,
2584                 get_int_stat(h, h1, "ep_bg_num_samples"),
2585                 "Expected bgFetch attempts to stay as before");
// Verifies that the bloom filter short-circuits disk lookups for the store
// paths: getMeta on missing keys, setWithMeta, ADD and DEL on non-existent
// keys must all complete without any background fetches.
2591 static enum test_result test_bloomfilters_with_store_apis(ENGINE_HANDLE *h,
2592                                                           ENGINE_HANDLE_V1 *h1) {
2593     if (get_bool_stat(h, h1, "ep_bfilter_enabled") == false) {
2594         check(set_param(h, h1, protocol_binary_engine_param_flush,
2595                         "bfilter_enabled", "true"),
2596               "Set bloomfilter_enabled should have worked");
2598     check(get_bool_stat(h, h1, "ep_bfilter_enabled"),
2599           "Bloom filter wasn't enabled");
// Baseline bg-fetch sample count (0 if the stat doesn't exist yet).
2601     int num_read_attempts = get_int_stat_or_default(h, h1, 0,
2602                                                     "ep_bg_num_samples");
2604     // Ensure vbucket's bloom filter is enabled
2605     checkeq(std::string("ENABLED"),
2606             get_str_stat(h, h1, "vb_0:bloom_filter", "vbucket-details 0"),
2607             "Vbucket 0's bloom filter wasn't enabled upon setup!");
// getMeta on 1000 keys that were never stored: the filter must answer
// "definitely not present" without touching disk.
2609     for (int i = 0; i < 1000; i++) {
2610         std::stringstream key;
2612         check(!get_meta(h, h1, key.str().c_str()),
2613               "Get meta should fail.");
2616     checkeq(num_read_attempts,
2617             get_int_stat_or_default(h, h1, 0, "ep_bg_num_samples"),
2618             "Expected no bgFetch attempts");
2620     checkeq(ENGINE_SUCCESS,
2621             h1->get_stats(h, NULL, NULL, 0, add_stats),
2622             "Failed to get stats.");
2623     std::string eviction_policy = vals.find("ep_item_eviction_policy")->second;
// setWithMeta checks existence on disk only in full-eviction mode, hence
// the extra checks under that policy.
2625     if (eviction_policy == "full_eviction") { // FULL EVICTION MODE
2628         for (j = 0; j < 10; j++) {
2629             uint64_t cas_for_set = last_cas;
2630             // init some random metadata
2631             ItemMetaData itm_meta;
2632             itm_meta.revSeqno = 10;
2633             itm_meta.cas = 0xdeadbeef;
2634             itm_meta.exptime = time(NULL) + 300;
2635             itm_meta.flags = 0xdeadbeef;
2637             std::stringstream key;
2639             set_with_meta(h, h1, key.str().c_str(), key.str().length(),
2640                           "somevalue", 9, 0, &itm_meta, cas_for_set);
2643         checkeq(num_read_attempts,
2644                 get_int_stat_or_default(h, h1, 0, "ep_bg_num_samples"),
2645                 "Expected no bgFetch attempts");
// ADD on fresh keys must not consult disk either.
2649         for (j = 0; j < 10; j++) {
2650             std::stringstream key;
2653             checkeq(ENGINE_SUCCESS,
2654                     store(h, h1, NULL, OPERATION_ADD, key.str().c_str(),
2656                     "Failed to add value again.");
2657             h1->release(h, NULL, itm);
2660         checkeq(num_read_attempts,
2661                 get_int_stat_or_default(h, h1, 0, "ep_bg_num_samples"),
2662                 "Expected no bgFetch attempts");
// DEL of never-stored keys fails fast with ENOENT, no disk access.
2665         for (j = 0; j < 10; j++) {
2666             std::stringstream key;
2668             checkeq(ENGINE_KEY_ENOENT,
2669                     del(h, h1, key.str().c_str(), 0, 0),
2670                     "Failed remove with value.");
2673         checkeq(num_read_attempts,
2674                 get_int_stat_or_default(h, h1, 0, "ep_bg_num_samples"),
2675                 "Expected no bgFetch attempts");
// Regression test for the delete-then-set race with persistence stopped:
// depending on whether the flusher already processed the delete, the bloom
// filter key count is 0 or 1, and each case implies a different expected
// number of disk writes after persistence resumes.
2682 static enum test_result test_bloomfilter_delete_plus_set_scenario(
2683                                        ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
2684     if (get_bool_stat(h, h1, "ep_bfilter_enabled") == false) {
2685         check(set_param(h, h1, protocol_binary_engine_param_flush,
2686                         "bfilter_enabled", "true"),
2687               "Set bloomfilter_enabled should have worked");
2689     check(get_bool_stat(h, h1, "ep_bfilter_enabled"),
2690           "Bloom filter wasn't enabled");
2692     // Ensure vbucket's bloom filter is enabled
2693     checkeq(std::string("ENABLED"),
2694             get_str_stat(h, h1, "vb_0:bloom_filter", "vbucket-details 0"),
2695             "Vbucket 0's bloom filter wasn't enabled upon setup!");
2698     checkeq(ENGINE_SUCCESS,
2699             store(h, h1, NULL, OPERATION_SET,"k1", "v1", &itm),
2700             "Failed to fail to store an item.");
2701     h1->release(h, NULL, itm);
2703     wait_for_flusher_to_settle(h, h1);
2704     int num_writes = get_int_stat(h, h1, "rw_0:io_num_write", "kvstore");
2705     int num_persisted = get_int_stat(h, h1, "ep_total_persisted");
2706     cb_assert(num_writes == 1 && num_persisted == 1);
// Delete k1, freeze the flusher, then overwrite k1 while persistence is off.
2708     checkeq(ENGINE_SUCCESS,
2709             del(h, h1, "k1", 0, 0), "Failed remove with value.");
2710     stop_persistence(h, h1);
2711     checkeq(ENGINE_SUCCESS,
2712             store(h, h1, NULL, OPERATION_SET,"k1", "v2", &itm, 0, 0),
2713             "Failed to fail to store an item.");
2714     h1->release(h, NULL, itm);
2715     int key_count = get_int_stat(h, h1, "vb_0:bloom_filter_key_count",
2716                                  "vbucket-details 0");
// key_count == 0: the flusher never persisted the delete, so delete+set
// dedupe into (at most) one extra write and the filter stays empty.
2718     if (key_count == 0) {
2719         check(get_int_stat(h, h1, "rw_0:io_num_write", "kvstore") <= 2,
2720               "Unexpected number of writes");
2721         start_persistence(h, h1);
2722         wait_for_flusher_to_settle(h, h1);
2723         checkeq(0, get_int_stat(h, h1, "vb_0:bloom_filter_key_count",
2724                                 "vbucket-details 0"),
2725                 "Unexpected number of keys in bloomfilter");
// key_count == 1: the delete hit disk before stop_persistence, so the key
// entered the filter and the later set adds one more write.
2727         cb_assert(key_count == 1);
2728         checkeq(2, get_int_stat(h, h1, "rw_0:io_num_write", "kvstore"),
2729                 "Unexpected number of writes");
2730         start_persistence(h, h1);
2731         wait_for_flusher_to_settle(h, h1);
2732         checkeq(1, get_int_stat(h, h1, "vb_0:bloom_filter_key_count",
2733                                 "vbucket-details 0"),
2734                 "Unexpected number of keys in bloomfilter");
// Verifies that the JSON datatype flag is preserved by both the regular
// store path and the setWithMeta path when the connection advertises
// datatype support.
2740 static enum test_result test_datatype(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
2741     const void *cookie = testHarness.create_cookie();
2742     testHarness.set_datatype_support(cookie, true);
2745     const std::string key("{\"foo\":\"bar\"}");
2746     const protocol_binary_datatype_t datatype = PROTOCOL_BINARY_DATATYPE_JSON;
2748     std::string value("x");
2749     checkeq(ENGINE_SUCCESS,
2750             storeCasOut(h, h1, NULL, 0, key, value, datatype, itm, cas),
2751             "Expected set to succeed");
2753     checkeq(ENGINE_SUCCESS,
2754             get(h, h1, cookie, &itm, key, 0),
2755             "Unable to get stored item");
// The item read back must still carry the JSON datatype.
2758     h1->get_item_info(h, cookie, itm, &info);
2759     h1->release(h, cookie, itm);
2760     checkeq(static_cast<uint8_t>(PROTOCOL_BINARY_DATATYPE_JSON),
2761             info.datatype, "Invalid datatype");
2763     const char* key1 = "foo";
2764     const char* val1 = "{\"foo1\":\"bar1\"}";
2765     ItemMetaData itm_meta;
2766     itm_meta.revSeqno = 10;
2767     itm_meta.cas = info.cas;
2768     itm_meta.exptime = info.exptime;
2769     itm_meta.flags = info.flags;
// setWithMeta with an explicit datatype must also round-trip it.
2770     set_with_meta(h, h1, key1, strlen(key1), val1, strlen(val1), 0, &itm_meta,
2771                   last_cas, 0, info.datatype, cookie);
2773     checkeq(ENGINE_SUCCESS,
2774             get(h, h1, cookie, &itm, key1, 0),
2775             "Unable to get stored item");
2777     h1->get_item_info(h, cookie, itm, &info);
2778     h1->release(h, cookie, itm);
2779     checkeq(static_cast<uint8_t>(PROTOCOL_BINARY_DATATYPE_JSON),
2780             info.datatype, "Invalid datatype, when setWithMeta");
2782     testHarness.destroy_cookie(cookie);
// Verifies the JSON datatype is preserved through the "unknown command"
// paths: setWithMeta and SET_RETURN_META.
2786 static enum test_result test_datatype_with_unknown_command(ENGINE_HANDLE *h,
2787                                                            ENGINE_HANDLE_V1 *h1) {
2788     const void *cookie = testHarness.create_cookie();
2789     testHarness.set_datatype_support(cookie, true);
2791     const char* key = "foo";
2792     const char* val = "{\"foo\":\"bar\"}";
2793     uint8_t datatype = PROTOCOL_BINARY_DATATYPE_JSON;
2795     ItemMetaData itm_meta;
2796     itm_meta.revSeqno = 10;
2798     itm_meta.exptime = 0;
// Store via setWithMeta carrying the JSON datatype.
2802     set_with_meta(h, h1, key, strlen(key), val, strlen(val), 0, &itm_meta,
2803                   0, 0, datatype, cookie);
2805     checkeq(ENGINE_SUCCESS,
2806             get(h, h1, cookie, &itm, key, 0),
2807             "Unable to get stored item");
2810     h1->get_item_info(h, cookie, itm, &info);
2811     h1->release(h, NULL, itm);
2812     checkeq(static_cast<uint8_t>(PROTOCOL_BINARY_DATATYPE_JSON),
2813             info.datatype, "Invalid datatype, when setWithMeta");
// SET_RETURN_META must echo the datatype back in its response
// (checked via last_datatype, captured by the response handler).
2816     set_ret_meta(h, h1, "foo1", 4, val, strlen(val), 0, 0, 0, 0, datatype,
2818     checkeq(PROTOCOL_BINARY_RESPONSE_SUCCESS, last_status.load(),
2819             "Expected set returing meta to succeed");
2820     checkeq(static_cast<uint8_t>(PROTOCOL_BINARY_DATATYPE_JSON),
2821             last_datatype.load(), "Invalid datatype, when set_return_meta");
2823     testHarness.destroy_cookie(cookie);
// Checks session-CAS validation on SET_VBUCKET: a mismatched CAS is
// rejected with KEY_EEXISTS, while the matching session CAS succeeds.
// NOTE(review): 0x0102030405060708 is presumably the harness's configured
// session CAS — confirm against the test setup.
2827 static enum test_result test_session_cas_validation(ENGINE_HANDLE *h,
2828                                                     ENGINE_HANDLE_V1 *h1) {
2829     //Testing PROTOCOL_BINARY_CMD_SET_VBUCKET..
2831     protocol_binary_request_header *pkt;
2832     vbucket_state_t state = vbucket_state_active;
2833     uint32_t val = static_cast<uint32_t>(state);
// Desired vbucket state travels in the 4-byte extras field.
2835     memcpy(ext, (char*)&val, sizeof(val));
// Wrong session CAS: the engine-level call succeeds but the response
// status must be KEY_EEXISTS.
2837     uint64_t cas = 0x0101010101010101;
2838     pkt = createPacket(PROTOCOL_BINARY_CMD_SET_VBUCKET, 0, cas, ext, 4);
2839     checkeq(ENGINE_SUCCESS,
2840             h1->unknown_command(h, NULL, pkt, add_response, testHarness.doc_namespace),
2841             "SET_VBUCKET command failed");
2843     cb_assert(last_status == PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS);
2845     cas = 0x0102030405060708;
2846     pkt = createPacket(PROTOCOL_BINARY_CMD_SET_VBUCKET, 0, cas, ext, 4);
2847     checkeq(ENGINE_SUCCESS,
2848             h1->unknown_command(h, NULL, pkt, add_response, testHarness.doc_namespace),
2849             "SET_VBUCKET command failed");
2851     cb_assert(last_status == PROTOCOL_BINARY_RESPONSE_SUCCESS);
// Tests the access scanner scheduling knobs: default alog_task_time,
// rescheduling via alog_task_time, and rescheduling via alog_sleep_time
// (checked against a [now+10m .. now+10m'] time window).
2856 static enum test_result test_access_scanner_settings(ENGINE_HANDLE *h,
2857                                                      ENGINE_HANDLE_V1 *h1) {
2858     if (!isWarmupEnabled(h, h1)) {
2859         // Access scanner n/a without warmup.
2863     // Create a unique access log path by combining with the db path.
2864     checkeq(ENGINE_SUCCESS,
2865             h1->get_stats(h, NULL, NULL, 0, add_stats),
2866             "Failed to get stats.");
2867     std::string dbname = vals.find("ep_dbname")->second;
2869     const auto alog_path = std::string("alog_path=") + dbname +
2870             DIRECTORY_SEPARATOR_CHARACTER + "access.log";
2871     std::string newconfig = std::string(testHarness.get_current_testcase()->cfg)
// Reload with the new alog_path so the scanner task is (re)scheduled.
2874     testHarness.reload_engine(&h, &h1,
2875                               testHarness.engine_path,
2878     wait_for_warmup_complete(h, h1);
2880     std::string err_msg;
2881     // Check access scanner is enabled and alog_task_time is at default
2882     checkeq(true, get_bool_stat(h, h1, "ep_access_scanner_enabled"),
2883             "Expected access scanner to be enabled");
2884     cb_assert(get_int_stat(h, h1, "ep_alog_task_time") == 2);
2886     // Ensure access_scanner_task_time is what its expected to be.
2887     // Need to wait until the AccessScanner task has been setup.
2888     wait_for_stat_change(h, h1, "ep_access_scanner_task_time",
2889                          std::string{"NOT_SCHEDULED"});
// The task-time stat is a timestamp string; substr(11, 5) extracts HH:MM.
2891     std::string str = get_str_stat(h, h1, "ep_access_scanner_task_time");
2892     std::string expected_time = "02:00";
2893     err_msg.assign("Initial time incorrect, expect: " +
2894                    expected_time + ", actual: " + str.substr(11, 5));
2895     checkeq(0, str.substr(11, 5).compare(expected_time), err_msg.c_str());
2897     // Update alog_task_time and ensure the update is successful
2898     set_param(h, h1, protocol_binary_engine_param_flush, "alog_task_time", "5");
2899     expected_time = "05:00";
2900     str = get_str_stat(h, h1, "ep_access_scanner_task_time");
2901     err_msg.assign("Updated time incorrect, expect: " +
2902                    expected_time + ", actual: " + str.substr(11, 5));
2903     checkeq(0, str.substr(11, 5).compare(expected_time), err_msg.c_str());
2905     // Update alog_sleep_time by 10 mins and ensure the update is successful.
2906     const std::chrono::minutes update_by{10};
// Lower bound captured before the set_param, upper bound after, so the
// actual scheduled time must fall between the two.
2907     std::string targetTaskTime1{make_time_string(std::chrono::system_clock::now() +
2910     set_param(h, h1, protocol_binary_engine_param_flush, "alog_sleep_time",
2911               std::to_string(update_by.count()).c_str());
2912     str = get_str_stat(h, h1, "ep_access_scanner_task_time");
2914     // Recalculate now() + 10mins as upper bound on when the task should be
2916     std::string targetTaskTime2{make_time_string(std::chrono::system_clock::now() +
2919     // ep_access_scanner_task_time should fall within the range of
2920     // targetTaskTime1 and targetTaskTime2
2921     err_msg.assign("Unexpected task time range, expect: " +
2922                    targetTaskTime1 + " <= " + str + " <= " + targetTaskTime2);
2923     check(targetTaskTime1 <= str, err_msg.c_str());
2924     check(str <= targetTaskTime2, err_msg.c_str());
/* Exercises the AccessScanner task end-to-end:
 *  1. Reload the engine with a unique alog_path and a task time ~2h in the
 *     future (so the scheduled run cannot race our explicit triggers).
 *  2. Fill VB0 until resident ratio drops below 95%, trigger the scanner
 *     via set_param, and verify an access log file is generated.
 *  3. Empty the vbucket (ratio back above 95%), trigger again, and verify
 *     the scanner skips and the log files are removed. */
static enum test_result test_access_scanner(ENGINE_HANDLE *h,
                                            ENGINE_HANDLE_V1 *h1) {
    if (!isWarmupEnabled(h, h1)) {
        // Access scanner not applicable without warmup.

    // Create a unique access log path by combining with the db path.
    checkeq(ENGINE_SUCCESS,
            h1->get_stats(h, NULL, NULL, 0, add_stats),
            "Failed to get stats.");
    const auto dbname = vals.find("ep_dbname")->second;

    const auto alog_path = std::string("alog_path=") + dbname +
            DIRECTORY_SEPARATOR_CHARACTER + "access.log";

    /* We do not want the access scanner task to be running while we initiate it
       explicitly below. Hence set the alog_task_time to about 1 ~ 2 hours
    const time_t now = time(nullptr);
    cb_gmtime_r(&now, &tm_now);
    const auto two_hours_hence = (tm_now.tm_hour + 2) % 24;

    const auto alog_task_time = std::string("alog_task_time=") +
            std::to_string(two_hours_hence);

    // Append the custom alog settings to the testcase's base config.
    const auto newconfig = std::string(testHarness.get_current_testcase()->cfg)
            + alog_path + ";" + alog_task_time;

    testHarness.reload_engine(&h, &h1,
                              testHarness.engine_path,
    wait_for_warmup_complete(h, h1);

    /* Check that alog_task_time was correctly updated. */
    checkeq(get_int_stat(h, h1, "ep_alog_task_time"),
            "Failed to set alog_task_time to 2 hours in the future");

    checkeq(ENGINE_SUCCESS,
            h1->get_stats(h, NULL, NULL, 0, add_stats),
            "Failed to get stats.");
    std::string name = vals.find("ep_alog_path")->second;

    /* Check access scanner is enabled */
    checkeq(true, get_bool_stat(h, h1, "ep_access_scanner_enabled"),
            "Access scanner task not enabled by default. Check test config");

    const int num_shards = get_int_stat(h, h1, "ep_workload:num_shards",

    // Path of the rotated (previous-generation) access log.
    std::string prev(name + ".old");

    /* Get the resident ratio down to below 95% - point at which access.log
     * generation occurs.
    // Size chosen to create ~2000 items (i.e. 2x more than we sanity-check below)
    // with the given max_size for this test.
    const std::string value(2000, 'x');
    // Gathering stats on every store is expensive, just check every 100 iterations
    if ((num_items % 100) == 0) {
        if (get_int_stat(h, h1, "vb_active_perc_mem_resident") < 94) {
    std::string key("key" + std::to_string(num_items));
    ENGINE_ERROR_CODE ret = store(h, h1, NULL, OPERATION_SET,
                                  key.c_str(), value.c_str(), &itm);
    case ENGINE_SUCCESS:
        h1->release(h, NULL, itm);
    case ENGINE_TMPFAIL:
        // Returned when at high watermark; simply retry the op.
        h1->release(h, NULL, itm);
    fprintf(stderr, "test_access_scanner: Unexpected result from store(): %d\n",

    // Sanity check - ensure we have enough vBucket quota (max_size)
    // such that we have 1000 items - enough to give us 0.1%
    // granularity in any residency calculations. */
    if (num_items < 1000) {
        std::cerr << "Error: test_access_scanner: "
                     "expected at least 1000 items after filling vbucket, "
                     "but only have " << num_items << ". "
                     "Check max_size setting for test." << std::endl;

    wait_for_flusher_to_settle(h, h1);
    verify_curr_items(h, h1, num_items, "Wrong number of items");
    int num_non_resident = get_int_stat(h, h1, "vb_active_num_non_resident");
    checkge(num_non_resident, num_items * 6 / 100,
            "Expected num_non_resident to be at least 6% of total items");

    /* Run access scanner task once and expect it to generate access log */
    check(set_param(h, h1, protocol_binary_engine_param_flush,
                    "access_scanner_run", "true"),
          "Failed to trigger access scanner");

    // Wait for the number of runs to equal the number of shards.
    wait_for_stat_to_be(h, h1, "ep_num_access_scanner_runs", num_shards);

    /* This time since resident ratio is < 95% access log should be generated */
    checkeq(0, access(name.c_str(), F_OK),
            (std::string("access log file (") + name +
             ") should exist (got errno:" + std::to_string(errno)).c_str());

    /* Increase resident ratio by deleting items */
    vbucketDelete(h, h1, 0);
    check(set_vbucket_state(h, h1, 0, vbucket_state_active),
          "Failed to set VB0 state.");

    /* Run access scanner task once */
    const int access_scanner_skips =
            get_int_stat(h, h1, "ep_num_access_scanner_skips");
    check(set_param(h, h1, protocol_binary_engine_param_flush,
                    "access_scanner_run", "true"),
          "Failed to trigger access scanner");
    // Scanner skips (rather than runs) when the resident ratio is high;
    // wait until each shard has recorded a skip.
    wait_for_stat_to_be(h, h1, "ep_num_access_scanner_skips",
                        access_scanner_skips + num_shards);

    /* Access log files should be removed because resident ratio > 95% */
    checkeq(-1, access(prev.c_str(), F_OK),
            ".old access log file should not exist");
    checkeq(-1, access(name.c_str(), F_OK), "access log file should not exist");
/* Verifies that setting an out-of-range alog_task_time (50; valid range is
 * 0-23 hours) is rejected with EINVAL and a validation-error body. */
static enum test_result test_set_param_message(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
    set_param(h, h1, protocol_binary_engine_param_flush, "alog_task_time", "50");

    checkeq(PROTOCOL_BINARY_RESPONSE_EINVAL, last_status.load(),
            "Expected an invalid value error for an out of bounds alog_task_time");
    // NOTE(review): compare() returns non-zero when the strings DIFFER, so
    // this check passes only when last_body != "Validation Error" exactly —
    // confirm the intent (likely: body is a longer validation message).
    check(std::string("Validation Error").compare(last_body), "Expected a "
          "validation error in the response body");
/* Stores 5000 items into VB0 (active) with VB1 as replica, restarts the
 * engine, and verifies: warmup stats are present and non-zero, the previous
 * vbucket states were persisted/recovered, and per-vbucket item counts match. */
static enum test_result test_warmup_stats(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
    if (!isWarmupEnabled(h, h1)) {

    check(set_vbucket_state(h, h1, 0, vbucket_state_active), "Failed to set VB0 state.");
    check(set_vbucket_state(h, h1, 1, vbucket_state_replica), "Failed to set VB1 state.");

    for (int i = 0; i < 5000; ++i) {
        std::stringstream key;
        checkeq(ENGINE_SUCCESS,
                store(h, h1, NULL, OPERATION_SET, key.str().c_str(),
        h1->release(h, NULL, it);

    // Restart the server.
    testHarness.reload_engine(&h, &h1,
                              testHarness.engine_path,
                              testHarness.get_current_testcase()->cfg,

    wait_for_warmup_complete(h, h1);

    const auto warmup_stats = get_all_stats(h, h1, "warmup");

    // Check all expected warmup stats exists.
    const char* warmup_keys[] = { "ep_warmup_thread",
                                  "ep_warmup_value_count",
                                  "ep_warmup_key_count",

    for (const auto* key : warmup_keys) {
        check(warmup_stats.find(key) != warmup_stats.end(),
              (std::string("Found no ") + key).c_str());

    // Warmup must have taken a measurable (positive) amount of time.
    std::string warmup_time = warmup_stats.at("ep_warmup_time");
    cb_assert(std::stoi(warmup_time) > 0);

    // Pre-shutdown vbucket states must have been persisted and recovered.
    const auto prev_vb_stats = get_all_stats(h, h1, "prev-vbucket");

    check(prev_vb_stats.find("vb_0") != prev_vb_stats.end(),
          "Found no previous state for VB0");
    check(prev_vb_stats.find("vb_1") != prev_vb_stats.end(),
          "Found no previous state for VB1");

    checkeq(std::string("active"), prev_vb_stats.at("vb_0"),
            "Unexpected stats for vb 0");
    checkeq(std::string("replica"), prev_vb_stats.at("vb_1"),
            "Unexpected stats for vb 1");

    const auto vb_details_stats = get_all_stats(h, h1, "vbucket-details");
    checkeq(5000, std::stoi(vb_details_stats.at("vb_0:num_items")),
            "Unexpected item count for vb 0");
    checkeq(0, std::stoi(vb_details_stats.at("vb_1:num_items")),
            "Unexpected item count for vb 1");
/* Stores 10000 items across four active vbuckets, restarts with a 1%
 * warmup_min_item_threshold, and verifies warmup respects the policy:
 * full_eviction warms key count == value count; value eviction warms all
 * keys but loads at most ~1% of values. */
static enum test_result test_warmup_with_threshold(ENGINE_HANDLE *h,
                                                   ENGINE_HANDLE_V1 *h1) {
    if (!isWarmupEnabled(h, h1)) {

    check(set_vbucket_state(h, h1, 0, vbucket_state_active), "Failed set vbucket 1 state.");
    check(set_vbucket_state(h, h1, 1, vbucket_state_active), "Failed set vbucket 2 state.");
    check(set_vbucket_state(h, h1, 2, vbucket_state_active), "Failed set vbucket 3 state.");
    check(set_vbucket_state(h, h1, 3, vbucket_state_active), "Failed set vbucket 4 state.");

    for (int i = 0; i < 10000; ++i) {
        std::stringstream key;
        checkeq(ENGINE_SUCCESS,
                store(h, h1, NULL, OPERATION_SET, key.str().c_str(), "somevalue", &it,
        h1->release(h, NULL, it);

    // Restart the server.
    testHarness.reload_engine(&h, &h1,
                              testHarness.engine_path,
                              testHarness.get_current_testcase()->cfg,

    wait_for_warmup_complete(h, h1);

            get_int_stat(h, h1, "ep_warmup_min_item_threshold", "warmup"),
            "Unable to set warmup_min_item_threshold to 1%");

    const std::string policy = get_str_stat(h, h1, "ep_item_eviction_policy");

    if (policy == "full_eviction") {
        // Full eviction loads whole documents: keys warmed == values warmed.
        checkeq(get_int_stat(h, h1, "ep_warmup_key_count", "warmup"),
                get_int_stat(h, h1, "ep_warmup_value_count", "warmup"),
                "Warmed up key count didn't match warmed up value count");
        checkeq(10000, get_int_stat(h, h1, "ep_warmup_key_count", "warmup"),
                "Warmup didn't warmup all keys");
        // 1% of 10000 is 100; allow a little slack (<= 110).
        check(get_int_stat(h, h1, "ep_warmup_value_count", "warmup") <= 110,
              "Warmed up value count found to be greater than 1%");

    cb_assert(get_int_stat(h, h1, "ep_warmup_time", "warmup") > 0);
// Comment out the entire test since the hack gave warnings on win32
/* Stores items, accesses some so the scanner logs them, stores more after
 * the access-log generation window, restarts, and verifies warmup loads
 * access-logged items first and stops at the 75% item watermark. */
static enum test_result test_warmup_accesslog(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {

    /* I'm getting a weird link error from clang.. disable the test until I

    int n_items_to_store1 = 10;
    for (int i = 0; i < n_items_to_store1; ++i) {
        std::stringstream key;
        // NOTE(review): key.str() returns a temporary std::string; its
        // c_str() pointer dangles once this statement completes, so the
        // store() below reads freed memory. Same pattern in the two loops
        // below. Should keep the string alive: std::string s = key.str();
        const char* keystr = key.str().c_str();
        checkeq(ENGINE_SUCCESS,
                store(h, h1, NULL, OPERATION_SET, keystr, "somevalue", &it, 0, 0),
        h1->release(h, NULL, it);

    wait_for_flusher_to_settle(h, h1);

    // Touch a subset so the access scanner records them in the access log.
    int n_items_to_access = 10;
    for (int i = 0; i < n_items_to_access; ++i) {
        std::stringstream key;
        // NOTE(review): dangling pointer — see comment above.
        const char* keystr = key.str().c_str();
        checkeq(ENGINE_SUCCESS,
                get(h, h1, NULL, &it, keystr, 0),
        h1->release(h, NULL, it);

    // sleep so that scanner task can have time to generate access log

    // store additional items
    int n_items_to_store2 = 10;
    for (int i = 0; i < n_items_to_store2; ++i) {
        std::stringstream key;
        key << "key2-" << i;
        // NOTE(review): dangling pointer — see comment above.
        const char* keystr = key.str().c_str();
        checkeq(ENGINE_SUCCESS,
                store(h, h1, NULL, OPERATION_SET, keystr, "somevalue", &it, 0, 0),
        h1->release(h, NULL, it);

    // Restart the server.
    testHarness.reload_engine(&h, &h1,
                              testHarness.engine_path,
                              testHarness.get_current_testcase()->cfg,

    wait_for_warmup_complete(h, h1);
    // n_items_to_access items should be loaded from access log first
    // but we continue to load until we hit 75% item watermark

    int warmedup = get_int_stat(h, h1, "ep_warmup_value_count", "warmup");
    // std::cout << "ep_warmup_value_count = " << warmedup << std::endl;
    // 75% of the 20 stored items, plus one: 16.
    int expected = (n_items_to_store1 + n_items_to_store2) * 0.75 + 1;

    check(warmedup == expected, "Expected 16 items to be resident");
/* Writes 20k items with long keys, then restarts with a tiny 2MB quota so
 * warmup hits OOM; verifies ENABLE_TRAFFIC is then refused with ENOMEM. */
static enum test_result test_warmup_oom(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
    if (!isWarmupEnabled(h, h1)) {

    write_items(h, h1, 20000, 0, "superlongnameofkey1234567890123456789012345678902");

    wait_for_flusher_to_settle(h, h1);

    // Reload with a quota far too small to hold the data set.
    std::string config(testHarness.get_current_testcase()->cfg);
    config = config + "max_size=2097152;item_eviction_policy=value_only";

    testHarness.reload_engine(&h, &h1,
                              testHarness.engine_path,

    wait_for_warmup_complete(h, h1);

    protocol_binary_request_header *pkt = createPacket(PROTOCOL_BINARY_CMD_ENABLE_TRAFFIC);
    checkeq(ENGINE_SUCCESS,
            h1->unknown_command(h, NULL, pkt, add_response, testHarness.doc_namespace),
            "Failed to send data traffic command to the services");
    checkeq(PROTOCOL_BINARY_RESPONSE_ENOMEM, last_status.load(),
            "Data traffic command should have failed with enomem");
/* Regression test for CBD-225: ep_startup_time must stay constant across
 * normal operations (stores/flushes) and change only on engine restart. */
static enum test_result test_cbd_225(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {

    // get engine startup token
    time_t token1 = get_int_stat(h, h1, "ep_startup_time");
    check(token1 != 0, "Expected non-zero startup token");

    // store some random data
    checkeq(ENGINE_SUCCESS,
            store(h, h1, NULL, OPERATION_SET,"k1", "v1", &i),
            "Failed to fail to store an item.");
    h1->release(h, NULL, i);
    checkeq(ENGINE_SUCCESS,
            store(h, h1, NULL, OPERATION_SET,"k2", "v2", &i),
            "Failed to fail to store an item.");
    h1->release(h, NULL, i);
    wait_for_flusher_to_settle(h, h1);

    // check token again, which should be the same as before
    time_t token2 = get_int_stat(h, h1, "ep_startup_time");
    check(token2 == token1, "Expected the same startup token");

    // reload the engine
    testHarness.time_travel(10);
    testHarness.reload_engine(&h, &h1,
                              testHarness.engine_path,
                              testHarness.get_current_testcase()->cfg,
    wait_for_warmup_complete(h, h1);

    // check token, this time we should get a different one
    time_t token3 = get_int_stat(h, h1, "ep_startup_time");
    check(token3 != token1, "Expected a different startup token");
/* Verifies the "workload" stat group reports the expected thread-pool
 * sizing (readers/writers/auxio/nonio, current and max) and shard count
 * for the default test configuration. */
static enum test_result test_workload_stats(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
    const void* cookie = testHarness.create_cookie();
    // NOTE(review): "Falied" typo in the message below — runtime string,
    // left untouched here; fix alongside a code change.
    checkeq(ENGINE_SUCCESS,
            h1->get_stats(h, cookie, "workload",
                          strlen("workload"), add_stats),
            "Falied to get workload stats");
    testHarness.destroy_cookie(cookie);
    int num_read_threads = get_int_stat(h, h1, "ep_workload:num_readers",
    int num_write_threads = get_int_stat(h, h1, "ep_workload:num_writers",
    int num_auxio_threads = get_int_stat(h, h1, "ep_workload:num_auxio",
    int num_nonio_threads = get_int_stat(h, h1, "ep_workload:num_nonio",
    int max_read_threads = get_int_stat(h, h1, "ep_workload:max_readers",
    int max_write_threads = get_int_stat(h, h1, "ep_workload:max_writers",
    int max_auxio_threads = get_int_stat(h, h1, "ep_workload:max_auxio",
    int max_nonio_threads = get_int_stat(h, h1, "ep_workload:max_nonio",
    int num_shards = get_int_stat(h, h1, "ep_workload:num_shards", "workload");
    checkeq(4, num_read_threads, "Incorrect number of readers");
    // MB-12279: limiting max writers to 4 for DGM bgfetch performance
    checkeq(4, num_write_threads, "Incorrect number of writers");
    checkeq(1, num_auxio_threads, "Incorrect number of auxio threads");
    check(num_nonio_threads > 1 && num_nonio_threads <= 8,
          "Incorrect number of nonio threads");
    checkeq(4, max_read_threads, "Incorrect limit of readers");
    // MB-12279: limiting max writers to 4 for DGM bgfetch performance
    checkeq(4, max_write_threads, "Incorrect limit of writers");
    checkeq(1, max_auxio_threads, "Incorrect limit of auxio threads");
    check(max_nonio_threads > 1 && max_nonio_threads <=8,
          "Incorrect limit of nonio threads");
    checkeq(5, num_shards, "Incorrect number of shards");
/* Like test_workload_stats but for a config that pins auxio/nonio counts;
 * verifies the remaining capacity is distributed to readers/writers and
 * max limits match the configured/derived values. */
static enum test_result test_max_workload_stats(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
    const void* cookie = testHarness.create_cookie();
    checkeq(ENGINE_SUCCESS,
            h1->get_stats(h, cookie, "workload",
                          strlen("workload"), add_stats),
            "Failed to get workload stats");
    testHarness.destroy_cookie(cookie);
    int num_read_threads = get_int_stat(h, h1, "ep_workload:num_readers",
    int num_write_threads = get_int_stat(h, h1, "ep_workload:num_writers",
    int num_auxio_threads = get_int_stat(h, h1, "ep_workload:num_auxio",
    int num_nonio_threads = get_int_stat(h, h1, "ep_workload:num_nonio",
    int max_read_threads = get_int_stat(h, h1, "ep_workload:max_readers",
    int max_write_threads = get_int_stat(h, h1, "ep_workload:max_writers",
    int max_auxio_threads = get_int_stat(h, h1, "ep_workload:max_auxio",
    int max_nonio_threads = get_int_stat(h, h1, "ep_workload:max_nonio",
    int num_shards = get_int_stat(h, h1, "ep_workload:num_shards", "workload");
    // if max limit on other groups missing use remaining for readers & writers
    checkeq(5, num_read_threads, "Incorrect number of readers");
    // MB-12279: limiting max writers to 4 for DGM bgfetch performance
    checkeq(4, num_write_threads, "Incorrect number of writers");

    checkeq(1, num_auxio_threads, "Incorrect number of auxio threads");// config
    checkeq(4, num_nonio_threads, "Incorrect number of nonio threads");// config
    checkeq(5, max_read_threads, "Incorrect limit of readers");// derived
    // MB-12279: limiting max writers to 4 for DGM bgfetch performance
    checkeq(4, max_write_threads, "Incorrect limit of writers");// max-capped
    checkeq(1, max_auxio_threads, "Incorrect limit of auxio threads");// config
    checkeq(4, max_nonio_threads, "Incorrect limit of nonio threads");// config
    checkeq(5, num_shards, "Incorrect number of shards");
/* Verifies the "dispatcher" stat group: each reader worker reports a task
 * description from the known task list and a state from the known state
 * list, and the expected total number of worker threads is spawned. */
static enum test_result test_worker_stats(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
    checkeq(ENGINE_SUCCESS,
            h1->get_stats(h, NULL, "dispatcher",
                          strlen("dispatcher"), add_stats),
            "Failed to get worker stats");

    // Whitelist of every task description a worker may legitimately report.
    std::set<std::string> tasklist;
    tasklist.insert("Running a flusher loop");
    tasklist.insert("Snapshotting vbucket states for the shard");
    tasklist.insert("Deleting VBucket");
    tasklist.insert("Updating stat snapshot on disk");
    tasklist.insert("Batching background fetch");
    tasklist.insert("Fetching item from disk for vkey stat");
    tasklist.insert("Fetching item from disk");
    tasklist.insert("Loading TAP backfill from disk");
    tasklist.insert("Tap connection notifier");
    tasklist.insert("Generating access log");
    tasklist.insert("Fetching item from disk for tap");
    tasklist.insert("Snapshotting vbucket states");
    tasklist.insert("Persisting a vbucket state for vbucket");
    tasklist.insert("Reaping tap or dcp connection");
    tasklist.insert("Warmup - initialize");
    tasklist.insert("Warmup - creating vbuckets");
    tasklist.insert("Warmup - estimate item count");
    tasklist.insert("Warmup - key dump");
    tasklist.insert("Warmup - check for access log");
    tasklist.insert("Warmup - loading access log");
    tasklist.insert("Warmup - loading KV Pairs");
    tasklist.insert("Warmup - loading data");
    tasklist.insert("Warmup - completion");
    tasklist.insert("Not currently running any task");

    // Whitelist of valid worker thread states.
    std::set<std::string> statelist;
    statelist.insert("creating");
    statelist.insert("running");
    statelist.insert("waiting");
    statelist.insert("sleeping");
    statelist.insert("shutdown");
    statelist.insert("dead");

    // Strip any ":<detail>" suffix from the task description before lookup.
    // NOTE(review): find() returns std::string::npos (size_t) which is
    // truncated when stored in 'unsigned' on LP64; the code then relies on
    // the truncated value being non-zero to take the whole string — fragile
    // but apparently working. size_t would be the correct type.
    std::string worker_0_task = vals["reader_worker_0:task"];
    unsigned pos = worker_0_task.find(":");
    worker_0_task = worker_0_task.substr(0, pos ? pos : worker_0_task.size());
    std::string worker_0_state = vals["reader_worker_0:state"];
    check(tasklist.find(worker_0_task)!=tasklist.end(),
          "worker_0's Current task incorrect");
    check(statelist.find(worker_0_state)!=statelist.end(),
          "worker_0's state incorrect");
    std::string worker_1_task = vals["reader_worker_1:task"];
    pos = worker_1_task.find(":");
    worker_1_task = worker_1_task.substr(0, pos ? pos : worker_1_task.size());
    std::string worker_1_state = vals["reader_worker_1:state"];
    check(tasklist.find(worker_1_task)!=tasklist.end(),
          "worker_1's Current task incorrect");
    check(statelist.find(worker_1_state)!=statelist.end(),
          "worker_1's state incorrect");

    checkeq(11, get_int_stat(h, h1, "ep_num_workers"), // cannot spawn less
            "Incorrect number of threads spawned");
/* Sets a cluster configuration blob via SET_CLUSTER_CONFIG and verifies
 * GET_CLUSTER_CONFIG returns the identical bytes. */
static enum test_result test_cluster_config(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
    check(set_vbucket_state(h, h1, 1, vbucket_state_active), "Failed set vbucket 1 state.");
    check(verify_vbucket_state(h, h1, 1, vbucket_state_active),
          "VBucket state not active");
    // Arbitrary 8-byte payload used as the "cluster config" blob.
    uint64_t var = 1234;
    protocol_binary_request_header *pkt1 =
        createPacket(PROTOCOL_BINARY_CMD_SET_CLUSTER_CONFIG, 1, 0, NULL, 0, NULL, 0, (char*)&var, 8);
    checkeq(ENGINE_SUCCESS,
            h1->unknown_command(h, NULL, pkt1, add_response, testHarness.doc_namespace),
            "Failed to set cluster configuration");

    protocol_binary_request_header *pkt2 =
        createPacket(PROTOCOL_BINARY_CMD_GET_CLUSTER_CONFIG, 1, 0, NULL, 0, NULL, 0, NULL, 0);
    checkeq(ENGINE_SUCCESS, h1->unknown_command(h, NULL, pkt2, add_response, testHarness.doc_namespace),
            "Failed to get cluster configuration");

    // Byte-compare the returned body against the blob we set.
    if (last_body.compare(0, sizeof(var), reinterpret_cast<char*>(&var),
                          sizeof(var)) != 0) {
/* Verifies that a not-my-vbucket response carries the current cluster
 * config blob, both via GET_VBUCKET on an unowned vbucket and via the
 * engine's vb-map recovery path. */
static enum test_result test_not_my_vbucket_with_cluster_config(ENGINE_HANDLE *h,
                                                                ENGINE_HANDLE_V1 *h1) {
    // Arbitrary 8-byte payload used as the "cluster config" blob.
    uint64_t var = 4321;
    protocol_binary_request_header *pkt1 =
        createPacket(PROTOCOL_BINARY_CMD_SET_CLUSTER_CONFIG, 1, 0, NULL, 0, NULL, 0, (char*)&var, 8);
    checkeq(ENGINE_SUCCESS, h1->unknown_command(h, NULL, pkt1, add_response, testHarness.doc_namespace),
            "Failed to set cluster configuration");

    protocol_binary_request_header *pkt2 =
        createPacket(PROTOCOL_BINARY_CMD_GET_VBUCKET, 1, 0, NULL, 0, NULL, 0, NULL, 0);
    ENGINE_ERROR_CODE ret = h1->unknown_command(h, NULL, pkt2,
                                                testHarness.doc_namespace);
    checkeq(ENGINE_SUCCESS, ret, "Should've received not_my_vbucket/cluster config");

    // The NMVB response body must contain the cluster config blob.
    if (last_body.compare(0, sizeof(var), reinterpret_cast<char*>(&var),
                          sizeof(var)) != 0) {

    check(verify_key(h, h1, "key", 2) == ENGINE_NOT_MY_VBUCKET, "Expected miss");
    checkeq(ENGINE_SUCCESS,
            h1->get_engine_vb_map(h, NULL, vb_map_response),
            "Failed to recover cluster configuration");
    if (last_body.compare(0, sizeof(var), reinterpret_cast<char*>(&var),
                          sizeof(var)) != 0) {
/* Stores a range of keys, deletes one, then issues GET_KEYS starting from
 * start_key and validates the binary response: each entry is a 16-bit
 * length followed by the key bytes, in ascending order, with the deleted
 * key absent. */
static enum test_result test_all_keys_api(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
    std::vector<std::string> keys;
    const int start_key_idx = 10, del_key_idx = 12, num_keys = 5,

    for (uint32_t i = 0; i < total_keys; ++i) {
        std::string key("key_" + std::to_string(i));
        keys.push_back(key);
    std::vector<std::string>::iterator it;
    for (it = keys.begin(); it != keys.end(); ++it) {
        checkeq(ENGINE_SUCCESS, store(h, h1, NULL, OPERATION_SET, it->c_str(),
                                      it->c_str(), &itm, 0, 0),
                "Failed to store a value");
        h1->release(h, NULL, itm);
    std::string del_key("key_" + std::to_string(del_key_idx));
    checkeq(ENGINE_SUCCESS, del(h, h1, del_key.c_str(), 0, 0),
            "Failed to delete key");
    wait_for_flusher_to_settle(h, h1);
    checkeq(total_keys - 1, get_int_stat(h, h1, "curr_items"),
            "Item count mismatch");

    // Request num_keys keys starting from start_key (count sent big-endian).
    std::string start_key("key_" + std::to_string(start_key_idx));
    const uint16_t keylen = start_key.length();
    uint32_t count = htonl(num_keys);

    protocol_binary_request_header *pkt1 =
        createPacket(PROTOCOL_BINARY_CMD_GET_KEYS, 0, 0,
                     reinterpret_cast<char*>(&count),
                     sizeof(count), start_key.c_str(), keylen, NULL, 0, 0x00);

    checkeq(ENGINE_SUCCESS, h1->unknown_command(h, NULL, pkt1, add_response, testHarness.doc_namespace),
            "Failed to get all_keys, sort: ascending");

    /* Check the keys. */

    /* Since we have one deleted key, we must go till num_keys + 1 */
    for (size_t i = 0; i < num_keys + 1; ++i) {
        if (del_key_idx == start_key_idx + i) {
        // Parse one <uint16 length><key bytes> record from the response.
        memcpy(&len, last_body.data() + offset, sizeof(uint16_t));
        checkeq(keylen, len, "Key length mismatch in all_docs response");
        std::string key("key_" + std::to_string(start_key_idx + i));
        offset += sizeof(uint16_t);
        checkeq(0, last_body.compare(offset, keylen, key.c_str()),
                "Key mismatch in all_keys response");
/* Regression test: issuing GET_KEYS against a vbucket that is still being
 * created (persistence stopped) must not fail — it should return success
 * rather than racing the vbucket's backing store creation. */
static enum test_result test_all_keys_api_during_bucket_creation(
                                ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {

    uint32_t count = htonl(5);
    const char key[] = "key_10";

    protocol_binary_request_header *pkt1 =
        createPacket(PROTOCOL_BINARY_CMD_GET_KEYS, 1, 0,
                     reinterpret_cast<char*>(&count),
                     sizeof(count), key, strlen(key), NULL, 0, 0x00);

    // Stop persistence so vbucket 1's creation stays pending on disk.
    stop_persistence(h, h1);
    check(set_vbucket_state(h, h1, 1, vbucket_state_active),
          "Failed set vbucket 1 state.");

    ENGINE_ERROR_CODE err = h1->unknown_command(h, NULL, pkt1,
                                                testHarness.doc_namespace);

    start_persistence(h, h1);

    checkeq(ENGINE_SUCCESS, err,
            "Unexpected return code from all_keys_api");
    checkeq(PROTOCOL_BINARY_RESPONSE_SUCCESS, last_status.load(),
            "Unexpected response status");
/* Verifies curr_items and ep_total_enqueued both advance by 3 after one
 * ADD and two SET operations on an initially empty bucket. */
static enum test_result test_curr_items_add_set(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {

    // Verify initial case.
    verify_curr_items(h, h1, 0, "init");

    const auto initial_enqueued = get_int_stat(h, h1, "ep_total_enqueued");

    // Verify set and add case
    checkeq(ENGINE_SUCCESS,
            store(h, h1, NULL, OPERATION_ADD,"k1", "v1", &i),
            "Failed to fail to store an item.");
    h1->release(h, NULL, i);
    checkeq(ENGINE_SUCCESS,
            store(h, h1, NULL, OPERATION_SET,"k2", "v2", &i),
            "Failed to fail to store an item.");
    h1->release(h, NULL, i);
    checkeq(ENGINE_SUCCESS,
            store(h, h1, NULL, OPERATION_SET,"k3", "v3", &i),
            "Failed to fail to store an item.");
    h1->release(h, NULL, i);
    if (isPersistentBucket(h, h1) && is_full_eviction(h, h1)) {
        // MB-21957: FE mode - curr_items is only valid once we flush documents
        wait_for_flusher_to_settle(h, h1);
    verify_curr_items(h, h1, 3, "three items stored");
    checkeq(initial_enqueued + 3, get_int_stat(h, h1, "ep_total_enqueued"),
            "Expected total_enqueued to increase by 3 after 3 new items");
/* Verifies curr_items drops from 3 to 2 after a single delete is persisted. */
static enum test_result test_curr_items_delete(ENGINE_HANDLE *h,
                                               ENGINE_HANDLE_V1 *h1) {
    // Verify initial case.
    verify_curr_items(h, h1, 0, "init");

    write_items(h, h1, 3);
    wait_for_flusher_to_settle(h, h1);

    // Verify delete case.
    checkeq(ENGINE_SUCCESS, del(h, h1, "key1", 0, 0),
            "Failed remove with value.");

    // Wait for the delete to be persisted before checking the count.
    wait_for_stat_change(h, h1, "curr_items", 3);
    verify_curr_items(h, h1, 2, "one item deleted - persisted");
/* Verifies curr_items returns to 0 after a bucket flush (flush requires
 * degraded mode to be enabled around the call). */
static enum test_result test_curr_items_flush(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {

    // Verify initial case.
    verify_curr_items(h, h1, 0, "init");

    write_items(h, h1, 3);
    wait_for_flusher_to_settle(h, h1);

    // Verify flush case.
    set_degraded_mode(h, h1, nullptr, true);
    checkeq(ENGINE_SUCCESS, h1->flush(h, nullptr),
    set_degraded_mode(h, h1, nullptr, false);
    verify_curr_items(h, h1, 0, "flush");
/* Verifies item-count accounting across vbucket state transitions:
 * dead vbuckets contribute 0 items, reactivating restores the count,
 * and deleting the vbucket drops it to 0 permanently. */
static enum test_result test_curr_items_dead(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {

    // Verify initial case.
    verify_curr_items(h, h1, 0, "init");

    write_items(h, h1, 3);
    wait_for_flusher_to_settle(h, h1);

    // Verify dead vbucket case.
    check(set_vbucket_state(h, h1, 0, vbucket_state_dead),
          "Failed set vbucket 0 state to dead");

    verify_curr_items(h, h1, 0, "dead vbucket");
    checkeq(0, get_int_stat(h, h1, "curr_items_tot"),
            "Expected curr_items_tot to be 0 with a dead vbucket");

    // Making the vbucket active again should restore the item count.
    check(set_vbucket_state(h, h1, 0, vbucket_state_active),
          "Failed set vbucket 0 state to active");

    verify_curr_items(h, h1, 3, "resurrected vbucket");

    // Now completely delete it.
    check(set_vbucket_state(h, h1, 0, vbucket_state_dead),
          "Failed set vbucket 0 state to dead (2)");
    vbucketDelete(h, h1, 0);
    checkeq(PROTOCOL_BINARY_RESPONSE_SUCCESS, last_status.load(),
            "Expected success deleting vbucket.");
    verify_curr_items(h, h1, 0, "del vbucket");
    checkeq(0, get_int_stat(h, h1, "curr_items_tot"),
            "Expected curr_items_tot to be 0 after deleting a vbucket");
/* Exercises manual value eviction (EVICT_KEY):
 *  - dirty (unpersisted) items cannot be ejected;
 *  - persisted items eject and bump vb_active_num_non_resident;
 *  - re-ejecting reports "Already ejected.";
 *  - evicting a missing key returns ENOENT under value eviction but
 *    SUCCESS under full eviction (no disk lookup performed);
 *  - non-resident counters track vbucket state changes. */
static enum test_result test_value_eviction(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
    check(set_vbucket_state(h, h1, 1, vbucket_state_active), "Failed to set vbucket state.");

    h1->reset_stats(h, NULL);
    checkeq(0, get_int_stat(h, h1, "ep_num_value_ejects"),
            "Expected reset stats to set ep_num_value_ejects to zero");
    checkeq(0, get_int_stat(h, h1, "ep_num_non_resident"),
            "Expected all items to be resident");
    checkeq(0, get_int_stat(h, h1, "vb_active_num_non_resident"),
            "Expected all active vbucket items to be resident");

    // With persistence stopped the item stays dirty, so eviction must fail.
    stop_persistence(h, h1);
    checkeq(ENGINE_SUCCESS,
            store(h, h1, NULL, OPERATION_SET,"k1", "v1", &i, 0, 0),
            "Failed to fail to store an item.");
    h1->release(h, NULL, i);
    evict_key(h, h1, "k1", 0, "Can't eject: Dirty object.", true);
    start_persistence(h, h1);
    wait_for_flusher_to_settle(h, h1);
    stop_persistence(h, h1);
    checkeq(ENGINE_SUCCESS,
            store(h, h1, NULL, OPERATION_SET,"k2", "v2", &i, 0, 1),
            "Failed to fail to store an item.");
    h1->release(h, NULL, i);
    evict_key(h, h1, "k2", 1, "Can't eject: Dirty object.", true);
    start_persistence(h, h1);
    wait_for_flusher_to_settle(h, h1);

    // Now both items are clean; eviction should succeed.
    evict_key(h, h1, "k1", 0, "Ejected.");
    evict_key(h, h1, "k2", 1, "Ejected.");

    checkeq(2, get_int_stat(h, h1, "vb_active_num_non_resident"),
            "Expected two non-resident items for active vbuckets");

    evict_key(h, h1, "k1", 0, "Already ejected.");
    evict_key(h, h1, "k2", 1, "Already ejected.");

    // Evicting a key that doesn't exist at all.
    protocol_binary_request_header *pkt = createPacket(PROTOCOL_BINARY_CMD_EVICT_KEY, 0, 0,
                                                       NULL, 0, "missing-key", 11);
    pkt->request.vbucket = htons(0);

    checkeq(ENGINE_SUCCESS,
            h1->unknown_command(h, NULL, pkt, add_response, testHarness.doc_namespace),
            "Failed to evict key.");

    checkeq(ENGINE_SUCCESS, h1->get_stats(h, NULL, NULL, 0, add_stats),
            "Failed to get stats.");
    std::string eviction_policy = vals.find("ep_item_eviction_policy")->second;
    if (eviction_policy == "value_only") {
        checkeq(PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, last_status.load(),
                "expected the key to be missing...");
        // Note that we simply return SUCCESS when EVICT_KEY is issued to
        // a non-resident or non-existent key with full eviction to avoid a disk lookup.
        checkeq(PROTOCOL_BINARY_RESPONSE_SUCCESS, last_status.load(),
                "expected the success for evicting a non-existent key with full eviction");

    h1->reset_stats(h, NULL);
    checkeq(0, get_int_stat(h, h1, "ep_num_value_ejects"),
            "Expected reset stats to set ep_num_value_ejects to zero");

    // Reading k1 brings it back resident, leaving only k2 non-resident.
    check_key_value(h, h1, "k1", "v1", 2);
    checkeq(1, get_int_stat(h, h1, "vb_active_num_non_resident"),
            "Expected only one active vbucket item to be non-resident");

    check(set_vbucket_state(h, h1, 0, vbucket_state_replica), "Failed to set vbucket state.");
    check(set_vbucket_state(h, h1, 1, vbucket_state_replica), "Failed to set vbucket state.");
    checkeq(0, get_int_stat(h, h1, "vb_active_num_non_resident"),
            "Expected no non-resident items");
/* Regression test: delete a vbucket, recreate it, restore the same keys,
 * then restart — warmup must not report any duplicate items from disk
 * (ep_warmup_dups == 0) and every key/value must persist correctly. */
static enum test_result test_duplicate_items_disk(ENGINE_HANDLE *h, ENGINE_HANDLE_V1 *h1) {
    if (!isWarmupEnabled(h, h1)) {

    check(set_vbucket_state(h, h1, 1, vbucket_state_active), "Failed to set vbucket state.");

    std::vector<std::string> keys;
    for (int j = 0; j < 100; ++j) {
        std::stringstream ss;
        std::string key(ss.str());
        keys.push_back(key);
    std::vector<std::string>::iterator it;
    for (it = keys.begin(); it != keys.end(); ++it) {
        checkeq(ENGINE_SUCCESS,
                store(h, h1, NULL, OPERATION_SET, it->c_str(), "value", &i, 0, 1),
                "Failed to store a value");
        h1->release(h, NULL, i);
    wait_for_flusher_to_settle(h, h1);

    // don't need to explicitly set the vbucket state to dead as this is
    // done as part of the vbucketDelete. See KVBucket::deleteVBucket
    int vb_del_num = get_int_stat(h, h1, "ep_vbucket_del");
    vbucketDelete(h, h1, 1);
    checkeq(PROTOCOL_BINARY_RESPONSE_SUCCESS, last_status.load(),
            "Failure deleting dead bucket.");
    check(verify_vbucket_missing(h, h1, 1),
          "vbucket 1 was not missing after deleting it.");
    // wait for the deletion to successfully complete before setting the
    // vbucket state active (which creates the vbucket)
    wait_for_stat_change(h, h1, "ep_vbucket_del", vb_del_num);

    check(set_vbucket_state(h, h1, 1, vbucket_state_active), "Failed to set vbucket state.");

    // Re-store the same keys (value == key this time) into the new vbucket.
    for (it = keys.begin(); it != keys.end(); ++it) {
        checkeq(ENGINE_SUCCESS,
                store(h, h1, NULL, OPERATION_SET, it->c_str(), it->c_str(), &i, 0, 1),
                "Failed to store a value");
        h1->release(h, NULL, i);
    wait_for_flusher_to_settle(h, h1);

    testHarness.reload_engine(&h, &h1,
                              testHarness.engine_path,
                              testHarness.get_current_testcase()->cfg,
    wait_for_warmup_complete(h, h1);
    check(set_vbucket_state(h, h1, 1, vbucket_state_active), "Failed to set vbucket state.");
    // Make sure that a key/value item is persisted correctly
    for (it = keys.begin(); it != keys.end(); ++it) {
        evict_key(h, h1, it->c_str(), 1, "Ejected.");
    for (it = keys.begin(); it != keys.end(); ++it) {
        check_key_value(h, h1, it->c_str(), it->data(), it->size(), 1);
    checkeq(0, get_int_stat(h, h1, "ep_warmup_dups"),
            "Expected no duplicate items from disk");
3877 static enum test_result test_disk_gt_ram_golden(ENGINE_HANDLE *h,
3878 ENGINE_HANDLE_V1 *h1) {
3879 // Check/grab initial state.
3880 const auto initial_enqueued = get_int_stat(h, h1, "ep_total_enqueued");
3881 int itemsRemoved = get_int_stat(h, h1, "ep_items_rm_from_checkpoints");
3883 // Store some data and check post-set state.
3884 wait_for_persisted_value(h, h1, "k1", "some value");
3885 testHarness.time_travel(65);
3886 wait_for_stat_change(h, h1, "ep_items_rm_from_checkpoints", itemsRemoved);
3888 checkeq(0, get_int_stat(h, h1, "ep_bg_fetched"),
3889 "Should start with zero bg fetches");
3890 checkeq((initial_enqueued + 1),
3891 get_int_stat(h, h1, "ep_total_enqueued"),
3892 "Should have additional item enqueued after store");
3893 int kv_size = get_int_stat(h, h1, "ep_kv_size");
3894 int mem_used = get_int_stat(h, h1, "mem_used");
3897 evict_key(h, h1, "k1");
3899 int kv_size2 = get_int_stat(h, h1, "ep_kv_size");
3900 int mem_used2 = get_int_stat(h, h1, "mem_used");
3902 checkgt(kv_size, kv_size2, "kv_size should have decreased after eviction");
3903 checkgt(mem_used, mem_used2, "mem_used should have decreased after eviction");
3906 check_key_value(h, h1, "k1", "some value", 10);
3908 int kv_size3 = get_int_stat(h, h1, "ep_kv_size");
3909 int mem_used3 = get_int_stat(h, h1, "mem_used");
3911 checkeq(1, get_int_stat(h, h1, "ep_bg_fetched"),
3912 "BG fetches should be one after reading an evicted key");
3913 checkeq((initial_enqueued + 1), get_int_stat(h, h1, "ep_total_enqueued"),
3914 "Item should not be marked dirty after reading an evicted key");
3916 checkeq(kv_size, kv_size3,
3917 "kv_size should have returned to initial value after restoring evicted item");
3918 checkle(mem_used, mem_used3,
3919 "mem_used should have returned to initial value (or less) after restoring evicted item");
3921 itemsRemoved = get_int_stat(h, h1, "ep_items_rm_from_checkpoints");
3922 // Delete the value and make sure things return correctly.
3923 int numStored = get_int_stat(h, h1, "ep_total_persisted");
3924 checkeq(ENGINE_SUCCESS,
3925 del(h, h1, "k1", 0, 0), "Failed remove with value.");
3926 wait_for_stat_change(h, h1, "ep_total_persisted", numStored);
3927 testHarness.time_travel(65);
3928 wait_for_stat_change(h, h1, "ep_items_rm_from_checkpoints", itemsRemoved);
3933 static enum test_result test_disk_gt_ram_paged_rm(ENGINE_HANDLE *h,
3934 ENGINE_HANDLE_V1 *h1) {
3935 // Check/grab initial state.
3936 int overhead = get_int_stat(h, h1, "ep_overhead");
3937 const auto initial_enqueued = get_int_stat(h, h1, "ep_total_enqueued");
3939 // Store some data and check post-set state.
3940 wait_for_persisted_value(h, h1, "k1", "some value");
3941 checkeq(0, get_int_stat(h, h1, "ep_bg_fetched"),
3942 "bg_fetched should initially be zero");
3943 checkeq(initial_enqueued + 1, get_int_stat(h, h1, "ep_total_enqueued"),
3944 "Expected total_enqueued to increase by 1 after storing 1 value");
3945 checkge(get_int_stat(h, h1, "ep_overhead"), overhead,
3946 "Fell below initial overhead.");
3949 evict_key(h, h1, "k1");
3951 // Delete the value and make sure things return correctly.
3952 int itemsRemoved = get_int_stat(h, h1, "ep_items_rm_from_checkpoints");
3953 int numStored = get_int_stat(h, h1, "ep_total_persisted");
3954 checkeq(ENGINE_SUCCESS,
3955 del(h, h1, "k1", 0, 0), "Failed remove with value.");
3956 wait_for_stat_change(h, h1, "ep_total_persisted", numStored);
3957 testHarness.time_travel(65);
3958 wait_for_stat_change(h, h1, "ep_items_rm_from_checkpoints", itemsRemoved);
3963 static enum test_result test_disk_gt_ram_update_paged_out(ENGINE_HANDLE *h,
3964 ENGINE_HANDLE_V1 *h1) {
3965 wait_for_persisted_value(h, h1, "k1", "some value");
3967 evict_key(h, h1, "k1");
3970 checkeq(ENGINE_SUCCESS,
3971 store(h, h1, NULL, OPERATION_SET, "k1", "new value", &i),
3972 "Failed to update an item.");
3973 h1->release(h, NULL, i);
3975 check_key_value(h, h1, "k1", "new value", 9);
3977 checkeq(0, get_int_stat(h, h1, "ep_bg_fetched"), "bg fetched something");
3982 static enum test_result test_disk_gt_ram_delete_paged_out(ENGINE_HANDLE *h,
3983 ENGINE_HANDLE_V1 *h1) {
3984 wait_for_persisted_value(h, h1, "k1", "some value");
3986 evict_key(h, h1, "k1");
3988 checkeq(ENGINE_SUCCESS,
3989 del(h, h1, "k1", 0, 0), "Failed to delete.");
3991 check(verify_key(h, h1, "k1") == ENGINE_KEY_ENOENT, "Expected miss.");
3993 cb_assert(0 == get_int_stat(h, h1, "ep_bg_fetched"));
3999 static void bg_set_thread(void *arg) {
4000 ThreadData *td(static_cast<ThreadData*>(arg));
4002 usleep(2600); // Exacerbate race condition.
4005 checkeq(ENGINE_SUCCESS,
4006 store(td->h, td->h1, NULL, OPERATION_SET,
4007 "k1", "new value", &i),