diff --git a/lib/cn/keep.c b/lib/cn/keep.c
index 33279d95b..af20a3ac4 100644
--- a/lib/cn/keep.c
+++ b/lib/cn/keep.c
@@ -93,7 +93,7 @@ kvset_keep_vblocks(
         for (uint32_t j = 0; j < cnt; ++j) {
             vbm->vbm_blkv[nv] = kvset_get_nth_vblock_id(kvset, j);
-            vbm->vbm_tot += kvset_get_nth_vblock_len(kvset, j);
+            vbm->vbm_tot += kvset_get_nth_vblock_wlen(kvset, j);
 
             if (j == vgmap_vbidx_out_end(kvset, kvg)) {
                 merr_t err;
diff --git a/lib/cn/kvset.c b/lib/cn/kvset.c
index 5d2c588f0..cd677cfc4 100644
--- a/lib/cn/kvset.c
+++ b/lib/cn/kvset.c
@@ -1773,11 +1773,19 @@ kvset_get_nth_vblock_id(struct kvset *ks, uint32_t index)
 }
 
 uint32_t
-kvset_get_nth_vblock_len(struct kvset *ks, uint32_t index)
+kvset_get_nth_vblock_wlen(struct kvset *ks, uint32_t index)
 {
     struct vblock_desc *vbd = lvx2vbd(ks, index);
 
-    return vbd ? vbd->vbd_len : 0;
+    return vbd ? vbd->vbd_wlen : 0;
+}
+
+uint32_t
+kvset_get_nth_vblock_alen(struct kvset *ks, uint32_t index)
+{
+    struct vblock_desc *vbd = lvx2vbd(ks, index);
+
+    return vbd ? vbd->vbd_alen : 0;
 }
 
 struct vblock_desc *
@@ -2297,7 +2305,7 @@ vr_start_read(
     /* update mblock properties */
     assert(lvx2vbd(ks, vbidx));
     vr->vr_mblk_dstart = lvx2vbd(ks, vbidx)->vbd_off;
-    vr->vr_mblk_dlen = lvx2vbd(ks, vbidx)->vbd_len;
+    vr->vr_mblk_dlen = lvx2vbd(ks, vbidx)->vbd_wlen;
     vr->vr_mbid = lvx2mbid(ks, vbidx);
 
     /* set io fields for async mblock read */
@@ -2708,7 +2716,7 @@ kvset_madvise_vblks(struct kvset *ks, int advice)
         for (j = 0; j < v->mbs_mblkc; j++) {
             struct vblock_desc *vbd = mbset_get_udata(v, j);
 
-            vbr_madvise_async(vbd, 0, vbd->vbd_len, advice, wq);
+            vbr_madvise_async(vbd, 0, vbd->vbd_wlen, advice, wq);
         }
     }
 }
@@ -2730,7 +2738,7 @@ kvset_madvise_capped(struct kvset *ks, int advice)
         for (uint j = 0; j < v->mbs_mblkc; j++) {
             struct vblock_desc *vbd = mbset_get_udata(v, j);
 
-            vbr_madvise_async(vbd, 0, min_t(uint, vbd->vbd_len, vra_len), advice, wq);
+            vbr_madvise_async(vbd, 0, min_t(uint, vbd->vbd_wlen, vra_len), advice, wq);
         }
     }
 }
@@ -2746,7 +2754,7 @@ kvset_madvise_vmaps(struct kvset *ks, int advice)
         for (uint j = 0; j < v->mbs_mblkc; j++) {
             struct vblock_desc *vbd = mbset_get_udata(v, j);
 
-            vbr_madvise(vbd, 0, vbd->vbd_len, advice);
+            vbr_madvise(vbd, 0, vbd->vbd_wlen, advice);
         }
     }
 }
diff --git a/lib/cn/kvset.h b/lib/cn/kvset.h
index 1aa159da3..1e289114e 100644
--- a/lib/cn/kvset.h
+++ b/lib/cn/kvset.h
@@ -351,11 +351,18 @@ void
 kvset_set_rule(struct kvset *ks, enum cn_rule rule);
 
 /**
- * kvset_get_nth_vblock_len() - Get len of useful data in nth vblock
+ * Get the length of written data in the nth vblock (excludes the vblock footer)
  */
 /* MTF_MOCK */
 uint32_t
-kvset_get_nth_vblock_len(struct kvset *km, uint32_t index);
+kvset_get_nth_vblock_wlen(struct kvset *km, uint32_t index);
+
+/**
+ * Get the length of allocated data in the nth vblock (excludes the vblock footer)
+ */
+/* MTF_MOCK */
+uint32_t
+kvset_get_nth_vblock_alen(struct kvset *km, uint32_t index);
 
 /* MTF_MOCK */
 void
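The rename of kvset_get_nth_vblock_len() into a wlen/alen pair separates two quantities that diverge once split starts punching and cloning vblocks: bytes actually written versus bytes still allocated on media. A minimal sketch of the intended division of labor (the helper and counters below are illustrative, not part of this patch; the accessors and kvset_get_num_vblocks() are the ones used elsewhere in this diff):

```c
#include <stdint.h>

struct kvset; /* opaque; accessors below are the ones touched by this patch */

uint32_t kvset_get_num_vblocks(struct kvset *ks);
uint32_t kvset_get_nth_vblock_wlen(struct kvset *ks, uint32_t index);
uint32_t kvset_get_nth_vblock_alen(struct kvset *ks, uint32_t index);

/* Illustrative only: wlen feeds logical-size accounting (e.g. vbm_tot and
 * readahead bounds), alen feeds on-media accounting (e.g. bl_vtotal).
 */
static void
vblock_usage(struct kvset *ks, uint64_t *written, uint64_t *allocated)
{
    *written = *allocated = 0;

    for (uint32_t i = 0; i < kvset_get_num_vblocks(ks); i++) {
        *written += kvset_get_nth_vblock_wlen(ks, i);
        *allocated += kvset_get_nth_vblock_alen(ks, i);
    }
}
```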
diff --git a/lib/cn/kvset_split.c b/lib/cn/kvset_split.c
index e7f6c5837..447e7469c 100644
--- a/lib/cn/kvset_split.c
+++ b/lib/cn/kvset_split.c
@@ -3,22 +3,20 @@
  * Copyright (C) 2022 Micron Technology, Inc. All rights reserved.
  */
 
+#include
 #include
-#include
-#include
-#include
-#include
-#include
-
-#include
-
 #include
 #include
 #include
 #include
-
+#include
+#include
 #include
+#include
+#include
+#include
+#include
 
 #include "kvs_mblk_desc.h"
 #include "kvset.h"
@@ -399,13 +397,19 @@ kblocks_split(
     return err;
 }
 
+struct vgroup_split_metadata {
+    bool overlaps;     /**< Whether the vgroup overlaps the split key */
+    uint16_t vblk_idx; /**< Where the left kvset's vblocks end and the right kvset's vblocks begin */
+    off_t offset;      /**< Offset into the overlapping vblock of the first access right of the split key */
+};
+
 /**
  * Return a split vblock index for the specified range of vblocks [start, end] by comparing
  * the min/max keys stored in a vblock footer against the split key.
  *
  * Return values:
  * v >= start and overlap = false: left: [start, v - 1], right [v, end]
- * v >= start and overlap = true : left: [start, v], right [clone(v), end]
+ * v >= start and overlap = true : left: [start, punched(v)], right [clone(v), end]
  *
  * NOTES:
  * v = start and overlap = false: All vblocks go to the right
@@ -422,13 +426,13 @@ get_vblk_split_index(
     uint16_t v;
 
     INVARIANT(ks && split_key && overlap);
+    INVARIANT(start <= end && end < kvset_get_num_vblocks(ks));
 
     *overlap = false;
 
-    assert(start <= end && end < kvset_get_num_vblocks(ks));
-
     for (v = start; v <= end; v++) {
-        struct key_obj min_key = { 0 };
-        struct vblock_desc *vbd = kvset_get_nth_vblock_desc(ks, v);
+        struct key_obj min_key;
+        const struct vblock_desc *vbd = kvset_get_nth_vblock_desc(ks, v);
 
         key2kobj(&min_key, vbd->vbd_mblkdesc->map_base + vbd->vbd_min_koff, vbd->vbd_min_klen);
 
@@ -437,8 +441,8 @@ get_vblk_split_index(
     }
 
     if (v > start) {
-        struct key_obj max_key = { 0 };
-        struct vblock_desc *vbd = kvset_get_nth_vblock_desc(ks, v - 1);
+        struct key_obj max_key;
+        const struct vblock_desc *vbd = kvset_get_nth_vblock_desc(ks, v - 1);
 
         key2kobj(&max_key, vbd->vbd_mblkdesc->map_base + vbd->vbd_max_koff, vbd->vbd_max_klen);
 
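The return-value contract documented above is easiest to see on a toy example. The sketch below re-implements the footer min/max comparison on plain strings; every type and helper here is a stand-in derived from that contract, not an HSE API:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for get_vblk_split_index(): each vblock is a [min,max] string
 * range. Find the first vblock whose min key is right of the split key, then
 * report whether the preceding vblock's range straddles the split key.
 */
static uint16_t
toy_split_index(const char *minv[], const char *maxv[], uint16_t n,
                const char *split, bool *overlap)
{
    uint16_t v;

    *overlap = false;
    for (v = 0; v < n; v++) {
        if (strcmp(minv[v], split) > 0)
            break;
    }
    if (v > 0 && strcmp(maxv[v - 1], split) > 0) {
        *overlap = true;
        v--; /* vblock v straddles the split key */
    }
    return v;
}

int
main(void)
{
    const char *minv[] = { "a", "g", "n" };
    const char *maxv[] = { "f", "m", "z" };
    bool overlap;

    /* "h" falls inside vblock 1 ("g".."m"): v = 1, overlap = true, so
     * vblock 1 is punched for the left kvset and cloned for the right.
     */
    uint16_t v = toy_split_index(minv, maxv, 3, "h", &overlap);
    printf("v=%u overlap=%d\n", v, overlap);
    return 0;
}
```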
@@ -451,6 +455,120 @@ get_vblk_split_index(
     return v;
 }
 
+static void
+find_max_key_among_overlapping_vblocks(
+    const uint32_t nvgroups,
+    const struct vgroup_split_metadata *metadatav,
+    struct kvset *const ks,
+    struct key_obj *max_key)
+{
+    for (uint32_t i = 0; i < nvgroups; i++) {
+        struct key_obj curr_key = { 0 };
+        const struct vblock_desc *vbd;
+        const struct vgroup_split_metadata *metadata = metadatav + i;
+
+        if (!metadata->overlaps)
+            continue;
+
+        vbd = kvset_get_nth_vblock_desc(ks, metadata->vblk_idx);
+
+        key2kobj(&curr_key, vbd->vbd_mblkdesc->map_base + vbd->vbd_max_koff, vbd->vbd_max_klen);
+
+        if (key_obj_cmp(max_key, &curr_key) < 0)
+            *max_key = curr_key;
+    }
+}
+
+static merr_t
+mark_vgroup_accesses(
+    const uint32_t nvgroups,
+    struct vgroup_split_metadata *metadatav,
+    struct kvset *const ks,
+    const struct key_obj *split_key,
+    const struct key_obj *max_key)
+{
+    merr_t err;
+    bool first = true;
+    uint32_t accesses = 0;
+    struct kv_iterator *iter;
+
+    INVARIANT(key_obj_cmp(split_key, max_key) <= 0);
+
+    err = kvset_iter_create(ks, NULL, NULL, NULL, kvset_iter_flag_mmap, &iter);
+    if (ev(err))
+        return err;
+
+    err = kvset_iter_seek(iter, split_key->ko_sfx, split_key->ko_sfx_len, NULL);
+    if (ev(err))
+        goto out;
+
+    while (true) {
+        uint vlen;
+        uint vbidx;
+        uint vboff;
+        uint complen;
+        uint64_t seqno;
+        const void *vdata;
+        enum kmd_vtype vtype;
+        struct key_obj curr_key;
+        struct kvset_iter_vctx vc;
+
+        if (iter->kvi_eof)
+            break;
+
+        err = kvset_iter_next_key(iter, &curr_key, &vc);
+        if (ev(err))
+            goto out;
+
+        /* If this kvset contains the split key itself, skip it: its values
+         * stay with the left kvset, so only keys to its right count as
+         * accesses here.
+         */
+        if (first) {
+            first = false;
+            if (key_obj_cmp(&curr_key, split_key) == 0)
+                continue;
+        }
+
+        if (key_obj_cmp(&curr_key, max_key) > 0)
+            break;
+
+        while (kvset_iter_next_vref(iter, &vc, &seqno, &vtype, &vbidx, &vboff, &vdata, &vlen,
+                                    &complen)) {
+            uint64_t vgidx;
+            const struct vblock_desc *vbd;
+            struct vgroup_split_metadata *metadata;
+
+            switch (vtype) {
+            case VTYPE_UCVAL:
+            case VTYPE_CVAL:
+                vbd = kvset_get_nth_vblock_desc(ks, vbidx);
+                /* vbd_vgidx is 1-based (vblock_reader.c adds 1 when assigning
+                 * vgroup indices), so convert it to a 0-based index into
+                 * metadatav.
+                 */
+                vgidx = atomic_read(&vbd->vbd_vgidx) - 1;
+                assert(vgidx < nvgroups);
+                metadata = metadatav + vgidx;
+                if (metadata->offset == -1) {
+                    metadata->offset = vboff;
+
+                    /* Exit because we have marked the offset for all vgroups */
+                    if (++accesses == nvgroups)
+                        goto out;
+                }
+                /* fallthrough */
+            case VTYPE_IVAL:
+            case VTYPE_ZVAL:
+            case VTYPE_TOMB:
+            case VTYPE_PTOMB:
+                continue;
+            }
+        }
+    }
+
+out:
+    kvset_iter_release(iter);
+
+    return err;
+}
+
 /**
  * @vbidx_left, @vbidx_right - tracks vblock index for the left and right kvsets
  * @vgidx_left, @vgidx_right - tracks vgroup index for the left and right kvsets
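mark_vgroup_accesses() boils down to: walk keys rightward from the split key and record, per vgroup, the byte offset of the first value access into its overlapping vblock, stopping early once every vgroup is marked. A self-contained sketch of just that bookkeeping over a flat array of (vgidx, vboff) references (all names and types here are stand-ins):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct ref {
    uint32_t vgidx; /* 1-based, mirroring vbd_vgidx */
    uint32_t vboff; /* byte offset of the value within its vblock */
};

int
main(void)
{
    /* Value references in key order, starting just right of the split key */
    const struct ref refs[] = { { 2, 8192 }, { 1, 100 }, { 2, 12288 }, { 1, 4096 } };
    int64_t offsetv[2] = { -1, -1 }; /* per-vgroup first-access offset */
    const uint32_t nvgroups = 2;
    uint32_t accesses = 0;

    for (size_t i = 0; i < sizeof(refs) / sizeof(refs[0]); i++) {
        uint32_t vgidx = refs[i].vgidx - 1; /* 1-based -> 0-based */

        assert(vgidx < nvgroups);
        if (offsetv[vgidx] == -1) {
            offsetv[vgidx] = refs[i].vboff;
            if (++accesses == nvgroups)
                break; /* every vgroup marked; stop early */
        }
    }

    /* Prints: vgroup 0: 100, vgroup 1: 8192 */
    printf("vgroup 0: %lld, vgroup 1: %lld\n",
           (long long)offsetv[0], (long long)offsetv[1]);
    return 0;
}
```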
@@ -464,6 +582,8 @@ vblocks_split(
     struct perfc_set *pc,
     struct kvset_split_res *result)
 {
+    struct key_obj max_key;
+    struct vgroup_split_metadata *metadatav;
     struct vgmap *vgmap_src = ks->ks_vgmap;
     struct vgmap *vgmap_left = work[LEFT].vgmap;
     struct vgmap *vgmap_right = work[RIGHT].vgmap;
@@ -472,113 +592,188 @@ vblocks_split(
     uint16_t vbidx_left = 0, vbidx_right = 0;
     uint32_t vgidx_left = 0, vgidx_right = 0;
     uint32_t nvgroups = kvset_get_vgroups(ks), perfc_rwc = 0;
-    bool move_left = (blks_right->kblks.idc == 0);
-    bool move_right = (blks_left->kblks.idc == 0);
+    const bool move_left = (blks_right->kblks.idc == 0);
+    const bool move_right = (blks_left->kblks.idc == 0);
     uint64_t perfc_rwb = 0;
     merr_t err;
 
+    log_debug("splitting %u vgroups", nvgroups);
+
     if (move_left && move_right) {
         assert(nvgroups == 0);
         return 0;
     }
 
+    metadatav = calloc(nvgroups, sizeof(*metadatav));
+    if (ev(!metadatav))
+        return merr(ENOMEM);
+
     for (uint32_t i = 0; i < nvgroups; i++) {
-        uint16_t src_start, src_end, src_split, end;
-        uint32_t vbcnt = 0;
-        bool overlap = false;
+        /* Negative offset implies the overlapping vblock was not accessed */
+        metadatav[i].offset = -1;
+    }
 
-        /* Per vgroup start and end output vblock index in the source kvset
-         */
-        src_start = vgmap_vbidx_out_start(ks, i);
-        src_end = vgmap_vbidx_out_end(ks, i);
+    for (uint32_t i = 0; i < nvgroups; i++) {
+        uint16_t start, end;
+        struct vgroup_split_metadata *metadata = metadatav + i;
+
+        /* Per vgroup start and end output vblock index in the source kvset */
+        start = vgmap_vbidx_out_start(ks, i);
+        end = vgmap_vbidx_out_end(ks, i);
 
         if (move_left || move_right) {
-            /* If all the kblocks are on one side then all the vblocks can be safely moved
-             * to the same side
-             */
-            src_split = move_right ? src_start : src_end + 1;
-            assert(!overlap);
+            metadata->vblk_idx = move_right ? start : end + 1;
+            metadata->overlaps = false;
         } else {
-            src_split = get_vblk_split_index(ks, src_start, src_end, split_key, &overlap);
+            metadata->vblk_idx = get_vblk_split_index(ks, start, end, split_key,
+                                                      &metadata->overlaps);
         }
-        assert(src_split >= src_start && src_split <= src_end + 1);
+        assert(metadata->vblk_idx >= start && metadata->vblk_idx <= end + 1);
+    }
 
-        /* Add vblocks in [src_start, end - 1] to the left kvset
-         */
-        end = overlap ? src_split + 1 : src_split;
-        for (uint16_t j = src_start; j < end; j++) {
-            err = blk_list_append(&blks_left->vblks, kvset_get_nth_vblock_id(ks, j));
-            if (err)
-                return err;
+    max_key = *split_key;
+    find_max_key_among_overlapping_vblocks(nvgroups, metadatav, ks, &max_key);
+
+    err = mark_vgroup_accesses(nvgroups, metadatav, ks, split_key, &max_key);
+    if (ev(err))
+        goto out;
+
+    for (uint32_t i = 0; i < nvgroups; i++) {
+        uint32_t vbcnt = 0;
+        bool overlapping_access;
+        uint16_t split, start, end, boundary;
+        const struct vgroup_split_metadata *metadata = metadatav + i;
+
+        overlapping_access = metadata->overlaps && metadata->offset >= 0;
+
+        /* Per vgroup start and end output vblock index in the source kvset */
+        start = vgmap_vbidx_out_start(ks, i);
+        end = vgmap_vbidx_out_end(ks, i);
+        split = metadata->vblk_idx;
+
+        log_debug("start=%u end=%u split=%u overlaps=%d offset=%jd overlapping_access=%d",
+                  start, end, split, metadata->overlaps, (intmax_t)metadata->offset,
+                  overlapping_access);
+
+        /* Add the vblocks in [boundary, end] to the right kvset */
+        boundary = split;
+        for (uint16_t j = boundary; j <= end; j++) {
+            uint32_t alen;
+            uint64_t mbid;
+
+            mbid = kvset_get_nth_vblock_id(ks, j);
+
+            if (j == split && overlapping_access) {
+                off_t off;
+                uint64_t clone_mbid;
+                struct mblock_props props;
+
+                /* Clone from the page boundary at or below (offset - PAGE_SIZE)
+                 * so the page containing the first access is always included.
+                 */
+                off = metadata->offset;
+                off = off < PAGE_SIZE ? 0 : roundup(off - PAGE_SIZE, PAGE_SIZE);
+
+                err = mpool_mblock_clone(ks->ks_mp, mbid, off, 0, &clone_mbid);
+                if (!err) {
+                    err = blk_list_append(&blks_right->vblks, clone_mbid);
+                    if (!err)
+                        err = blk_list_append(result->ks[RIGHT].blks_commit, clone_mbid);
+                }
+
+                if (err)
+                    goto out;
+
+                log_debug("Cloned mblock (0x%" PRIx64 ") starting at offset %jd",
+                          mbid, (intmax_t)off);
+
+                err = mpool_mblock_props_get(ks->ks_mp, clone_mbid, &props);
+                if (ev(err))
+                    goto out;
+
+                perfc_rwc++;
+                if (perfc_ison(pc, PERFC_RA_CNCOMP_RBYTES) ||
+                    perfc_ison(pc, PERFC_RA_CNCOMP_WBYTES))
+                    perfc_rwb += props.mpr_write_len - off;
+
+                alen = props.mpr_alloc_cap - VBLOCK_FOOTER_LEN;
+            } else {
+                err = blk_list_append(&blks_right->vblks, mbid);
+                if (err)
+                    goto out;
+
+                alen = kvset_get_nth_vblock_alen(ks, j);
+            }
 
             vbcnt++;
-            blks_left->bl_vtotal += kvset_get_nth_vblock_len(ks, j);
+            blks_right->bl_vtotal += alen;
         }
 
         if (vbcnt > 0) {
-            vbidx_left += vbcnt;
+            vbidx_right += vbcnt;
 
-            err = vgmap_vbidx_set(vgmap_src, end - 1, vgmap_left, vbidx_left - 1, vgidx_left);
+            err = vgmap_vbidx_set(vgmap_src, end, vgmap_right, vbidx_right - 1, vgidx_right);
             if (err)
-                return err;
+                goto out;
 
-            vgidx_left++;
+            vgidx_right++;
         }
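The clone offset expression deserves a worked example. With 4 KiB pages, it keeps the page containing the first access plus up to one extra page of slack below it (PAGE_SIZE and roundup() below are assumed to match the kernel-style definitions HSE uses):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096L
#define roundup(x, y) ((((x) + (y)-1) / (y)) * (y))

int
main(void)
{
    const int64_t offsets[] = { 0, 100, 4096, 4097, 8192, 8193 };

    for (size_t i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
        int64_t off = offsets[i];

        /* Same expression as the clone path above */
        off = off < PAGE_SIZE ? 0 : roundup(off - PAGE_SIZE, PAGE_SIZE);
        printf("first access %6lld -> clone from %6lld\n",
               (long long)offsets[i], (long long)off);
    }
    return 0;
}
```

The result is always page aligned and never past the page holding the first access, so the earliest value the right kvset needs is always inside the cloned region.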
 
+        /* Add the vblocks in [start, boundary] to the left kvset */
-        vbcnt = 0; /* reset vbcnt for the right kvset */
+        vbcnt = 0; /* reset vbcnt for the left kvset */
-        if (overlap) {
-            /* Append a clone of the overlapping vblock to the right kvset */
-            const uint64_t src_mbid = kvset_get_nth_vblock_id(ks, src_split);
-            uint64_t clone_mbid;
-
-            err = mpool_mblock_clone(ks->ks_mp, src_mbid, 0, 0, &clone_mbid);
-            if (!err) {
-                err = blk_list_append(&blks_right->vblks, clone_mbid);
-                if (!err)
-                    err = blk_list_append(result->ks[RIGHT].blks_commit, clone_mbid);
-            }
+        boundary = overlapping_access ? split : split - 1;
+        for (uint16_t j = start; j <= boundary; j++) {
+            uint32_t alen;
+            uint64_t mbid;
 
-            if (err)
-                return err;
+            mbid = kvset_get_nth_vblock_id(ks, j);
 
-            perfc_rwc++;
-            if (perfc_ison(pc, PERFC_RA_CNCOMP_RBYTES) || perfc_ison(pc, PERFC_RA_CNCOMP_WBYTES)) {
+            if (j == split) {
+                off_t off;
+                uint32_t wlen;
                 struct mblock_props props;
 
-                err = mpool_mblock_props_get(ks->ks_mp, src_mbid, &props);
-                if (!ev(err))
-                    perfc_rwb += props.mpr_write_len;
-                else
-                    err = 0;
-            }
+                /* The punch offset must be page aligned; punch from there up
+                 * to the vblock footer.
+                 */
+                off = roundup(metadata->offset, PAGE_SIZE);
+                wlen = kvset_get_nth_vblock_wlen(ks, j) - off;
 
-            vbcnt++;
-            blks_right->bl_vtotal += kvset_get_nth_vblock_len(ks, src_split);
-            src_split++;
-        }
+                err = mpool_mblock_punch(ks->ks_mp, mbid, off, wlen);
+                if (ev(err))
+                    goto out;
 
-        /* Add the remaining vblocks in [src_split, src_end] to the right kvset
-         */
-        for (uint16_t j = src_split; j <= src_end; j++) {
-            err = blk_list_append(&blks_right->vblks, kvset_get_nth_vblock_id(ks, j));
-            if (err)
-                return err;
+                log_debug("Punched mblock (0x%" PRIx64 ") starting at offset %jd for %u bytes",
+                          mbid, (intmax_t)off, wlen);
+
+                err = mpool_mblock_props_get(ks->ks_mp, mbid, &props);
+                if (ev(err))
+                    goto out;
+
+                alen = props.mpr_alloc_cap - VBLOCK_FOOTER_LEN;
+            } else {
+                alen = kvset_get_nth_vblock_alen(ks, j);
+            }
+
+            err = blk_list_append(&blks_left->vblks, mbid);
+            if (ev(err))
+                goto out;
 
             vbcnt++;
-            blks_right->bl_vtotal += kvset_get_nth_vblock_len(ks, j);
+            blks_left->bl_vtotal += alen;
         }
 
         if (vbcnt > 0) {
-            vbidx_right += vbcnt;
+            vbidx_left += vbcnt;
 
-            err = vgmap_vbidx_set(vgmap_src, src_end, vgmap_right, vbidx_right - 1, vgidx_right);
+            err = vgmap_vbidx_set(vgmap_src, boundary, vgmap_left, vbidx_left - 1, vgidx_left);
             if (err)
-                return err;
+                goto out;
 
-            vgidx_right++;
+            vgidx_left++;
         }
     }
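The punch on the left side is the complement of the clone above: the left kvset keeps [0, roundup(offset, PAGE_SIZE)) and frees the rest up to the footer. A quick exhaustive check that the two page-rounded boundaries always cover the whole vblock, under the same assumed PAGE_SIZE and roundup() definitions as the previous sketch:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096L
#define roundup(x, y) ((((x) + (y)-1) / (y)) * (y))

int
main(void)
{
    for (int64_t first_access = 0; first_access < 65536; first_access++) {
        /* Right kvset: clone starts at or below the accessed page */
        int64_t clone_off = first_access < PAGE_SIZE
            ? 0 : roundup(first_access - PAGE_SIZE, PAGE_SIZE);
        /* Left kvset: punch starts at the next page boundary */
        int64_t punch_off = roundup(first_access, PAGE_SIZE);

        /* No byte is lost: the clone begins at or before the punch point */
        assert(clone_off <= punch_off);
        /* At most two pages end up retained on both sides */
        assert(punch_off - clone_off <= 2 * PAGE_SIZE);
    }
    printf("clone/punch boundaries cover every offset\n");
    return 0;
}
```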
 
+    /* Sanity check: catch inconsistent vused/vtotal accounting here rather
+     * than tripping asserts elsewhere later on.
+     */
+    assert(blks_left->bl_vtotal >= blks_left->bl_vused);
+    assert(blks_right->bl_vtotal >= blks_right->bl_vused);
+
     if (nvgroups > 0) {
         assert(vgidx_left <= nvgroups);
         assert(vgidx_right <= nvgroups);
@@ -593,7 +788,10 @@ vblocks_split(
         perfc_add2(pc, PERFC_RA_CNCOMP_WREQS, perfc_rwc, PERFC_RA_CNCOMP_WBYTES, perfc_rwb);
     }
 
-    return 0;
+out:
+    free(metadatav);
+
+    return err;
 }
 
 /**
diff --git a/lib/cn/vblock_reader.c b/lib/cn/vblock_reader.c
index d998fcafe..7cd9b34b3 100644
--- a/lib/cn/vblock_reader.c
+++ b/lib/cn/vblock_reader.c
@@ -28,9 +28,10 @@ vbr_desc_read(
     struct vblock_desc *vblk_desc)
 {
     struct vblock_footer_omf *footer;
-    uint32_t wlen;
+    uint32_t wlen, alen;
 
     wlen = mblk->wlen_pages * PAGE_SIZE;
+    alen = mblk->alen_pages * PAGE_SIZE;
     footer = mblk->map_base + wlen - VBLOCK_FOOTER_LEN;
 
     if (ev(omf_vbf_magic(footer) != VBLOCK_FOOTER_MAGIC))
@@ -43,9 +44,10 @@ vbr_desc_read(
 
     vblk_desc->vbd_mblkdesc = mblk;
     vblk_desc->vbd_off = 0;
-    vblk_desc->vbd_len = wlen - VBLOCK_FOOTER_LEN;
+    vblk_desc->vbd_wlen = wlen - VBLOCK_FOOTER_LEN;
+    vblk_desc->vbd_alen = alen - VBLOCK_FOOTER_LEN;
     vblk_desc->vbd_vgroup = omf_vbf_vgroup(footer);
-    vblk_desc->vbd_min_koff = vblk_desc->vbd_len + VBLOCK_FOOTER_LEN - (2 * HSE_KVS_KEY_LEN_MAX);
+    vblk_desc->vbd_min_koff = vblk_desc->vbd_wlen + VBLOCK_FOOTER_LEN - (2 * HSE_KVS_KEY_LEN_MAX);
     vblk_desc->vbd_min_klen = omf_vbf_min_klen(footer);
     vblk_desc->vbd_max_koff = vblk_desc->vbd_min_koff + HSE_KVS_KEY_LEN_MAX;
     vblk_desc->vbd_max_klen = omf_vbf_max_klen(footer);
@@ -156,7 +158,7 @@ vbr_readahead(
             end = (bkt + 2) * ra_len;
             if (rah->vgidx == vgidx && bkt - 1 == rah->bkt)
                 voff = (bkt + 1) * ra_len;
-            if (voff >= vbd->vbd_len)
+            if (voff >= vbd->vbd_wlen)
                 return;
             voff &= PAGE_MASK;
         }
@@ -164,8 +166,8 @@ vbr_readahead(
 
 willneed:
     ra_len = end - voff;
-    if (voff + ra_len > vbd->vbd_len)
-        ra_len = vbd->vbd_len - voff;
+    if (voff + ra_len > vbd->vbd_wlen)
+        ra_len = vbd->vbd_wlen - voff;
 
     rah->vgidx = vgidx;
     rah->bkt = bkt;
@@ -230,6 +232,6 @@ void *
vbr_value(struct vblock_desc *vbd, uint vboff, uint vlen)
 {
     assert(vbd->vbd_mblkdesc->map_base);
-    assert(vboff + vlen <= vbd->vbd_len);
+    assert(vboff + vlen <= vbd->vbd_wlen);
 
     return vbd->vbd_mblkdesc->map_base + vbd->vbd_off + vboff;
 }
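The wlen/alen derivation in vbr_desc_read() in concrete numbers: both lengths come from page counts, minus the footer. A sketch, assuming a 4 KiB page and a one-page footer (the real VBLOCK_FOOTER_LEN comes from the OMF definitions):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define VBLOCK_FOOTER_LEN PAGE_SIZE /* assumed here; see the OMF headers */

int
main(void)
{
    /* e.g. a vblock whose allocation no longer matches what was written
     * (as after a punch): 16 pages written, only 4 still allocated.
     */
    uint32_t wlen_pages = 16, alen_pages = 4;

    uint32_t wlen = wlen_pages * PAGE_SIZE; /* 65536 */
    uint32_t alen = alen_pages * PAGE_SIZE; /* 16384 */

    /* Mirrors vbr_desc_read(): data lengths exclude the footer */
    uint32_t vbd_wlen = wlen - VBLOCK_FOOTER_LEN; /* 61440 */
    uint32_t vbd_alen = alen - VBLOCK_FOOTER_LEN; /* 12288 */

    printf("vbd_wlen=%u vbd_alen=%u\n", vbd_wlen, vbd_alen);
    return 0;
}
```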
diff --git a/lib/cn/vblock_reader.h b/lib/cn/vblock_reader.h
index 0157c11a2..fde8a8fff 100644
--- a/lib/cn/vblock_reader.h
+++ b/lib/cn/vblock_reader.h
@@ -51,7 +51,8 @@ struct vbr_madvise_work {
 struct vblock_desc {
     const struct kvs_mblk_desc *vbd_mblkdesc; /* underlying block descriptor */
     uint32_t vbd_off;      /* byte offset of vblock data (always 0!) */
-    uint32_t vbd_len;      /* byte length of vblock data (not including footer) */
+    uint32_t vbd_wlen;     /* byte length of written vblock data (not including footer) */
+    uint32_t vbd_alen;     /* byte length of allocated vblock data (not including footer) */
     uint32_t vbd_min_koff; /* min key offset */
     uint32_t vbd_max_koff; /* max key offset */
     uint16_t vbd_min_klen; /* min key length */
diff --git a/tests/mocks/repository/lib/mock_kvset.c b/tests/mocks/repository/lib/mock_kvset.c
index b47e1f55f..56881a5ff 100644
--- a/tests/mocks/repository/lib/mock_kvset.c
+++ b/tests/mocks/repository/lib/mock_kvset.c
@@ -367,7 +367,7 @@ _kvset_get_nth_vblock_id(struct kvset *kvset, uint32_t index)
 }
 
 static uint32_t
-_kvset_get_nth_vblock_len(struct kvset *kvset, uint32_t index)
+_kvset_get_nth_vblock_wlen(struct kvset *kvset, uint32_t index)
 {
     struct mock_kvset *mk = (void *)kvset;
     struct kvdata *   iterv = mk->iter_data;
@@ -384,6 +384,12 @@ _kvset_get_nth_vblock_len(struct kvset *kvset, uint32_t index)
     return vcnt * sizeof(int);
 }
 
+static uint32_t
+_kvset_get_nth_vblock_alen(struct kvset *kvset, uint32_t index)
+{
+    return _kvset_get_nth_vblock_wlen(kvset, index);
+}
+
 static uint64_t
 _kvset_get_nodeid(const struct kvset *kvset)
 {
@@ -729,7 +735,8 @@ mock_kvset_set(void)
     MOCK_SET(kvset, _kvset_open);
     MOCK_SET(kvset, _kvset_set_work);
     MOCK_SET(kvset, _kvset_get_work);
-    MOCK_SET(kvset, _kvset_get_nth_vblock_len);
+    MOCK_SET(kvset, _kvset_get_nth_vblock_wlen);
+    MOCK_SET(kvset, _kvset_get_nth_vblock_alen);
     MOCK_SET(kvset, _kvset_list_add);
     MOCK_SET(kvset, _kvset_list_add_tail);
     MOCK_SET(kvset, _kvset_get_ref);
@@ -763,7 +770,8 @@ mock_kvset_unset(void)
     mapi_inject_list_unset(inject_list);
 
     MOCK_UNSET(kvset, _kvset_open);
-    MOCK_UNSET(kvset, _kvset_get_nth_vblock_len);
+    MOCK_UNSET(kvset, _kvset_get_nth_vblock_wlen);
+    MOCK_UNSET(kvset, _kvset_get_nth_vblock_alen);
     MOCK_UNSET(kvset, _kvset_list_add);
     MOCK_UNSET(kvset, _kvset_list_add_tail);
     MOCK_UNSET(kvset, _kvset_get_ref);
diff --git a/tests/unit/cn/cn_tree_test.c b/tests/unit/cn/cn_tree_test.c
index 1f3803194..68e1ceba6 100644
--- a/tests/unit/cn/cn_tree_test.c
+++ b/tests/unit/cn/cn_tree_test.c
@@ -344,7 +344,8 @@ struct mapi_injection inject_list[] = {
     { mapi_idx_kvset_get_hblock_id, MAPI_RC_SCALAR, 0xabc001 },
     { mapi_idx_kvset_get_nth_kblock_id, MAPI_RC_SCALAR, 0xabc002 },
     { mapi_idx_kvset_get_nth_vblock_id, MAPI_RC_SCALAR, 0xabc003 },
-    { mapi_idx_kvset_get_nth_vblock_len, MAPI_RC_SCALAR, 128 * 1024 },
+    { mapi_idx_kvset_get_nth_vblock_wlen, MAPI_RC_SCALAR, 128 * 1024 },
+    { mapi_idx_kvset_get_nth_vblock_alen, MAPI_RC_SCALAR, 128 * 1024 },
 
     /* we need kvset_iter_create, but we should never
      * need the guts of an iterator b/c we mock
diff --git a/tests/unit/cn/vblock_reader_test.c b/tests/unit/cn/vblock_reader_test.c
index ab07195a4..231de1c12 100644
--- a/tests/unit/cn/vblock_reader_test.c
+++ b/tests/unit/cn/vblock_reader_test.c
@@ -134,7 +134,7 @@ MTF_DEFINE_UTEST_PRE(vblock_reader_test, t_vbr_desc_read, pre)
     ASSERT_EQ(17, vblk_desc.vbd_min_klen);
     ASSERT_EQ(18, vblk_desc.vbd_max_klen);
     ASSERT_EQ(19, vblk_desc.vbd_vgroup);
-    ASSERT_EQ(ALIGN(value_bytes, PAGE_SIZE), vblk_desc.vbd_len);
+    ASSERT_EQ(ALIGN(value_bytes, PAGE_SIZE), vblk_desc.vbd_wlen);
     ASSERT_EQ(1, atomic_read(&vblk_desc.vbd_vgidx));
 }