1#ifndef BMXORFUNC__H__INCLUDED__
2#define BMXORFUNC__H__INCLUDED__
104 unsigned gap_count = 1;
105 unsigned bit_count = 0;
108 w = w0 = *block ^ *xor_block;
111 const int w_shift = int(
sizeof(w) * 8 - 1);
114 gap_count -= (w_prev = (w0 >> w_shift));
117 for (++block, ++xor_block; block < block_end; ++block, ++xor_block)
119 w = w0 = *block ^ *xor_block;
124 gap_count -= !w_prev;
133 gap_count -= (w0 >> w_shift);
134 gap_count -= !(w_prev ^ w_l);
136 w_prev = (w0 >> w_shift);
159 unsigned gap_count = 1;
160 unsigned bit_count = 0;
166 w = w0 = *block ^ *xor_block;
169 const int w_shift = int(
sizeof(w) * 8 - 1);
172 gap_count -= unsigned(w_prev = (w0 >> w_shift));
174 const bm::id64_t* block_end = block + (size/2);
175 for (++block, ++xor_block; block < block_end; ++block, ++xor_block)
177 w = w0 = *block ^ *xor_block;
182 gap_count -= !w_prev;
190 gap_count -= unsigned(w0 >> w_shift);
191 gap_count -= !(w_prev ^ w_l);
192 w_prev = (w0 >> w_shift);
213#ifdef VECT_BLOCK_XOR_CHANGE
305 if (
ref_idx[i] != bmc.ref_idx[i])
309 if (
xor_d64[i] != bmc.xor_d64[i])
325template<
typename PVT,
typename VT>
326typename VT::size_type
329 typename VT::size_type best_ref_idx,
334 match_pairs_vect.resize(0);
340 typename VT::size_type sz = match_vect.size();
341 for (
typename VT::size_type i = 0; (i < sz) && (d64_acc != ~0ull); ++i)
344 if (xmd.
ref_idx == best_ref_idx)
360 const unsigned min_gain_cut_off = 50;
361 for (
typename VT::size_type i = 0; (i < sz) && (d64_acc != ~0ull); ++i)
372 if (xmd.
gc_gain > min_gain_cut_off)
373 d64_new = ~~d64_acc & xmd.
gc_d64;
376 if (xmd.
bc_gain > min_gain_cut_off)
377 d64_new = ~~d64_acc & xmd.
bc_d64;
380 if (xmd.
ibc_gain > min_gain_cut_off)
381 d64_new = ~~d64_acc & xmd.
ibc_d64;
395 return match_pairs_vect.size();
403template<
typename BMChain,
typename RVect>
407 for (
size_t i = 0; i < mchain.chain_size; ++i)
410 ridx = ref_vect.get_row_idx(ridx);
443 #if defined(VECT_BLOCK_CHANGE)
455 x_descr.sb_bc[i] = (
unsigned short) bc;
464 x_descr.sb_gc[i] = (
unsigned short) gc;
493#ifdef VECT_BIT_BLOCK_XOR
508 for ( ;sub_block < sub_block_end; )
510 t_sub_block[0] = sub_block[0] ^ xor_sub_block[0];
511 t_sub_block[1] = sub_block[1] ^ xor_sub_block[1];
512 t_sub_block[2] = sub_block[2] ^ xor_sub_block[2];
513 t_sub_block[3] = sub_block[3] ^ xor_sub_block[3];
514 t_sub_block+=4; sub_block+=4; xor_sub_block+=4;
519 for (; sub_block < sub_block_end; t_sub_block+=4, sub_block+=4)
521 t_sub_block[0] = sub_block[0];
522 t_sub_block[1] = sub_block[1];
523 t_sub_block[2] = sub_block[2];
524 t_sub_block[3] = sub_block[3];
533 const bm::word_t* xor_sub_block = xor_block + off;
534 for (; sub_block < sub_block_end; )
536 t_sub_block[0] = sub_block[0] ^ xor_sub_block[0];
537 t_sub_block[1] = sub_block[1] ^ xor_sub_block[1];
538 t_sub_block[2] = sub_block[2] ^ xor_sub_block[2];
539 t_sub_block[3] = sub_block[3] ^ xor_sub_block[3];
540 t_sub_block+=4; sub_block+=4; xor_sub_block+=4;
545 for (; sub_block < sub_block_end; t_sub_block+=4, sub_block+=4)
547 t_sub_block[0] = sub_block[0];
548 t_sub_block[1] = sub_block[1];
549 t_sub_block[2] = sub_block[2];
550 t_sub_block[3] = sub_block[3];
576 #ifdef VECT_BIT_BLOCK_XOR_2WAY
589 for (; t_sub_block < t_sub_block_end; t_sub_block+=4, xor_sub_block+=4)
591 t_sub_block[0] ^= xor_sub_block[0];
592 t_sub_block[1] ^= xor_sub_block[1];
593 t_sub_block[2] ^= xor_sub_block[2];
594 t_sub_block[3] ^= xor_sub_block[3];
597 const bm::word_t* xor_sub_block = xor_block + off;
600 for (; t_sub_block < t_sub_block_end; t_sub_block+=4, xor_sub_block+=4)
602 t_sub_block[0] ^= xor_sub_block[0];
603 t_sub_block[1] ^= xor_sub_block[1];
604 t_sub_block[2] ^= xor_sub_block[2];
605 t_sub_block[3] ^= xor_sub_block[3];
635 bm::dynamic_heap_matrix<block_match_chain_type, bv_allocator_type>
707 bv_blocks.optimize(tb);
715 template<
class BMATR>
725 template<
typename BMATR>
750 matr.resize(
ref_bvects_.size(), total_blocks,
false );
800 bm::dynamic_heap_matrix<block_match_chain_type, bv_allocator_type>
827 typedef bm::heap_vector<bm::block_xor_match_descr, bv_allocator_type, true>
829 typedef bm::heap_vector<bm::match_pair, bv_allocator_type, true>
833 typedef bm::heap_vector<bm::word_t*, bv_allocator_type, true>
835 typedef bm::heap_vector<unsigned, bv_allocator_type, true>
838 typedef bm::heap_vector<bm::block_waves_xor_descr, bv_allocator_type, true>
851 { ref_vect_ = ref_vect; }
854 {
return *ref_vect_; }
874 unsigned i,
unsigned j,
947 {
return x_block_mtype_; }
950 {
return found_block_xor_; }
957 {
return s_block_best_metric_; }
967 {
return match_vect_; }
970 {
return chain_match_vect_; }
976 {
return ref_vect_->
get_bv(ri)->get_blocks_manager().get_block_ptr(i, j); }
1015 unsigned s_block_best_metric_;
1017 unsigned x_best_metric_;
1023 const
bm::
word_t* found_block_xor_;
1044template<typename BV>
1047 x_descr_ = nb_xdescr_vect_[ri];
1048 s_gc_ = nb_gc_vect_[ri];
1049 s_bc_ = nb_bc_vect_[ri];
1051 x_block_mtype_ =
best_metric(s_bc_, s_gc_, &s_block_best_metric_);
1052 x_best_metric_ = s_block_best_metric_;
1056template<
typename BV>
1064 x_block_mtype_ =
best_metric(s_bc_, s_gc_, &s_block_best_metric_);
1065 x_best_metric_ = s_block_best_metric_;
1069template<
typename BV>
1079 xmd.xor_gc = xmd.xor_bc = 0;
1087 const bm::word_t* xor_sub_block = xor_block + off;
1089 unsigned xor_gc, xor_bc;
1093 x_descr.sb_xor_bc[i] = (
unsigned short)xor_bc;
1097 bm::word_t w_l = (sub_block[-1] ^ xor_sub_block[-1]);
1098 bm::word_t w_r = (sub_block[0] ^ xor_sub_block[0]) & 1;
1100 xor_gc -= (w_l == w_r);
1102 x_descr.sb_xor_gc[i] = (
unsigned short)xor_gc;
1104 xmd.xor_bc += xor_bc;
1105 xmd.xor_gc += xor_gc;
1111 unsigned block_gc_gain(0), block_bc_gain(0), block_ibc_gain(0);
1112 bm::id64_t gc_digest(0), bc_digest(0), ibc_digest(0);
1121 unsigned xor_gc = x_descr.sb_xor_gc[i];
1125 block_gc_gain += x_descr.sb_gc[i];
1127 else if (xor_gc < x_descr.sb_gc[i])
1130 block_gc_gain += (x_descr.sb_gc[i] - xor_gc);
1132 unsigned xor_bc = x_descr.sb_xor_bc[i];
1133 if (xor_bc < x_descr.sb_bc[i])
1136 block_bc_gain += (x_descr.sb_bc[i] - xor_bc);
1138 unsigned xor_ibc = wave_max_bits - xor_bc;
1139 unsigned wave_ibc = wave_max_bits - x_descr.sb_bc[i];
1140 if (xor_ibc < wave_ibc)
1142 ibc_digest |= dmask;
1143 block_ibc_gain += (wave_ibc - xor_ibc);
1151 xmd.gc_d64 = gc_digest;
1152 xmd.bc_d64 = bc_digest;
1153 xmd.ibc_d64 = ibc_digest;
1155 xmd.gc_gain = block_gc_gain;
1156 xmd.bc_gain = block_bc_gain;
1157 xmd.ibc_gain = block_ibc_gain;
1162 if (!(block_gc_gain | block_bc_gain | block_ibc_gain))
1176 xmd.block_gain = 0; xmd.xor_d64 = 0;
1180 int new_gc = int(s_gc_) - int(block_gc_gain);
1183 int new_bc = int(s_bc_) - int(block_bc_gain);
1192 xmd.match_type =
best_metric(
unsigned(new_bc),
unsigned(new_gc), &best_m);
1193 switch (xmd.match_type)
1196 if (new_ibc < new_gc)
1198 xmd.block_gain = block_ibc_gain; xmd.xor_d64 = ibc_digest;
1202 xmd.block_gain = block_gc_gain; xmd.xor_d64 = gc_digest;
1206 if (new_ibc < new_bc)
1208 xmd.block_gain = block_ibc_gain; xmd.xor_d64 = ibc_digest;
1212 xmd.block_gain = block_bc_gain; xmd.xor_d64 = bc_digest;
1216 xmd.block_gain = block_ibc_gain; xmd.xor_d64 = ibc_digest;
1225 if (block_gc_gain >= block_bc_gain && block_gc_gain >= block_ibc_gain)
1227 xmd.block_gain = block_gc_gain; xmd.xor_d64 = gc_digest;
1230 if (block_bc_gain > block_gc_gain && block_bc_gain > block_ibc_gain)
1232 xmd.block_gain = block_bc_gain; xmd.xor_d64 = bc_digest;
1236 xmd.block_gain = block_ibc_gain; xmd.xor_d64 = ibc_digest;
1244template<
typename BV>
1250 unsigned i,
unsigned j,
1258 if (ridx_to > ref_vect_->
size())
1259 ridx_to = ref_vect_->
size();
1263 found_block_xor_ = 0;
1265 unsigned best_block_gain = 0;
1268 match_vect_.resize(0);
1278 s_block = nb_blocks_vect_.at(s_ri);
1289 for (
size_type ri = ridx_from; ri < ridx_to; ++ri, ++depth)
1303 unsigned gc_diff = r_gc - s_gc;
1304 if (gc_diff >= s_gc)
1309 if (nb_blocks_vect_.size() > ri)
1310 ref_block = nb_blocks_vect_[ri];
1326 BM_ASSERT(x_best_metric_ <= s_block_best_metric_);
1337 float(xmd.
block_gain) / float(s_block_best_metric_);
1348 match_vect_.push_back(xmd);
1358 const float bie_bits_per_int = 3.0f;
1359 const unsigned bie_limit =
1362 unsigned xor_bc, xor_gc;
1366 ref_block = nb_blocks_vect_[
size_type(best_ri)];
1367 found_block_xor_ = ref_block;
1375 x_best_metric_ = xor_bc;
1388 rb_found =
best_metric(xor_bc, xor_gc, &x_best_metric_);
1395 if (x_best_metric_ > bie_limit ||
1403 unsigned gain_min = unsigned(
sizeof(
char) +
sizeof(
unsigned));
1408 if (x_best_metric_ <= 1)
1412 if (gain > gain_min)
1424template<
typename BV>
1428 if (x_d64_ == ~0ull || !x_d64_)
1433 chain_match_vect_, match_vect_, found_ridx_, x_d64_, mtype);
1439template<
typename BV>
1446 ref_vect_ = &ref_vect;
1449 ref_vect_ = ref_vect_curr;
1453template<
typename BV>
1478template<
typename BV>
1485 BM_ASSERT(nb_rank < sim_model_matr.cols());
1487 const float bie_bits_per_int = 3.0f;
1488 const unsigned bie_limit =
1523 if (s_block_best_metric_ < 3)
1529 i0, j0, xor_tmp_block_, params);
1537 if (d64 && d64 != ~0ULL)
1547 BM_ASSERT(chain_size == pm_vect.size());
1552 unsigned xor_bc, xor_gc;
1558 if ((x_best_metric_ > bie_limit) ||
1564 for (
size_type k = 0; k < chain_size; ++k)
1576 bmc.
ref_idx[0] = unsigned(ridx);
1580 bmc.
ref_idx[0] = unsigned(ridx);
1586 BM_ASSERT(chain_size == pm_vect.size());
1587 auto sz = pm_vect.size();
1606template<
typename BV>
1617 s_block = nb_blocks_vect_.at(s_ri);
1620 auto sz = pm_vect.size();
1621 for (
typename match_pairs_vector_type::size_type k = 0; k < sz; ++k)
1627 ref_block = nb_blocks_vect_[mp.
ref_idx];
1639template<
typename BV>
1674template<
typename BV>
1677 size_t sz = nb_blocks_vect_.size();
1678 for (
size_t i = 0; i < sz; ++i)
1682 alloc_.free_bit_block(blk);
1684 nb_blocks_vect_.resize(0);
1689template<
typename BV>
1694 BM_ASSERT(nb_blocks_vect_.size() == rsize);
1708 bm::word_t* t_block = nb_blocks_vect_.at(ri);
1711 t_block = alloc_.alloc_bit_block();
1712 nb_blocks_vect_[ri] = t_block;
1723 nb_gc_vect_[ri] = gc;
1724 nb_bc_vect_[ri] = bc;
1731template<
typename BV>
1735 if (nb_blocks_vect_.size() == rsize)
1738 nb_blocks_vect_.resize(rsize);
1739 bm::word_t** vect_data = nb_blocks_vect_.data();
1742 nb_gc_vect_.resize(rsize);
1743 nb_bc_vect_.resize(rsize);
1744 nb_xdescr_vect_.resize(rsize);
#define BM_DECLARE_TEMP_BLOCK(x)
#define VECT_BIT_BLOCK_XOR(t, src, src_xor, d)
#define VECT_BLOCK_CHANGE(block, size)
#define VECT_BIT_BLOCK_XOR_2WAY(t, src_xor, d)
#define VECT_BLOCK_XOR_CHANGE(block, xor_block, size, gc, bc)
#define IS_VALID_ADDR(addr)
Bit manipulation primitives (internal)
List of reference bit-vectors with their true index associations.
bvector_type::size_type size_type
void add_vectors(const BMATR &bmatr)
Append basic bit-matrix to the list of reference vectors.
bm::block_match_chain< size_type > block_match_chain_type
void fill_alloc_digest(bvector_type &bv_blocks) const
Fill block allocation digest for all vectors in the reference collection.
bm::heap_vector< std::size_t, bv_allocator_type, true > bv_plane_vector_type
size_type size() const BMNOEXCEPT
Get reference list size.
static size_type not_found() BMNOEXCEPT
not-found value for find methods
bvector_type * bvector_type_ptr
bool build_nb_digest_and_xor_matrix(matrix_chain_type &matr, bvector_type &bv_blocks) const
Calculate blocks digest and resize XOR distance matrix based on total number of available blocks.
const bvector_type * bvector_type_const_ptr
void add(const bvector_type *bv, size_type ref_idx)
Add reference vector.
const bvector_type * get_bv(size_type idx) const BMNOEXCEPT
Get reference vector by the index in this ref-vector.
size_type find_bv(const bvector_type *bv) const BMNOEXCEPT
Find vector index by the pointer.
size_type get_row_idx(size_type idx) const BMNOEXCEPT
Get reference row index by the index in this ref-vector.
void reset()
reset the collection (resize(0))
void build(const BMATR &bmatr)
Reset and build vector of references from a basic bit-matrix; all NULL rows are skipped,...
bm::dynamic_heap_matrix< block_match_chain_type, bv_allocator_type > matrix_chain_type
size_type find(std::size_t ref_idx) const BMNOEXCEPT
Find vector index by the reference index.
bm::heap_vector< bvector_type_const_ptr, bv_allocator_type, true > bvptr_vector_type
void resize_xor_matrix(matrix_chain_type &matr, size_type total_blocks) const
Utility function to resize matrix based on number of vectors and blocks.
bvector_type::allocator_type bv_allocator_type
void add_sparse_vector(const SV &sv)
Add bit-transposed sparse vector as a bit-matrix.
bv_plane_vector_type ref_bvects_rows_
reference vector row idxs
unsigned rows_acc_
total rows accumulator
bvptr_vector_type ref_bvects_
reference vector pointers
Constant iterator designed to enumerate "ON" bits.
bool valid() const BMNOEXCEPT
Checks if iterator is still valid.
bvector_size_type size_type
XOR scanner to search for complement-similarities in collections of bit-vectors.
unsigned get_s_gc() const BMNOEXCEPT
const bm::word_t * get_ref_block(size_type ri, unsigned i, unsigned j) const BMNOEXCEPT
Return block from the reference vector [vect_idx, block_i, block_j].
static bm::xor_complement_match best_metric(unsigned bc, unsigned gc, unsigned *best_metric) BMNOEXCEPT
bm::bv_ref_vector< BV > bv_ref_vector_type
bm::heap_vector< unsigned, bv_allocator_type, true > bv_bcgc_vector_type
void sync_nb_vect()
Sync TEMP vector size.
bvector_type::size_type size_type
bm::xor_complement_match search_best_xor_mask(const bm::word_t *s_block, size_type ri, size_type ridx_from, size_type ridx_to, unsigned i, unsigned j, bm::word_t *tx_block, const bm::xor_sim_params ¶ms)
Scan for all candidate bit-blocks to find mask or match.
void free_blocks() BMNOEXCEPT
Free the collection of temp blocks.
unsigned get_x_best_metric() const BMNOEXCEPT
bool validate_xor(const bm::word_t *xor_block) const BMNOEXCEPT
Check if XOR transform simplified block enough for compressibility objective.
bool compute_sim_model(xor_sim_model< BV > &sim_model, const bv_ref_vector_type &ref_vect, const bm::xor_sim_params ¶ms)
Calculate matrix of best XOR match metrics per block for the attached collection of bit-vectors.
void get_s_block_stats(size_type ri) BMNOEXCEPT
Get statistics for the r-(or s-) block.
xor_matches_vector_type & get_match_vector() BMNOEXCEPT
match_pairs_vector_type & get_match_pairs() BMNOEXCEPT
size_type refine_match_chain()
Run a search to add possible XOR match chain additions.
bm::id64_t get_xor_digest() const BMNOEXCEPT
bvector_type::allocator_type bv_allocator_type
unsigned get_s_bc() const BMNOEXCEPT
bv_ref_vector_type::matrix_chain_type matrix_chain_type
void deoptimize_gap_blocks(size_type nb, const xor_sim_params ¶ms)
Deoptimize vertical slice of GAP blocks.
unsigned get_s_block_best() const BMNOEXCEPT
bm::heap_vector< bm::block_xor_match_descr, bv_allocator_type, true > xor_matches_vector_type
bm::block_waves_xor_descr & get_descr() BMNOEXCEPT
void compute_s_block_stats(const bm::word_t *block) BMNOEXCEPT
Compute statistics for the r-(or s-) block.
bm::xor_complement_match get_best_match_type() const BMNOEXCEPT
Return best match type of a found block.
bm::heap_vector< bm::block_waves_xor_descr, bv_allocator_type, true > bv_xdescr_vector_type
const bv_ref_vector_type & get_ref_vector() const BMNOEXCEPT
size_type found_ridx() const BMNOEXCEPT
void apply_xor_match_vector(bm::word_t *target_xor_block, const bm::word_t *s_block, size_type s_ri, const match_pairs_vector_type &pm_vect, unsigned i, unsigned j) const BMNOEXCEPT
XOR all match blocks to target using their digest masks.
void compute_xor_complexity_descr(const bm::word_t *BMRESTRICT block, bm::id64_t block_d64, const bm::word_t *BMRESTRICT xor_block, bm::block_waves_xor_descr &BMRESTRICT x_descr, bm::block_xor_match_descr &BMRESTRICT xmd) const BMNOEXCEPT
Compute reference complexity descriptor based on XOR vector.
void set_ref_vector(const bv_ref_vector_type *ref_vect) BMNOEXCEPT
const bm::word_t * get_found_block() const BMNOEXCEPT
bm::heap_vector< bm::match_pair, bv_allocator_type, true > match_pairs_vector_type
bm::heap_vector< bm::word_t *, bv_allocator_type, true > bv_blocks_vector_type
BMFORCEINLINE bm::id_t word_bitcount(bm::id_t w) BMNOEXCEPT
void bit_block_xor_change64(const bm::word_t *BMRESTRICT s_block, const bm::word_t *BMRESTRICT ref_block, unsigned size, unsigned *BMRESTRICT gc, unsigned *BMRESTRICT bc) BMNOEXCEPT
BMFORCEINLINE unsigned bit_count_min_unroll(const bm::word_t *BMRESTRICT block, const bm::word_t *BMRESTRICT block_end) BMNOEXCEPT
Bitcount for bit block without aggressive unrolling.
void bit_block_xor_change32(const bm::word_t *BMRESTRICT block, const bm::word_t *BMRESTRICT xor_block, unsigned size, unsigned *BMRESTRICT gc, unsigned *BMRESTRICT bc) BMNOEXCEPT
BMFORCEINLINE unsigned word_bitcount64(bm::id64_t x) BMNOEXCEPT
bm::id64_t calc_block_digest0(const bm::word_t *const block) BMNOEXCEPT
Compute digest for 64 non-zero areas.
bm::id64_t bit_block_xor(bm::word_t *BMRESTRICT dst, const bm::word_t *BMRESTRICT src) BMNOEXCEPT
Plain bitblock XOR operation. Function does not analyse availability of source and destination blocks...
void gap_convert_to_bitset(unsigned *BMRESTRICT dest, const T *BMRESTRICT buf, unsigned len=0) BMNOEXCEPT
GAP block to bitblock conversion.
BMFORCEINLINE bm::gap_word_t gap_length(const bm::gap_word_t *BMRESTRICT buf) BMNOEXCEPT
Returns GAP block length.
xor_complement_match
XOR complementarity type between 2 blocks.
const unsigned set_block_digest_wave_size
void bit_block_change_bc(const bm::word_t *BMRESTRICT block, unsigned *BMRESTRICT gc, unsigned *BMRESTRICT bc) BMNOEXCEPT
unsigned char check_pair_vect_vbr(const BMChain &mchain, const RVect &ref_vect)
Check effective bit-rate for the XOR encode vector.
void compute_s_block_descr(const bm::word_t *BMRESTRICT block, block_waves_xor_descr &BMRESTRICT x_descr, unsigned *BMRESTRICT s_gc, unsigned *BMRESTRICT s_bc) BMNOEXCEPT
Compute reference (non-XOR) 64-dim complexity descriptor for the s-block.
unsigned bit_block_change64(const bm::word_t *BMRESTRICT in_block, unsigned size) BMNOEXCEPT
BMFORCEINLINE void get_block_coord(BI_TYPE nb, unsigned &i, unsigned &j) BMNOEXCEPT
Recalc linear bvector block index into 2D matrix coordinates.
VT::size_type greedy_refine_match_vector(PVT &match_pairs_vect, VT &match_vect, typename VT::size_type best_ref_idx, bm::id64_t d64, bm::xor_complement_match match_type)
Greedy algorithm to find additional matches improving the initial best match block on its match type.
bool block_find_first_diff(const bm::word_t *BMRESTRICT blk, const bm::word_t *BMRESTRICT arg_blk, unsigned *BMRESTRICT pos) BMNOEXCEPT
Find first bit which is different between two blocks (GAP or bit)
unsigned long long int id64_t
const unsigned block_waves
unsigned bit_block_change32(const bm::word_t *BMRESTRICT block, unsigned size) BMNOEXCEPT
BMFORCEINLINE unsigned long long bmi_bslr_u64(unsigned long long w) BMNOEXCEPT
unsigned short gap_word_t
bm::id_t bvector_size_type
const unsigned gap_max_bits
BMFORCEINLINE unsigned long long bmi_blsi_u64(unsigned long long w)
void bit_block_xor_change(const bm::word_t *BMRESTRICT block, const bm::word_t *BMRESTRICT xor_block, unsigned size, unsigned *BMRESTRICT gc, unsigned *BMRESTRICT bc) BMNOEXCEPT
bool operator==(const block_match_chain &bmc) const BMNOEXCEPT
bm::xor_complement_match match
Structure to compute XOR gap-count profile by sub-block waves.
unsigned short sb_bc[bm::block_waves]
BIT counts.
unsigned short sb_gc[bm::block_waves]
GAP counts.
unsigned short sb_xor_gc[bm::block_waves]
XOR-mask GAP count.
unsigned short sb_xor_bc[bm::block_waves]
XOR-mask BIT count.
Capture the XOR filter results (xor block against ref.block)
size_type ref_idx
reference vector index
bm::id64_t xor_d64
recorded digest
bvector_size_type size_type
unsigned block_gain
XOR filter improvement (best)
bm::xor_complement_match match_type
match type
bvector_size_type ref_idx
reference vector index
bm::id64_t xor_d64
recorded digest
match_pair(bvector_size_type idx, bm::id64_t d64)
bm::dynamic_heap_matrix< block_match_chain_type, bv_allocator_type > matrix_chain_type
bm::block_match_chain< size_type > block_match_chain_type
bvector_type::size_type size_type
matrix_chain_type matr
model matrix
bvector_type bv_blocks
blocks digest
bvector_type::allocator_type bv_allocator_type
Parameters for XOR similarity search.
unsigned min_lookup_depth
unsigned max_lookup_depth