Skip to content
Snippets Groups Projects
Commit c8671143 authored by Mathis "what could possibly go wrong" Randl's avatar Mathis "what could possibly go wrong" Randl
Browse files

Merge branch 'dyn_tol' into 'main'

Dyn tol

See merge request randl/proximity!3
parents a4a4d7d0 a9543226
No related branches found
No related tags found
No related merge requests found
# proximity-cache
# Proximity
Proximity is a research project exploring the optimization and speed-recall tradeoffs of approximate vector search in high-dimensional spaces.
We provide an approximate cache for vector databases that is written in Rust and exposes Python bindings.
......@@ -44,4 +44,3 @@ todo
This project is licensed under the MIT License. See LICENSE for details.
This code is meant as a beta/development playground. It should not be used for production systems.
......@@ -9,5 +9,5 @@ name = "proximipy"
crate-type = ["cdylib"]
[dependencies]
pyo3 = "0.23.3"
pyo3 = {version = "0.23.3", features = ["py-clone"]}
proximity-cache = { path = "../core"}
use std::hash::{Hash, Hasher};
use proximipy::caching::approximate_cache::ApproximateCache;
use proximipy::caching::bounded::fifo::fifo_cache::FifoCache as FifoInternal;
use proximipy::caching::bounded::lru::lru_cache::LRUCache as LruInternal;
use proximipy::numerics::comp::ApproxComparable;
use proximipy::numerics::f32vector::F32Vector;
use proximipy::{caching::approximate_cache::ApproximateCache, numerics::comp::ApproxComparable};
use pyo3::PyObject;
use pyo3::{
pyclass, pymethods,
types::{PyAnyMethods, PyList},
......@@ -12,7 +14,7 @@ use pyo3::{
};
macro_rules! create_pythonized_interface {
($internal : ident, $name: ident, $keytype: ident, $valuetype : ident) => {
($internal : ident, $name: ident, $keytype: ident) => {
// unsendable == should hard-crash if Python tries to access it from
// two different Python threads.
//
......@@ -24,36 +26,33 @@ macro_rules! create_pythonized_interface {
// happen on the Rust side and will not be visible to the Python ML pipeline.
#[pyclass(unsendable)]
pub struct $name {
inner: $internal<$keytype, $valuetype>,
inner: $internal<$keytype, PyObject>,
}
#[pymethods]
impl $name {
#[new]
pub fn new(max_capacity: usize, tolerance: f32) -> Self {
pub fn new(max_capacity: usize) -> Self {
Self {
inner: $internal::new(max_capacity, tolerance),
inner: $internal::new(max_capacity),
}
}
fn find(&mut self, k: $keytype) -> Option<$valuetype> {
fn find(&mut self, k: $keytype) -> Option<PyObject> {
self.inner.find(&k)
}
fn batch_find(&mut self, ks: Vec<$keytype>) -> Vec<Option<$valuetype>> {
fn batch_find(&mut self, ks: Vec<$keytype>) -> Vec<Option<PyObject>> {
// more efficient than a python for loop
ks.into_iter().map(|k| self.find(k)).collect()
}
fn insert(&mut self, key: $keytype, value: $valuetype) {
self.inner.insert(key, value)
}
fn len(&self) -> usize {
self.inner.len()
fn insert(&mut self, key: $keytype, value: PyObject, tolerance: f32) {
self.inner.insert(key, value, tolerance)
}
fn __len__(&self) -> usize {
self.len()
self.inner.len()
}
}
};
......@@ -138,8 +137,6 @@ impl ApproxComparable for VecPy<f32> {
}
type F32VecPy = VecPy<f32>;
type UsizeVecPy = VecPy<usize>;
type UsizeWithRankingVecPy = (UsizeVecPy, F32VecPy);
create_pythonized_interface!(LruInternal, LRUCache, F32VecPy, UsizeWithRankingVecPy);
create_pythonized_interface!(FifoInternal, FifoCache, F32VecPy, UsizeWithRankingVecPy);
create_pythonized_interface!(LruInternal, LRUCache, F32VecPy);
create_pythonized_interface!(FifoInternal, FifoCache, F32VecPy);
use crate::numerics::comp::ApproxComparable;
// size of caches in implementations where that should be known at comptime
pub const COMPTIME_CACHE_SIZE: usize = 1024;
pub type Tolerance = f32;
pub trait ApproximateCache<K, V>
where
K: ApproxComparable,
V: Clone,
{
fn find(&mut self, key: &K) -> Option<V>;
fn insert(&mut self, key: K, value: V);
fn find(&mut self, target: &K) -> Option<V>;
fn insert(&mut self, key: K, value: V, tolerance: f32);
fn len(&self) -> usize;
fn is_empty(&self) -> bool {
self.len() == 0
......
use std::collections::VecDeque;
use crate::{caching::approximate_cache::ApproximateCache, numerics::comp::ApproxComparable};
use crate::caching::approximate_cache::ApproximateCache;
use crate::caching::approximate_cache::Tolerance;
use crate::numerics::comp::ApproxComparable;
/// A single FIFO cache entry: the inserted key, the per-entry tolerance
/// radius under which lookups may fuzzily match this key, and the stored
/// value.
#[derive(Clone)]
struct CacheLine<K, V> {
    // key under which this entry was inserted
    key: K,
    // per-entry match radius used by `roughly_matches` at lookup time
    tol: Tolerance,
    value: V,
}
pub struct FifoCache<K, V> {
max_capacity: usize,
items: VecDeque<(K, V)>,
tolerance: f32,
items: VecDeque<CacheLine<K, V>>,
}
impl<K, V> ApproximateCache<K, V> for FifoCache<K, V>
......@@ -13,21 +21,27 @@ where
K: ApproxComparable,
V: Clone,
{
fn find(&mut self, key: &K) -> Option<V> {
fn find(&mut self, target: &K) -> Option<V> {
let candidate = self
.items
.iter()
.min_by(|&(x, _), &(y, _)| key.fuzziness(x).partial_cmp(&key.fuzziness(y)).unwrap())?;
let (candidate_key, candidate_value) = candidate;
if candidate_key.roughly_matches(key, self.tolerance) {
Some(candidate_value.clone())
} else {
None
}
.filter(|&entry| entry.key.roughly_matches(target, entry.tol))
.min_by(|&x, &y| {
target
.fuzziness(&x.key)
.partial_cmp(&target.fuzziness(&y.key))
.unwrap()
})?;
Some(candidate.value.clone())
}
fn insert(&mut self, key: K, value: V) {
self.items.push_back((key, value));
fn insert(&mut self, key: K, value: V, tolerance: f32) {
let new_entry = CacheLine {
key,
tol: tolerance,
value,
};
self.items.push_back(new_entry);
if self.items.len() > self.max_capacity {
self.items.pop_front();
}
......@@ -39,13 +53,11 @@ where
}
impl<K, V> FifoCache<K, V> {
pub fn new(max_capacity: usize, tolerance: f32) -> Self {
pub fn new(max_capacity: usize) -> Self {
assert!(max_capacity > 0);
assert!(tolerance > 0.0);
Self {
max_capacity,
items: VecDeque::with_capacity(max_capacity),
tolerance,
}
}
}
......@@ -57,11 +69,11 @@ mod tests {
const TEST_TOLERANCE: f32 = 1e-8;
#[test]
fn test_fifo_cache_basic_operations() {
let mut cache = FifoCache::new(2, TEST_TOLERANCE);
cache.insert(1, 1); // Cache is {1=1}
cache.insert(2, 2); // Cache is {1=1, 2=2}
let mut cache = FifoCache::new(2);
cache.insert(1, 1, TEST_TOLERANCE); // Cache is {1=1}
cache.insert(2, 2, TEST_TOLERANCE); // Cache is {1=1, 2=2}
assert_eq!(cache.find(&1), Some(1)); // Returns 1, Cache is {1=1, 2=2}
cache.insert(3, 3); // Evicts key 1, Cache is {2=2, 3=3}
cache.insert(3, 3, TEST_TOLERANCE); // Evicts key 1, Cache is {2=2, 3=3}
assert_eq!(cache.find(&1), None); // Key 1 not found
assert_eq!(cache.find(&2), Some(2)); // Returns 2
assert_eq!(cache.find(&3), Some(3)); // Returns 3
......@@ -69,11 +81,11 @@ mod tests {
#[test]
fn test_fifo_cache_eviction_order() {
let mut cache = FifoCache::new(3, TEST_TOLERANCE);
cache.insert(1, 1); // Cache is {1=1}
cache.insert(2, 2); // Cache is {1=1, 2=2}
cache.insert(3, 3); // Cache is {1=1, 2=2, 3=3}
cache.insert(4, 4); // Evicts key 1, Cache is {2=2, 3=3, 4=4}
let mut cache = FifoCache::new(3);
cache.insert(1, 1, TEST_TOLERANCE); // Cache is {1=1}
cache.insert(2, 2, TEST_TOLERANCE); // Cache is {1=1, 2=2}
cache.insert(3, 3, TEST_TOLERANCE); // Cache is {1=1, 2=2, 3=3}
cache.insert(4, 4, TEST_TOLERANCE); // Evicts key 1, Cache is {2=2, 3=3, 4=4}
assert_eq!(cache.find(&1), None); // Key 1 not found
assert_eq!(cache.find(&2), Some(2)); // Returns 2
assert_eq!(cache.find(&3), Some(3)); // Returns 3
......@@ -82,22 +94,22 @@ mod tests {
#[test]
fn test_fifo_cache_overwrite() {
let mut cache = FifoCache::new(2, TEST_TOLERANCE);
cache.insert(1, 1); // Cache is {1=1}
cache.insert(2, 2); // Cache is {1=1, 2=2}
cache.insert(1, 10); // Overwrites key 1, Cache is {2=2, 1=10}
let mut cache = FifoCache::new(2);
cache.insert(1, 1, TEST_TOLERANCE); // Cache is {1=1}
cache.insert(2, 2, TEST_TOLERANCE); // Cache is {1=1, 2=2}
cache.insert(1, 10, TEST_TOLERANCE); // Overwrites key 1, Cache is {2=2, 1=10}
assert_eq!(cache.find(&1), Some(10)); // Returns 10
cache.insert(3, 3); // Evicts key 2, Cache is {1=10, 3=3}
cache.insert(3, 3, TEST_TOLERANCE); // Evicts key 2, Cache is {1=10, 3=3}
assert_eq!(cache.find(&2), None); // Key 2 not found
assert_eq!(cache.find(&3), Some(3)); // Returns 3
}
#[test]
fn test_fifo_cache_capacity_one() {
let mut cache = FifoCache::new(1, TEST_TOLERANCE);
cache.insert(1, 1); // Cache is {1=1}
let mut cache = FifoCache::new(1);
cache.insert(1, 1, TEST_TOLERANCE); // Cache is {1=1}
assert_eq!(cache.find(&1), Some(1)); // Returns 1
cache.insert(2, 2); // Evicts key 1, Cache is {2=2}
cache.insert(2, 2, TEST_TOLERANCE); // Evicts key 1, Cache is {2=2}
assert_eq!(cache.find(&1), None); // Key 1 not found
assert_eq!(cache.find(&2), Some(2)); // Returns 2
}
......@@ -105,6 +117,6 @@ mod tests {
#[test]
#[should_panic]
fn test_fifo_cache_empty() {
let _cache: FifoCache<i16, i16> = FifoCache::new(0, TEST_TOLERANCE);
let _cache: FifoCache<i16, i16> = FifoCache::new(0);
}
}
......@@ -7,6 +7,7 @@ use crate::caching::approximate_cache::ApproximateCache;
use super::linked_list::DoublyLinkedList;
use super::list_node::{Node, SharedNode};
use super::map_entry::MapEntry;
/// `LRUCache` is a bounded cache with approximate key matching support and LRU eviction.
///
......@@ -19,16 +20,17 @@ use super::list_node::{Node, SharedNode};
/// use proximipy::caching::bounded::lru::lru_cache::LRUCache;
/// use proximipy::caching::approximate_cache::ApproximateCache;
///
/// let mut cache = LRUCache::new(3, 2.0);
/// let mut cache = LRUCache::new(3);
/// const TEST_TOL: f32 = 2.0;
///
/// cache.insert(10 as i16, "Value 1");
/// cache.insert(20, "Value 2");
/// cache.insert(30, "Value 3");
/// cache.insert(10 as i16, "Value 1", TEST_TOL);
/// cache.insert(20, "Value 2", TEST_TOL);
/// cache.insert(30, "Value 3", TEST_TOL);
///
/// assert_eq!(cache.find(&11), Some("Value 1"));
/// assert_eq!(cache.len(), 3);
///
/// cache.insert(40, "Value 4"); // Evicts the least recently used (Key(20))
/// cache.insert(40, "Value 4", TEST_TOL); // Evicts the least recently used (Key(20))
/// assert!(cache.find(&20).is_none());
/// ```
///
......@@ -43,9 +45,8 @@ use super::list_node::{Node, SharedNode};
/// - `len(&self) -> usize`: Returns the current size of the cache.
pub struct LRUCache<K, V> {
max_capacity: usize,
map: HashMap<K, SharedNode<K, V>>,
list: DoublyLinkedList<K, V>,
tolerance: f32,
map: HashMap<MapEntry<K>, SharedNode<MapEntry<K>, V>>,
list: DoublyLinkedList<MapEntry<K>, V>,
}
impl<K, V> ApproximateCache<K, V> for LRUCache<K, V>
......@@ -53,33 +54,37 @@ where
K: ApproxComparable + Eq + Hash + Clone,
V: Clone,
{
fn find(&mut self, key: &K) -> Option<V> {
let potential_candi = self
fn find(&mut self, target: &K) -> Option<V> {
let candidate = self
.map
.keys()
.min_by(|&x, &y| key.fuzziness(x).partial_cmp(&key.fuzziness(y)).unwrap())?;
let matching = if potential_candi.roughly_matches(key, self.tolerance) {
Some(potential_candi)
} else {
None
}?;
let node: SharedNode<K, V> = self.map.get(matching).cloned()?;
.filter(|&entry| entry.key.roughly_matches(target, entry.tolerance))
.min_by(|&xentry, &yentry| {
target
.fuzziness(&xentry.key)
.partial_cmp(&target.fuzziness(&yentry.key))
.unwrap()
})?;
let node: SharedNode<MapEntry<K>, V> = self.map.get(candidate).cloned()?;
self.list.remove(node.clone());
self.list.add_to_head(node.clone());
return Some(node.borrow().value.clone());
}
fn insert(&mut self, key: K, value: V) {
fn insert(&mut self, key: K, value: V, tolerance: f32) {
if self.len() >= self.max_capacity {
if let Some(tail) = self.list.remove_tail() {
self.map.remove(&tail.borrow().key);
}
}
let new_node = Node::new(key.clone(), value);
let map_entry = MapEntry {
key: key.clone(),
tolerance,
};
let new_node = Node::new(map_entry.clone(), value);
self.list.add_to_head(new_node.clone());
self.map.insert(key, new_node);
self.map.insert(map_entry, new_node);
}
fn len(&self) -> usize {
......@@ -88,14 +93,12 @@ where
}
impl<K, V> LRUCache<K, V> {
pub fn new(max_capacity: usize, tolerance: f32) -> Self {
pub fn new(max_capacity: usize) -> Self {
assert!(max_capacity > 0);
assert!(tolerance > 0.0);
Self {
max_capacity,
map: HashMap::with_capacity(max_capacity),
list: DoublyLinkedList::new(),
tolerance,
}
}
}
......@@ -107,13 +110,13 @@ mod tests {
const TEST_TOLERANCE: f32 = 1e-8;
#[test]
fn test_lru_cache_basic_operations() {
let mut cache = LRUCache::new(2, TEST_TOLERANCE);
cache.insert(1, 1); // Cache is {1=1}
cache.insert(2, 2); // Cache is {1=1, 2=2}
let mut cache = LRUCache::new(2);
cache.insert(1, 1, TEST_TOLERANCE); // Cache is {1=1}
cache.insert(2, 2, TEST_TOLERANCE); // Cache is {1=1, 2=2}
assert_eq!(cache.find(&1), Some(1)); // Returns 1, Cache is {2=2, 1=1}
cache.insert(3, 3); // Evicts key 2, Cache is {1=1, 3=3}
cache.insert(3, 3, TEST_TOLERANCE); // Evicts key 2, Cache is {1=1, 3=3}
assert_eq!(cache.find(&2), None); // Key 2 not found
cache.insert(4, 4); // Evicts key 1, Cache is {3=3, 4=4}
cache.insert(4, 4, TEST_TOLERANCE); // Evicts key 1, Cache is {3=3, 4=4}
assert_eq!(cache.find(&1), None); // Key 1 not found
assert_eq!(cache.find(&3), Some(3)); // Returns 3
assert_eq!(cache.find(&4), Some(4)); // Returns 4
......@@ -121,12 +124,12 @@ mod tests {
#[test]
fn test_lru_cache_eviction_order() {
let mut cache = LRUCache::new(3, TEST_TOLERANCE);
cache.insert(1, 1); // Cache is {1=1}
cache.insert(2, 2); // Cache is {1=1, 2=2}
cache.insert(3, 3); // Cache is {1=1, 2=2, 3=3}
let mut cache = LRUCache::new(3);
cache.insert(1, 1, TEST_TOLERANCE); // Cache is {1=1}
cache.insert(2, 2, TEST_TOLERANCE); // Cache is {1=1, 2=2}
cache.insert(3, 3, TEST_TOLERANCE); // Cache is {1=1, 2=2, 3=3}
cache.find(&1); // Access key 1, Cache is {2=2, 3=3, 1=1}
cache.insert(4, 4); // Evicts key 2, Cache is {3=3, 1=1, 4=4}
cache.insert(4, 4, TEST_TOLERANCE); // Evicts key 2, Cache is {3=3, 1=1, 4=4}
assert_eq!(cache.find(&2), None); // Key 2 not found
assert_eq!(cache.find(&3), Some(3)); // Returns 3
assert_eq!(cache.find(&4), Some(4)); // Returns 4
......@@ -135,22 +138,22 @@ mod tests {
#[test]
fn test_lru_cache_overwrite() {
let mut cache = LRUCache::new(2, TEST_TOLERANCE);
cache.insert(1, 1); // Cache is {1=1}
cache.insert(2, 2); // Cache is {1=1, 2=2}
cache.insert(1, 10); // Overwrites key 1, Cache is {2=2, 1=10}
let mut cache = LRUCache::new(2);
cache.insert(1, 1, TEST_TOLERANCE); // Cache is {1=1}
cache.insert(2, 2, TEST_TOLERANCE); // Cache is {1=1, 2=2}
cache.insert(1, 10, TEST_TOLERANCE); // Overwrites key 1, Cache is {2=2, 1=10}
assert_eq!(cache.find(&1), Some(10)); // Returns 10
cache.insert(3, 3); // Evicts key 2, Cache is {1=10, 3=3}
cache.insert(3, 3, TEST_TOLERANCE); // Evicts key 2, Cache is {1=10, 3=3}
assert_eq!(cache.find(&2), None); // Key 2 not found
assert_eq!(cache.find(&3), Some(3)); // Returns 3
}
#[test]
fn test_lru_cache_capacity_one() {
let mut cache = LRUCache::new(1, TEST_TOLERANCE);
cache.insert(1, 1); // Cache is {1=1}
let mut cache = LRUCache::new(1);
cache.insert(1, 1, TEST_TOLERANCE); // Cache is {1=1}
assert_eq!(cache.find(&1), Some(1)); // Returns 1
cache.insert(2, 2); // Evicts key 1, Cache is {2=2}
cache.insert(2, 2, TEST_TOLERANCE); // Evicts key 1, Cache is {2=2}
assert_eq!(cache.find(&1), None); // Key 1 not found
assert_eq!(cache.find(&2), Some(2)); // Returns 2
}
......@@ -158,6 +161,6 @@ mod tests {
#[test]
#[should_panic]
fn test_lru_cache_empty() {
let _cache: LRUCache<i16, i16> = LRUCache::new(0, TEST_TOLERANCE);
let _cache: LRUCache<i16, i16> = LRUCache::new(0);
}
}
use std::hash::Hash;
/// Key wrapper stored in the LRU cache's map: pairs a key with the
/// per-entry tolerance radius used for approximate matching at lookup time.
///
/// `Debug` is derived (conditionally on `K: Debug`) so that public cache
/// internals can be inspected in diagnostics; this is backward compatible.
#[derive(Clone, Debug)]
pub struct MapEntry<K> {
    /// The key under which the value was inserted.
    pub key: K,
    /// Match radius for this entry (see `ApproxComparable::roughly_matches`).
    pub tolerance: f32,
}
impl<K: Eq> PartialEq for MapEntry<K> {
    /// Compares tolerances bitwise (`to_bits`) so equality stays consistent
    /// with the `Hash` impl, which hashes `tolerance.to_bits()`.
    ///
    /// Plain `f32::==` would report `0.0 == -0.0` even though the two hash
    /// differently, violating the `a == b ⇒ hash(a) == hash(b)` contract
    /// `HashMap` relies on; it would also make `NaN != NaN`, breaking the
    /// reflexivity promised by the `Eq` marker impl. Bitwise comparison
    /// fixes both.
    fn eq(&self, other: &Self) -> bool {
        self.key == other.key && self.tolerance.to_bits() == other.tolerance.to_bits()
    }
}
// Marker impl promising a total equivalence relation. NOTE(review): with the
// IEEE `==` used on `tolerance` in `PartialEq`, a NaN tolerance would make
// equality non-reflexive (NaN != NaN) — confirm tolerances are always finite.
impl<K: Eq> Eq for MapEntry<K> {}
impl<K: Hash> Hash for MapEntry<K> {
    /// Hashes the key together with the raw IEEE-754 bit pattern of the
    /// tolerance. `f32` implements no `Hash` of its own, so `to_bits` is
    /// used to obtain a stable, total representation of the float.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.key.hash(state);
        // NOTE(review): bitwise hashing distinguishes 0.0 from -0.0 while the
        // `==`-based `PartialEq` treats them as equal — verify entries never
        // mix signed zeros, or the HashMap Eq/Hash contract is broken.
        self.tolerance.to_bits().hash(state);
    }
}
mod linked_list;
mod list_node;
pub mod lru_cache;
mod map_entry;
pub mod approximate_cache;
pub mod bounded;
pub mod unbounded_linear_cache;
use crate::caching::approximate_cache::ApproximateCache;
use crate::numerics::comp::ApproxComparable;
/// A cache implementation that checks all entries one-by-one, without eviction
/// ## Generic Types
/// The types K and V are used for the cache keys and values respectively.
///
/// K should be `ApproxComparable`, i.e. the compiler should know how to
/// decide that two K's are 'close enough' given a certain tolerance.
///
/// V should be `Clone` so that the user can do whatever they want with a returned
/// value without messing with the actual cache line.
///
/// ## Constructors
/// Use the ```new``` method to create a new cache. You will be asked to provide a
/// tolerance for the search; use ```with_initial_capacity``` to additionally
/// preallocate room for a given number of entries in memory.
/// ```tolerance``` indicates the searching sensitivity (see `ApproxComparable`),
/// which is a constant w.r.t. to the queried K (for now).
pub struct UnboundedLinearCache<K, V>
where
    K: ApproxComparable,
    V: Clone,
{
    // parallel vectors: keys[i] is the key of values[i]
    keys: Vec<K>,
    values: Vec<V>,
    // single cache-wide match radius, applied to every lookup
    tolerance: f32,
}
impl<K, V> UnboundedLinearCache<K, V>
where
    K: ApproxComparable,
    V: Clone,
{
    /// Creates an empty cache with the given match tolerance and no
    /// preallocated storage.
    pub fn new(tolerance: f32) -> Self {
        // a zero capacity performs no allocation, exactly like `Vec::new`
        Self::with_initial_capacity(tolerance, 0)
    }

    /// Creates an empty cache with the given match tolerance, preallocating
    /// room for `capacity` entries in both backing vectors.
    pub fn with_initial_capacity(tolerance: f32, capacity: usize) -> Self {
        Self {
            keys: Vec::with_capacity(capacity),
            values: Vec::with_capacity(capacity),
            tolerance,
        }
    }
}
impl<K, V> ApproximateCache<K, V> for UnboundedLinearCache<K, V>
where
    K: ApproxComparable,
    V: Clone,
{
    /// Scans every stored key in insertion order and returns a clone of the
    /// value paired with the first key the query roughly matches, if any.
    fn find(&mut self, to_find: &K) -> Option<V> {
        self.keys
            .iter()
            .zip(self.values.iter())
            .find(|&(key, _)| to_find.roughly_matches(key, self.tolerance))
            .map(|(_, value)| value.clone())
    }

    /// Inserting appends the pair for future scans — no eviction, no
    /// deduplication.
    fn insert(&mut self, key: K, value: V) {
        self.keys.push(key);
        self.values.push(value);
    }

    fn len(&self) -> usize {
        self.keys.len()
    }
}
#[cfg(test)]
mod tests {
    use crate::caching::approximate_cache::COMPTIME_CACHE_SIZE;

    use super::*;
    use quickcheck::{QuickCheck, TestResult};

    const TEST_TOLERANCE: f32 = 1e-8;
    const TEST_MAX_SIZE: usize = COMPTIME_CACHE_SIZE;

    /// Property: a key inserted first is always found again with its exact
    /// value, no matter what is inserted after it, because the linear scan
    /// returns the first match.
    #[test]
    fn start_always_matches_exactly() {
        fn qc_start_always_matches_exactly(
            start_state: Vec<(f32, u8)>,
            key: f32,
            value: u8,
        ) -> TestResult {
            let mut ulc = UnboundedLinearCache::<f32, u8>::new(TEST_TOLERANCE);
            // discard pathological inputs: non-finite query keys and
            // oversized generated states
            if !key.is_finite() || start_state.len() > TEST_MAX_SIZE {
                return TestResult::discard();
            }
            ulc.insert(key, value);
            for &(k, v) in start_state.iter() {
                ulc.insert(k, v);
            }
            // assert_eq! reports both sides on failure, unlike assert!(a == b)
            assert_eq!(ulc.len(), start_state.len() + 1);
            if let Some(x) = ulc.find(&key) {
                TestResult::from_bool(x == value)
            } else {
                TestResult::failed()
            }
        }
        QuickCheck::new()
            .tests(10_000)
            .min_tests_passed(1_000)
            .quickcheck(
                qc_start_always_matches_exactly as fn(Vec<(f32, u8)>, f32, u8) -> TestResult,
            );
    }

    /// Property: a key inserted in the middle still matches *something*;
    /// an earlier nearby key may win, so the returned value is unchecked.
    #[test]
    fn middle_always_matches() {
        fn qc_middle_always_matches(
            start_state: Vec<(f32, u8)>,
            key: f32,
            value: u8,
            end_state: Vec<(f32, u8)>,
        ) -> TestResult {
            let mut ulc = UnboundedLinearCache::<f32, u8>::new(TEST_TOLERANCE);
            if !key.is_finite() || start_state.len() > TEST_MAX_SIZE {
                return TestResult::discard();
            }
            for &(k, v) in start_state.iter() {
                ulc.insert(k, v);
            }
            ulc.insert(key, value);
            for &(k, v) in end_state.iter() {
                ulc.insert(k, v);
            }
            assert_eq!(ulc.len(), start_state.len() + end_state.len() + 1);
            // we should match on something but we can't know on what
            TestResult::from_bool(ulc.find(&key).is_some())
        }
        QuickCheck::new()
            .tests(10_000)
            .min_tests_passed(1_000)
            .quickcheck(
                qc_middle_always_matches
                    as fn(Vec<(f32, u8)>, f32, u8, Vec<(f32, u8)>) -> TestResult,
            );
    }
}
......@@ -27,7 +27,7 @@ fn main() {
let vecs_f: Vec<f32> = vecs.into_iter().map(f32::from).collect();
println!("{:?}", vecs_f.chunks_exact(128).next().unwrap());
let mut ulc = LRUCache::<F32Vector, usize>::new(10000, 15_000.0);
let mut ulc = LRUCache::<F32Vector, usize>::new(10000);
let mut count: u32 = 0;
let mut scanned: usize = 0;
......@@ -40,7 +40,7 @@ fn main() {
count += 1;
} else {
scanned += ulc.len();
ulc.insert(f32v, index);
ulc.insert(f32v, index, 15_000.0);
}
writeln!(file, "{} {}", index, find.unwrap_or(50001)).unwrap();
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment