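//! Criterion benchmarks comparing the chgk_ledb_lib `db::Writer`/`db::Reader`
//! pair against the `ledb` embedded storage, loading and reading the same
//! generated `TestData` records with both backends.
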
#[macro_use]
extern crate criterion;
extern crate bincode;
extern crate ledb;
extern crate ledb_types;
extern crate serde;
extern crate serde_derive;
extern crate tempfile;
#[macro_use]
extern crate serde_json;

use chgk_ledb_lib::db;
use std::path::PathBuf;

use db::{Reader, Writer, WriterOpts};

use criterion::{BatchSize, Criterion};
use tempfile::tempdir;

use ledb::{Document, Options, Storage};
use serde_derive::{Deserialize, Serialize};

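/// Record type stored by both backends; `#[document(primary)]` marks `num1`
/// as the primary key of the `ledb` collection.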
#[derive(
    bincode::Encode,
    bincode::Decode,
    Clone,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Serialize,
    Deserialize,
    Document,
)]
struct TestData {
    #[document(primary)]
    num1: u64,
    num2: u64,
    test: String,
}

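/// Number of test records generated for each benchmark.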
const N: usize = 4096;

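/// Produces `count` deterministic `TestData` records with varying string
/// payload lengths.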
fn gen_data(count: usize) -> impl Iterator<Item = TestData> {
    (0..count)
        .map(|i| 143 + i as u64)
        .map(|i| TestData {
            num1: i,
            num2: (i * 100) ^ 0xDF0E441122334455,
            test: "test ---- Test ____".repeat(123 + i as usize % 15),
        })
}

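/// Creates a `Writer` for the benchmark database with 100 MiB buffers and
/// compression level 1.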
fn prepare_db_writer(path: &PathBuf) -> Writer<TestData> {
    let opts = WriterOpts {
        compress_lvl: 1,
        data_buf_size: 100 * 1024 * 1024,
        out_buf_size: 100 * 1024 * 1024,
        current_buf_size: 10240,
    };

    Writer::new(path, opts).expect("new writer")
}

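/// Writes N records once up front, then measures a full sequential read
/// through `Reader`.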
fn db_read(c: &mut Criterion) {
    let dir = tempdir().expect("tempdir");
    let tmpfile = dir.path().join("test.tmp");
    let mut writer = prepare_db_writer(&tmpfile);

    let mut items_iter = gen_data(N).collect::<Vec<TestData>>().into_iter();
    writer.load(&mut items_iter).unwrap();
    writer.finish().unwrap();

    c.bench_function("read", |b| {
        b.iter_batched(
            || {
                let reader: Reader<TestData> = Reader::new(&tmpfile, 2048).expect("new reader");
                reader
            },
            |reader| {
                for item in reader {
                    drop(item);
                }
            },
            BatchSize::SmallInput,
        )
    });
}

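/// Measures writing all N records through `Writer`; the source data and the
/// writer are rebuilt in the setup closure of each batch.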
fn db_write(c: &mut Criterion) {
    let dir = tempdir().expect("tempdir");
    let tmpfile = dir.path().join("test.tmp");
    c.bench_function("write", |b| {
        b.iter_batched(
            || {
                let src = gen_data(N).collect::<Vec<TestData>>().into_iter();
                let writer = prepare_db_writer(&tmpfile);
                (src, writer)
            },
            |(mut src, mut writer)| {
                writer.load(&mut src).unwrap();
                writer.finish().unwrap();
            },
            BatchSize::SmallInput,
        )
    });
}

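/// Measures loading the same records into a fresh `ledb` collection opened
/// with relaxed durability options (`write_map`, `no_sync`, `no_meta_sync`).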
fn ledb_write(c: &mut Criterion) {
    let dir = tempdir().expect("tempdir");
    let tmp_dir = dir.as_ref();
    c.bench_function("ledb_write", |b| {
        b.iter_batched(
            || {
                let src = gen_data(N).collect::<Vec<TestData>>().into_iter();
                let options: Options = serde_json::from_value(json!({
                    "map_size": 100 * 1024 * 1024, // 100 MiB
                    "write_map": true,
                    "map_async": true,
                    "no_lock": true,
                    "no_meta_sync": true,
                    "no_sync": true,
                }))
                .unwrap();

                let storage = Storage::new(tmp_dir, options).unwrap();
                let collection = storage.collection("test").unwrap();
                (src, collection)
            },
            |(src, collection)| collection.load(src).expect("load"),
            BatchSize::SmallInput,
        )
    });
}

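/// Pre-populates a `ledb` collection, then measures dumping all records
/// through a read-only storage handle opened in the setup closure.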
fn ledb_read(c: &mut Criterion) {
    let dir = tempdir().expect("tempdir");
    let tmp_dir = dir.as_ref();

    let write_options: Options = serde_json::from_value(json!({
        "map_size": 100 * 1024 * 1024, // 100 MiB
        "write_map": true,
        "map_async": true,
        "no_lock": true,
        "no_meta_sync": true,
        "no_sync": true,
    }))
    .unwrap();

    let storage = Storage::new(tmp_dir, write_options).unwrap();
    let collection = storage.collection("test").unwrap();
    let items_iter = gen_data(N).collect::<Vec<TestData>>().into_iter();
    collection.load(items_iter).expect("load");
    drop(collection);
    drop(storage);

    c.bench_function("ledb_read", |b| {
        b.iter_batched(
            || {
                let options: Options = serde_json::from_value(json!({
                    "read_only": true,
                    "map_async": true,
                    "no_lock": true,
                }))
                .unwrap();

                let storage = Storage::new(tmp_dir, options).unwrap();
                let collection = storage.collection("test").unwrap();
                collection
            },
            |collection| {
                for item in collection.dump::<TestData>().expect("dump") {
                    drop(item);
                }
            },
            BatchSize::SmallInput,
        )
    });
}

/// Shared Criterion configuration: 40 samples per benchmark.
fn config() -> Criterion {
    Criterion::default().sample_size(40)
}

criterion_group! {name = ledb; config = config(); targets = ledb_read, ledb_write}
criterion_group! {name = benches; config = config(); targets = db_read, db_write}
criterion_main!(benches, ledb);
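
// The benchmarks can be run with `cargo bench`; a name substring passed to
// Criterion (e.g. `cargo bench -- read`) restricts the run to matching
// benchmarks. The `[[bench]]` target name is defined in Cargo.toml and is not
// shown here.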