This commit is contained in:
27
app/Cargo.toml
Normal file
27
app/Cargo.toml
Normal file
@@ -0,0 +1,27 @@
|
||||
[package]
name = "chgk_ledb"
version = "0.1.0"
authors = ["Dmitry <b4tm4n@mail.ru>"]
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[[bench]]
name = "db_bench"
# criterion provides its own main; disable the default libtest harness.
harness = false

[dependencies]
chgk_ledb_lib = { path = "../lib" }
serde_json = "1.0"
# Pinned fork revision so all ledb crates stay in lockstep.
ledb = { git = "https://github.com/b4tman/ledb.git", rev = "a646b90e", package = "ledb" }
zip = "0.6"
rand = "0.8"
clap = { version = "3.2.22", features = ["derive"] }

[dev-dependencies]
criterion = "0.4.0"
tempfile = "3.3"
bincode = "^2.0.0-rc.2"
# NOTE: `ledb` itself is already a normal dependency above and is therefore
# visible to tests/benches; the previous duplicate dev-dependency entry
# (identical git/rev/package spec) was redundant and has been removed.
ledb-derive = {git = "https://github.com/b4tman/ledb.git", rev="a646b90e", package="ledb-derive"}
ledb-types = {git = "https://github.com/b4tman/ledb.git", rev="a646b90e", package="ledb-types"}
|
||||
93
app/benches/db_bench.rs
Normal file
93
app/benches/db_bench.rs
Normal file
@@ -0,0 +1,93 @@
|
||||
#[macro_use]
|
||||
extern crate criterion;
|
||||
extern crate bincode;
|
||||
extern crate tempfile;
|
||||
|
||||
use chgk_ledb_lib::db;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use db::{Reader, Writer, WriterOpts};
|
||||
|
||||
use criterion::{BatchSize, Criterion};
|
||||
use tempfile::tempdir;
|
||||
|
||||
/// Synthetic record used by the read/write benchmarks.
#[derive(bincode::Encode, bincode::Decode, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct TestData {
    // Fixed-size numeric payload.
    num: u128,
    // Variable-length string payload (length varies per record in `gen_data`).
    test: String,
}
|
||||
|
||||
// Number of records generated for each benchmark run.
const N: usize = 4096;
|
||||
|
||||
fn gen_data(count: usize) -> impl Iterator<Item = TestData> {
|
||||
(0..count)
|
||||
.into_iter()
|
||||
.map(|i| 143 + i as u128)
|
||||
.map(|i| TestData {
|
||||
num: i,
|
||||
test: "test ---- Test ____".repeat(123 + i as usize % 15),
|
||||
})
|
||||
}
|
||||
|
||||
fn prepare_db_writer(path: &PathBuf) -> Writer<TestData> {
|
||||
let opts = WriterOpts {
|
||||
compress_lvl: 1,
|
||||
data_buf_size: 100 * 1024 * 1024,
|
||||
out_buf_size: 100 * 1024 * 1024,
|
||||
current_buf_size: 10240,
|
||||
};
|
||||
|
||||
Writer::new(path, opts).expect("new writer")
|
||||
}
|
||||
|
||||
fn db_read(c: &mut Criterion) {
|
||||
let dir = tempdir().expect("tempdir");
|
||||
let tmpfile = dir.path().join("test.tmp");
|
||||
let mut writer = prepare_db_writer(&tmpfile);
|
||||
|
||||
let mut items_iter = gen_data(N).collect::<Vec<TestData>>().into_iter();
|
||||
writer.load(&mut items_iter).unwrap();
|
||||
writer.finish().unwrap();
|
||||
|
||||
c.bench_function("read", |b| {
|
||||
b.iter_batched(
|
||||
|| {
|
||||
let reader: Reader<TestData> = Reader::new(&tmpfile, 2048).expect("new reader");
|
||||
reader
|
||||
},
|
||||
|mut reader| {
|
||||
let mut reader_iter = reader.iter();
|
||||
while let Some(item) = reader_iter.next() {
|
||||
drop(item);
|
||||
}
|
||||
},
|
||||
BatchSize::SmallInput,
|
||||
)
|
||||
});
|
||||
}
|
||||
|
||||
fn db_write(c: &mut Criterion) {
|
||||
let dir = tempdir().expect("tempdir");
|
||||
let tmpfile = dir.path().join("test.tmp");
|
||||
c.bench_function("write", |b| {
|
||||
b.iter_batched(
|
||||
|| {
|
||||
let src = gen_data(N).collect::<Vec<TestData>>().into_iter();
|
||||
let writer = prepare_db_writer(&tmpfile);
|
||||
(src, writer)
|
||||
},
|
||||
|(mut src, mut writer)| {
|
||||
writer.load(&mut src).unwrap();
|
||||
writer.finish().unwrap();
|
||||
},
|
||||
BatchSize::SmallInput,
|
||||
)
|
||||
});
|
||||
}
|
||||
|
||||
fn config() -> Criterion {
|
||||
Criterion::default().sample_size(40)
|
||||
}
|
||||
|
||||
criterion_group! {name=benches; config = config(); targets = db_read, db_write}
|
||||
criterion_main!(benches);
|
||||
270
app/src/main.rs
Normal file
270
app/src/main.rs
Normal file
@@ -0,0 +1,270 @@
|
||||
#[macro_use]
|
||||
extern crate serde_json;
|
||||
use clap::{Parser, Subcommand};
|
||||
use rand::seq::IteratorRandom;
|
||||
|
||||
use std::io;
|
||||
use std::path::PathBuf;
|
||||
use std::time::Instant;
|
||||
use std::{fs, sync::mpsc, thread};
|
||||
|
||||
use ledb::{Options, Storage};
|
||||
|
||||
use chgk_ledb_lib::db;
|
||||
use chgk_ledb_lib::questions;
|
||||
use chgk_ledb_lib::source;
|
||||
|
||||
use crate::questions::{Question, QuestionsConverter};
|
||||
use crate::source::ReadSourceQuestionsBatches;
|
||||
|
||||
const ZIP_FILENAME: &str = "json.zip";
|
||||
const NEW_DB_FILENAME: &str = "test.bin";
|
||||
const DB_DIR: &str = "db";
|
||||
|
||||
// CLI subcommands. Plain `//` comments are used deliberately: `///` doc
// comments on clap-derive items become --help text and would change the
// binary's user-visible output.
#[derive(Subcommand, Debug)]
enum Command {
    // Convert the zip source into the ledb storage (see db_writer_task).
    Write,
    // Compact the ledb storage on disk.
    Compact,
    // Print one question from the ledb storage; id 0 means "pick randomly".
    Print {
        #[clap(value_parser, default_value = "0")]
        id: u32,
    },
    // Print one question straight from the zip source; 0 for either field
    // means "pick randomly".
    ZipPrint {
        #[clap(value_parser, default_value = "0")]
        file_num: usize,
        #[clap(value_parser, default_value = "0")]
        num: usize,
    },
    // Convert the zip source into the new binary db format (NEW_DB_FILENAME).
    Write2,
    // Print one question from the new binary db; id 0 means "pick randomly".
    Print2 {
        #[clap(value_parser, default_value = "0")]
        id: u32,
    },
}
|
||||
|
||||
// Top-level CLI: one subcommand plus a global --measure flag.
// (`//` comments on purpose — `///` would alter clap's --help output.)
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
#[clap(propagate_version = true)]
struct Cli {
    #[clap(subcommand)]
    command: Command,
    // When set, the elapsed wall-clock time of the action is printed to stderr.
    #[clap(short, long, action)]
    measure: bool,
}
|
||||
|
||||
fn zip_reader_task(tx: mpsc::Sender<Question>) {
|
||||
let zip_file = fs::File::open(ZIP_FILENAME).unwrap();
|
||||
let zip_reader = io::BufReader::new(zip_file);
|
||||
let archive = zip::ZipArchive::new(zip_reader).unwrap();
|
||||
let mut source_questions = archive.source_questions();
|
||||
|
||||
let questions = source_questions
|
||||
.convert()
|
||||
.enumerate()
|
||||
.map(|(num, mut question)| {
|
||||
question.num = 1 + num as u32;
|
||||
question
|
||||
});
|
||||
for question in questions {
|
||||
let res = tx.send(question);
|
||||
if res.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
println!("read done");
|
||||
}
|
||||
// Consumer thread: receive `Question`s from `rx` and bulk-load them into a
// fresh ledb storage under DB_DIR, then sync to disk and print storage stats.
fn db_writer_task(rx: mpsc::Receiver<Question>) {
    // Remove a stale data file so the load starts from an empty storage.
    let out_file: PathBuf = [DB_DIR, "data.mdb"].into_iter().collect();
    match fs::metadata(&out_file) {
        Ok(x) if x.is_file() => {
            fs::remove_file(&out_file).unwrap();
            println!(r#""{}" removed"#, out_file.to_str().unwrap());
        }
        _ => {}
    };

    // Durability is deliberately relaxed (no_sync / no_meta_sync / no_lock)
    // to speed up the bulk load; an explicit sync(true) follows below.
    let options: Options = serde_json::from_value(json!({
        "map_size": 900 * 1024 * 1024, // 900mb
        "write_map": true,
        "map_async": true,
        "no_lock": true,
        "no_meta_sync": true,
        "no_sync": true,
    }))
    .unwrap();

    let storage = Storage::new(DB_DIR, options).unwrap();
    let collection = storage.collection("questions").unwrap();

    // Presumably `load` consumes from the receiver until the sender
    // disconnects — TODO confirm against chgk_ledb_lib/ledb fork.
    let count = collection.load(rx).expect("load");

    println!("loaded {count}");

    // Force the relaxed-durability writes onto disk before reporting stats.
    println!("syncing to disk...");
    storage.sync(true).unwrap();

    print!("stats: ");
    let stats = storage.stat().unwrap();
    println!("{:?}", stats);

    // Explicit drop closes the storage before announcing completion.
    drop(storage);
    println!("write done");
}
|
||||
|
||||
fn write_db() {
|
||||
let (tx, rx) = mpsc::channel::<Question>();
|
||||
[
|
||||
thread::spawn(move || zip_reader_task(tx)),
|
||||
thread::spawn(move || db_writer_task(rx)),
|
||||
]
|
||||
.into_iter()
|
||||
.for_each(|handle| handle.join().expect("thread panic"));
|
||||
println!("all done");
|
||||
}
|
||||
|
||||
fn print_question_from<F>(get_q: F)
|
||||
where
|
||||
F: FnOnce() -> Option<Question>,
|
||||
{
|
||||
let q = get_q().unwrap();
|
||||
println!("{:#?}", q)
|
||||
}
|
||||
|
||||
// Pick a question directly from the zip source.
// `file_num` and `num` are 1-based; 0 means "choose randomly".
fn read_from_zip(file_num: usize, mut num: usize) -> Option<Question> {
    let mut rng = rand::thread_rng();
    let zip_file = fs::File::open(ZIP_FILENAME).unwrap();
    let zip_reader = io::BufReader::new(zip_file);
    let archive = zip::ZipArchive::new(zip_reader).unwrap();

    let mut source_questions = archive.source_questions();
    // 0 => random source file; otherwise the file_num-th file (1-based).
    let (filename, batch) = if file_num == 0 {
        source_questions.choose(&mut rng).unwrap()
    } else {
        source_questions.nth(file_num - 1).unwrap()
    };
    let mut batch = batch.unwrap();
    batch.filename = filename;
    let questions: Vec<Question> = batch.into();
    if num == 0 {
        // Random 1-based question index within this batch.
        num = (1..=questions.len()).choose(&mut rng).unwrap();
    }
    // NOTE(review): indexing panics if a caller-supplied `num` exceeds the
    // batch size; `questions.get(num - 1).cloned()` would return None instead.
    Some(questions[num - 1].clone())
}
|
||||
|
||||
fn compact_db() {
|
||||
let options: Options = serde_json::from_value(json!({
|
||||
"write_map": true,
|
||||
"map_async": true,
|
||||
"no_lock": true,
|
||||
"no_meta_sync": true,
|
||||
"no_sync": true,
|
||||
"compact": true,
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let storage = Storage::new(DB_DIR, options).unwrap();
|
||||
|
||||
storage.sync(true).unwrap();
|
||||
let stats = storage.stat().unwrap();
|
||||
println!("{:?}", stats);
|
||||
drop(storage);
|
||||
}
|
||||
|
||||
fn read_from_db(mut id: u32) -> Option<Question> {
|
||||
let options: Options = serde_json::from_value(json!({
|
||||
"read_only": true,
|
||||
"map_async": true,
|
||||
"no_lock": true,
|
||||
}))
|
||||
.unwrap();
|
||||
|
||||
let storage = Storage::new(DB_DIR, options).unwrap();
|
||||
let collection = storage.collection("questions").unwrap();
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
if id == 0 {
|
||||
let last_id = collection.last_id().unwrap();
|
||||
id = (1..=last_id).choose(&mut rng).unwrap();
|
||||
}
|
||||
|
||||
collection.get::<Question>(id).unwrap()
|
||||
}
|
||||
|
||||
/// Measure and return the wall-clock time spent in `func`, in seconds.
pub fn measure<F: FnOnce()>(func: F) -> f64 {
    let start = Instant::now();
    func();
    // `as_secs_f64` replaces the previous manual
    // `secs + subsec_nanos / 1e9` arithmetic with the stdlib equivalent.
    start.elapsed().as_secs_f64()
}
|
||||
|
||||
pub fn measure_and_print<F: FnOnce()>(func: F) {
|
||||
let m = measure(func);
|
||||
eprintln!("{}", m);
|
||||
}
|
||||
|
||||
fn main() {
    let args = Cli::parse();

    // Map each subcommand to a deferred action; boxing as `dyn FnOnce`
    // unifies the differing closure types from the match arms.
    let mut action: Box<dyn FnOnce()> = match &args.command {
        Command::Write => Box::new(write_db),
        Command::Compact => Box::new(compact_db),
        Command::Print { id } => {
            let get_question = Box::new(|| read_from_db(*id));
            Box::new(|| print_question_from(get_question))
        }
        Command::ZipPrint { file_num, num } => {
            let get_question = Box::new(|| read_from_zip(*file_num, *num));
            Box::new(|| print_question_from(get_question))
        }
        Command::Write2 => Box::new(write_db2),
        Command::Print2 { id } => {
            let get_question = Box::new(|| read_from_db2(*id));
            Box::new(|| print_question_from(get_question))
        }
    };

    // With --measure, wrap the chosen action so its wall-clock time is
    // printed to stderr after it runs.
    if args.measure {
        action = Box::new(|| measure_and_print(action));
    }

    action();
}
|
||||
|
||||
fn read_from_db2(id: u32) -> Option<Question> {
|
||||
let mut reader: db::Reader<Question> =
|
||||
db::Reader::new(NEW_DB_FILENAME, 2048).expect("new db reader");
|
||||
|
||||
let mut questions = reader.iter();
|
||||
|
||||
match id {
|
||||
0 => {
|
||||
let mut rng = rand::thread_rng();
|
||||
questions.choose(&mut rng)
|
||||
}
|
||||
_ => questions.nth((id - 1) as usize),
|
||||
}
|
||||
}
|
||||
fn write_db2() {
|
||||
let (tx, rx) = mpsc::channel::<Question>();
|
||||
[
|
||||
thread::spawn(move || zip_reader_task(tx)),
|
||||
thread::spawn(move || db_writer2_task(rx)),
|
||||
]
|
||||
.into_iter()
|
||||
.for_each(|handle| handle.join().expect("thread panic"));
|
||||
println!("all done");
|
||||
}
|
||||
fn db_writer2_task(rx: mpsc::Receiver<Question>) {
|
||||
let writer_opts = db::WriterOpts::default();
|
||||
let mut writer: db::Writer<Question> =
|
||||
db::Writer::new(NEW_DB_FILENAME, writer_opts).expect("new db writer");
|
||||
|
||||
writer
|
||||
.load(&mut rx.iter())
|
||||
.unwrap_or_else(|e| panic!("db writer load, {e:#?}"));
|
||||
|
||||
writer.finish().expect("db writer finish");
|
||||
|
||||
println!("write done");
|
||||
}
|
||||
Reference in New Issue
Block a user