1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
// Copyright 2021-2022 Parity Technologies (UK) Ltd.
// This file is dual-licensed as Apache-2.0 or MIT.

use crate::{
	column::{ColId, Salt},
	compress::CompressionType,
	error::{try_io, Error, Result},
};
use rand::Rng;
use std::{collections::HashMap, path::Path};

/// On-disk format version written into new metadata files.
pub const CURRENT_VERSION: u32 = 8;
// TODO on last supported 5, remove MULTIHEAD_V4 and MULTIPART_V4
// TODO on last supported 8, remove XOR with salt in column::hash
/// Oldest on-disk format version this build can still open; anything older is rejected.
const LAST_SUPPORTED_VERSION: u32 = 4;

/// Default minimal value size threshold (per column) above which compression is attempted,
/// used when `Options::compression_threshold` has no entry for the column.
pub const DEFAULT_COMPRESSION_THRESHOLD: u32 = 4096;

/// Database configuration.
#[derive(Clone, Debug)]
pub struct Options {
	/// Path to the database directory.
	pub path: std::path::PathBuf,
	/// Column settings. The vector length determines the number of columns and must
	/// match the existing metadata when opening an existing database.
	pub columns: Vec<ColumnOptions>,
	/// fsync WAL to disk before committing any changes. Provides extra consistency
	/// guarantees. On by default.
	pub sync_wal: bool,
	/// fsync/msync data to disk before removing logs. Provides crash resistance guarantee.
	/// On by default.
	pub sync_data: bool,
	/// Collect database statistics. May have effect on performance.
	pub stats: bool,
	/// Override salt value. If `None` is specified salt is loaded from metadata
	/// or randomly generated when creating a new database.
	pub salt: Option<Salt>,
	/// Minimal value size threshold to attempt compressing a value per column.
	///
	/// Optional. A sensible default (`DEFAULT_COMPRESSION_THRESHOLD`) is used if nothing
	/// is set for a given column.
	pub compression_threshold: HashMap<ColId, u32>,
	#[cfg(any(test, feature = "instrumentation"))]
	/// Always starts background threads. Only available in tests or with the
	/// `instrumentation` feature.
	pub with_background_thread: bool,
	#[cfg(any(test, feature = "instrumentation"))]
	/// Always flushes data from the log to the on-disk data structures. Only available
	/// in tests or with the `instrumentation` feature.
	pub always_flush: bool,
}

/// Database column configuration.
///
/// These settings are persisted in the metadata file and must match on every
/// subsequent open of the database (see `Options::load_and_validate_metadata`).
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ColumnOptions {
	/// Indicates that the column value is the preimage of the key.
	/// This implies that a given value always has the same key.
	/// Enables some optimizations.
	pub preimage: bool,
	/// Indicates that the keys are at least 32 bytes and
	/// the first 32 bytes have uniform distribution.
	/// Allows for skipping additional key hashing.
	pub uniform: bool,
	/// Use reference counting for values. Reference operations are allowed for a column. The value
	/// is deleted when the counter reaches zero. Requires `preimage` to be enabled
	/// (see `ColumnOptions::is_valid`).
	pub ref_counted: bool,
	/// Compression to use for this column.
	pub compression: CompressionType,
	/// Column is configured to use Btree storage. Btree columns allow for ordered key iteration
	/// and key retrieval, but are significantly less performant and require more disk space.
	pub btree_index: bool,
}

/// Database metadata, as loaded from or written to the `metadata` file in the
/// database directory.
#[derive(Clone, Debug)]
pub struct Metadata {
	/// Salt value used for key hashing.
	pub salt: Salt,
	/// On-disk database format version.
	pub version: u32,
	/// Per-column metadata, one entry per column.
	pub columns: Vec<ColumnOptions>,
}

impl ColumnOptions {
	/// Serialize the column settings into the `key: value` line format used by the
	/// metadata file. The inverse of [`ColumnOptions::from_string`].
	fn as_string(&self) -> String {
		let compression = self.compression as u8;
		format!(
			"preimage: {}, uniform: {}, refc: {}, compression: {}, ordered: {}",
			self.preimage, self.uniform, self.ref_counted, compression, self.btree_index,
		)
	}

	/// Check that this combination of options is supported.
	/// `ref_counted` without `preimage` is rejected (logged as an error).
	pub fn is_valid(&self) -> bool {
		let unsupported = self.ref_counted && !self.preimage;
		if unsupported {
			log::error!(target: "parity-db", "Using `ref_counted` option without `preimage` enabled is not supported");
		}
		!unsupported
	}

	/// Parse column settings from a metadata line. Returns `None` when a mandatory
	/// key is missing or unparsable; optional keys fall back to defaults.
	fn from_string(s: &str) -> Option<Self> {
		// Legacy metadata may carry a trailing "sizes: ..." section; only the part
		// before it contains the key/value pairs.
		let vals = s.split("sizes: ").next()?;

		let mut map = HashMap::new();
		for entry in vals.split(", ") {
			let mut kv = entry.split(": ");
			if let (Some(key), Some(value)) = (kv.next(), kv.next()) {
				map.insert(key, value);
			}
		}

		// Mandatory boolean flags; absence or a bad value aborts parsing.
		let parse_flag = |key: &str| map.get(key)?.parse::<bool>().ok();

		// Optional keys (absent in older metadata) default to "no compression" and
		// "not ordered".
		let compression = map.get("compression").and_then(|v| v.parse::<u8>().ok()).unwrap_or(0);
		let btree_index = map.get("ordered").and_then(|v| v.parse().ok()).unwrap_or(false);

		Some(ColumnOptions {
			preimage: parse_flag("preimage")?,
			uniform: parse_flag("uniform")?,
			ref_counted: parse_flag("refc")?,
			compression: compression.into(),
			btree_index,
		})
	}
}

impl Default for ColumnOptions {
	/// All features disabled: a plain hash-indexed column with no compression.
	fn default() -> ColumnOptions {
		ColumnOptions {
			compression: CompressionType::NoCompression,
			preimage: false,
			uniform: false,
			ref_counted: false,
			btree_index: false,
		}
	}
}

impl Options {
	/// Create options for a database at `path` with `num_columns` columns, all using
	/// the default column configuration.
	pub fn with_columns(path: &Path, num_columns: u8) -> Options {
		Options {
			path: path.into(),
			sync_wal: true,
			sync_data: true,
			stats: true,
			salt: None,
			columns: (0..num_columns).map(|_| Default::default()).collect(),
			compression_threshold: HashMap::new(),
			#[cfg(any(test, feature = "instrumentation"))]
			with_background_thread: true,
			#[cfg(any(test, feature = "instrumentation"))]
			always_flush: false,
		}
	}

	// TODO on next major version remove in favor of write_metadata_with_version
	pub fn write_metadata(&self, path: &Path, salt: &Salt) -> Result<()> {
		self.write_metadata_with_version(path, salt, None)
	}

	// TODO on next major version remove in favor of write_metadata_file_with_version
	pub fn write_metadata_file(&self, path: &Path, salt: &Salt) -> Result<()> {
		self.write_metadata_file_with_version(path, salt, None)
	}

	/// Write the metadata file into the database directory `path`, using
	/// `CURRENT_VERSION` unless `version` is given.
	pub fn write_metadata_with_version(
		&self,
		path: &Path,
		salt: &Salt,
		version: Option<u32>,
	) -> Result<()> {
		let mut path = path.to_path_buf();
		path.push("metadata");
		self.write_metadata_file_with_version(&path, salt, version)
	}

	/// Write the metadata to the file at `path`. The format is one `key=value` pair
	/// per line: `version`, `salt` (hex), then one `col<N>` line per column.
	pub fn write_metadata_file_with_version(
		&self,
		path: &Path,
		salt: &Salt,
		version: Option<u32>,
	) -> Result<()> {
		let mut metadata = vec![
			format!("version={}", version.unwrap_or(CURRENT_VERSION)),
			format!("salt={}", hex::encode(salt)),
		];
		metadata.extend(
			self.columns
				.iter()
				.enumerate()
				.map(|(i, col)| format!("col{}={}", i, col.as_string())),
		);
		try_io!(std::fs::write(path, metadata.join("\n")));
		Ok(())
	}

	/// Load the existing metadata and check it against `self`.
	///
	/// If no metadata file exists and `create` is true, fresh metadata is written
	/// using `self.salt` (or a random salt if unset). Column count and each column's
	/// configuration must match the stored metadata exactly.
	pub fn load_and_validate_metadata(&self, create: bool) -> Result<Metadata> {
		let meta = Self::load_metadata(&self.path)?;

		if let Some(meta) = meta {
			if meta.columns.len() != self.columns.len() {
				return Err(Error::InvalidConfiguration(format!(
					"Column config mismatch. Expected {} columns, got {}",
					self.columns.len(),
					meta.columns.len()
				)))
			}

			for (c, (expected, stored)) in
				self.columns.iter().zip(meta.columns.iter()).enumerate()
			{
				if expected != stored {
					return Err(Error::IncompatibleColumnConfig {
						id: c as ColId,
						reason: format!(
							"Column config mismatch. Expected \"{}\", got \"{}\"",
							expected.as_string(),
							stored.as_string(),
						),
					})
				}
			}
			Ok(meta)
		} else if create {
			let s: Salt = self.salt.unwrap_or_else(|| rand::thread_rng().gen());
			self.write_metadata(&self.path, &s)?;
			Ok(Metadata { version: CURRENT_VERSION, columns: self.columns.clone(), salt: s })
		} else {
			Err(Error::DatabaseNotFound)
		}
	}

	/// Load the metadata file from the database directory `path`.
	/// Returns `Ok(None)` when the file does not exist.
	pub fn load_metadata(path: &Path) -> Result<Option<Metadata>> {
		let mut path = path.to_path_buf();
		path.push("metadata");
		Self::load_metadata_file(&path)
	}

	/// Parse the metadata file at `path`. Returns `Ok(None)` when the file does not
	/// exist, `Error::Corruption` on malformed content and `Error::InvalidConfiguration`
	/// for unsupported versions or a missing salt.
	pub fn load_metadata_file(path: &Path) -> Result<Option<Metadata>> {
		use std::{io::BufRead, str::FromStr};

		if !path.exists() {
			return Ok(None)
		}
		let file = std::io::BufReader::new(try_io!(std::fs::File::open(path)));
		let mut salt = None;
		let mut columns = Vec::new();
		let mut version = 0;
		for l in file.lines() {
			let l = try_io!(l);
			let mut vals = l.split('=');
			let k = vals.next().ok_or_else(|| Error::Corruption("Bad metadata".into()))?;
			let v = vals.next().ok_or_else(|| Error::Corruption("Bad metadata".into()))?;
			if k == "version" {
				version =
					u32::from_str(v).map_err(|_| Error::Corruption("Bad version string".into()))?;
			} else if k == "salt" {
				let salt_slice =
					hex::decode(v).map_err(|_| Error::Corruption("Bad salt string".into()))?;
				// Reject a wrong-length salt explicitly: `copy_from_slice` would
				// panic on a length mismatch, turning file corruption into a crash.
				let mut s = Salt::default();
				if salt_slice.len() != s.len() {
					return Err(Error::Corruption("Bad salt string".into()))
				}
				s.copy_from_slice(&salt_slice);
				salt = Some(s);
			} else if k.starts_with("col") {
				let col = ColumnOptions::from_string(v)
					.ok_or_else(|| Error::Corruption("Bad column metadata".into()))?;
				columns.push(col);
			}
		}
		if version < LAST_SUPPORTED_VERSION {
			return Err(Error::InvalidConfiguration(format!(
				"Unsupported database version {version}. Expected {CURRENT_VERSION}"
			)))
		}
		let salt = salt.ok_or_else(|| Error::InvalidConfiguration("Missing salt value".into()))?;
		Ok(Some(Metadata { version, columns, salt }))
	}

	/// Check that every column configuration is supported.
	pub fn is_valid(&self) -> bool {
		self.columns.iter().all(ColumnOptions::is_valid)
	}
}

impl Metadata {
	/// Set of column ids that require migration for this metadata version;
	/// currently always empty.
	pub fn columns_to_migrate(&self) -> std::collections::BTreeSet<u8> {
		Default::default()
	}
}