pallet_stateful_storage/types.rs

1//! Types for the Stateful Storage Pallet
2use crate::Config;
3use alloc::boxed::Box;
4use common_primitives::{
5	node::EIP712Encode,
6	schema::SchemaId,
7	stateful_storage::{PageHash, PageId, PageNonce},
8};
9use frame_support::pallet_prelude::*;
10use frame_system::pallet_prelude::*;
11use lazy_static::lazy_static;
12use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
13use scale_info::TypeInfo;
14use sp_core::bounded::BoundedVec;
15extern crate alloc;
16use alloc::{collections::btree_map::BTreeMap, vec::Vec};
17use core::{
18	cmp::*,
19	fmt::Debug,
20	hash::{Hash, Hasher},
21};
22use sp_core::U256;
23
24use common_primitives::{signatures::get_eip712_encoding_prefix, utils::to_abi_compatible_number};
25use twox_hash::XxHash64;
26
/// Migration page size — number of records handled per migration batch
/// (NOTE(review): confirm unit against the migration code that consumes it)
pub const MIGRATION_PAGE_SIZE: u32 = 20;
/// Current storage version of the pallet.
pub const STATEFUL_STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
/// Pallet storage prefix used when building child-trie/storage keys
pub const PALLET_STORAGE_PREFIX: &[u8] = b"stateful-storage";
/// Itemized storage prefix (appended under the pallet prefix)
pub const ITEMIZED_STORAGE_PREFIX: &[u8] = b"itemized";
/// Paginated storage prefix (appended under the pallet prefix)
pub const PAGINATED_STORAGE_PREFIX: &[u8] = b"paginated";
37
/// MultipartKey type for Itemized storage (keyed by schema only)
pub type ItemizedKey = (SchemaId,);
/// MultipartKey type for Paginated storage (full key: schema + page)
pub type PaginatedKey = (SchemaId, PageId);
/// MultipartKey type for Paginated storage (prefix lookup over all pages of a schema)
pub type PaginatedPrefixKey = (SchemaId,);
/// Itemized page type, bounded by [`Config::MaxItemizedPageSizeBytes`]
pub type ItemizedPage<T> = Page<<T as Config>::MaxItemizedPageSizeBytes>;
/// Paginated page type, bounded by [`Config::MaxPaginatedPageSizeBytes`]
pub type PaginatedPage<T> = Page<<T as Config>::MaxPaginatedPageSizeBytes>;
48
/// Operations on Itemized storage
pub trait ItemizedOperations<T: Config> {
	/// Applies all actions to specified page and returns the updated page
	/// without mutating `self`; failures are reported via [`PageError`].
	fn apply_item_actions(
		&self,
		actions: &[ItemAction<T::MaxItemizedBlobSizeBytes>],
	) -> Result<ItemizedPage<T>, PageError>;

	/// Parses all the items inside an ItemPage.
	/// When `include_header` is true, each returned item slice retains its
	/// leading [`ItemHeader`] bytes; otherwise only the payload is returned.
	fn try_parse(&self, include_header: bool) -> Result<ParsedItemPage, PageError>;
}
/// Defines the actions that can be applied to an Itemized storage
#[derive(
	Clone, Encode, Decode, DecodeWithMemTracking, Debug, TypeInfo, MaxEncodedLen, PartialEq,
)]
#[scale_info(skip_type_params(DataSize))]
#[codec(mel_bound(DataSize: MaxEncodedLen))]
pub enum ItemAction<DataSize: Get<u32> + Clone + core::fmt::Debug + PartialEq> {
	/// Adding new Item into page
	Add {
		/// The data to add, bounded by `DataSize`
		/// (instantiated with `Config::MaxItemizedBlobSizeBytes`)
		data: BoundedVec<u8, DataSize>,
	},
	/// Removing an existing item by index number. Index number starts from 0
	Delete {
		/// Index (0+) of the item to delete, as assigned by page parse order
		index: u16,
	},
}
78
/// This header is used to specify the byte size of an item stored inside the buffer
/// All items will require this header to be inserted before the item data
#[derive(Encode, Decode, PartialEq, MaxEncodedLen, Debug)]
pub struct ItemHeader {
	/// The length of this item, not including the size of this header.
	/// Being a `u16`, it also caps any single item payload at 65535 bytes.
	pub payload_len: u16,
}
86
/// Errors dedicated to parsing or modifying pages
#[derive(Debug, PartialEq)]
pub enum PageError {
	/// Unable to decode the data in the item (carries a static reason string)
	ErrorParsing(&'static str),
	/// Add or Delete operation was not possible (carries a static reason string)
	InvalidAction(&'static str),
	/// Item count overflowed `u16` while parsing a page
	ArithmeticOverflow,
	/// Page byte length over the max size allowed by the page's bound
	PageSizeOverflow,
}
99
100// REMOVED ItemizedSignaturePayload
101
/// Payload containing all necessary fields to verify Itemized related signatures
#[derive(
	Encode,
	Decode,
	DecodeWithMemTracking,
	TypeInfo,
	MaxEncodedLen,
	PartialEq,
	RuntimeDebugNoBound,
	Clone,
)]
#[scale_info(skip_type_params(T))]
pub struct ItemizedSignaturePayloadV2<T: Config> {
	/// Schema id of this storage
	#[codec(compact)]
	pub schema_id: SchemaId,

	/// Hash of targeted page to avoid race conditions
	#[codec(compact)]
	pub target_hash: PageHash,

	/// The block number at which the signed proof will expire
	pub expiration: BlockNumberFor<T>,

	/// Actions to apply to storage from possible: [`ItemAction`]
	/// Count bounded by `Config::MaxItemizedActionsCount`
	pub actions: BoundedVec<
		ItemAction<<T as Config>::MaxItemizedBlobSizeBytes>,
		<T as Config>::MaxItemizedActionsCount,
	>,
}
132
impl<T: Config> EIP712Encode for ItemizedSignaturePayloadV2<T> {
	/// Builds the EIP-712 signing bytes for this payload:
	/// `prefix ‖ domainSeparator ‖ keccak256(typeHash ‖ encoded fields)`.
	/// NOTE(review): signature/consensus-critical — the exact byte layout must
	/// match off-chain signers, so never reorder fields or hash inputs here.
	fn encode_eip_712(&self, chain_id: u32) -> Box<[u8]> {
		lazy_static! {
			// keccak256 of the top-level EIP-712 type string (main struct
			// signature followed by the nested ItemAction signature), computed once.
			static ref MAIN_TYPE_HASH: [u8; 32] =
				sp_io::hashing::keccak_256(b"ItemizedSignaturePayloadV2(uint16 schemaId,uint32 targetHash,uint32 expiration,ItemAction[] actions)ItemAction(string actionType,bytes data,uint16 index)");

			// keccak256 type hash of the nested `ItemAction` struct.
			static ref SUB_TYPE_HASH: [u8; 32] =
				sp_io::hashing::keccak_256(b"ItemAction(string actionType,bytes data,uint16 index)");

			// keccak256 of the `actionType` string values; per EIP-712, `string`
			// fields are encoded as the hash of their contents.
			static ref ITEM_ACTION_ADD: [u8; 32] = sp_io::hashing::keccak_256(b"Add");
			static ref ITEM_ACTION_DELETE: [u8; 32] = sp_io::hashing::keccak_256(b"Delete");

			// keccak256 of empty bytes — encodes the unused `data` field of Delete.
			static ref EMPTY_BYTES_HASH: [u8; 32] = sp_io::hashing::keccak_256([].as_slice());
		}
		// get prefix and domain separator (placeholder verifying-contract address)
		let prefix_domain_separator: Box<[u8]> =
			get_eip712_encoding_prefix("0xcccccccccccccccccccccccccccccccccccccccc", chain_id);
		// Scalar fields are ABI-word encoded; assumes `to_abi_compatible_number`
		// pads to a 32-byte word — TODO confirm against its definition.
		let coded_schema_id = to_abi_compatible_number(self.schema_id);
		let coded_target_hash = to_abi_compatible_number(self.target_hash);
		// Widen via U256 so any configured block-number type fits in a u128.
		let expiration: U256 = self.expiration.into();
		let coded_expiration = to_abi_compatible_number(expiration.as_u128());
		// Per EIP-712, an array field is encoded as keccak256 of the
		// concatenated struct hashes of its elements, in order.
		let coded_actions = {
			let values: Vec<u8> = self
				.actions
				.iter()
				.flat_map(|a| match a {
					// Add: actionType="Add", data=keccak(blob), index=0 (unused).
					ItemAction::Add { data } => sp_io::hashing::keccak_256(
						&[
							SUB_TYPE_HASH.as_slice(),
							ITEM_ACTION_ADD.as_slice(),
							&sp_io::hashing::keccak_256(data.as_slice()),
							[0u8; 32].as_slice(),
						]
						.concat(),
					),
					// Delete: actionType="Delete", data=keccak("") (unused), index set.
					ItemAction::Delete { index } => sp_io::hashing::keccak_256(
						&[
							SUB_TYPE_HASH.as_slice(),
							ITEM_ACTION_DELETE.as_slice(),
							EMPTY_BYTES_HASH.as_slice(),
							to_abi_compatible_number(*index).as_slice(),
						]
						.concat(),
					),
				})
				.collect();
			sp_io::hashing::keccak_256(&values)
		};
		// hashStruct(message): keccak256(typeHash ‖ field encodings, in order).
		let message = sp_io::hashing::keccak_256(
			&[
				MAIN_TYPE_HASH.as_slice(),
				&coded_schema_id,
				&coded_target_hash,
				&coded_expiration,
				&coded_actions,
			]
			.concat(),
		);
		let combined = [prefix_domain_separator.as_ref(), &message].concat();
		combined.into_boxed_slice()
	}
}
196
197// REMOVED PaginatedSignaturePayload
198
/// Payload containing all necessary fields to verify signatures to upsert a Paginated storage
#[derive(
	Encode,
	Decode,
	DecodeWithMemTracking,
	TypeInfo,
	MaxEncodedLen,
	PartialEq,
	RuntimeDebugNoBound,
	Clone,
)]
#[scale_info(skip_type_params(T))]
pub struct PaginatedUpsertSignaturePayloadV2<T: Config> {
	/// Schema id of this storage
	#[codec(compact)]
	pub schema_id: SchemaId,

	/// Page id of this storage
	#[codec(compact)]
	pub page_id: PageId,

	/// Hash of targeted page to avoid race conditions
	#[codec(compact)]
	pub target_hash: PageHash,

	/// The block number at which the signed proof will expire
	pub expiration: BlockNumberFor<T>,

	/// Payload to replace the page with, bounded by
	/// `Config::MaxPaginatedPageSizeBytes`
	pub payload: BoundedVec<u8, <T as Config>::MaxPaginatedPageSizeBytes>,
}
230
impl<T: Config> EIP712Encode for PaginatedUpsertSignaturePayloadV2<T> {
	/// Builds the EIP-712 signing bytes for an upsert payload:
	/// `prefix ‖ domainSeparator ‖ keccak256(typeHash ‖ encoded fields)`.
	/// NOTE(review): signature-critical — byte layout must match off-chain signers.
	fn encode_eip_712(&self, chain_id: u32) -> Box<[u8]> {
		lazy_static! {
			// keccak256 of the EIP-712 type signature string, computed once.
			static ref MAIN_TYPE_HASH: [u8; 32] =
				sp_io::hashing::keccak_256(b"PaginatedUpsertSignaturePayloadV2(uint16 schemaId,uint16 pageId,uint32 targetHash,uint32 expiration,bytes payload)");
		}
		// get prefix and domain separator (placeholder verifying-contract address)
		let prefix_domain_separator: Box<[u8]> =
			get_eip712_encoding_prefix("0xcccccccccccccccccccccccccccccccccccccccc", chain_id);
		// Scalar fields are ABI-word encoded; assumes `to_abi_compatible_number`
		// pads to a 32-byte word — TODO confirm against its definition.
		let coded_schema_id = to_abi_compatible_number(self.schema_id);
		let coded_page_id = to_abi_compatible_number(self.page_id);
		let coded_target_hash = to_abi_compatible_number(self.target_hash);
		// Widen via U256 so any configured block-number type fits in a u128.
		let expiration: U256 = self.expiration.into();
		let coded_expiration = to_abi_compatible_number(expiration.as_u128());
		// Per EIP-712, a dynamic `bytes` field is encoded as keccak256 of its contents.
		let coded_payload = sp_io::hashing::keccak_256(self.payload.as_slice());
		// hashStruct(message): keccak256(typeHash ‖ field encodings, in order).
		let message = sp_io::hashing::keccak_256(
			&[
				MAIN_TYPE_HASH.as_slice(),
				&coded_schema_id,
				&coded_page_id,
				&coded_target_hash,
				&coded_expiration,
				&coded_payload,
			]
			.concat(),
		);
		let combined = [prefix_domain_separator.as_ref(), &message].concat();
		combined.into_boxed_slice()
	}
}
262
263// REMOVED PaginatedDeleteSignaturePayload
264
/// Payload containing all necessary fields to verify signatures to delete a Paginated storage
#[derive(
	Encode,
	Decode,
	DecodeWithMemTracking,
	TypeInfo,
	MaxEncodedLen,
	PartialEq,
	RuntimeDebugNoBound,
	Clone,
)]
#[scale_info(skip_type_params(T))]
pub struct PaginatedDeleteSignaturePayloadV2<T: Config> {
	/// Schema id of this storage
	#[codec(compact)]
	pub schema_id: SchemaId,

	/// Page id of this storage
	#[codec(compact)]
	pub page_id: PageId,

	/// Hash of targeted page to avoid race conditions
	#[codec(compact)]
	pub target_hash: PageHash,

	/// The block number at which the signed proof will expire
	pub expiration: BlockNumberFor<T>,
}
293
impl<T: Config> EIP712Encode for PaginatedDeleteSignaturePayloadV2<T> {
	/// Builds the EIP-712 signing bytes for a delete payload:
	/// `prefix ‖ domainSeparator ‖ keccak256(typeHash ‖ encoded fields)`.
	/// NOTE(review): signature-critical — byte layout must match off-chain signers.
	fn encode_eip_712(&self, chain_id: u32) -> Box<[u8]> {
		lazy_static! {
			// keccak256 of the EIP-712 type signature string, computed once.
			static ref MAIN_TYPE_HASH: [u8; 32] =
				sp_io::hashing::keccak_256(b"PaginatedDeleteSignaturePayloadV2(uint16 schemaId,uint16 pageId,uint32 targetHash,uint32 expiration)");
		}
		// get prefix and domain separator (placeholder verifying-contract address)
		let prefix_domain_separator: Box<[u8]> =
			get_eip712_encoding_prefix("0xcccccccccccccccccccccccccccccccccccccccc", chain_id);
		// Scalar fields are ABI-word encoded; assumes `to_abi_compatible_number`
		// pads to a 32-byte word — TODO confirm against its definition.
		let coded_schema_id = to_abi_compatible_number(self.schema_id);
		let coded_page_id = to_abi_compatible_number(self.page_id);
		let coded_target_hash = to_abi_compatible_number(self.target_hash);
		// Widen via U256 so any configured block-number type fits in a u128.
		let expiration: U256 = self.expiration.into();
		let coded_expiration = to_abi_compatible_number(expiration.as_u128());
		// hashStruct(message): keccak256(typeHash ‖ field encodings, in order).
		let message = sp_io::hashing::keccak_256(
			&[
				MAIN_TYPE_HASH.as_slice(),
				&coded_schema_id,
				&coded_page_id,
				&coded_target_hash,
				&coded_expiration,
			]
			.concat(),
		);
		let combined = [prefix_domain_separator.as_ref(), &message].concat();
		combined.into_boxed_slice()
	}
}
323
/// A generic page of data which supports both Itemized and Paginated
#[derive(Encode, Decode, TypeInfo, MaxEncodedLen, Debug, Default)]
#[scale_info(skip_type_params(PageDataSize))]
#[codec(mel_bound(PageDataSize: MaxEncodedLen))]
pub struct Page<PageDataSize: Get<u32>> {
	/// Incremental nonce to eliminate signature replay attacks
	pub nonce: PageNonce,
	/// Data for the page
	/// - Itemized is limited by [`Config::MaxItemizedPageSizeBytes`]
	/// - Paginated is limited by [`Config::MaxPaginatedPageSizeBytes`]
	pub data: BoundedVec<u8, PageDataSize>,
}
336
/// An internal struct which contains the parsed items in a page
#[derive(Debug, PartialEq)]
pub struct ParsedItemPage<'a> {
	/// Total byte length of the parsed page data
	pub page_size: usize,
	/// A map of item index (0-based, in parse order) to a slice of the blob;
	/// whether the slice includes the [`ItemHeader`] depends on the
	/// `include_header` flag passed to `try_parse`
	pub items: BTreeMap<u16, &'a [u8]>,
}
345
346impl<PageDataSize: Get<u32>> Page<PageDataSize> {
347	/// Check if the page is empty
348	pub fn is_empty(&self) -> bool {
349		self.data.is_empty()
350	}
351
352	/// Retrieve the hash of the page
353	pub fn get_hash(&self) -> PageHash {
354		if self.is_empty() {
355			return PageHash::default();
356		}
357		let mut hasher = XxHash64::with_seed(0);
358		self.hash(&mut hasher);
359		let value_bytes: [u8; 4] =
360			hasher.finish().to_be_bytes()[..4].try_into().expect("incorrect hash size");
361		PageHash::from_be_bytes(value_bytes)
362	}
363}
364
/// PartialEq and Hash should be both derived or implemented manually based on clippy rules
impl<PageDataSize: Get<u32>> Hash for Page<PageDataSize> {
	/// Feeds the SCALE-encoded nonce followed by the raw data bytes into the
	/// hasher. `get_hash` (and hence on-chain page hashes) depends on this
	/// exact write order — do not reorder.
	fn hash<H: Hasher>(&self, state: &mut H) {
		state.write(&self.nonce.encode());
		state.write(&self.data[..]);
	}
}
372
373/// PartialEq and Hash should be both derived or implemented manually based on clippy rules
374impl<PageDataSize: Get<u32>> PartialEq for Page<PageDataSize> {
375	fn eq(&self, other: &Self) -> bool {
376		self.nonce.eq(&other.nonce) && self.data.eq(&other.data)
377	}
378}
379
380/// Deserializing a Page from a BoundedVec is used for the input payload--
381/// so there is no nonce to be read, just the raw data.
382impl<PageDataSize: Get<u32>> From<BoundedVec<u8, PageDataSize>> for Page<PageDataSize> {
383	fn from(bounded: BoundedVec<u8, PageDataSize>) -> Self {
384		Self { nonce: PageNonce::default(), data: bounded }
385	}
386}
387
388/// Deserializing a Page from a `Vec<u8>` is used for reading from storage--
389/// so we must first read the nonce, then the data payload.
390impl<PageDataSize: Get<u32>> TryFrom<Vec<u8>> for Page<PageDataSize> {
391	type Error = ();
392
393	fn try_from(data: Vec<u8>) -> Result<Self, Self::Error> {
394		let nonce: PageNonce =
395			PageNonce::decode(&mut &data[..PageNonce::max_encoded_len()]).map_err(|_| ())?;
396		let bounded: BoundedVec<u8, PageDataSize> =
397			BoundedVec::try_from(data[PageNonce::max_encoded_len()..].to_vec()).map_err(|_| ())?;
398		Ok(Self { nonce, data: bounded })
399	}
400}
401
impl<T: Config> ItemizedOperations<T> for ItemizedPage<T> {
	/// Applies all actions to specified page and returns the updated page
	/// This has O(n) complexity when n is the number of all the bytes in that itemized storage
	fn apply_item_actions(
		&self,
		actions: &[ItemAction<T::MaxItemizedBlobSizeBytes>],
	) -> Result<Self, PageError> {
		// Parse with headers included so surviving items can be copied verbatim.
		let mut parsed = ItemizedOperations::<T>::try_parse(self, true)?;

		// Rebuilt page body; capacity hint from the current page size.
		let mut updated_page_buffer = Vec::with_capacity(parsed.page_size);
		// Newly added items are accumulated here and appended after survivors.
		let mut add_buffer = Vec::new();

		for action in actions {
			match action {
				ItemAction::Delete { index } => {
					// Deleting a non-existent index is an error, not a no-op.
					ensure!(
						parsed.items.contains_key(index),
						PageError::InvalidAction("item index is invalid")
					);
					parsed.items.remove(index);
				},
				ItemAction::Add { data } => {
					// `payload_len` is u16, so blobs over 65535 bytes fail here.
					let header = ItemHeader {
						payload_len: data
							.len()
							.try_into()
							.map_err(|_| PageError::InvalidAction("invalid payload size"))?,
					};
					add_buffer.extend_from_slice(&header.encode()[..]);
					add_buffer.extend_from_slice(&data[..]);
				},
			}
		}

		// since BTreeMap is sorted by key, all items will be kept in their existing order
		for (_, slice) in parsed.items.iter() {
			updated_page_buffer.extend_from_slice(slice);
		}
		updated_page_buffer.append(&mut add_buffer);

		// The bound check on the rebuilt buffer enforces the max page size.
		Ok(ItemizedPage::<T>::from(
			BoundedVec::try_from(updated_page_buffer).map_err(|_| PageError::PageSizeOverflow)?,
		))
	}

	/// Parses all the items inside an ItemPage
	/// This has O(n) complexity when n is the number of all the bytes in that itemized storage
	fn try_parse(&self, include_header: bool) -> Result<ParsedItemPage, PageError> {
		let mut count = 0u16;
		let mut items = BTreeMap::new();
		let mut offset = 0;
		// Walk the buffer as a sequence of [ItemHeader | payload] records.
		while offset < self.data.len() {
			// A header must fit entirely within the remaining bytes.
			ensure!(
				offset + ItemHeader::max_encoded_len() <= self.data.len(),
				PageError::ErrorParsing("wrong header size")
			);
			let header = <ItemHeader>::decode(&mut &self.data[offset..])
				.map_err(|_| PageError::ErrorParsing("decoding header"))?;
			let item_total_length = ItemHeader::max_encoded_len() + header.payload_len as usize;
			// The declared payload must also fit within the remaining bytes.
			ensure!(
				offset + item_total_length <= self.data.len(),
				PageError::ErrorParsing("wrong payload size")
			);

			// Store either the full record or just the payload, per the flag.
			items.insert(
				count,
				match include_header {
					true => &self.data[offset..(offset + item_total_length)],
					false =>
						&self.data
							[(offset + ItemHeader::max_encoded_len())..(offset + item_total_length)],
				},
			);
			offset += item_total_length;
			// Item indices are u16; overflowing the counter is a hard error.
			count = count.checked_add(1).ok_or(PageError::ArithmeticOverflow)?;
		}

		Ok(ParsedItemPage { page_size: self.data.len(), items })
	}
}
481}