Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions api-augment/dist/types/parachain/interfaces/augment-api-query.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -230,6 +230,18 @@ declare module '@polkadot/api-base/types/storage' {
* the file. The value is `false` for volunteered-only and `true` for confirmed.
* This map is created when the first BSP volunteers and removed when the storage
* request is cleaned up.
*
* ## Benchmarking note
*
* This is a [`BoundedBTreeMap`] whose PoV cost is always charged at [`MaxEncodedLen`]
* (i.e. assuming `MaxBspVolunteers` entries) regardless of how many BSPs are actually
* present. Currently, all extrinsic paths interact with this map via whole-map
* operations (`get`, `set`, `remove`) and do **not** perform per-BSP storage
* reads/writes, so the number of volunteers has no statistically significant impact
* on ref_time or PoV. If future logic is added that iterates over individual BSPs
* in this map and performs additional storage reads or writes for each one, the
* volunteer count **must** be re-introduced as a `Linear` benchmark component so
* the weight function can account for the per-BSP cost.
**/
storageRequestBsps: AugmentedQuery<ApiType, (arg: H256 | string | Uint8Array) => Observable<Option<BTreeMap<H256, bool>>>, [H256]> & QueryableStorageEntry<ApiType, [H256]>;
/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -315,6 +315,18 @@ declare module '@polkadot/api-base/types/storage' {
* the file. The value is `false` for volunteered-only and `true` for confirmed.
* This map is created when the first BSP volunteers and removed when the storage
* request is cleaned up.
*
* ## Benchmarking note
*
* This is a [`BoundedBTreeMap`] whose PoV cost is always charged at [`MaxEncodedLen`]
* (i.e. assuming `MaxBspVolunteers` entries) regardless of how many BSPs are actually
* present. Currently, all extrinsic paths interact with this map via whole-map
* operations (`get`, `set`, `remove`) and do **not** perform per-BSP storage
* reads/writes, so the number of volunteers has no statistically significant impact
* on ref_time or PoV. If future logic is added that iterates over individual BSPs
* in this map and performs additional storage reads or writes for each one, the
* volunteer count **must** be re-introduced as a `Linear` benchmark component so
* the weight function can account for the per-BSP cost.
**/
storageRequestBsps: AugmentedQuery<ApiType, (arg: H256 | string | Uint8Array) => Observable<Option<BTreeMap<H256, bool>>>, [H256]> & QueryableStorageEntry<ApiType, [H256]>;
/**
Expand Down
2 changes: 1 addition & 1 deletion api-augment/metadata-sh-parachain.json

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion api-augment/metadata-sh-solochain-evm.json

Large diffs are not rendered by default.

12 changes: 12 additions & 0 deletions api-augment/src/parachain/interfaces/augment-api-query.ts
Original file line number Diff line number Diff line change
Expand Up @@ -435,6 +435,18 @@ declare module "@polkadot/api-base/types/storage" {
* the file. The value is `false` for volunteered-only and `true` for confirmed.
* This map is created when the first BSP volunteers and removed when the storage
* request is cleaned up.
*
* ## Benchmarking note
*
* This is a [`BoundedBTreeMap`] whose PoV cost is always charged at [`MaxEncodedLen`]
* (i.e. assuming `MaxBspVolunteers` entries) regardless of how many BSPs are actually
* present. Currently, all extrinsic paths interact with this map via whole-map
* operations (`get`, `set`, `remove`) and do **not** perform per-BSP storage
* reads/writes, so the number of volunteers has no statistically significant impact
* on ref_time or PoV. If future logic is added that iterates over individual BSPs
* in this map and performs additional storage reads or writes for each one, the
* volunteer count **must** be re-introduced as a `Linear` benchmark component so
* the weight function can account for the per-BSP cost.
**/
storageRequestBsps: AugmentedQuery<
ApiType,
Expand Down
12 changes: 12 additions & 0 deletions api-augment/src/solochain-evm/interfaces/augment-api-query.ts
Original file line number Diff line number Diff line change
Expand Up @@ -576,6 +576,18 @@ declare module "@polkadot/api-base/types/storage" {
* the file. The value is `false` for volunteered-only and `true` for confirmed.
* This map is created when the first BSP volunteers and removed when the storage
* request is cleaned up.
*
* ## Benchmarking note
*
* This is a [`BoundedBTreeMap`] whose PoV cost is always charged at [`MaxEncodedLen`]
* (i.e. assuming `MaxBspVolunteers` entries) regardless of how many BSPs are actually
* present. Currently, all extrinsic paths interact with this map via whole-map
* operations (`get`, `set`, `remove`) and do **not** perform per-BSP storage
* reads/writes, so the number of volunteers has no statistically significant impact
* on ref_time or PoV. If future logic is added that iterates over individual BSPs
* in this map and performs additional storage reads or writes for each one, the
* volunteer count **must** be re-introduced as a `Linear` benchmark component so
* the weight function can account for the per-BSP cost.
**/
storageRequestBsps: AugmentedQuery<
ApiType,
Expand Down
120 changes: 37 additions & 83 deletions pallets/file-system/src/benchmarking.rs
Original file line number Diff line number Diff line change
Expand Up @@ -622,30 +622,20 @@ mod benchmarks {
.unwrap()
},
>,
v: Linear<
1,
{
Into::<u64>::into(T::MaxBspVolunteers::get())
.try_into()
.unwrap()
},
>,
r: Linear<
1,
{
Into::<u64>::into(T::MaxReplicationTarget::get())
.try_into()
.unwrap()
},
>,
) -> Result<(), BenchmarkError> {
/*********** Setup initial conditions: ***********/
// n = buckets, m = file keys accept per bucket, l = file keys reject per bucket, v = volunteers, r = replication target
// n = buckets, m = file keys to accept per bucket, l = file keys to reject per bucket
let amount_of_buckets_to_accept: u32 = n.into();
let amount_of_file_keys_to_accept_per_bucket: u32 = m.into();
let amount_of_file_keys_to_reject_per_bucket: u32 = l.into();
let volunteer_count: u32 = v.into();
let replication_target: u32 = r.into();
// Volunteer count and replication target are fixed at worst-case values instead of being
// benchmark `Linear` components. They were removed as components because they impact
// neither ref_time (no per-BSP computation or storage I/O — the accept, reject, and
// cleanup paths only perform whole-map operations: `get`, `set`, `remove`) nor PoV
// (`StorageRequestBsps` is a `BoundedBTreeMap` whose PoV cost is always charged at
// `MaxEncodedLen` regardless of actual entries).
let volunteer_count: u32 = T::MaxBspVolunteers::get();
let replication_target: u32 = T::MaxReplicationTarget::get();

// Get the user account for the generated proofs and load it up with some balance.
let user_as_bytes: [u8; 32] = get_user_account().clone().try_into().unwrap();
Expand Down Expand Up @@ -994,30 +984,18 @@ mod benchmarks {
}

#[benchmark]
fn bsp_confirm_storing(
n: Linear<1, 10>,
v: Linear<
1,
{
Into::<u64>::into(T::MaxBspVolunteers::get())
.try_into()
.unwrap()
},
>,
r: Linear<
1,
{
Into::<u64>::into(T::MaxReplicationTarget::get())
.try_into()
.unwrap()
},
>,
) -> Result<(), BenchmarkError> {
fn bsp_confirm_storing(n: Linear<1, 10>) -> Result<(), BenchmarkError> {
/*********** Setup initial conditions: ***********/
// Get from the linear variables: file count n, volunteer count v, replication target r
// Get from the linear variables: file count n
let amount_of_files_to_confirm_storing: u32 = n.into();
let volunteer_count: u32 = v.into();
let replication_target: u32 = r.into();
// Volunteer count and replication target are fixed at worst-case values instead of being
// benchmark `Linear` components. They were removed as components because they impact
// neither ref_time (no per-BSP computation or storage I/O — the confirm-storing path
// only performs whole-map operations: `get`, `set`, `remove` on `StorageRequestBsps`)
// nor PoV (`StorageRequestBsps` is a `BoundedBTreeMap` whose PoV cost is always charged
// at `MaxEncodedLen` regardless of actual entries).
let volunteer_count: u32 = T::MaxBspVolunteers::get();
let replication_target: u32 = T::MaxReplicationTarget::get();

// Get the user account for the generated proofs and load it up with some balance.
let user_as_bytes: [u8; 32] = get_user_account().clone().try_into().unwrap();
Expand Down Expand Up @@ -2548,29 +2526,17 @@ mod benchmarks {
}

#[benchmark]
fn delete_files_bucket(
n: Linear<1, 10>,
v: Linear<
1,
{
Into::<u64>::into(T::MaxBspVolunteers::get())
.try_into()
.unwrap()
},
>,
r: Linear<
1,
{
Into::<u64>::into(T::MaxReplicationTarget::get())
.try_into()
.unwrap()
},
>,
) -> Result<(), BenchmarkError> {
fn delete_files_bucket(n: Linear<1, 10>) -> Result<(), BenchmarkError> {
/*********** Setup initial conditions: ***********/
let number_of_file_keys: u32 = n.into();
let volunteer_count: u32 = v.into();
let replication_target: u32 = r.into();
// Volunteer count and replication target are fixed at worst-case values instead of being
// benchmark `Linear` components. They were removed as components because they impact
// neither ref_time (no per-BSP computation or storage I/O — the delete-files-from-bucket
// path calls `cleanup_storage_request` which only performs a single
// `StorageRequestBsps::remove()`) nor PoV (`StorageRequestBsps` is a `BoundedBTreeMap`
// whose PoV cost is always charged at `MaxEncodedLen` regardless of actual entries).
let volunteer_count: u32 = T::MaxBspVolunteers::get();
let replication_target: u32 = T::MaxReplicationTarget::get();

// Get the user account for the generated proofs and load it up with some balance.
let user_as_bytes: [u8; 32] = get_user_account().clone().try_into().unwrap();
Expand Down Expand Up @@ -2890,29 +2856,17 @@ mod benchmarks {
}

#[benchmark]
fn delete_files_bsp(
n: Linear<1, 10>,
v: Linear<
1,
{
Into::<u64>::into(T::MaxBspVolunteers::get())
.try_into()
.unwrap()
},
>,
r: Linear<
1,
{
Into::<u64>::into(T::MaxReplicationTarget::get())
.try_into()
.unwrap()
},
>,
) -> Result<(), BenchmarkError> {
fn delete_files_bsp(n: Linear<1, 10>) -> Result<(), BenchmarkError> {
/*********** Setup initial conditions: ***********/
let number_of_file_keys: u32 = n.into();
let volunteer_count: u32 = v.into();
let replication_target: u32 = r.into();
// Volunteer count and replication target are fixed at worst-case values instead of being
// benchmark `Linear` components. They were removed as components because they impact
// neither ref_time (no per-BSP computation or storage I/O — the delete-files-from-bsp
// path calls `cleanup_storage_request` which only performs a single
// `StorageRequestBsps::remove()`) nor PoV (`StorageRequestBsps` is a `BoundedBTreeMap`
// whose PoV cost is always charged at `MaxEncodedLen` regardless of actual entries).
let volunteer_count: u32 = T::MaxBspVolunteers::get();
let replication_target: u32 = T::MaxReplicationTarget::get();

// Get the user account for the generated proofs and load it up with some balance.
let user_as_bytes: [u8; 32] = get_user_account().clone().try_into().unwrap();
Expand Down
92 changes: 19 additions & 73 deletions pallets/file-system/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -444,6 +444,18 @@ pub mod pallet {
/// the file. The value is `false` for volunteered-only and `true` for confirmed.
/// This map is created when the first BSP volunteers and removed when the storage
/// request is cleaned up.
///
/// ## Benchmarking note
///
/// This is a [`BoundedBTreeMap`] whose PoV cost is always charged at [`MaxEncodedLen`]
/// (i.e. assuming `MaxBspVolunteers` entries) regardless of how many BSPs are actually
/// present. Currently, all extrinsic paths interact with this map via whole-map
/// operations (`get`, `set`, `remove`) and do **not** perform per-BSP storage
/// reads/writes, so the number of volunteers has no statistically significant impact
/// on ref_time or PoV. If future logic is added that iterates over individual BSPs
/// in this map and performs additional storage reads or writes for each one, the
/// volunteer count **must** be re-introduced as a `Linear` benchmark component so
/// the weight function can account for the per-BSP cost.
#[pallet::storage]
pub type StorageRequestBsps<T: Config> = StorageMap<
_,
Expand Down Expand Up @@ -1338,50 +1350,16 @@ pub mod pallet {
/// wasn't storing it before.
#[pallet::call_index(8)]
#[pallet::weight({
let max_v = T::MaxBspVolunteers::get();
let max_r = T::MaxReplicationTarget::get();
let mut total_weight: Weight = Weight::zero();
let mut total_reads: u64 = 0;
// TODO: Replace iteration + storage reads with user-provided weight hints validated in extrinsic.
// Also make `StorageRequestMspResponse` and `reject` field `BoundedVec` (currently unbounded `Vec`).
for bucket_response in storage_request_msp_response.iter() {
let amount_of_files_to_accept = bucket_response.accept.as_ref().map_or(0, |accept_response| accept_response.file_keys_and_proofs.len());
let amount_of_files_to_reject = bucket_response.reject.len();

let mut worst_v: u32 = 0;
let mut worst_r: u32 = 0;
if let Some(accept) = &bucket_response.accept {
for fkp in accept.file_keys_and_proofs.iter() {
total_reads += 1;
if let Some(meta) = StorageRequests::<T>::get(&fkp.file_key) {
let v: u64 = meta.bsps_volunteered.into();
let r: u64 = meta.bsps_required.into();
worst_v = worst_v.max(v as u32);
worst_r = worst_r.max(r as u32);
} else {
worst_v = max_v;
worst_r = max_r;
}
}
}
for rejected in bucket_response.reject.iter() {
total_reads += 1;
if let Some(meta) = StorageRequests::<T>::get(&rejected.file_key) {
let v: u64 = meta.bsps_volunteered.into();
let r: u64 = meta.bsps_required.into();
worst_v = worst_v.max(v as u32);
worst_r = worst_r.max(r as u32);
} else {
worst_v = max_v;
worst_r = max_r;
}
}

total_weight = total_weight.saturating_add(
T::WeightInfo::msp_respond_storage_requests_multiple_buckets(1, amount_of_files_to_accept as u32, amount_of_files_to_reject as u32, worst_v, worst_r)
T::WeightInfo::msp_respond_storage_requests_multiple_buckets(1, amount_of_files_to_accept as u32, amount_of_files_to_reject as u32)
);
}
total_weight.saturating_add(T::DbWeight::get().reads(total_reads))
total_weight
})]
pub fn msp_respond_storage_requests_multiple_buckets(
origin: OriginFor<T>,
Expand Down Expand Up @@ -1448,23 +1426,7 @@ pub mod pallet {
#[pallet::call_index(11)]
#[pallet::weight({
let n = file_keys_and_proofs.len() as u32;
let max_v = T::MaxBspVolunteers::get();
let max_r = T::MaxReplicationTarget::get();
let (mut worst_v, mut worst_r): (u32, u32) = (0, 0);
// TODO: Replace iteration + storage reads with user-provided weight hints validated in extrinsic.
for fkp in file_keys_and_proofs.iter() {
if let Some(meta) = StorageRequests::<T>::get(&fkp.file_key) {
let v: u64 = meta.bsps_volunteered.into();
let r: u64 = meta.bsps_required.into();
worst_v = worst_v.max(v as u32);
worst_r = worst_r.max(r as u32);
} else {
worst_v = max_v;
worst_r = max_r;
}
}
T::WeightInfo::bsp_confirm_storing(n, worst_v, worst_r)
.saturating_add(T::DbWeight::get().reads(n as u64))
T::WeightInfo::bsp_confirm_storing(n)
})]
pub fn bsp_confirm_storing(
origin: OriginFor<T>,
Expand Down Expand Up @@ -1743,27 +1705,11 @@ pub mod pallet {
#[pallet::call_index(17)]
#[pallet::weight({
let n = file_deletions.len() as u32;
let max_v = T::MaxBspVolunteers::get();
let max_r = T::MaxReplicationTarget::get();
let (mut worst_v, mut worst_r): (u32, u32) = (0, 0);
// TODO: Replace iteration + storage reads with user-provided weight hints validated in extrinsic.
for deletion in file_deletions.iter() {
if let Some(meta) = StorageRequests::<T>::get(&deletion.signed_intention.file_key) {
let v: u64 = meta.bsps_volunteered.into();
let r: u64 = meta.bsps_required.into();
worst_v = worst_v.max(v as u32);
worst_r = worst_r.max(r as u32);
} else {
worst_v = max_v;
worst_r = max_r;
}
}
let base = if bsp_id.is_none() {
T::WeightInfo::delete_files_bucket(n, worst_v, worst_r)
if bsp_id.is_none() {
T::WeightInfo::delete_files_bucket(n)
} else {
T::WeightInfo::delete_files_bsp(n, worst_v, worst_r)
};
base.saturating_add(T::DbWeight::get().reads(n as u64))
T::WeightInfo::delete_files_bsp(n)
}
})]
pub fn delete_files(
origin: OriginFor<T>,
Expand Down
Loading
Loading