diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 054cfdd429..0330cebb3b 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -181,41 +181,29 @@ jobs: - name: Cargo check all targets. run: cargo check --all-targets - # Next, check subxt features. + # Next, check each subxt feature in isolation. # - `native` feature must always be enabled # - `web` feature is always ignored. - # - This means, don't check --no-default-features and don't try enabling --all-features; both will fail - name: Cargo hack; check each subxt feature - run: cargo hack -p subxt --each-feature check --exclude-no-default-features --exclude-all-features --exclude-features web --features native + run: cargo hack -p subxt --each-feature check --exclude-features web --features native + # Same with subxt-historic - name: Cargo hack; check each subxt feature - run: cargo hack -p subxt-historic --each-feature check --exclude-no-default-features --exclude-all-features --exclude-features web --features native + run: cargo hack -p subxt-historic --each-feature check --exclude-features web --features native - # Subxt-signer has the "subxt" features enabled in the "check all targets" test. Run it on its own to - # check it without. We can't enable subxt or web features here, so no cargo hack. - - name: Cargo check subxt-signer - run: | - cargo check -p subxt-signer - cargo check -p subxt-signer --no-default-features --features sr25519 - cargo check -p subxt-signer --no-default-features --features ecdsa - cargo check -p subxt-signer --no-default-features --features unstable-eth + # And with subxt-rpcs + - name: Cargo hack; check each subxt-rpcs feature + run: cargo hack -p subxt-rpcs --each-feature check --exclude-features web --features native - # Subxt-rpcs has a bunch of clients that can be exposed. Check that they all stand on their own. 
- - name: Cargo check subxt-rpcs - run: | - cargo check -p subxt-rpcs - cargo check -p subxt-rpcs --no-default-features --features native - cargo check -p subxt-rpcs --no-default-features --features native,subxt - cargo check -p subxt-rpcs --no-default-features --features native,jsonrpsee - cargo check -p subxt-rpcs --no-default-features --features native,reconnecting-rpc-client - cargo check -p subxt-rpcs --no-default-features --features native,mock-rpc-client - cargo check -p subxt-rpcs --no-default-features --features native,unstable-light-client - - # We can't enable web features here, so no cargo hack. + # And with subxt-signer (seems to work with a more basic check here; disable web if it becomes an issue). + - name: Cargo hack; check each subxt-signer feature + run: cargo hack -p subxt-signer --each-feature check + + # And for subxt-lightclient. - name: Cargo check subxt-lightclient - run: cargo check -p subxt-lightclient + run: cargo hack -p subxt-lightclient --each-feature check --exclude-features web --features native - # Next, check each other package in isolation. + # Next, check all other crates. 
- name: Cargo hack; check each feature/crate on its own run: cargo hack --exclude subxt --exclude subxt-historic --exclude subxt-signer --exclude subxt-lightclient --exclude subxt-rpcs --exclude-all-features --each-feature check --workspace diff --git a/Cargo.lock b/Cargo.lock index 4aba7f6e1e..91c0c41824 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1953,9 +1953,9 @@ dependencies = [ [[package]] name = "frame-decode" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e5c3badfabd704dda4ddc7fafcd09127e8661d1cca2f16556c6826166932c87" +checksum = "73d29c7f2987ea24ab2eaea315aadb9ba598188823181cdf0476049b625a5844" dependencies = [ "frame-metadata 23.0.0", "parity-scale-codec", @@ -4464,9 +4464,9 @@ dependencies = [ [[package]] name = "scale-info-legacy" -version = "0.3.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d7e3c60aa5e479fe4cfba98a8e74f93b329f4ce0628f7a52f41768489bc418" +checksum = "06423f0d7ea951547143aff4695c4c3e821e66c9b80729a3ff55fa93d23e93e6" dependencies = [ "hashbrown 0.15.3", "scale-type-resolver", @@ -4500,6 +4500,19 @@ dependencies = [ "thiserror 2.0.12", ] +[[package]] +name = "scale-typegen" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "642d2f13f3fc9a34ea2c1e36142984eba78cd2405a61632492f8b52993e98879" +dependencies = [ + "proc-macro2", + "quote", + "scale-info", + "syn 2.0.101", + "thiserror 2.0.12", +] + [[package]] name = "scale-typegen-description" version = "0.11.1" @@ -4513,7 +4526,7 @@ dependencies = [ "rand", "rand_chacha", "scale-info", - "scale-typegen", + "scale-typegen 0.11.1", "scale-value", "smallvec", ] @@ -5654,7 +5667,7 @@ dependencies = [ "pretty_assertions", "quote", "scale-info", - "scale-typegen", + "scale-typegen 0.12.0", "scale-typegen-description", "scale-value", "serde", @@ -5682,7 +5695,7 @@ dependencies = [ "proc-macro2", "quote", "scale-info", - 
"scale-typegen", + "scale-typegen 0.12.0", "subxt-metadata", "syn 2.0.101", "thiserror 2.0.12", @@ -5779,7 +5792,7 @@ dependencies = [ "quote", "sc-executor", "sc-executor-common", - "scale-typegen", + "scale-typegen 0.12.0", "sp-io", "sp-maybe-compressed-blob", "sp-state-machine", @@ -5800,7 +5813,10 @@ dependencies = [ "hashbrown 0.14.5", "parity-scale-codec", "scale-info", + "scale-info-legacy", + "scale-type-resolver", "sp-crypto-hashing", + "subxt-codegen", "subxt-utils-stripmetadata", "thiserror 2.0.12", ] diff --git a/Cargo.toml b/Cargo.toml index d31a168b40..4acf44b7dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,7 +81,7 @@ darling = "0.20.10" derive-where = "1.2.7" either = { version = "1.13.0", default-features = false } finito = { version = "0.1.0", default-features = false } -frame-decode = { version = "0.12.0", default-features = false } +frame-decode = { version = "0.14.0", default-features = false } frame-metadata = { version = "23.0.0", default-features = false } futures = { version = "0.3.31", default-features = false, features = ["std"] } getrandom = { version = "0.2", default-features = false } @@ -103,8 +103,8 @@ scale-bits = { version = "0.7.0", default-features = false } scale-decode = { version = "0.16.0", default-features = false } scale-encode = { version = "0.10.0", default-features = false } scale-type-resolver = { version = "0.2.0" } -scale-info-legacy = { version = "0.3.0", default-features = false } -scale-typegen = "0.11.1" +scale-info-legacy = { version = "0.3.2", default-features = false } +scale-typegen = "0.12.0" scale-typegen-description = "0.11.0" serde = { version = "1.0.210", default-features = false, features = ["derive"] } serde_json = { version = "1.0.128", default-features = false } diff --git a/artifacts/kusama/metadata_v10_1038.scale b/artifacts/kusama/metadata_v10_1038.scale new file mode 100644 index 0000000000..1e7837da77 Binary files /dev/null and b/artifacts/kusama/metadata_v10_1038.scale differ diff --git 
a/artifacts/kusama/metadata_v11_1045.scale b/artifacts/kusama/metadata_v11_1045.scale new file mode 100644 index 0000000000..750c7c6479 Binary files /dev/null and b/artifacts/kusama/metadata_v11_1045.scale differ diff --git a/artifacts/kusama/metadata_v12_2025.scale b/artifacts/kusama/metadata_v12_2025.scale new file mode 100644 index 0000000000..a68b8f7117 Binary files /dev/null and b/artifacts/kusama/metadata_v12_2025.scale differ diff --git a/artifacts/kusama/metadata_v13_9030.scale b/artifacts/kusama/metadata_v13_9030.scale new file mode 100644 index 0000000000..92dfaaca10 Binary files /dev/null and b/artifacts/kusama/metadata_v13_9030.scale differ diff --git a/artifacts/kusama/metadata_v14_9111.scale b/artifacts/kusama/metadata_v14_9111.scale new file mode 100644 index 0000000000..ec79100675 Binary files /dev/null and b/artifacts/kusama/metadata_v14_9111.scale differ diff --git a/artifacts/kusama/metadata_v9_1021.scale b/artifacts/kusama/metadata_v9_1021.scale new file mode 100644 index 0000000000..b474022604 Binary files /dev/null and b/artifacts/kusama/metadata_v9_1021.scale differ diff --git a/core/src/blocks/extrinsics.rs b/core/src/blocks/extrinsics.rs index 77f6f7c26d..94de10ade3 100644 --- a/core/src/blocks/extrinsics.rs +++ b/core/src/blocks/extrinsics.rs @@ -494,7 +494,7 @@ mod tests { let metadata = metadata(); // Except our metadata to contain the registered types. 
- let pallet = metadata.pallet_by_index(0).expect("pallet exists"); + let pallet = metadata.pallet_by_call_index(0).expect("pallet exists"); let extrinsic = pallet .call_variant_by_index(2) .expect("metadata contains the RuntimeCall enum with this pallet"); diff --git a/core/src/events.rs b/core/src/events.rs index e6246cbce3..9d38517910 100644 --- a/core/src/events.rs +++ b/core/src/events.rs @@ -262,7 +262,7 @@ impl EventDetails { // Get metadata for the event: let event_pallet = metadata - .pallet_by_index(pallet_index) + .pallet_by_event_index(pallet_index) .ok_or_else(|| EventsError::CannotFindPalletWithIndex(pallet_index))?; let event_variant = event_pallet .event_variant_by_index(variant_index) @@ -359,7 +359,7 @@ impl EventDetails { pub fn event_metadata(&self) -> EventMetadataDetails<'_> { let pallet = self .metadata - .pallet_by_index(self.pallet_index()) + .pallet_by_event_index(self.pallet_index()) .expect("event pallet to be found; we did this already during decoding"); let variant = pallet .event_variant_by_index(self.variant_index()) diff --git a/core/src/tx/payload.rs b/core/src/tx/payload.rs index 765ef64967..e87c38a98f 100644 --- a/core/src/tx/payload.rs +++ b/core/src/tx/payload.rs @@ -182,7 +182,7 @@ impl Payload for DefaultPayload { call_name: self.call_name.to_string(), })?; - let pallet_index = pallet.index(); + let pallet_index = pallet.call_index(); let call_index = call.index; pallet_index.encode_to(out); diff --git a/historic/src/storage.rs b/historic/src/storage.rs index d34ddf1e97..2d818b9b4e 100644 --- a/historic/src/storage.rs +++ b/historic/src/storage.rs @@ -1,3 +1,4 @@ +mod list_storage_entries_any; mod storage_entry; mod storage_info; mod storage_key; @@ -71,14 +72,14 @@ where let metadata = client.metadata(); let mut pallet_name = Cow::Borrowed(""); - frame_decode::helpers::list_storage_entries_any(metadata).filter_map(move |entry| { + list_storage_entries_any::list_storage_entries_any(metadata).filter_map(move |entry| { match 
entry { - frame_decode::storage::Entry::In(name) => { + frame_decode::storage::StorageEntry::In(name) => { // Set the pallet name for upcoming entries: pallet_name = name; None } - frame_decode::storage::Entry::Name(entry_name) => { + frame_decode::storage::StorageEntry::Name(entry_name) => { // Output each entry with the last seen pallet name: Some(StorageEntriesItem { pallet_name: pallet_name.clone(), diff --git a/historic/src/storage/list_storage_entries_any.rs b/historic/src/storage/list_storage_entries_any.rs new file mode 100644 index 0000000000..e97bafd748 --- /dev/null +++ b/historic/src/storage/list_storage_entries_any.rs @@ -0,0 +1,35 @@ +use frame_decode::storage::StorageEntryInfo; +use frame_metadata::RuntimeMetadata; + +pub use frame_decode::storage::StorageEntry; + +/// Returns an iterator listing the available storage entries in some metadata. +/// +/// This basically calls [`StorageEntryInfo::storage_entries()`] for each metadata version, +/// returning an empty iterator where applicable (ie when passing legacy metadata and the +/// `legacy` features flag is not enabled). 
+pub fn list_storage_entries_any( + metadata: &RuntimeMetadata, +) -> impl Iterator> { + match metadata { + RuntimeMetadata::V0(_deprecated_metadata) + | RuntimeMetadata::V1(_deprecated_metadata) + | RuntimeMetadata::V2(_deprecated_metadata) + | RuntimeMetadata::V3(_deprecated_metadata) + | RuntimeMetadata::V4(_deprecated_metadata) + | RuntimeMetadata::V5(_deprecated_metadata) + | RuntimeMetadata::V6(_deprecated_metadata) + | RuntimeMetadata::V7(_deprecated_metadata) => { + Box::new(core::iter::empty()) as Box>> + } + RuntimeMetadata::V8(m) => Box::new(m.storage_entries()), + RuntimeMetadata::V9(m) => Box::new(m.storage_entries()), + RuntimeMetadata::V10(m) => Box::new(m.storage_entries()), + RuntimeMetadata::V11(m) => Box::new(m.storage_entries()), + RuntimeMetadata::V12(m) => Box::new(m.storage_entries()), + RuntimeMetadata::V13(m) => Box::new(m.storage_entries()), + RuntimeMetadata::V14(m) => Box::new(m.storage_entries()), + RuntimeMetadata::V15(m) => Box::new(m.storage_entries()), + RuntimeMetadata::V16(m) => Box::new(m.storage_entries()), + } +} diff --git a/metadata/Cargo.toml b/metadata/Cargo.toml index 425dc6698d..dd68a34b45 100644 --- a/metadata/Cargo.toml +++ b/metadata/Cargo.toml @@ -14,11 +14,23 @@ homepage.workspace = true description = "Command line utilities for checking metadata compatibility between nodes." [features] -default = ["std"] +default = ["std", "legacy"] std = ["scale-info/std", "frame-metadata/std"] +# Enable decoding of legacy metadata, too. +# std required by frame-metadata to decode Self { + Opts { + sanitize_paths: true, + ignore_not_found: true, + } + } +} + +macro_rules! from_historic { + ($vis:vis fn $fn_name:ident($metadata:path $(, builtin_index: $builtin_index:ident)? )) => { + $vis fn $fn_name(metadata: &$metadata, types: &TypeRegistrySet<'_>, opts: Opts) -> Result { + // This will be used to construct our `PortableRegistry` from old-style types. 
+ let mut portable_registry_builder = PortableRegistryBuilder::new(&types); + portable_registry_builder.ignore_not_found(opts.ignore_not_found); + portable_registry_builder.sanitize_paths(opts.sanitize_paths); + + + // We use this type in a few places to denote that we don't know how to decode it. + let unknown_type_id = portable_registry_builder.add_type_str("special::Unknown", None) + .map_err(|e| Error::add_type("constructing 'Unknown' type", e))?; + + // Pallet metadata + let mut call_index = 0u8; + let mut error_index = 0u8; + let mut event_index = 0u8; + + let new_pallets = as_decoded(&metadata.modules).iter().map(|pallet| { + // In older metadatas, calls and event enums can have different indexes + // in a given pallet. Pallets without calls or events don't increment + // the respective index for them. + // + // We assume since errors are non optional, that the pallet index _always_ + // increments for errors (no `None`s to skip). + let (call_index, event_index, error_index) = { + let out = (call_index, event_index, error_index); + if pallet.calls.is_some() { + call_index += 1; + } + if pallet.event.is_some() { + event_index += 1; + } + error_index += 1; + + out + }; + + // For v12 and v13 metadata, there is a builtin index for everything in a pallet. + // We enable this logic for those metadatas to get the correct index. + $( + let $builtin_index = true; + let (call_index, event_index, error_index) = if $builtin_index { + (pallet.index, pallet.index, pallet.index) + } else { + (call_index, event_index, error_index) + }; + )? 
+ + let pallet_name = as_decoded(&pallet.name).to_string(); + + // Storage entries: + let storage = pallet.storage.as_ref().map(|s| { + let storage = as_decoded(s); + let prefix = as_decoded(&storage.prefix); + let entries = metadata.storage_in_pallet(&pallet_name).map(|entry_name| { + let info = metadata + .storage_info(&pallet_name, &entry_name) + .map_err(|e| Error::StorageInfoError(e.into_owned()))?; + let entry_name = entry_name.into_owned(); + + let info = info.map_ids(|old_id| { + portable_registry_builder.add_type(old_id) + }).map_err(|e| { + let ctx = format!("adding type used in storage entry {pallet_name}.{entry_name}"); + Error::add_type(ctx, e) + })?; + + let entry = crate::StorageEntryMetadata { + name: entry_name.clone(), + info: info.into_owned(), + // We don't expose docs via our storage info yet. + docs: Vec::new(), + }; + + Ok((entry_name, entry)) + }).collect::, _>>()?; + Ok(crate::StorageMetadata { + prefix: prefix.clone(), + entries, + }) + }).transpose()?; + + // Pallet error type is just a builtin type: + let error_ty = portable_registry_builder.add_type_str(&format!("builtin::module::error::{pallet_name}"), None) + .map_err(|e| { + let ctx = format!("converting the error enum for pallet {pallet_name}"); + Error::add_type(ctx, e) + })?; + + // Pallet calls also just a builtin type: + let call_ty = pallet.calls.as_ref().map(|_| { + portable_registry_builder.add_type_str(&format!("builtin::module::call::{pallet_name}"), None) + .map_err(|e| { + let ctx = format!("converting the call enum for pallet {pallet_name}"); + Error::add_type(ctx, e) + }) + }).transpose()?; + + // Pallet events also just a builtin type: + let event_ty = pallet.event.as_ref().map(|_| { + portable_registry_builder.add_type_str(&format!("builtin::module::event::{pallet_name}"), None) + .map_err(|e| { + let ctx = format!("converting the event enum for pallet {pallet_name}"); + Error::add_type(ctx, e) + }) + }).transpose()?; + + let call_variant_index = + 
VariantIndex::build(call_ty, portable_registry_builder.types()); + let error_variant_index = + VariantIndex::build(Some(error_ty), portable_registry_builder.types()); + let event_variant_index = + VariantIndex::build(event_ty, portable_registry_builder.types()); + + let constants = metadata.constants_in_pallet(&pallet_name).map(|name| { + let name = name.into_owned(); + let info = metadata.constant_info(&pallet_name, &name) + .map_err(|e| Error::ConstantInfoError(e.into_owned()))?; + let new_type_id = portable_registry_builder.add_type(info.type_id) + .map_err(|e| { + let ctx = format!("converting the constant {name} for pallet {pallet_name}"); + Error::add_type(ctx, e) + })?; + + let constant = crate::ConstantMetadata { + name: name.clone(), + ty: new_type_id, + value: info.bytes.to_vec(), + // We don't expose docs via our constant info yet. + docs: Vec::new(), + }; + + Ok((name, constant)) + }).collect::>()?; + + let pallet_metadata = crate::PalletMetadataInner { + name: pallet_name.clone(), + call_index, + event_index, + error_index, + storage, + error_ty: Some(error_ty), + call_ty, + event_ty, + call_variant_index, + error_variant_index, + event_variant_index, + constants, + view_functions: Default::default(), + associated_types: Default::default(), + // Pallets did not have docs prior to V15. + docs: Default::default(), + }; + + Ok((pallet_name, pallet_metadata)) + }).collect::,Error>>()?; + + // Extrinsic metadata + let new_extrinsic = { + let signature_info = metadata + .extrinsic_signature_info() + .map_err(|e| Error::ExtrinsicInfoError(e.into_owned()))?; + + let address_ty_id = portable_registry_builder.add_type(signature_info.address_id) + .map_err(|_| Error::CannotFindAddressType)?; + + let signature_ty_id = portable_registry_builder.add_type(signature_info.signature_id) + .map_err(|_| Error::CannotFindSignatureType)?; + + let transaction_extensions = metadata + .extrinsic_extension_info(None) + .map_err(|e| Error::ExtrinsicInfoError(e.into_owned()))?
+ .extension_ids + .into_iter() + .map(|ext| { + let ext_name = ext.name.into_owned(); + let ext_type = portable_registry_builder.add_type(ext.id) + .map_err(|e| { + let ctx = format!("converting the signed extension {ext_name}"); + Error::add_type(ctx, e) + })?; + + Ok(crate::TransactionExtensionMetadataInner { + identifier: ext_name, + extra_ty: ext_type, + // This only started existing in V14+ metadata, but in any case, + // we don't need to know how to decode the signed payload for + // historic blocks (hopefully), so set to unknown. + additional_ty: unknown_type_id.into() + }) + }) + .collect::,Error>>()?; + + let transaction_extensions_by_version = BTreeMap::from_iter([( + 0, + (0..transaction_extensions.len() as u32).collect() + )]); + + crate::ExtrinsicMetadata { + address_ty: address_ty_id.into(), + signature_ty: signature_ty_id.into(), + supported_versions: Vec::from_iter([4]), + transaction_extensions, + transaction_extensions_by_version, + } + }; + + // Outer enum types + let outer_enums = crate::OuterEnumsMetadata { + call_enum_ty: portable_registry_builder.add_type_str("builtin::Call", None) + .map_err(|e| { + let ctx = format!("constructing the 'builtin::Call' type to put in the OuterEnums metadata"); + Error::add_type(ctx, e) + })?, + event_enum_ty: portable_registry_builder.add_type_str("builtin::Event", None) + .map_err(|e| { + let ctx = format!("constructing the 'builtin::Event' type to put in the OuterEnums metadata"); + Error::add_type(ctx, e) + })?, + error_enum_ty: portable_registry_builder.add_type_str("builtin::Error", None) + .map_err(|e| { + let ctx = format!("constructing the 'builtin::Error' type to put in the OuterEnums metadata"); + Error::add_type(ctx, e) + })?, + }; + + // These are all the same in V13, but be explicit anyway for clarity. 
+ let pallets_by_call_index = new_pallets + .values() + .iter() + .enumerate() + .map(|(idx,p)| (p.call_index, idx)) + .collect(); + let pallets_by_error_index = new_pallets + .values() + .iter() + .enumerate() + .map(|(idx,p)| (p.error_index, idx)) + .collect(); + let pallets_by_event_index = new_pallets + .values() + .iter() + .enumerate() + .map(|(idx,p)| (p.event_index, idx)) + .collect(); + + // This is optional in the sense that Subxt will return an error if it needs to decode this type, + // and I think for historic metadata we wouldn't end up down that path anyway. Historic metadata + // tends to call it just "DispatchError" but search more specific paths first. + let dispatch_error_ty = portable_registry_builder + .try_add_type_str("hardcoded::DispatchError", None) + .or_else(|| portable_registry_builder.try_add_type_str("sp_runtime::DispatchError", None)) + .or_else(|| portable_registry_builder.try_add_type_str("DispatchError", None)) + .transpose() + .map_err(|e| Error::add_type("constructing DispatchError", e))?; + + // Runtime API definitions live with type definitions. 
+ let apis = type_registry_to_runtime_apis(&types, &mut portable_registry_builder)?; + + Ok(crate::Metadata { + types: portable_registry_builder.finish(), + pallets: new_pallets, + pallets_by_call_index, + pallets_by_error_index, + pallets_by_event_index, + extrinsic: new_extrinsic, + outer_enums, + dispatch_error_ty, + apis, + // Nothing custom existed in V13 + custom: v15::CustomMetadata { map: Default::default() }, + }) + }} +} + +from_historic!(pub fn from_v13(frame_metadata::v13::RuntimeMetadataV13, builtin_index: yes)); +from_historic!(pub fn from_v12(frame_metadata::v12::RuntimeMetadataV12, builtin_index: yes)); +from_historic!(pub fn from_v11(frame_metadata::v11::RuntimeMetadataV11)); +from_historic!(pub fn from_v10(frame_metadata::v10::RuntimeMetadataV10)); +from_historic!(pub fn from_v9(frame_metadata::v9::RuntimeMetadataV9)); +from_historic!(pub fn from_v8(frame_metadata::v8::RuntimeMetadataV8)); + +fn as_decoded(item: &frame_metadata::decode_different::DecodeDifferent) -> &B { + match item { + frame_metadata::decode_different::DecodeDifferent::Encode(_a) => { + panic!("Expecting decoded data") + } + frame_metadata::decode_different::DecodeDifferent::Decoded(b) => b, + } +} + +// Obtain Runtime API information from some type registry. 
+pub fn type_registry_to_runtime_apis( + types: &TypeRegistrySet<'_>, + portable_registry_builder: &mut PortableRegistryBuilder, +) -> Result, Error> { + let mut apis = OrderedMap::new(); + let mut trait_name = ""; + let mut trait_methods = OrderedMap::new(); + + for api in types.runtime_apis() { + match api { + RuntimeApiName::Trait(name) => { + if !trait_methods.is_empty() { + apis.push_insert( + trait_name.into(), + crate::RuntimeApiMetadataInner { + name: trait_name.into(), + methods: trait_methods, + docs: Vec::new(), + }, + ); + } + trait_methods = OrderedMap::new(); + trait_name = name; + } + RuntimeApiName::Method(name) => { + let info = types + .runtime_api_info(trait_name, name) + .map_err(|e| Error::RuntimeApiInfoError(e.into_owned()))?; + + let info = info.map_ids(|id| { + portable_registry_builder.add_type(id).map_err(|e| { + let c = format!("converting type for runtime API {trait_name}.{name}"); + Error::add_type(c, e) + }) + })?; + + trait_methods.push_insert( + name.to_owned(), + crate::RuntimeApiMethodMetadataInner { + name: name.into(), + info, + docs: Vec::new(), + }, + ); + } + } + } + + Ok(apis) +} + +/// An error encountered converting some legacy metadata to our internal format. +#[allow(missing_docs)] +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Cannot add a type. 
+ #[error("Cannot add type ({context}): {error}")] + AddTypeError { + context: String, + error: portable_registry_builder::PortableRegistryAddTypeError, + }, + #[error("Cannot find 'hardcoded::ExtrinsicAddress' type in legacy types")] + CannotFindAddressType, + #[error("Cannot find 'hardcoded::ExtrinsicSignature' type in legacy types")] + CannotFindSignatureType, + #[error( + "Cannot find 'builtin::Call' type in legacy types (this should have been automatically added)" + )] + CannotFindCallType, + #[error("Cannot obtain the storage information we need to convert storage entries")] + StorageInfoError(frame_decode::storage::StorageInfoError<'static>), + #[error("Cannot obtain the extrinsic information we need to convert transaction extensions")] + ExtrinsicInfoError(frame_decode::extrinsics::ExtrinsicInfoError<'static>), + #[error("Cannot obtain the Runtime API information we need")] + RuntimeApiInfoError(frame_decode::runtime_apis::RuntimeApiInfoError<'static>), + #[error("Cannot obtain the Constant information we need")] + ConstantInfoError(frame_decode::constants::ConstantInfoError<'static>), +} + +impl Error { + /// A shorthand for the [`Error::AddTypeError`] variant. 
+ fn add_type( + context: impl Into, + error: impl Into, + ) -> Self { + Error::AddTypeError { + context: context.into(), + error: error.into(), + } + } +} diff --git a/metadata/src/from/legacy/portable_registry_builder.rs b/metadata/src/from/legacy/portable_registry_builder.rs new file mode 100644 index 0000000000..b681d69160 --- /dev/null +++ b/metadata/src/from/legacy/portable_registry_builder.rs @@ -0,0 +1,541 @@ +use alloc::borrow::ToOwned; +use alloc::collections::{BTreeMap, BTreeSet}; +use alloc::string::ToString; +use alloc::vec::Vec; +use scale_info::PortableRegistry; +use scale_info::{PortableType, form::PortableForm}; +use scale_info_legacy::type_registry::TypeRegistryResolveError; +use scale_info_legacy::{LookupName, TypeRegistrySet}; +use scale_type_resolver::{ + BitsOrderFormat, BitsStoreFormat, FieldIter, PathIter, Primitive, ResolvedTypeVisitor, + UnhandledKind, VariantIter, +}; + +#[derive(thiserror::Error, Debug)] +pub enum PortableRegistryAddTypeError { + #[error("Error resolving type: {0}")] + ResolveError(#[from] TypeRegistryResolveError), + #[error("Cannot find type '{0}'")] + TypeNotFound(LookupName), +} + +/// the purpose of this is to convert a (subset of) [`scale_info_legacy::TypeRegistrySet`] +/// into a [`scale_info::PortableRegistry`]. Type IDs from the former are passed in, and +/// type IDs from the latter are handed back. Calling [`PortableRegistryBuilder::finish()`] +/// then hands back a [`scale_info::PortableRegistry`] which these Ids can be used with. +pub struct PortableRegistryBuilder<'info> { + legacy_types: &'info TypeRegistrySet<'info>, + scale_info_types: PortableRegistry, + old_to_new: BTreeMap, + ignore_not_found: bool, + sanitize_paths: bool, + seen_names_in_default_path: BTreeSet, +} + +impl<'info> PortableRegistryBuilder<'info> { + /// Instantiate a new [`PortableRegistryBuilder`], providing the set of + /// legacy types you wish to use to construct modern types from. 
+ pub fn new(legacy_types: &'info TypeRegistrySet<'info>) -> Self { + PortableRegistryBuilder { + legacy_types, + scale_info_types: PortableRegistry { + types: Default::default(), + }, + old_to_new: Default::default(), + ignore_not_found: false, + sanitize_paths: false, + seen_names_in_default_path: Default::default(), + } + } + + /// If this is enabled, any type that isn't found will be replaced by a "special::Unknown" type + /// instead of a "type not found" error being emitted. + /// + /// Default: false + pub fn ignore_not_found(&mut self, ignore: bool) { + self.ignore_not_found = ignore; + } + + /// Should type paths be sanitized to make them more amenable to things like codegen? + /// + /// Default: false + pub fn sanitize_paths(&mut self, sanitize: bool) { + self.sanitize_paths = sanitize; + } + + /// Try adding a type, given its string name and optionally the pallet it's scoped to. + pub fn try_add_type_str( + &mut self, + id: &str, + pallet: Option<&str>, + ) -> Option> { + let mut id = match LookupName::parse(id) { + Ok(id) => id, + Err(e) => { + return Some(Err(TypeRegistryResolveError::LookupNameInvalid( + id.to_owned(), + e, + ))); + } + }; + + if let Some(pallet) = pallet { + id = id.in_pallet(pallet); + } + + self.try_add_type(id) + } + + /// Try adding a type, returning `None` if the type doesn't exist. + pub fn try_add_type( + &mut self, + id: LookupName, + ) -> Option> { + match self.add_type(id) { + Ok(id) => Some(Ok(id)), + Err(PortableRegistryAddTypeError::TypeNotFound(_)) => None, + Err(PortableRegistryAddTypeError::ResolveError(e)) => Some(Err(e)), + } + } + + /// Add a new legacy type, giving its string ID/name and, if applicable, the pallet that it's seen in, + /// returning the corresponding "modern" type ID to use in its place, or an error if something does wrong. 
+ pub fn add_type_str( + &mut self, + id: &str, + pallet: Option<&str>, + ) -> Result { + let mut id = LookupName::parse(id) + .map_err(|e| TypeRegistryResolveError::LookupNameInvalid(id.to_owned(), e))?; + + if let Some(pallet) = pallet { + id = id.in_pallet(pallet); + } + + self.add_type(id) + } + + /// Add a new legacy type, returning the corresponding "modern" type ID to use in + /// its place, or an error if something does wrong. + pub fn add_type(&mut self, id: LookupName) -> Result { + if let Some(new_id) = self.old_to_new.get(&id) { + return Ok(*new_id); + } + + // Assign a new ID immediately to prevent any recursion. If we don't do this, then + // recursive types (ie types that contain themselves) will lead to a stack overflow. + // with this, we assign IDs up front, so the ID is returned immediately on recursing. + let new_id = self.scale_info_types.types.len() as u32; + + // Add a placeholder type to "reserve" this ID. + self.scale_info_types.types.push(PortableType { + id: new_id, + ty: scale_info::Type::new( + scale_info::Path { segments: vec![] }, + core::iter::empty(), + scale_info::TypeDef::Variant(scale_info::TypeDefVariant { variants: vec![] }), + Default::default(), + ), + }); + + // Cache the ID so that recursing calls bail early. + self.old_to_new.insert(id.clone(), new_id); + + let visitor = PortableRegistryVisitor { + builder: &mut *self, + current_type: &id, + }; + + match visitor + .builder + .legacy_types + .resolve_type(id.clone(), visitor) + { + Ok(Ok(ty)) => { + self.scale_info_types.types[new_id as usize].ty = ty; + Ok(new_id) + } + Ok(Err(e)) => { + self.old_to_new.remove(&id); + Err(e) + } + Err(e) => { + self.old_to_new.remove(&id); + Err(e.into()) + } + } + } + + /// Return the current [`scale_info::PortableRegistry`]. + pub fn types(&self) -> &PortableRegistry { + &self.scale_info_types + } + + /// Finish adding types and return the modern type registry. 
+ pub fn finish(self) -> PortableRegistry { + self.scale_info_types + } +} + +struct PortableRegistryVisitor<'a, 'info> { + builder: &'a mut PortableRegistryBuilder<'info>, + current_type: &'a LookupName, +} + +impl<'a, 'info> ResolvedTypeVisitor<'info> for PortableRegistryVisitor<'a, 'info> { + type TypeId = LookupName; + type Value = Result, PortableRegistryAddTypeError>; + + fn visit_unhandled(self, kind: UnhandledKind) -> Self::Value { + panic!("A handler exists for every type, but visit_unhandled({kind:?}) was called"); + } + + fn visit_not_found(self) -> Self::Value { + if self.builder.ignore_not_found { + // Return the "unknown" type if we're ignoring not found types: + Ok(unknown_type()) + } else { + // Otherwise just return an error at this point: + Err(PortableRegistryAddTypeError::TypeNotFound( + self.current_type.clone(), + )) + } + } + + fn visit_primitive(self, primitive: Primitive) -> Self::Value { + let p = match primitive { + Primitive::Bool => scale_info::TypeDefPrimitive::Bool, + Primitive::Char => scale_info::TypeDefPrimitive::Char, + Primitive::Str => scale_info::TypeDefPrimitive::Str, + Primitive::U8 => scale_info::TypeDefPrimitive::U8, + Primitive::U16 => scale_info::TypeDefPrimitive::U16, + Primitive::U32 => scale_info::TypeDefPrimitive::U32, + Primitive::U64 => scale_info::TypeDefPrimitive::U64, + Primitive::U128 => scale_info::TypeDefPrimitive::U128, + Primitive::U256 => scale_info::TypeDefPrimitive::U256, + Primitive::I8 => scale_info::TypeDefPrimitive::I8, + Primitive::I16 => scale_info::TypeDefPrimitive::I16, + Primitive::I32 => scale_info::TypeDefPrimitive::I32, + Primitive::I64 => scale_info::TypeDefPrimitive::I64, + Primitive::I128 => scale_info::TypeDefPrimitive::I128, + Primitive::I256 => scale_info::TypeDefPrimitive::I256, + }; + + Ok(scale_info::Type::new( + Default::default(), + core::iter::empty(), + scale_info::TypeDef::Primitive(p), + Default::default(), + )) + } + + fn visit_sequence>( + self, + path: Path, + inner_type_id: 
Self::TypeId, + ) -> Self::Value { + let inner_id = self.builder.add_type(inner_type_id)?; + let path = scale_info::Path { + segments: prepare_path(path, self.builder), + }; + + Ok(scale_info::Type::new( + path, + core::iter::empty(), + scale_info::TypeDef::Sequence(scale_info::TypeDefSequence { + type_param: inner_id.into(), + }), + Default::default(), + )) + } + + fn visit_composite(self, path: Path, fields: Fields) -> Self::Value + where + Path: PathIter<'info>, + Fields: FieldIter<'info, Self::TypeId>, + { + let path = scale_info::Path { + segments: prepare_path(path, self.builder), + }; + + let mut scale_info_fields = Vec::>::new(); + for field in fields { + let type_name = field.id.to_string(); + let id = self.builder.add_type(field.id)?; + scale_info_fields.push(scale_info::Field { + name: field.name.map(Into::into), + ty: id.into(), + type_name: Some(type_name), + docs: Default::default(), + }); + } + + Ok(scale_info::Type::new( + path, + core::iter::empty(), + scale_info::TypeDef::Composite(scale_info::TypeDefComposite { + fields: scale_info_fields, + }), + Default::default(), + )) + } + + fn visit_array(self, inner_type_id: LookupName, len: usize) -> Self::Value { + let inner_id = self.builder.add_type(inner_type_id)?; + + Ok(scale_info::Type::new( + Default::default(), + core::iter::empty(), + scale_info::TypeDef::Array(scale_info::TypeDefArray { + len: len as u32, + type_param: inner_id.into(), + }), + Default::default(), + )) + } + + fn visit_tuple(self, type_ids: TypeIds) -> Self::Value + where + TypeIds: ExactSizeIterator, + { + let mut scale_info_fields = Vec::new(); + for old_id in type_ids { + let new_id = self.builder.add_type(old_id)?; + scale_info_fields.push(new_id.into()); + } + + Ok(scale_info::Type::new( + Default::default(), + core::iter::empty(), + scale_info::TypeDef::Tuple(scale_info::TypeDefTuple { + fields: scale_info_fields, + }), + Default::default(), + )) + } + + fn visit_variant(self, path: Path, variants: Var) -> Self::Value + 
where + Path: PathIter<'info>, + Fields: FieldIter<'info, Self::TypeId>, + Var: VariantIter<'info, Fields>, + { + let path = scale_info::Path { + segments: prepare_path(path, self.builder), + }; + + let mut scale_info_variants = Vec::new(); + for variant in variants { + let mut scale_info_variant_fields = Vec::>::new(); + for field in variant.fields { + let type_name = field.id.to_string(); + let id = self.builder.add_type(field.id)?; + scale_info_variant_fields.push(scale_info::Field { + name: field.name.map(Into::into), + ty: id.into(), + type_name: Some(type_name), + docs: Default::default(), + }); + } + + scale_info_variants.push(scale_info::Variant { + name: variant.name.to_owned(), + index: variant.index, + fields: scale_info_variant_fields, + docs: Default::default(), + }) + } + + Ok(scale_info::Type::new( + path, + core::iter::empty(), + scale_info::TypeDef::Variant(scale_info::TypeDefVariant { + variants: scale_info_variants, + }), + Default::default(), + )) + } + + fn visit_compact(self, inner_type_id: Self::TypeId) -> Self::Value { + let inner_id = self.builder.add_type(inner_type_id)?; + + // Configure the path and type params to maximise compat. + let path = ["parity_scale_codec", "Compact"] + .into_iter() + .map(ToOwned::to_owned) + .collect(); + let type_params = [scale_info::TypeParameter { + name: "T".to_owned(), + ty: Some(inner_id.into()), + }]; + + Ok(scale_info::Type::new( + scale_info::Path { segments: path }, + type_params, + scale_info::TypeDef::Compact(scale_info::TypeDefCompact { + type_param: inner_id.into(), + }), + Default::default(), + )) + } + + fn visit_bit_sequence( + self, + store_format: BitsStoreFormat, + order_format: BitsOrderFormat, + ) -> Self::Value { + // These order types are added by default into a `TypeRegistry`, so we + // expect them to exist. Parsing should always succeed. 
+ let order_ty_str = match order_format { + BitsOrderFormat::Lsb0 => "bitvec::order::Lsb0", + BitsOrderFormat::Msb0 => "bitvec::order::Msb0", + }; + let order_ty = LookupName::parse(order_ty_str).unwrap(); + let new_order_ty = self.builder.add_type(order_ty)?; + + // The store types also exist by default. Parsing should always succeed. + let store_ty_str = match store_format { + BitsStoreFormat::U8 => "u8", + BitsStoreFormat::U16 => "u16", + BitsStoreFormat::U32 => "u32", + BitsStoreFormat::U64 => "u64", + }; + let store_ty = LookupName::parse(store_ty_str).unwrap(); + let new_store_ty = self.builder.add_type(store_ty)?; + + // Configure the path and type params to look like BitVec's to try + // and maximise compatibility. + let path = ["bitvec", "vec", "BitVec"] + .into_iter() + .map(ToOwned::to_owned) + .collect(); + let type_params = [ + scale_info::TypeParameter { + name: "Store".to_owned(), + ty: Some(new_store_ty.into()), + }, + scale_info::TypeParameter { + name: "Order".to_owned(), + ty: Some(new_order_ty.into()), + }, + ]; + + Ok(scale_info::Type::new( + scale_info::Path { segments: path }, + type_params, + scale_info::TypeDef::BitSequence(scale_info::TypeDefBitSequence { + bit_order_type: new_order_ty.into(), + bit_store_type: new_store_ty.into(), + }), + Default::default(), + )) + } +} + +fn prepare_path<'info, Path: PathIter<'info>>( + path: Path, + builder: &mut PortableRegistryBuilder<'_>, +) -> Vec { + // If not sanitizing, just return the path as-is. + if !builder.sanitize_paths { + return path.map(|p| p.to_owned()).collect(); + } + + /// Names of prelude types. For codegen to work, any type that _isn't_ one of these must + have a path that is sensible and can be converted to module names.
+ static PRELUDE_TYPE_NAMES: [&str; 24] = [ + "Vec", + "Option", + "Result", + "Cow", + "BTreeMap", + "BTreeSet", + "BinaryHeap", + "VecDeque", + "LinkedList", + "Range", + "RangeInclusive", + "NonZeroI8", + "NonZeroU8", + "NonZeroI16", + "NonZeroU16", + "NonZeroI32", + "NonZeroU32", + "NonZeroI64", + "NonZeroU64", + "NonZeroI128", + "NonZeroU128", + "NonZeroIsize", + "NonZeroUsize", + "Duration", + ]; + + let path: Vec<&str> = path.collect(); + + // No path should be empty; at least the type name should be present. + if path.is_empty() { + panic!( + "Empty path is not expected when converting legacy type; type name expected at least" + ); + } + + // The special::Unknown type can be returned as is; dupe paths allowed. + if path.len() == 2 && path[0] == "special" && path[1] == "Unknown" { + return vec!["special".to_owned(), "Unknown".to_owned()]; + } + + // If non-prelude type has no path, give it one. + if path.len() == 1 && !PRELUDE_TYPE_NAMES.contains(&path[0]) { + return vec![ + "other".to_owned(), + prepare_ident(path[0], &mut builder.seen_names_in_default_path), + ]; + } + + // Non-compliant paths are converted to our default path + let non_compliant_path = path[0..path.len() - 1].iter().any(|&p| { + p.is_empty() + || p.starts_with(|c: char| !c.is_ascii_alphabetic()) + || p.contains(|c: char| !c.is_ascii_alphanumeric() || c.is_ascii_uppercase()) + }); + if non_compliant_path { + let last = *path.last().unwrap(); + return vec![ + "other".to_owned(), + prepare_ident(last, &mut builder.seen_names_in_default_path), + ]; + } + + // If path happens by chance to be ["other", Foo] then ensure Foo isn't duped + if path.len() == 2 && path[0] == "other" { + return vec![ + "other".to_owned(), + prepare_ident(path[1], &mut builder.seen_names_in_default_path), + ]; + } + + path.iter().map(|&p| p.to_owned()).collect() +} + +fn prepare_ident(base_ident: &str, seen: &mut BTreeSet) -> String { + let mut n = 1; + let mut ident = base_ident.to_owned(); + while 
!seen.insert(ident.clone()) { + ident = format!("{base_ident}{n}"); + n += 1; + } + ident +} + +fn unknown_type() -> scale_info::Type { + scale_info::Type::new( + scale_info::Path { + segments: Vec::from_iter(["special".to_owned(), "Unknown".to_owned()]), + }, + core::iter::empty(), + scale_info::TypeDef::Variant(scale_info::TypeDefVariant { + variants: Vec::new(), + }), + Default::default(), + ) +} diff --git a/metadata/src/from/legacy/tests.rs b/metadata/src/from/legacy/tests.rs new file mode 100644 index 0000000000..c4589d5ef2 --- /dev/null +++ b/metadata/src/from/legacy/tests.rs @@ -0,0 +1,477 @@ +use super::*; +use alloc::collections::BTreeSet; +use codec::Decode; +use core::str::FromStr; +use frame_decode::constants::ConstantTypeInfo; +use frame_decode::runtime_apis::RuntimeApiEntryInfo; +use frame_metadata::RuntimeMetadata; +use scale_info_legacy::LookupName; +use scale_type_resolver::TypeResolver; + +/// Load some legacy kusama metadata from our artifacts. +fn legacy_kusama_metadata(version: u8) -> (u64, RuntimeMetadata) { + const VERSIONS: [(u8, u64, &str); 5] = [ + (9, 1021, "metadata_v9_1021.scale"), + (10, 1038, "metadata_v10_1038.scale"), + (11, 1045, "metadata_v11_1045.scale"), + (12, 2025, "metadata_v12_2025.scale"), + (13, 9030, "metadata_v13_9030.scale"), + ]; + + let (spec_version, filename) = VERSIONS + .iter() + .find(|(v, _spec_version, _filename)| *v == version) + .map(|(_, spec_version, name)| (*spec_version, *name)) + .unwrap_or_else(|| panic!("v{version} metadata artifact does not exist")); + + let mut path = std::path::PathBuf::from_str("../artifacts/kusama/").unwrap(); + path.push(filename); + + let bytes = std::fs::read(path).expect("Could not read file"); + let metadata = RuntimeMetadata::decode(&mut &*bytes).expect("Could not SCALE decode metadata"); + + (spec_version, metadata) +} + +/// Load our kusama types. 
+/// TODO: This is WRONG at the moment; change to point to kusama types when they exist: +fn kusama_types() -> scale_info_legacy::ChainTypeRegistry { + frame_decode::legacy_types::polkadot::relay_chain() +} + +/// Sanitizing paths changes things between old and new, so disable this in tests by default +/// so that we can compare paths and check that by default things translate identically. +/// Tests assume that ignore_not_found is enabled, which converts not found types to +/// special::Unknown instead of returning an error. +fn test_opts() -> super::Opts { + super::Opts { + sanitize_paths: false, + ignore_not_found: true, + } +} + +/// Return a pair of original metadata + converted subxt_metadata::Metadata +fn metadata_pair( + version: u8, + opts: super::Opts, +) -> (TypeRegistrySet<'static>, RuntimeMetadata, crate::Metadata) { + let (spec_version, metadata) = legacy_kusama_metadata(version); + let types = kusama_types(); + + // Extend the types with builtins. + let types_for_spec = { + let mut types_for_spec = types.for_spec_version(spec_version).to_owned(); + let extended_types = + frame_decode::helpers::type_registry_from_metadata_any(&metadata).unwrap(); + types_for_spec.prepend(extended_types); + types_for_spec + }; + + let subxt_metadata = match &metadata { + RuntimeMetadata::V9(m) => super::from_v9(m, &types_for_spec, opts), + RuntimeMetadata::V10(m) => super::from_v10(m, &types_for_spec, opts), + RuntimeMetadata::V11(m) => super::from_v11(m, &types_for_spec, opts), + RuntimeMetadata::V12(m) => super::from_v12(m, &types_for_spec, opts), + RuntimeMetadata::V13(m) => super::from_v13(m, &types_for_spec, opts), + _ => panic!("Metadata version {} not expected", metadata.version()), + } + .expect("Could not convert to subxt_metadata::Metadata"); + + (types_for_spec, metadata, subxt_metadata) +} + +/// A representation of the shape of some type that we can compare across metadatas. 
+#[derive(PartialEq, Debug, Clone)] +enum Shape { + Array(Box, usize), + BitSequence( + scale_type_resolver::BitsStoreFormat, + scale_type_resolver::BitsOrderFormat, + ), + Compact(Box), + Composite(Vec, Vec<(Option, Shape)>), + Primitive(scale_type_resolver::Primitive), + Sequence(Vec, Box), + Tuple(Vec), + Variant(Vec, Vec), + // This is very important for performance; if we've already seen a variant at some path, + // we'll return just the variant path next time in this, to avoid duplicating lots of variants. + // This also eliminates recursion, since variants allow for it. + SeenVariant(Vec), +} + +#[derive(PartialEq, Debug, Clone)] +struct Variant { + index: u8, + name: String, + fields: Vec<(Option, Shape)>, +} + +impl Shape { + /// convert some modern type definition into a [`Shape`]. + fn from_modern_type(id: u32, types: &scale_info::PortableRegistry) -> Shape { + let mut seen_variants = BTreeSet::new(); + Shape::from_modern_type_inner(id, &mut seen_variants, types) + } + + fn from_modern_type_inner( + id: u32, + seen_variants: &mut BTreeSet>, + types: &scale_info::PortableRegistry, + ) -> Shape { + let visitor = + scale_type_resolver::visitor::new((seen_variants, types), |_, _| panic!("Unhandled")) + .visit_array(|(seen_variants, types), type_id, len| { + let inner = Shape::from_modern_type_inner(type_id, seen_variants, types); + Shape::Array(Box::new(inner), len) + }) + .visit_bit_sequence(|_, store, order| Shape::BitSequence(store, order)) + .visit_compact(|(seen_variants, types), type_id| { + let inner = Shape::from_modern_type_inner(type_id, seen_variants, types); + Shape::Compact(Box::new(inner)) + }) + .visit_composite(|(seen_variants, types), path, fields| { + let path = path.map(|p| p.to_owned()).collect(); + let inners = fields + .map(|field| { + let name = field.name.map(|n| n.to_owned()); + let inner = + Shape::from_modern_type_inner(field.id, seen_variants, types); + (name, inner) + }) + .collect(); + Shape::Composite(path, inners) + }) + 
.visit_primitive(|_types, prim| Shape::Primitive(prim)) + .visit_sequence(|(seen_variants, types), path, type_id| { + let path = path.map(|p| p.to_owned()).collect(); + let inner = Shape::from_modern_type_inner(type_id, seen_variants, types); + Shape::Sequence(path, Box::new(inner)) + }) + .visit_tuple(|(seen_variants, types), fields| { + let inners = fields + .map(|field| Shape::from_modern_type_inner(field, seen_variants, types)) + .collect(); + Shape::Tuple(inners) + }) + .visit_variant(|(seen_variants, types), path, variants| { + let path: Vec = path.map(|p| p.to_owned()).collect(); + // very important to avoid recursion and performance costs: + if !seen_variants.insert(path.clone()) { + return Shape::SeenVariant(path); + } + let variants = variants + .map(|v| Variant { + index: v.index, + name: v.name.to_owned(), + fields: v + .fields + .map(|field| { + let name = field.name.map(|n| n.to_owned()); + let inner = Shape::from_modern_type_inner( + field.id, + seen_variants, + types, + ); + (name, inner) + }) + .collect(), + }) + .collect(); + Shape::Variant(path, variants) + }) + .visit_not_found(|_types| { + panic!("PortableRegistry should not have a type which can't be found") + }); + + types.resolve_type(id, visitor).unwrap() + } + + /// convert some historic type definition into a [`Shape`]. 
+ fn from_legacy_type(name: &LookupName, types: &TypeRegistrySet<'_>) -> Shape { + let mut seen_variants = BTreeSet::new(); + Shape::from_legacy_type_inner(name.clone(), &mut seen_variants, types) + } + + fn from_legacy_type_inner( + id: LookupName, + seen_variants: &mut BTreeSet>, + types: &TypeRegistrySet<'_>, + ) -> Shape { + let visitor = + scale_type_resolver::visitor::new((seen_variants, types), |_, _| panic!("Unhandled")) + .visit_array(|(seen_variants, types), type_id, len| { + let inner = Shape::from_legacy_type_inner(type_id, seen_variants, types); + Shape::Array(Box::new(inner), len) + }) + .visit_bit_sequence(|_types, store, order| Shape::BitSequence(store, order)) + .visit_compact(|(seen_variants, types), type_id| { + let inner = Shape::from_legacy_type_inner(type_id, seen_variants, types); + Shape::Compact(Box::new(inner)) + }) + .visit_composite(|(seen_variants, types), path, fields| { + let path = path.map(|p| p.to_owned()).collect(); + let inners = fields + .map(|field| { + let name = field.name.map(|n| n.to_owned()); + let inner = + Shape::from_legacy_type_inner(field.id, seen_variants, types); + (name, inner) + }) + .collect(); + Shape::Composite(path, inners) + }) + .visit_primitive(|_types, prim| Shape::Primitive(prim)) + .visit_sequence(|(seen_variants, types), path, type_id| { + let path = path.map(|p| p.to_owned()).collect(); + let inner = Shape::from_legacy_type_inner(type_id, seen_variants, types); + Shape::Sequence(path, Box::new(inner)) + }) + .visit_tuple(|(seen_variants, types), fields| { + let inners = fields + .map(|field| Shape::from_legacy_type_inner(field, seen_variants, types)) + .collect(); + Shape::Tuple(inners) + }) + .visit_variant(|(seen_variants, types), path, variants| { + let path: Vec = path.map(|p| p.to_owned()).collect(); + // very important to avoid recursion and performance costs: + if !seen_variants.insert(path.clone()) { + return Shape::SeenVariant(path); + } + let variants = variants + .map(|v| Variant { + index: 
v.index, + name: v.name.to_owned(), + fields: v + .fields + .map(|field| { + let name = field.name.map(|n| n.to_owned()); + let inner = Shape::from_legacy_type_inner( + field.id, + seen_variants, + types, + ); + (name, inner) + }) + .collect(), + }) + .collect(); + Shape::Variant(path, variants) + }) + .visit_not_found(|(seen_variants, _)| { + // When we convert legacy to modern types, any types we don't find + // are replaced with empty variants (since we can't have dangling types + // in our new PortableRegistry). Do the same here so they compare equal. + Shape::from_legacy_type_inner( + LookupName::parse("special::Unknown").unwrap(), + seen_variants, + types, + ) + }); + + types.resolve_type(id, visitor).unwrap() + } +} + +// Go over all of the constants listed via frame-decode and check that our old +// and new metadatas both have identical output. +macro_rules! constants_eq { + ($name:ident, $version:literal, $version_path:ident) => { + #[test] + fn $name() { + let (old_types, old_md, new_md) = metadata_pair($version, test_opts()); + let RuntimeMetadata::$version_path(old_md) = old_md else { + panic!("Wrong version") + }; + + let old: Vec<_> = old_md + .constant_tuples() + .map(|(p, n)| old_md.constant_info(&p, &n).unwrap()) + .map(|c| { + ( + c.bytes.to_owned(), + Shape::from_legacy_type(&c.type_id, &old_types), + ) + }) + .collect(); + let new: Vec<_> = new_md + .constant_tuples() + .map(|(p, n)| new_md.constant_info(&p, &n).unwrap()) + .map(|c| { + ( + c.bytes.to_owned(), + Shape::from_modern_type(c.type_id, new_md.types()), + ) + }) + .collect(); + + assert_eq!(old, new); + } + }; +} + +constants_eq!(v9_constants_eq, 9, V9); +constants_eq!(v10_constants_eq, 10, V10); +constants_eq!(v11_constants_eq, 11, V11); +constants_eq!(v12_constants_eq, 12, V12); +constants_eq!(v13_constants_eq, 13, V13); + +/// Make sure all Runtime APIs are the same once translated. 
+#[test] +fn runtime_apis() { + for version in 9..=13 { + let (old_types, _old_md, new_md) = metadata_pair(version, test_opts()); + + let old: Vec<_> = old_types + .runtime_api_tuples() + .map(|(p, n)| { + old_types + .runtime_api_info(&p, &n) + .unwrap() + .map_ids(|id| Ok::<_, ()>(Shape::from_legacy_type(&id, &old_types))) + .unwrap() + }) + .collect(); + let new: Vec<_> = new_md + .runtime_api_tuples() + .map(|(p, n)| { + new_md + .runtime_api_info(&p, &n) + .unwrap() + .map_ids(|id| Ok::<_, ()>(Shape::from_modern_type(id, new_md.types()))) + .unwrap() + }) + .collect(); + + assert_eq!(old, new); + } +} + +macro_rules! storage_eq { + ($name:ident, $version:literal, $version_path:ident) => { + #[test] + fn $name() { + let (old_types, old_md, new_md) = metadata_pair($version, test_opts()); + let RuntimeMetadata::$version_path(old_md) = old_md else { + panic!("Wrong version") + }; + + let old: Vec<_> = old_md + .storage_tuples() + .map(|(p, n)| { + let info = old_md + .storage_info(&p, &n) + .unwrap() + .map_ids(|id| Ok::<_, ()>(Shape::from_legacy_type(&id, &old_types))) + .unwrap(); + (p.into_owned(), n.into_owned(), info) + }) + .collect(); + + let new: Vec<_> = new_md + .storage_tuples() + .map(|(p, n)| { + let info = new_md + .storage_info(&p, &n) + .unwrap() + .map_ids(|id| Ok::<_, ()>(Shape::from_modern_type(id, new_md.types()))) + .unwrap(); + (p.into_owned(), n.into_owned(), info) + }) + .collect(); + + if old.len() != new.len() { + panic!("Storage entries for version 9 metadata differ in length"); + } + + for (old, new) in old.into_iter().zip(new.into_iter()) { + assert_eq!((&old.0, &old.1), (&new.0, &new.1), "Storage entry mismatch"); + assert_eq!( + old.2, new.2, + "Storage entry {}.{} does not match!", + old.0, old.1 + ); + } + } + }; +} + +storage_eq!(v9_storage_eq, 9, V9); +storage_eq!(v10_storage_eq, 10, V10); +storage_eq!(v11_storage_eq, 11, V11); +storage_eq!(v12_storage_eq, 12, V12); +storage_eq!(v13_storage_eq, 13, V13); + +#[test] +fn 
builtin_call() { + for version in 9..=13 { + let (old_types, _old_md, new_md) = metadata_pair(version, test_opts()); + + let old = Shape::from_legacy_type(&LookupName::parse("builtin::Call").unwrap(), &old_types); + let new = Shape::from_modern_type(new_md.outer_enums.call_enum_ty, new_md.types()); + assert_eq!(old, new, "Call types do not match in metadata V{version}!"); + } +} + +#[test] +fn builtin_error() { + for version in 9..=13 { + let (old_types, _old_md, new_md) = metadata_pair(version, test_opts()); + + let old = + Shape::from_legacy_type(&LookupName::parse("builtin::Error").unwrap(), &old_types); + let new = Shape::from_modern_type(new_md.outer_enums.error_enum_ty, new_md.types()); + assert_eq!(old, new, "Error types do not match in metadata V{version}!"); + } +} + +#[test] +fn builtin_event() { + for version in 9..=13 { + let (old_types, _old_md, new_md) = metadata_pair(version, test_opts()); + + let old = + Shape::from_legacy_type(&LookupName::parse("builtin::Event").unwrap(), &old_types); + let new = Shape::from_modern_type(new_md.outer_enums.event_enum_ty, new_md.types()); + assert_eq!(old, new, "Event types do not match in metadata V{version}!"); + } +} + +#[test] +fn codegen_works() { + for version in 9..=13 { + // We need to do this against `subxt_codegen::Metadata` and so cannot re-use our + // test functions for it. This is because the compiler sees some difference between + // `subxt_codegen::Metadata` and `crate::Metadata` even though they should be identical.
+ let new_md = { + let (spec_version, metadata) = legacy_kusama_metadata(version); + let types = kusama_types(); + + let types_for_spec = { + let mut types_for_spec = types.for_spec_version(spec_version).to_owned(); + let extended_types = + frame_decode::helpers::type_registry_from_metadata_any(&metadata).unwrap(); + types_for_spec.prepend(extended_types); + types_for_spec + }; + + match &metadata { + RuntimeMetadata::V9(m) => subxt_codegen::Metadata::from_v9(m, &types_for_spec), + RuntimeMetadata::V10(m) => subxt_codegen::Metadata::from_v10(m, &types_for_spec), + RuntimeMetadata::V11(m) => subxt_codegen::Metadata::from_v11(m, &types_for_spec), + RuntimeMetadata::V12(m) => subxt_codegen::Metadata::from_v12(m, &types_for_spec), + RuntimeMetadata::V13(m) => subxt_codegen::Metadata::from_v13(m, &types_for_spec), + _ => panic!("Metadata version {} not expected", metadata.version()), + } + .expect("Could not convert to subxt_metadata::Metadata") + }; + + // We only test that generation succeeds without any errors, not necessarily that it's 100% useful: + let codegen = subxt_codegen::CodegenBuilder::new(); + let _ = codegen + .generate(new_md) + .map_err(|e| e.into_compile_error()) + .unwrap_or_else(|e| panic!("Codegen failed for metadata V{version}: {e}")); + } +} diff --git a/metadata/src/from/mod.rs b/metadata/src/from/mod.rs index 16e0f816d1..5a5ed0c55b 100644 --- a/metadata/src/from/mod.rs +++ b/metadata/src/from/mod.rs @@ -8,6 +8,10 @@ mod v14; mod v15; mod v16; +/// Legacy translation hidden behind the corresponding feature flag. +#[cfg(feature = "legacy")] +pub mod legacy; + /// The metadata versions that we support converting into [`crate::Metadata`]. /// These are ordest from highest to lowest, so that the metadata we'd want to /// pick first is first in the array. 
diff --git a/metadata/src/from/v14.rs b/metadata/src/from/v14.rs index e8e3fd90a8..ee31cd8537 100644 --- a/metadata/src/from/v14.rs +++ b/metadata/src/from/v14.rs @@ -73,7 +73,9 @@ impl TryFrom for Metadata { name.clone(), PalletMetadataInner { name: name.clone(), - index: p.index, + call_index: p.index, + event_index: p.index, + error_index: p.index, storage, call_ty: p.calls.as_ref().map(|c| c.ty.id), call_variant_index, @@ -99,7 +101,9 @@ impl TryFrom for Metadata { Ok(Metadata { types: m.types, pallets, - pallets_by_index, + pallets_by_call_index: pallets_by_index.clone(), + pallets_by_error_index: pallets_by_index.clone(), + pallets_by_event_index: pallets_by_index, extrinsic: from_extrinsic_metadata(m.extrinsic, missing_extrinsic_type_ids), dispatch_error_ty, outer_enums: OuterEnumsMetadata { diff --git a/metadata/src/from/v15.rs b/metadata/src/from/v15.rs index 157c01a7a0..0a170c1cde 100644 --- a/metadata/src/from/v15.rs +++ b/metadata/src/from/v15.rs @@ -69,7 +69,9 @@ impl TryFrom for Metadata { name.clone(), PalletMetadataInner { name, - index: p.index, + call_index: p.index, + event_index: p.index, + error_index: p.index, storage, call_ty: p.calls.as_ref().map(|c| c.ty.id), call_variant_index, @@ -126,7 +128,9 @@ impl TryFrom for Metadata { Ok(Metadata { types: m.types, pallets, - pallets_by_index, + pallets_by_call_index: pallets_by_index.clone(), + pallets_by_error_index: pallets_by_index.clone(), + pallets_by_event_index: pallets_by_index, extrinsic: from_extrinsic_metadata(m.extrinsic), dispatch_error_ty, apis, diff --git a/metadata/src/from/v16.rs b/metadata/src/from/v16.rs index 83301f70b3..8369620264 100644 --- a/metadata/src/from/v16.rs +++ b/metadata/src/from/v16.rs @@ -88,7 +88,9 @@ impl TryFrom for Metadata { name.clone(), PalletMetadataInner { name, - index: p.index, + call_index: p.index, + event_index: p.index, + error_index: p.index, storage, call_ty: p.calls.as_ref().map(|c| c.ty.id), call_variant_index, @@ -157,7 +159,9 @@ impl TryFrom 
for Metadata { Ok(Metadata { types: m.types, pallets, - pallets_by_index, + pallets_by_call_index: pallets_by_index.clone(), + pallets_by_error_index: pallets_by_index.clone(), + pallets_by_event_index: pallets_by_index, extrinsic: from_extrinsic_metadata(m.extrinsic), dispatch_error_ty, apis, diff --git a/metadata/src/lib.rs b/metadata/src/lib.rs index b317050afc..53b1402e08 100644 --- a/metadata/src/lib.rs +++ b/metadata/src/lib.rs @@ -26,15 +26,19 @@ use alloc::borrow::Cow; use alloc::collections::BTreeMap; use alloc::string::{String, ToString}; use alloc::vec::Vec; -use frame_decode::constants::{ConstantInfo, ConstantInfoError, Entry}; +use frame_decode::constants::{ConstantEntry, ConstantInfo, ConstantInfoError}; use frame_decode::custom_values::{CustomValue, CustomValueInfo, CustomValueInfoError}; use frame_decode::extrinsics::{ ExtrinsicCallInfo, ExtrinsicExtensionInfo, ExtrinsicInfoArg, ExtrinsicInfoError, ExtrinsicSignatureInfo, }; -use frame_decode::runtime_apis::{RuntimeApiInfo, RuntimeApiInfoError, RuntimeApiInput}; -use frame_decode::storage::{StorageInfo, StorageInfoError, StorageKeyInfo}; -use frame_decode::view_functions::{ViewFunctionInfo, ViewFunctionInfoError, ViewFunctionInput}; +use frame_decode::runtime_apis::{ + RuntimeApiEntry, RuntimeApiInfo, RuntimeApiInfoError, RuntimeApiInput, +}; +use frame_decode::storage::{StorageEntry, StorageInfo, StorageInfoError, StorageKeyInfo}; +use frame_decode::view_functions::{ + ViewFunctionEntry, ViewFunctionInfo, ViewFunctionInfoError, ViewFunctionInput, +}; use hashbrown::HashMap; use scale_info::{PortableRegistry, Variant, form::PortableForm}; @@ -49,6 +53,9 @@ pub use from::SUPPORTED_METADATA_VERSIONS; pub use from::TryFromError; pub use utils::validation::MetadataHasher; +#[cfg(feature = "legacy")] +pub use from::legacy::Error as LegacyFromError; + type CustomMetadataInner = frame_metadata::v15::CustomMetadata; /// Node metadata. 
This can be constructed by providing some compatible [`frame_metadata`] @@ -60,8 +67,18 @@ pub struct Metadata { types: PortableRegistry, /// Metadata of all the pallets. pallets: OrderedMap, - /// Find the location in the pallet Vec by pallet index. - pallets_by_index: HashMap, + /// Find the pallet for a given call index. + pallets_by_call_index: HashMap, + /// Find the pallet for a given event index. + /// + /// for modern metadatas, this is the same as pallets_by_call_index, + /// but for old metadatas this can vary. + pallets_by_event_index: HashMap, + /// Find the pallet for a given error index. + /// + /// for modern metadatas, this is the same as pallets_by_call_index, + /// but for old metadatas this can vary. + pallets_by_error_index: HashMap, /// Metadata of the extrinsic. extrinsic: ExtrinsicMetadata, /// The types of the outer enums. @@ -84,7 +101,7 @@ impl frame_decode::extrinsics::ExtrinsicTypeInfo for Metadata { pallet_index: u8, call_index: u8, ) -> Result, ExtrinsicInfoError<'_>> { - let pallet = self.pallet_by_index(pallet_index).ok_or({ + let pallet = self.pallet_by_call_index(pallet_index).ok_or({ ExtrinsicInfoError::PalletNotFound { index: pallet_index, } @@ -179,16 +196,17 @@ impl frame_decode::storage::StorageTypeInfo for Metadata { Ok(info) } - - fn storage_entries(&self) -> impl Iterator> { +} +impl frame_decode::storage::StorageEntryInfo for Metadata { + fn storage_entries(&self) -> impl Iterator> { self.pallets().flat_map(|pallet| { let pallet_name = pallet.name(); - let pallet_iter = core::iter::once(Entry::In(pallet_name.into())); + let pallet_iter = core::iter::once(StorageEntry::In(pallet_name.into())); let entries_iter = pallet.storage().into_iter().flat_map(|storage| { storage .entries() .iter() - .map(|entry| Entry::Name(entry.name().into())) + .map(|entry| StorageEntry::Name(entry.name().into())) }); pallet_iter.chain(entries_iter) @@ -223,14 +241,15 @@ impl frame_decode::runtime_apis::RuntimeApiTypeInfo for Metadata { Ok(info) } 
- - fn runtime_apis(&self) -> impl Iterator> { +} +impl frame_decode::runtime_apis::RuntimeApiEntryInfo for Metadata { + fn runtime_api_entries(&self) -> impl Iterator> { self.runtime_api_traits().flat_map(|api_trait| { let trait_name = api_trait.name(); - let trait_iter = core::iter::once(Entry::In(trait_name.into())); + let trait_iter = core::iter::once(RuntimeApiEntry::In(trait_name.into())); let method_iter = api_trait .methods() - .map(|method| Entry::Name(method.name().into())); + .map(|method| RuntimeApiEntry::Name(method.name().into())); trait_iter.chain(method_iter) }) @@ -264,14 +283,15 @@ impl frame_decode::view_functions::ViewFunctionTypeInfo for Metadata { Ok(info) } - - fn view_functions(&self) -> impl Iterator> { +} +impl frame_decode::view_functions::ViewFunctionEntryInfo for Metadata { + fn view_function_entries(&self) -> impl Iterator> { self.pallets().flat_map(|pallet| { let pallet_name = pallet.name(); - let pallet_iter = core::iter::once(Entry::In(pallet_name.into())); + let pallet_iter = core::iter::once(ViewFunctionEntry::In(pallet_name.into())); let fn_iter = pallet .view_functions() - .map(|function| Entry::Name(function.name().into())); + .map(|function| ViewFunctionEntry::Name(function.name().into())); pallet_iter.chain(fn_iter) }) @@ -304,14 +324,15 @@ impl frame_decode::constants::ConstantTypeInfo for Metadata { Ok(info) } - - fn constants(&self) -> impl Iterator> { +} +impl frame_decode::constants::ConstantEntryInfo for Metadata { + fn constant_entries(&self) -> impl Iterator> { self.pallets().flat_map(|pallet| { let pallet_name = pallet.name(); - let pallet_iter = core::iter::once(Entry::In(pallet_name.into())); + let pallet_iter = core::iter::once(ConstantEntry::In(pallet_name.into())); let constant_iter = pallet .constants() - .map(|constant| Entry::Name(constant.name().into())); + .map(|constant| ConstantEntry::Name(constant.name().into())); pallet_iter.chain(constant_iter) }) @@ -338,7 +359,8 @@ impl 
frame_decode::custom_values::CustomValueTypeInfo for Metadata { Ok(info) } - +} +impl frame_decode::custom_values::CustomValueEntryInfo for Metadata { fn custom_values(&self) -> impl Iterator> { self.custom.map.keys().map(|name| CustomValue { name: Cow::Borrowed(name), @@ -347,11 +369,65 @@ impl frame_decode::custom_values::CustomValueTypeInfo for Metadata { } impl Metadata { - /// This is essentiall an alias for `::decode(&mut bytes)` + /// This is essentially an alias for `::decode(&mut bytes)` pub fn decode_from(mut bytes: &[u8]) -> Result { ::decode(&mut bytes) } + /// Convert V13 metadata into [`Metadata`], given the necessary extra type information. + #[cfg(feature = "legacy")] + pub fn from_v13( + metadata: &frame_metadata::v13::RuntimeMetadataV13, + types: &scale_info_legacy::TypeRegistrySet<'_>, + ) -> Result { + from::legacy::from_v13(metadata, types, from::legacy::Opts::compat()) + } + + /// Convert V12 metadata into [`Metadata`], given the necessary extra type information. + #[cfg(feature = "legacy")] + pub fn from_v12( + metadata: &frame_metadata::v12::RuntimeMetadataV12, + types: &scale_info_legacy::TypeRegistrySet<'_>, + ) -> Result { + from::legacy::from_v12(metadata, types, from::legacy::Opts::compat()) + } + + /// Convert V11 metadata into [`Metadata`], given the necessary extra type information. + #[cfg(feature = "legacy")] + pub fn from_v11( + metadata: &frame_metadata::v11::RuntimeMetadataV11, + types: &scale_info_legacy::TypeRegistrySet<'_>, + ) -> Result { + from::legacy::from_v11(metadata, types, from::legacy::Opts::compat()) + } + + /// Convert V10 metadata into [`Metadata`], given the necessary extra type information.
+ #[cfg(feature = "legacy")] + pub fn from_v10( + metadata: &frame_metadata::v10::RuntimeMetadataV10, + types: &scale_info_legacy::TypeRegistrySet<'_>, + ) -> Result { + from::legacy::from_v10(metadata, types, from::legacy::Opts::compat()) + } + + /// Convert V9 metadata into [`Metadata`], given the necessary extra type information. + #[cfg(feature = "legacy")] + pub fn from_v9( + metadata: &frame_metadata::v9::RuntimeMetadataV9, + types: &scale_info_legacy::TypeRegistrySet<'_>, + ) -> Result { + from::legacy::from_v9(metadata, types, from::legacy::Opts::compat()) + } + + /// Convert V8 metadata into [`Metadata`], given the necessary extra type information. + #[cfg(feature = "legacy")] + pub fn from_v8( + metadata: &frame_metadata::v8::RuntimeMetadataV8, + types: &scale_info_legacy::TypeRegistrySet<'_>, + ) -> Result { + from::legacy::from_v8(metadata, types, from::legacy::Opts::compat()) + } + /// Access the underlying type registry. pub fn types(&self) -> &PortableRegistry { &self.types @@ -385,10 +461,36 @@ impl Metadata { }) } - /// Access a pallet given its encoded variant index. 
- pub fn pallet_by_index(&self, variant_index: u8) -> Option> { + /// Access a pallet given some call/extrinsic pallet index byte + pub fn pallet_by_call_index(&self, variant_index: u8) -> Option> { let inner = self - .pallets_by_index + .pallets_by_call_index + .get(&variant_index) + .and_then(|i| self.pallets.get_by_index(*i))?; + + Some(PalletMetadata { + inner, + types: self.types(), + }) + } + + /// Access a pallet given some event pallet index byte + pub fn pallet_by_event_index(&self, variant_index: u8) -> Option> { + let inner = self + .pallets_by_event_index + .get(&variant_index) + .and_then(|i| self.pallets.get_by_index(*i))?; + + Some(PalletMetadata { + inner, + types: self.types(), + }) + } + + /// Access a pallet given some error pallet index byte + pub fn pallet_by_error_index(&self, variant_index: u8) -> Option> { + let inner = self + .pallets_by_error_index .get(&variant_index) .and_then(|i| self.pallets.get_by_index(*i))?; @@ -458,9 +560,19 @@ impl<'a> PalletMetadata<'a> { &self.inner.name } - /// The pallet index. - pub fn index(&self) -> u8 { - self.inner.index + /// The index to use for calls in this pallet. + pub fn call_index(&self) -> u8 { + self.inner.call_index + } + + /// The index to use for events in this pallet. + pub fn event_index(&self) -> u8 { + self.inner.event_index + } + + /// The index to use for errors in this pallet. + pub fn error_index(&self) -> u8 { + self.inner.error_index } /// The pallet docs. @@ -613,8 +725,18 @@ impl<'a> PalletMetadata<'a> { struct PalletMetadataInner { /// Pallet name. name: String, - /// Pallet index. - index: u8, + /// The index for calls in the pallet. + call_index: u8, + /// The index for events in the pallet. + /// + /// This is the same as `call_index` for modern metadatas, + /// but can be different for older metadatas (pre-V12). + event_index: u8, + /// The index for errors in the pallet. 
+ /// + /// This is the same as `call_index` for modern metadatas, + /// but can be different for older metadatas (pre-V12). + error_index: u8, /// Pallet storage metadata. storage: Option, /// Type ID for the pallet Call enum. diff --git a/subxt/src/error/dispatch_error.rs b/subxt/src/error/dispatch_error.rs index a98da908ff..4b27e63dfd 100644 --- a/subxt/src/error/dispatch_error.rs +++ b/subxt/src/error/dispatch_error.rs @@ -169,11 +169,12 @@ impl std::fmt::Display for ModuleError { impl ModuleError { /// Return more details about this error. pub fn details(&self) -> Result, ModuleErrorDetailsError> { - let pallet = self.metadata.pallet_by_index(self.pallet_index()).ok_or( - ModuleErrorDetailsError::PalletNotFound { + let pallet = self + .metadata + .pallet_by_error_index(self.pallet_index()) + .ok_or(ModuleErrorDetailsError::PalletNotFound { pallet_index: self.pallet_index(), - }, - )?; + })?; let variant = pallet .error_variant_by_index(self.error_index())