Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions src/sdt/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,7 @@ impl<T: Copy, const MIN_REVISION: u8> ExtendedField<T, MIN_REVISION> {
#[repr(C, packed)]
pub struct SdtHeader {
pub signature: Signature,
// TODO: Make sure this and other fields are interpreted as little-endian on big-endian machine.
Copy link
Copy Markdown
Member

@IsaacWoods IsaacWoods Mar 27, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I am not aware of any big-endian archs with ACPI support, but the spec does specify all values are little-endian on all machines. If we do want to tackle this / care about it, we likely need a better abstraction and would need to make a pretty substantial number of changes to tables + potentially the interpreter. Does this need to be handled for this PR?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ah no, not in this PR - this was a bit of a lazy comment from me, sorry!

I was aiming to just have a reminder that additional work is needed for big-endian support, since I think it's not explicitly documented anywhere. I plopped it in whilst thinking about how to load table headers from byte-slices - transmute is obviously useful on little-endian machines, but not so good on big-endian ones.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

(as you say though - big-endian machines running ACPI don't seem to be especially common outside of weird qemu testing setups)

pub length: u32,
pub revision: u8,
pub checksum: u8,
Expand Down
15 changes: 15 additions & 0 deletions tests/multi_table.asl
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
/* This file is a test for `aml_tester` rather than of the parser itself.
 * Can `aml_tester` cope with multiple tables?
 */
// First table: a DSDT that defines a single one-byte field `A` backed by a
// region of system memory.
DefinitionBlock("", "DSDT", 1, "RSACPI", "BUFFLD", 1) {
OperationRegion(MEM, SystemMemory, 0x40000, 0x1000)
Field(MEM, ByteAcc, NoLock, Preserve) {
A, 8
}
}
// Second table: an SSDT that defines an analogous field `B` in a separate
// memory region. The harness is expected to load both tables from this file.
DefinitionBlock("", "SSDT", 1, "RSACPI", "BUFFLD", 1) {
OperationRegion(MEMB, SystemMemory, 0x50000, 0x1000)
Field(MEMB, ByteAcc, NoLock, Preserve) {
B, 8
}
}
4 changes: 3 additions & 1 deletion tests/test_infra/mod.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
use acpi::Handler;
use aml_test_tools::{
RunTestResult,
TestResult,
handlers::logging_handler::LoggingHandler,
new_interpreter,
run_test_for_string,
Expand All @@ -13,5 +14,6 @@ pub fn run_aml_test(asl: &'static str, handler: impl Handler) {
let logged_handler = LoggingHandler::new(handler);
let interpreter = new_interpreter(logged_handler);

assert!(matches!(run_test_for_string(asl, interpreter), RunTestResult::Pass(_)));
let result = run_test_for_string(asl, interpreter);
assert!(matches!(result, RunTestResult::Pass(_)), "Test failed with: {:?}", TestResult::from(&result));
}
31 changes: 25 additions & 6 deletions tools/aml_test_tools/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,15 @@
//! As always, feel free to offer PRs for improvements.

pub mod handlers;
pub mod tables;

use crate::tables::{TestAcpiTable, bytes_to_tables};
use acpi::{
Handler,
PhysicalMapping,
address::MappedGas,
aml::{AmlError, Interpreter, namespace::AmlName, object::Object},
sdt::Signature,
};
use log::{error, trace};
use std::{
Expand Down Expand Up @@ -99,6 +102,8 @@ pub enum TestFailureReason {
CompileFail,
/// Some error occurred attempting to read or write the test file.
FilesystemErr,
/// There was a problem interpreting the basic structure of the tables in the AML file.
TablesErr,
/// Our interpreter failed to parse or execute the resulting AML.
ParseFail(AmlError),
}
Expand Down Expand Up @@ -278,10 +283,11 @@ where
let mut contents = Vec::new();
file.read_to_end(&mut contents).unwrap();

const AML_TABLE_HEADER_LENGTH: usize = 36;
let stream = &contents[AML_TABLE_HEADER_LENGTH..];
let Ok(tables) = bytes_to_tables(&contents) else {
return RunTestResult::Failed(interpreter, TestFailureReason::TablesErr);
};

run_test(stream, interpreter)
run_test(tables, interpreter)
}

/// Internal function to create a temporary script file from an ASL string, plus to calculate the
Expand All @@ -307,19 +313,32 @@ fn create_script_file(asl: &'static str) -> TempScriptFile {
///
/// Arguments:
///
/// * `stream`: A slice containing the AML bytecode to test.
/// * `tables`: A Vec of tables to test. The DSDT will be loaded first, if found. Other tables will
/// be loaded in the order they appear in the Vec.
/// * `interpreter`: The interpreter to test with. The interpreter is consumed to maintain unwind
/// safety - if the interpreter panics, the caller should not be able to see the interpreter in
/// an inconsistent state.
pub fn run_test<T>(stream: &[u8], interpreter: Interpreter<T>) -> RunTestResult<T>
pub fn run_test<T>(tables: Vec<TestAcpiTable>, interpreter: Interpreter<T>) -> RunTestResult<T>
where
T: Handler,
{
// Without `AssertUnwindSafe`, the following code will not build as the Interpreter is not
// unwind safe. To avoid the caller being able to see an inconsistent Interpreter, if a panic
// occurs we drop the Interpreter, forcing the caller to create a new one.
let result = catch_unwind(AssertUnwindSafe(|| -> Result<(), AmlError> {
interpreter.load_table(stream)?;
// Load the DSDT table first, if there is one.
if let Some(dsdt) = tables.iter().find(|t| t.header().signature == Signature::DSDT) {
trace!("Loading table: DSDT");
interpreter.load_table(dsdt.content())?;
}
let others = tables.iter().filter(|t| t.header().signature != Signature::DSDT);

for t in others {
trace!("Loading table: {:?}", t.header().signature);
interpreter.load_table(t.content())?;
}

trace!("All tables loaded");

if let Some(result) = interpreter.evaluate_if_present(AmlName::from_str("\\MAIN").unwrap(), vec![])? {
match *result {
Expand Down
62 changes: 62 additions & 0 deletions tools/aml_test_tools/src/tables.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
//! A basic interpreter to extract ACPI tables from byte-slices.

use acpi::sdt::SdtHeader;
use std::mem::transmute;

const AML_TABLE_HEADER_LENGTH: usize = 36;

/// An ACPI table separated into header and content.
///
/// This is not provided in the main crate as it is unnecessary - but it is useful for testing.
pub struct TestAcpiTable {
/// The fixed-size (36-byte) SDT header parsed from the front of the table.
header: SdtHeader,
/// The table's payload: the bytes that follow the header, up to the length the
/// header reports. Kept private (with `header`) so the two can't drift out of sync.
content: Vec<u8>,
}

impl TryFrom<&[u8]> for TestAcpiTable {
    type Error = &'static str;

    /// Parses a single table from the front of `bytes`, splitting it into header and content.
    ///
    /// Returns an error (rather than panicking) if the buffer is too short to contain a
    /// header, or if the length the header reports is inconsistent with the buffer.
    fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
        if bytes.len() < AML_TABLE_HEADER_LENGTH {
            return Err("Buffer shorter than table header");
        }

        let mut header_bytes: [u8; AML_TABLE_HEADER_LENGTH] = [0; AML_TABLE_HEADER_LENGTH];
        header_bytes.copy_from_slice(&bytes[..AML_TABLE_HEADER_LENGTH]);
        // SAFETY: `transmute` statically requires the array and `SdtHeader` to be the same
        // size, and the `#[repr(C, packed)]` header is plain-old-data, so any byte pattern is
        // a valid value. NOTE(review): this assumes a little-endian host, since ACPI stores
        // all multi-byte values little-endian.
        let header: SdtHeader = unsafe { transmute(header_bytes) };

        let length = header.length as usize;
        if length < AML_TABLE_HEADER_LENGTH {
            return Err("AML table header reported length too short");
        }
        // Previously a truncated buffer would cause the slice below to panic; turn it into a
        // recoverable error instead so malformed test input is reported cleanly.
        if length > bytes.len() {
            return Err("AML table header reported length longer than the buffer");
        }

        let content = bytes[AML_TABLE_HEADER_LENGTH..length].to_vec();

        Ok(Self { header, content })
    }
}

/// Read-only access to the table's two parts. No mutators are provided, so `content` and
/// `header` can never get out of sync with one another.
impl TestAcpiTable {
    /// The table's payload: the bytes following the fixed-size header.
    pub fn content(&self) -> &[u8] {
        self.content.as_slice()
    }

    /// The table's parsed SDT header.
    pub fn header(&self) -> &SdtHeader {
        &self.header
    }
}

/// Construct a Vec of AML tables from a slice of bytes.
///
/// Tables are assumed to be laid out back-to-back in the slice; each header's `length` field
/// tells us where the next table begins. Fails if any table cannot be parsed.
pub fn bytes_to_tables(bytes: &[u8]) -> Result<Vec<TestAcpiTable>, &'static str> {
    let mut parsed = Vec::new();
    let mut remaining = bytes;

    while !remaining.is_empty() {
        let table = TestAcpiTable::try_from(remaining)?;
        // A successful parse guarantees the reported length fits within `remaining`, so this
        // slice cannot panic.
        remaining = &remaining[table.header.length as usize..];
        parsed.push(table);
    }

    Ok(parsed)
}
Loading