Avoid bitwise moving of CPP Module #21

Merged — 6 commits, merged on Aug 16, 2024
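
In short: a C++ object is not, in general, safe to relocate with a plain bitwise copy the way Rust moves values, so the FFI layer no longer returns `torch::executor::Module` by value. `Module_new` now heap-allocates the object with `new` and hands Rust a stable pointer, and `Module_destructor` tears it down in place. A minimal sketch of that ownership pattern, with hypothetical `ffi_module_new` / `ffi_module_free` shims standing in for the real declarations further down (the wrapper type is illustrative, not the crate's actual `executorch::module::Module`):

```rust
use std::ffi::{c_char, CStr};

// Illustrative opaque type and FFI shims: the real bindings go through the
// bindgen-generated `torch::executor::Module` and the `Module_new` /
// `Module_destructor` functions declared in api_utils.hpp below, which take
// additional parameters (mlock config, event tracer).
#[repr(C)]
struct CppModule {
    _private: [u8; 0], // opaque: Rust never relocates the C++ object itself
}

extern "C" {
    fn ffi_module_new(file_path: *const c_char) -> *mut CppModule;
    fn ffi_module_free(module: *mut CppModule);
}

/// Owning handle: only this pointer is moved on the Rust side; the C++
/// `Module` stays at the address `new` gave it for its whole lifetime.
struct ModuleHandle(*mut CppModule);

impl ModuleHandle {
    fn new(path: &CStr) -> Self {
        ModuleHandle(unsafe { ffi_module_new(path.as_ptr()) })
    }
}

impl Drop for ModuleHandle {
    fn drop(&mut self) {
        unsafe { ffi_module_free(self.0) }
    }
}
```

The Rust value that gets moved around is just the pointer, so the C++ object's non-trivial move/copy semantics are never bypassed.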
8 changes: 4 additions & 4 deletions .github/workflows/develop.yaml
@@ -63,11 +63,11 @@ jobs:
run: cargo test --workspace --all-features

############ Examples ############
- name: Run 'hello world add' example
# python export_model.py
run: cargo run
working-directory: examples/hello_world_add
- name: Run 'hello world add no_std' example
# python export_model.py
run: cargo run
working-directory: examples/hello_world_add_no_std
# - name: Run 'hello world add' example
# # python export_model.py
# run: cargo run
# working-directory: examples/hello_world_add
8 changes: 6 additions & 2 deletions examples/hello_world_add/src/main.rs
@@ -1,5 +1,7 @@
#![deny(warnings)]

use std::path::PathBuf;

use executorch::evalue::{EValue, Tag};
use executorch::module::Module;
use executorch::tensor::{Array, Tensor};
@@ -12,8 +14,10 @@ fn main() {

executorch::platform::pal_init();

let mut module = Module::new("model.pte", None);

let mut module = Module::new(
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("model.pte"),
None,
);
let input_array1 = Array::new(array![1.0_f32]);
let input_tensor1 = input_array1.to_tensor_impl();
let input_evalue1 = EValue::from_tensor(Tensor::new(&input_tensor1));
7 changes: 4 additions & 3 deletions examples/hello_world_add_no_std/src/main.rs
@@ -17,10 +17,11 @@ fn main() {

executorch::platform::pal_init();

let memory_allocator =
MemoryAllocator::new(unsafe { &mut *core::ptr::addr_of_mut!(MEMORY_ALLOCATOR_BUF) });
// Safety: We are the main function, no other function accesses the buffer
let buffer = unsafe { &mut *core::ptr::addr_of_mut!(MEMORY_ALLOCATOR_BUF) };
let memory_allocator = MemoryAllocator::new(buffer);

let file_data_loader = FileDataLoader::from_cstr(cstr::cstr!(b"model.pte"), None).unwrap();
let file_data_loader = FileDataLoader::from_path_cstr(cstr::cstr!(b"model.pte"), None).unwrap();

let program = Program::load(
&file_data_loader,
134 changes: 69 additions & 65 deletions executorch-sys/cpp/executorch_rs_ext/api_utils.cpp
@@ -6,57 +6,57 @@

namespace executorch_rs
{
namespace
{
template <typename T>
struct ManuallyDrop
{
union
{
T value;
};
ManuallyDrop(T &&value) : value(std::move(value)) {}
~ManuallyDrop() {}
#if defined(EXECUTORCH_RS_STD)
template <typename T>
Vec<T> crate_Vec(std::vector<T> &&vec)
{
size_t len = vec.size();
T *arr = new T[len];
std::move(vec.begin(), vec.end(), arr);
return Vec<T>{
.data = arr,
.len = len,
.cap = len,
};
}

template <typename T>
RawVec<T> crate_RawVec(std::vector<T> &&vec)
{
auto vec2 = ManuallyDrop<std::vector<T>>(std::move(vec));
return RawVec<T>{
.data = vec2.value.data(),
.len = vec2.value.size(),
.cap = vec2.value.capacity(),
};
}
#define VEC_DESTRUCTOR_IMPL(T, name) \
void Vec_##name##_destructor(Vec<T> *vec) \
{ \
delete[] vec->data; \
}

static_assert(sizeof(Result_i64) == sizeof(torch::executor::Result<int64_t>), "Result_i64 size mismatch");
// static_assert(offsetof(Result_i64, value_) == offsetof(torch::executor::Result<int64_t>, value_), "Result_i64 value_ offset mismatch");
// static_assert(offsetof(Result_i64, error_) == offsetof(torch::executor::Result<int64_t>, error_), "Result_i64 error_ offset mismatch");
// static_assert(offsetof(Result_i64, hasValue_) == offsetof(torch::executor::Result<int64_t>, hasValue_), "Result_i64 hasValue_ offset mismatch");
Result_i64 crate_Result_i64(const torch::executor::Result<int64_t> &result)
{
Result_i64 result2{
.error_ = torch::executor::Error::Ok,
.hasValue_ = false,
};
memcpy(&result2, &result, sizeof(Result_i64));
return result2;
}
VEC_DESTRUCTOR_IMPL(char, char)
VEC_DESTRUCTOR_IMPL(Vec<char>, Vec_char)
VEC_DESTRUCTOR_IMPL(torch::executor::EValue, EValue)
#endif

static_assert(sizeof(Result_MethodMeta) == sizeof(torch::executor::Result<torch::executor::MethodMeta>), "Result_MethodMeta size mismatch");
// static_assert(offsetof(Result_MethodMeta, value_) == offsetof(torch::executor::Result<torch::executor::MethodMeta>, value_), "Result_MethodMeta value_ offset mismatch");
// static_assert(offsetof(Result_MethodMeta, error_) == offsetof(torch::executor::Result<torch::executor::MethodMeta>, error_), "Result_MethodMeta error_ offset mismatch");
// static_assert(offsetof(Result_MethodMeta, hasValue_) == offsetof(torch::executor::Result<torch::executor::MethodMeta>, hasValue_), "Result_MethodMeta hasValue_ offset mismatch");
Result_MethodMeta crate_Result_MethodMeta(const torch::executor::Result<torch::executor::MethodMeta> &result)
{
Result_MethodMeta result2{
.error_ = torch::executor::Error::Ok,
.hasValue_ = false,
};
memcpy(&result2, &result, sizeof(Result_MethodMeta));
return result2;
}
static_assert(sizeof(Result_i64) == sizeof(torch::executor::Result<int64_t>), "Result_i64 size mismatch");
// static_assert(offsetof(Result_i64, value_) == offsetof(torch::executor::Result<int64_t>, value_), "Result_i64 value_ offset mismatch");
// static_assert(offsetof(Result_i64, error_) == offsetof(torch::executor::Result<int64_t>, error_), "Result_i64 error_ offset mismatch");
// static_assert(offsetof(Result_i64, hasValue_) == offsetof(torch::executor::Result<int64_t>, hasValue_), "Result_i64 hasValue_ offset mismatch");
Result_i64 crate_Result_i64(const torch::executor::Result<int64_t> &result)
{
Result_i64 result2{
.error_ = torch::executor::Error::Ok,
.hasValue_ = false,
};
memcpy(&result2, &result, sizeof(Result_i64));
return result2;
}

static_assert(sizeof(Result_MethodMeta) == sizeof(torch::executor::Result<torch::executor::MethodMeta>), "Result_MethodMeta size mismatch");
// static_assert(offsetof(Result_MethodMeta, value_) == offsetof(torch::executor::Result<torch::executor::MethodMeta>, value_), "Result_MethodMeta value_ offset mismatch");
// static_assert(offsetof(Result_MethodMeta, error_) == offsetof(torch::executor::Result<torch::executor::MethodMeta>, error_), "Result_MethodMeta error_ offset mismatch");
// static_assert(offsetof(Result_MethodMeta, hasValue_) == offsetof(torch::executor::Result<torch::executor::MethodMeta>, hasValue_), "Result_MethodMeta hasValue_ offset mismatch");
Result_MethodMeta crate_Result_MethodMeta(const torch::executor::Result<torch::executor::MethodMeta> &result)
{
Result_MethodMeta result2{
.error_ = torch::executor::Error::Ok,
.hasValue_ = false,
};
memcpy(&result2, &result, sizeof(Result_MethodMeta));
return result2;
}

Result_MethodMeta Program_method_meta(const torch::executor::Program *program, const char *method_name)
@@ -151,6 +151,10 @@ namespace executorch_rs
tensor->~Tensor();
}

torch::executor::EValue EValue_shallow_clone(torch::executor::EValue *evalue)
{
return *evalue;
}
void EValue_destructor(torch::executor::EValue *evalue)
{
evalue->~EValue();
@@ -170,48 +174,48 @@
}

#if defined(EXECUTORCH_RS_MODULE)
torch::executor::Module Module_new(torch::executor::ArrayRef<char> file_path, torch::executor::Module::MlockConfig mlock_config, torch::executor::EventTracer *event_tracer)
torch::executor::Module *Module_new(torch::executor::ArrayRef<char> file_path, torch::executor::Module::MlockConfig mlock_config, torch::executor::EventTracer *event_tracer)
{
std::string file_path_str(file_path.begin(), file_path.end());
std::unique_ptr<torch::executor::EventTracer> event_tracer2(event_tracer);
return torch::executor::Module(file_path_str, mlock_config, std::move(event_tracer2));
return new torch::executor::Module(file_path_str, mlock_config, std::move(event_tracer2));
}
void Module_destructor(torch::executor::Module *module)
void Module_destructor(torch::executor::Module *module_)
{
module->~Module();
module_->~Module();
}
torch::executor::Result<RawVec<RawVec<char>>> Module_method_names(torch::executor::Module *module)
torch::executor::Result<Vec<Vec<char>>> Module_method_names(torch::executor::Module *module_)
{
std::unordered_set<std::string> method_names = ET_UNWRAP(module->method_names());
std::vector<RawVec<char>> method_names_vec;
std::unordered_set<std::string> method_names = ET_UNWRAP(module_->method_names());
std::vector<Vec<char>> method_names_vec;
for (const std::string &method_name : method_names)
{
std::vector<char> method_name_vec(method_name.begin(), method_name.end());
method_names_vec.push_back(crate_RawVec(std::move(method_name_vec)));
method_names_vec.push_back(crate_Vec(std::move(method_name_vec)));
}
return crate_RawVec(std::move(method_names_vec));
return crate_Vec(std::move(method_names_vec));
}
torch::executor::Error Module_load_method(torch::executor::Module *module, torch::executor::ArrayRef<char> method_name)
torch::executor::Error Module_load_method(torch::executor::Module *module_, torch::executor::ArrayRef<char> method_name)
{
std::string method_name_str(method_name.begin(), method_name.end());
return module->load_method(method_name_str);
return module_->load_method(method_name_str);
}
bool Module_is_method_loaded(const torch::executor::Module *module, torch::executor::ArrayRef<char> method_name)
bool Module_is_method_loaded(const torch::executor::Module *module_, torch::executor::ArrayRef<char> method_name)
{
std::string method_name_str(method_name.begin(), method_name.end());
return module->is_method_loaded(method_name_str);
return module_->is_method_loaded(method_name_str);
}
Result_MethodMeta Module_method_meta(torch::executor::Module *module, torch::executor::ArrayRef<char> method_name)
Result_MethodMeta Module_method_meta(torch::executor::Module *module_, torch::executor::ArrayRef<char> method_name)
{
std::string method_name_str(method_name.begin(), method_name.end());
return crate_Result_MethodMeta(module->method_meta(method_name_str));
return crate_Result_MethodMeta(module_->method_meta(method_name_str));
}
torch::executor::Result<RawVec<torch::executor::EValue>> Module_execute(torch::executor::Module *module, torch::executor::ArrayRef<char> method_name, torch::executor::ArrayRef<torch::executor::EValue> inputs)
torch::executor::Result<Vec<torch::executor::EValue>> Module_execute(torch::executor::Module *module_, torch::executor::ArrayRef<char> method_name, torch::executor::ArrayRef<torch::executor::EValue> inputs)
{
std::string method_name_str(method_name.begin(), method_name.end());
std::vector<torch::executor::EValue> inputs_vec(inputs.begin(), inputs.end());
std::vector<torch::executor::EValue> outputs = ET_UNWRAP(module->execute(method_name_str, inputs_vec));
return crate_RawVec(std::move(outputs));
std::vector<torch::executor::EValue> outputs = ET_UNWRAP(module_->execute(method_name_str, inputs_vec));
return crate_Vec(std::move(outputs));
}
#endif
}
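
Worth noting alongside this file: the old `crate_RawVec` handed the `std::vector`'s own buffer (data/len/cap) to Rust, while the new `crate_Vec` copies into a plain `new T[]` array and exports matching `Vec_*_destructor` functions, so the buffer is always released on the C++ side by a matching `delete[]`. A hedged sketch of how the Rust side can hold such a vector (the wrapper type is illustrative, not the crate's actual code):

```rust
// Illustrative owner for a Vec<char> returned over the FFI; the real crate
// may structure this differently, but the invariant is the same: the buffer
// must be freed by the exported C++ destructor, never by Rust's allocator.
struct CharVec(et_rs_c::Vec<core::ffi::c_char>);

impl Drop for CharVec {
    fn drop(&mut self) {
        unsafe { et_rs_c::Vec_char_destructor(&mut self.0) }
    }
}
```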
21 changes: 16 additions & 5 deletions executorch-sys/cpp/executorch_rs_ext/api_utils.hpp
@@ -25,14 +25,24 @@

namespace executorch_rs
{
#if defined(EXECUTORCH_RS_STD)
template <typename T>
struct RawVec
struct Vec
{
T *data;
size_t len;
size_t cap;
};

#define VEC_DESTRUCTOR_DEC(T, name) \
void Vec_##name##_destructor(Vec<T> *vec);

VEC_DESTRUCTOR_DEC(char, char)
VEC_DESTRUCTOR_DEC(Vec<char>, Vec_char)
VEC_DESTRUCTOR_DEC(torch::executor::EValue, EValue)

#endif

struct Result_i64
{

@@ -84,20 +94,21 @@ namespace executorch_rs
void *Tensor_mutable_data_ptr(const exec_aten::Tensor *tensor);
void Tensor_destructor(exec_aten::Tensor *tensor);

torch::executor::EValue EValue_shallow_clone(torch::executor::EValue *evalue);
void EValue_destructor(torch::executor::EValue *evalue);
const exec_aten::ArrayRef<int64_t> BoxedEvalueList_i64_get(const torch::executor::BoxedEvalueList<int64_t> *list);
const exec_aten::ArrayRef<exec_aten::Tensor> BoxedEvalueList_Tensor_get(const torch::executor::BoxedEvalueList<exec_aten::Tensor> *list);

torch::executor::util::BufferDataLoader BufferDataLoader_new(const void *data, size_t size);

#if defined(EXECUTORCH_RS_MODULE)
torch::executor::Module Module_new(torch::executor::ArrayRef<char> file_path, torch::executor::Module::MlockConfig mlock_config, torch::executor::EventTracer *event_tracer);
torch::executor::Module *Module_new(torch::executor::ArrayRef<char> file_path, torch::executor::Module::MlockConfig mlock_config, torch::executor::EventTracer *event_tracer);
void Module_destructor(torch::executor::Module *module);
torch::executor::Result<RawVec<RawVec<char>>> Module_method_names(torch::executor::Module *module);
torch::executor::Result<Vec<Vec<char>>> Module_method_names(torch::executor::Module *module);
torch::executor::Error Module_load_method(torch::executor::Module *module, torch::executor::ArrayRef<char> method_name);
bool Module_is_method_loaded(const torch::executor::Module *module, torch::executor::ArrayRef<char> method_name);
Result_MethodMeta Module_method_meta(const torch::executor::Module *module, torch::executor::ArrayRef<char> method_name);
torch::executor::Result<RawVec<torch::executor::EValue>> Module_execute(torch::executor::Module *module, torch::executor::ArrayRef<char> method_name, torch::executor::ArrayRef<torch::executor::EValue> inputs);
Result_MethodMeta Module_method_meta(torch::executor::Module *module, torch::executor::ArrayRef<char> method_name);
torch::executor::Result<Vec<torch::executor::EValue>> Module_execute(torch::executor::Module *module, torch::executor::ArrayRef<char> method_name, torch::executor::ArrayRef<torch::executor::EValue> inputs);
#endif

}
11 changes: 11 additions & 0 deletions executorch-sys/src/lib.rs
@@ -109,3 +109,14 @@ impl Drop for et_c::Tensor {
unsafe { et_rs_c::Tensor_destructor(self) }
}
}

#[cfg(feature = "std")]
impl<T> et_rs_c::Vec<T> {
pub fn as_slice(&self) -> &[T] {
unsafe { std::slice::from_raw_parts(self.data, self.len) }
}

pub fn as_mut_slice(&mut self) -> &mut [T] {
unsafe { std::slice::from_raw_parts_mut(self.data, self.len) }
}
}
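
A short usage sketch for the new accessors — `v` stands for any `et_rs_c::Vec<i64>` obtained over the FFI; the value and its origin are illustrative:

```rust
// Borrow the C++-owned buffer without copying it.
let sum: i64 = v.as_slice().iter().sum();

// Mutations go through the mutable view; both views point at the same
// heap allocation, so the C++ side observes them too.
for x in v.as_mut_slice() {
    *x *= 2;
}
```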
22 changes: 15 additions & 7 deletions src/data_loader.rs
@@ -13,6 +13,12 @@ use crate::{et_c, et_rs_c};
/// This struct is like a base class for data loaders. All other data loaders implement `AsRef<DataLoader>` and other
/// structs, such as `Program`, take a reference to `DataLoader` instead of the concrete data loader type.
pub struct DataLoader(pub(crate) UnsafeCell<et_c::DataLoader>);
impl DataLoader {
pub(crate) fn from_inner_ref(loader: &et_c::DataLoader) -> &Self {
// Safety: Self has a single field of (UnsafeCell of) et_c::DataLoader
unsafe { std::mem::transmute(loader) }
}
}

/// A DataLoader that wraps a pre-allocated buffer. The FreeableBuffers
/// that it returns do not actually free any data.
@@ -27,16 +33,18 @@ pub struct BufferDataLoader<'a>(
impl<'a> BufferDataLoader<'a> {
/// Creates a new BufferDataLoader that wraps the given data.
pub fn new(data: &'a [u8]) -> Self {
let loader =
unsafe { et_rs_c::BufferDataLoader_new(data.as_ptr() as *const _, data.len()) };
// Safety: the returned Self has a lifetime guaranteeing it will not outlive the buffer
let loader = unsafe { et_rs_c::BufferDataLoader_new(data.as_ptr().cast(), data.len()) };
Self(UnsafeCell::new(loader), PhantomData)
}
}
impl AsRef<DataLoader> for BufferDataLoader<'_> {
fn as_ref(&self) -> &DataLoader {
// SAFETY: BufferDataLoader has a single field of (UnsafeCell of) et_c::util::BufferDataLoader, which is a
// subclass of et_c::DataLoader, and DataLoaders has a single field of (UnsafeCell of) et_c::DataLoader.
unsafe { std::mem::transmute::<&BufferDataLoader, &DataLoader>(self) }
// Safety: et_c::util::BufferDataLoader is a subclass of et_c::DataLoader
let loader = unsafe {
std::mem::transmute::<&et_c::util::BufferDataLoader, &et_c::DataLoader>(&*self.0.get())
};
DataLoader::from_inner_ref(loader)
}
}

@@ -89,7 +97,7 @@ mod file_data_loader {
) -> Result<Self> {
let file_name = file_name.as_ref().to_str().expect("Invalid file name");
let file_name = std::ffi::CString::new(file_name).unwrap();
Self::from_cstr(&file_name, alignment)
Self::from_path_cstr(&file_name, alignment)
}

/// Creates a new FileDataLoader given a `CStr`.
@@ -115,7 +123,7 @@
/// # Safety
///
/// The `file_name` should be a valid UTF-8 string and must not contain a null byte other than the one at the end.
pub fn from_cstr(file_name: &CStr, alignment: Option<usize>) -> Result<Self> {
pub fn from_path_cstr(file_name: &CStr, alignment: Option<usize>) -> Result<Self> {
let alignment = alignment.unwrap_or(16);
let loader =
unsafe { et_c::util::FileDataLoader::from(file_name.as_ptr(), alignment) }.rs()?;
6 changes: 6 additions & 0 deletions src/evalue.rs
@@ -469,6 +469,12 @@ impl Debug for EValue<'_> {
st.finish()
}
}
impl<'a> Clone for EValue<'a> {
fn clone(&self) -> Self {
let value = unsafe { et_rs_c::EValue_shallow_clone(&self.0 as *const _ as *mut _) };
unsafe { EValue::new(value) }
}
}

/// Helper class used to correlate EValues in the executor table, with the
/// unwrapped list of the proper type. Because values in the runtime's values
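
Finally, a brief usage sketch for the `Clone` impl added to `EValue` above, reusing `input_evalue1` from the hello-world example earlier in this PR (setup omitted; the copy is shallow, as `EValue_shallow_clone` suggests):

```rust
// Shallow clone: the C++ EValue struct itself is copied, so a tensor payload
// in the clone still refers to the same underlying data as the original.
let input_evalue1_copy = input_evalue1.clone();
```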