[VFS] Move victoria falls to rust. (Breaks voyageurs)
Move victoria falls to Rust, which lets us remove both the denali and victoria falls C++ code. The disk driver appears to work correctly, but it has exposed some instability in the voyageurs xhci implementation, which now breaks.
parent f918966727
commit dc801786b1
37 changed files with 504 additions and 2065 deletions
rust/Cargo.lock (generated, 1 line changed)
@@ -54,6 +54,7 @@ version = "0.1.0"
dependencies = [
 "denali_client",
 "mammoth",
 "yellowstone-yunq",
]

[[package]]
@@ -1,31 +1,50 @@
use mammoth::{cap::Capability, zion::ZError};

use crate::{DenaliClient, DiskBlock, ReadRequest};
use crate::{DenaliClient, DiskBlock, ReadManyRequest, ReadRequest};

pub struct DiskReader {
    client: DenaliClient,
    disk_id: u64,
    lba_offset: u64,
    block_multiplier: u64,
}

impl DiskReader {
    pub fn new(client: DenaliClient, disk_id: u64, lba_offset: u64) -> Self {
    pub fn new(client: DenaliClient, disk_id: u64, lba_offset: u64, block_multiplier: u64) -> Self {
        Self {
            client,
            disk_id,
            lba_offset,
            block_multiplier,
        }
    }

    // TODO: Make yunq clients callable from a non-mutable reference so this can be called from
    // shared ownership.
    pub fn read(&mut self, lba: u64, cnt: u64) -> Result<Capability, ZError> {
        let read_resp = self.client.read(&ReadRequest {
            device_id: self.disk_id,
            block: DiskBlock {
                lba: self.lba_offset + lba,
                size: cnt,
                lba: self.lba_offset + (lba * self.block_multiplier),
                size: cnt * self.block_multiplier,
            },
        })?;

        Ok(Capability::take(read_resp.memory))
    }

    pub fn read_many(&mut self, blocks: &[DiskBlock]) -> Result<Capability, ZError> {
        let read_resp = self.client.read_many(&ReadManyRequest {
            device_id: self.disk_id,
            blocks: blocks
                .iter()
                .map(|b| DiskBlock {
                    lba: self.lba_offset + (b.lba * self.block_multiplier),
                    size: b.size * self.block_multiplier,
                })
                .collect(),
        })?;

        Ok(Capability::take(read_resp.memory))
    }
}
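The new block_multiplier argument is what lets callers address the disk in filesystem blocks while denali still works in sectors: every request scales both the starting LBA and the count before it goes over the endpoint. A minimal standalone sketch of that arithmetic (the helper name, the 512-byte sector size, and the concrete numbers are illustrative assumptions, not part of the diff):

// Hypothetical helper mirroring what DiskReader::read now does with its
// lba_offset and block_multiplier fields: map a filesystem-block request
// onto an absolute (sector_lba, sector_count) pair for the disk.
fn to_disk_extent(lba_offset: u64, block_multiplier: u64, lba: u64, cnt: u64) -> (u64, u64) {
    (lba_offset + lba * block_multiplier, cnt * block_multiplier)
}

fn main() {
    // A 1024-byte-block ext2 partition (two 512-byte sectors per block)
    // starting at sector 2048: filesystem blocks 10..12 become sectors 2068..2072.
    assert_eq!(to_disk_extent(2048, 2, 10, 2), (2068, 4));
}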
@@ -6,3 +6,4 @@ edition = "2024"
[dependencies]
denali_client = { path = "../../client/denali_client" }
mammoth = { path = "../../mammoth" }
yellowstone-yunq = { path = "../../yellowstone/" }
@@ -1,19 +1,284 @@
use denali_client::DiskReader;
use mammoth::mem::MemoryRegion;
use core::cmp::min;

use crate::types::Superblock;
use alloc::{collections::BTreeMap, string::String, vec::Vec};
use denali_client::{DenaliClient, DiskBlock, DiskReader, ReadRequest};
use mammoth::{cap::Capability, debug, mem::MemoryRegion, zion::ZError};
use yellowstone_yunq::DenaliInfo;

use crate::types::{BlockGroupDescriptor, DirEntry, Inode, Superblock};

pub struct FileInfo {
    pub inode: u32,
    pub name: String,
}

/// Ext2 Driver with the ability to read files and directories from the given disk.
///
/// Implementation based on the information available at
/// https://www.nongnu.org/ext2-doc/ext2.html
pub struct Ext2Driver {
    reader: DiskReader,
    superblock_region: MemoryRegion,
    bgdt_region: MemoryRegion,

    /// Cache of the memory regions for the inode tables available indexed by
    /// the block_group number.
    inode_table_map: Vec<Option<MemoryRegion>>,

    /// Cache of inode_num to memory capability.
    /// This is particularly important for directories so we
    /// don't iterate over the disk each time.
    inode_cache: BTreeMap<u32, Capability>,
}

impl Ext2Driver {
    pub fn new(mut reader: DiskReader) -> Self {
        let super_block_mem = MemoryRegion::from_cap(reader.read(2, 2).unwrap()).unwrap();
        let super_block: &Superblock = super_block_mem.as_ref();
        let inodes = super_block.inodes_count;
        let magic = super_block.magic;
        mammoth::debug!("Superblock ({:#x}): inodes: {:#x}", magic, inodes);
        Self { reader }
    pub fn new(denali_info: DenaliInfo) -> Self {
        let mut client = DenaliClient::new(Capability::take(denali_info.denali_endpoint));

        // Calculate the absolute offset and size of the superblock. It is located at
        // offset 1024 of the partition and is 1024 bytes long. (Mostly extra
        // reserved space).
        // Ref: https://www.nongnu.org/ext2-doc/ext2.html#def-superblock
        let abs_superblock_start = denali_info.lba_offset + 2;
        let abs_superblock_size = 2; // TODO: This assumes 512 bytes sectors.
        let superblock_region = MemoryRegion::from_cap(Capability::take(
            client
                .read(&ReadRequest {
                    device_id: denali_info.device_id,
                    block: DiskBlock {
                        lba: abs_superblock_start,
                        size: abs_superblock_size,
                    },
                })
                .unwrap()
                .memory,
        ))
        .unwrap();
        let superblock: &Superblock = superblock_region.as_ref();
        assert!(superblock.is_valid());

        let mut reader = DiskReader::new(
            client,
            denali_info.device_id,
            denali_info.lba_offset,
            superblock.sectors_per_block(),
        );

        let bgdt_region = MemoryRegion::from_cap(
            reader
                .read(superblock.bgdt_block_num(), superblock.bgdt_block_size())
                .unwrap(),
        )
        .unwrap();

        let mut inode_table_map = Vec::new();
        inode_table_map.resize_with(superblock.num_block_groups() as usize, || None);

        Self {
            reader,
            superblock_region,
            bgdt_region,
            inode_table_map,
            inode_cache: BTreeMap::new(),
        }
    }

    fn superblock(&self) -> &Superblock {
        self.superblock_region.as_ref()
    }

    fn bgdt(&self) -> &[BlockGroupDescriptor] {
        self.bgdt_region.slice()
    }

    /// Updates the cached inode tables to contain the inode table for
    /// a specific group.
    fn populate_inode_table_if_none(&mut self, block_group_num: usize) {
        if let None = self.inode_table_map[block_group_num] {
            debug!(
                "Cache MISS on inode table for block_group {}",
                block_group_num
            );
            let inode_table = self.bgdt()[block_group_num].inode_table;
            self.inode_table_map[block_group_num] = Some(
                MemoryRegion::from_cap(
                    self.reader
                        .read(
                            inode_table as u64,
                            self.superblock().inode_table_block_size(),
                        )
                        .unwrap(),
                )
                .unwrap(),
            );
        } else {
            debug!(
                "Cache HIT on inode table for block_group {}",
                block_group_num
            );
        }
    }

    pub fn get_inode(&mut self, inode_num: u32) -> Inode {
        // See the following for a description of finding an inode.
        // https://www.nongnu.org/ext2-doc/ext2.html#idm140660447281728
        let block_group_num = (inode_num - 1) / self.superblock().inodes_per_group;
        self.populate_inode_table_if_none(block_group_num as usize);
        let region = self.inode_table_map[block_group_num as usize]
            .as_ref()
            .unwrap();

        let local_index = (inode_num - 1) % self.superblock().inodes_per_group;
        let offset = self.superblock().inode_size() * local_index as u64;
        unsafe { region.raw_ptr_at_offset::<Inode>(offset).read().clone() }
    }

    fn get_blocks_from_single_indirect(&mut self, block_num: u64, num_blocks: usize) -> Vec<u32> {
        assert!(num_blocks <= 256);
        let single_indr_block_mem =
            MemoryRegion::from_cap(self.reader.read(block_num, 1).unwrap()).unwrap();

        single_indr_block_mem.slice()[..num_blocks].to_vec()
    }

    fn get_blocks_from_double_indirect(&mut self, block_num: u64, num_blocks: usize) -> Vec<u32> {
        assert!(num_blocks > 0 && num_blocks <= (256 * 256));
        let num_dbl_indr = ((num_blocks - 1) / 256) + 1;

        let dbl_indr_block_mem =
            MemoryRegion::from_cap(self.reader.read(block_num, 1).unwrap()).unwrap();

        let dbl_indr_blocks: &[u32] = dbl_indr_block_mem.slice();

        let mut blocks_to_read = Vec::new();

        for i in 0..num_dbl_indr {
            let num_blocks_in_single = min(num_blocks - (256 * i), 256);
            blocks_to_read.append(
                &mut self.get_blocks_from_single_indirect(
                    dbl_indr_blocks[i] as u64,
                    num_blocks_in_single,
                ),
            );
        }

        blocks_to_read
    }

    fn run_len_compress_blocks(&self, blocks: Vec<u32>) -> Vec<DiskBlock> {
        let mut curr_block = DiskBlock {
            lba: blocks[0] as u64,
            size: 1,
        };

        let mut iter = blocks.into_iter();
        iter.next();

        let mut blocks = Vec::new();

        while let Some(block) = iter.next() {
            if block as u64 == (curr_block.lba + curr_block.size) {
                curr_block.size += 1;
            } else {
                blocks.push(curr_block.clone());

                curr_block.lba = block as u64;
                curr_block.size = 1;
            }
        }

        blocks.push(curr_block);

        blocks
    }

    fn read_inode(&mut self, _inode_num: u32, inode: Inode) -> Result<Capability, ZError> {
        // TODO: Cache this method using _inode_num
        // TODO: This assumes 512 byte sectors.
        let real_block_cnt = (inode.blocks as u64 - 1) / (self.superblock().block_size() / 512) + 1;
        if inode.block[14] != 0 {
            debug!("Can't handle triply indirect inodes yet.");
            return Err(ZError::UNIMPLEMENTED);
        }

        let mut blocks_to_read = Vec::new();

        for i in 0..min(12, real_block_cnt) {
            blocks_to_read.push(inode.block[i as usize])
        }

        // Singly indirect block.
        if inode.block[12] != 0 {
            let num_blocks = min(256, real_block_cnt - 12) as usize;
            blocks_to_read.append(
                &mut self.get_blocks_from_single_indirect(inode.block[12] as u64, num_blocks),
            );
        }

        // Doubly indirect block.
        if inode.block[13] != 0 {
            let num_blocks = min(256 * 256, real_block_cnt - 268) as usize;
            blocks_to_read.append(
                &mut self.get_blocks_from_double_indirect(inode.block[13] as u64, num_blocks),
            );
        };

        self.reader
            .read_many(&self.run_len_compress_blocks(blocks_to_read))
    }

    fn read_inode_into_mem(
        &mut self,
        inode_num: u32,
        inode: Inode,
    ) -> Result<MemoryRegion, ZError> {
        if !self.inode_cache.contains_key(&inode_num) {
            debug!("Cache MISS for inode_num: {}", inode_num);
            let inode_cap = self.read_inode(inode_num, inode)?;
            self.inode_cache.insert(inode_num, inode_cap);
        } else {
            debug!("Cache HIT for inode_num: {}", inode_num);
        }

        MemoryRegion::from_cap(self.inode_cache[&inode_num].duplicate(Capability::PERMS_ALL)?)
    }

    pub fn read_file(&mut self, inode_num: u32) -> Result<Capability, ZError> {
        let inode = self.get_inode(inode_num);
        if (inode.mode & 0x8000) == 0 {
            debug!("Reading non file.");
            return Err(ZError::INVALID_ARGUMENT);
        }
        self.read_inode(inode_num, inode)
    }

    pub fn read_directory(&mut self, inode_num: u32) -> Result<Vec<FileInfo>, ZError> {
        let inode = self.get_inode(inode_num);
        if (inode.mode & 0x4000) == 0 {
            let mode = inode.mode;
            debug!("Reading non directory. Inode {:?}, Mode {}", inode, mode);
            return Err(ZError::INVALID_ARGUMENT);
        }

        let dir = self.read_inode_into_mem(inode_num, inode)?;

        let mut file_names = Vec::new();

        let mut offset = 0;
        while offset < dir.size() {
            let dir_ptr: DirEntry = unsafe { dir.raw_ptr_at_offset::<DirEntry>(offset).read() };

            let name = dir_ptr.name;
            let file_name: String =
                String::from_utf8(name[..dir_ptr.name_len as usize].to_vec()).unwrap();
            file_names.push(FileInfo {
                inode: dir_ptr.inode,
                name: file_name,
            });

            offset += dir_ptr.record_length as u64;
        }

        Ok(file_names)
    }
}
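Before issuing a ReadMany, read_inode funnels the collected block list through run_len_compress_blocks, which coalesces consecutive block numbers into (lba, size) extents so contiguous file data costs one DiskBlock instead of one request entry per block. A self-contained sketch of that coalescing step over plain integers (a hypothetical helper for illustration, not code from this commit):

// Collapse a list of block numbers into (start, len) runs, mirroring the
// run-length compression the driver applies before calling read_many.
fn coalesce(blocks: &[u32]) -> Vec<(u64, u64)> {
    let mut runs: Vec<(u64, u64)> = Vec::new();
    for &b in blocks {
        match runs.last_mut() {
            // Extend the current run when the next block is contiguous.
            Some((start, len)) if *start + *len == b as u64 => *len += 1,
            // Otherwise start a new run of length 1.
            _ => runs.push((b as u64, 1)),
        }
    }
    runs
}

fn main() {
    // Blocks 100..=103 are contiguous and become one extent; 200 starts another.
    assert_eq!(coalesce(&[100, 101, 102, 103, 200]), vec![(100u64, 4u64), (200u64, 1u64)]);
}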
@@ -1,3 +1,5 @@
/// Superblock structure.
/// https://www.nongnu.org/ext2-doc/ext2.html#superblock
#[repr(C, packed)]
pub struct Superblock {
    pub inodes_count: u32,

@@ -28,3 +30,95 @@ pub struct Superblock {
    pub first_ino: u32,
    pub inode_size: u16,
}

impl Superblock {
    pub fn is_valid(&self) -> bool {
        self.magic == 0xEF53
    }

    pub fn sectors_per_block(&self) -> u64 {
        1 << (self.log_block_size + 1)
    }

    pub fn block_size(&self) -> u64 {
        1024 << self.log_block_size
    }

    pub fn bgdt_block_num(&self) -> u64 {
        if self.block_size() == 1024 { 2 } else { 1 }
    }

    pub fn bgdt_block_size(&self) -> u64 {
        (self.num_block_groups() * (size_of::<BlockGroupDescriptor>() as u64) - 1)
            / self.block_size()
            + 1
    }

    pub fn num_block_groups(&self) -> u64 {
        (((self.blocks_count - 1) / self.blocks_per_group) + 1) as u64
    }

    pub fn inode_size(&self) -> u64 {
        if self.rev_level >= 1 {
            self.inode_size as u64
        } else {
            const DEFAULT_INODE_SIZE: u64 = 0x80;
            DEFAULT_INODE_SIZE
        }
    }

    pub fn inode_table_block_size(&self) -> u64 {
        (self.inode_size() * self.inodes_per_group as u64) / self.block_size()
    }
}

#[repr(C, packed)]
#[derive(Debug)]
pub struct BlockGroupDescriptor {
    pub block_bitmap: u32,
    pub inode_bitmap: u32,
    pub inode_table: u32,
    pub free_blocks_count: u16,
    pub free_inodes_count: u16,
    pub used_dirs_count: u16,
    reserved: [u8; 14],
}

const _: () = assert!(size_of::<BlockGroupDescriptor>() == 32);

#[repr(C, packed)]
#[derive(Clone, Debug)]
pub struct Inode {
    pub mode: u16,
    pub uid: u16,
    pub size: u32,
    pub atime: u32,
    pub ctime: u32,
    pub mtime: u32,
    pub dtime: u32,
    pub gid: u16,
    pub links_count: u16,
    pub blocks: u32,
    pub flags: u32,
    pub osd1: u32,
    pub block: [u32; 15],
    pub generation: u32,
    pub file_acl: u32,
    pub dir_acl: u32,
    pub faddr: u32,
    pub osd2: [u32; 3],
}

const _: () = assert!(size_of::<Inode>() == 128);

pub const EXT2_FT_FILE: u8 = 0x1;
pub const EXT2_FT_DIR: u8 = 0x2;

#[repr(C, packed)]
pub struct DirEntry {
    pub inode: u32,
    pub record_length: u16,
    pub name_len: u8,
    pub file_type: u8,
    pub name: [u8; 256],
}
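The Superblock helpers convert on-disk fields into the units the driver works in: log_block_size = 0 means a 1024-byte block, which is two 512-byte sectors (matching the TODOs about the 512-byte sector assumption), and bgdt_block_size() is a ceiling division over 32-byte group descriptors. A few worked numbers, chosen purely for illustration, that follow those formulas:

fn main() {
    // block_size() and sectors_per_block() for log_block_size = 0.
    let log_block_size: u64 = 0;
    assert_eq!(1024u64 << log_block_size, 1024); // 1024-byte blocks
    assert_eq!(1u64 << (log_block_size + 1), 2); // two 512-byte sectors per block

    // bgdt_block_size(): ceiling of (num_block_groups * 32-byte descriptors) / block_size.
    // With 48 block groups, 48 * 32 = 1536 bytes of descriptors round up to 2 blocks.
    let (num_block_groups, block_size): (u64, u64) = (48, 1024);
    assert_eq!((num_block_groups * 32 - 1) / block_size + 1, 2);
}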
@@ -94,10 +94,21 @@ impl MemoryRegion {
        }
    }

    pub fn raw_ptr_at_offset<T>(&self, offset: u64) -> *const T {
        // TODO: Come up with a better safety check here.
        // We can't use the size of T because it might not be sized.
        assert!(offset + size_of::<T>() as u64 <= self.size);
        (self.virt_addr + offset) as *const T
    }

    pub fn cap(&self) -> &Capability {
        &self.mem_cap
    }

    pub fn size(&self) -> u64 {
        self.size
    }

    pub fn duplicate(&self, offset: u64, length: u64) -> Result<Capability, ZError> {
        syscall::memory_obj_duplicate(&self.mem_cap, offset, length)
    }
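raw_ptr_at_offset is how the ext2 driver overlays its #[repr(C, packed)] structs (Inode, DirEntry) on a mapped memory region: bounds-check the offset, then cast the address to a typed pointer and read by value. A standalone sketch of the same pattern against an ordinary byte buffer (the Pair struct and the buffer contents are made up for illustration; a little-endian target is assumed):

/// Toy packed struct standing in for the driver's on-disk types.
#[repr(C, packed)]
struct Pair {
    a: u32,
    b: u32,
}

/// Mirror of the raw_ptr_at_offset pattern over a plain byte slice:
/// assert the read stays in bounds, then hand back a typed pointer.
fn raw_ptr_at_offset<T>(buf: &[u8], offset: usize) -> *const T {
    assert!(offset + std::mem::size_of::<T>() <= buf.len());
    unsafe { buf.as_ptr().add(offset) as *const T }
}

fn main() {
    // 4 bytes of padding, then two little-endian u32s: 1 and 2.
    let buf = [0u8, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0];
    let pair = unsafe { raw_ptr_at_offset::<Pair>(&buf, 4).read_unaligned() };
    // Copy the fields out of the packed struct before using them.
    let (a, b) = (pair.a, pair.b);
    assert_eq!((a, b), (1, 2));
}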
@@ -1,9 +1,12 @@
#![no_std]
#![no_main]

use denali_client::{DenaliClient, DiskReader};
use ext2::Ext2Driver;
use mammoth::{cap::Capability, define_entry, zion::z_err_t};
use mammoth::{define_entry, zion::z_err_t};
use victoriafalls::{server::VictoriaFallsServerImpl, VFSServer};
use yellowstone_yunq::RegisterEndpointRequest;
use yunq::server::spawn_server_thread;
use yunq::server::YunqServer;

define_entry!();

@@ -12,13 +15,21 @@ extern "C" fn main() -> z_err_t {
    let yellowstone = yellowstone_yunq::from_init_endpoint();

    let denali_info = yellowstone.get_denali().unwrap();
    let client = DenaliClient::new(Capability::take(denali_info.denali_endpoint));

    let driver = Ext2Driver::new(DiskReader::new(
        client,
        denali_info.device_id,
        denali_info.lba_offset,
    ));
    let driver = Ext2Driver::new(denali_info);

    let vfs_server = VFSServer::new(VictoriaFallsServerImpl::new(driver)).unwrap();

    let yellowstone = yellowstone_yunq::from_init_endpoint();
    yellowstone
        .register_endpoint(&RegisterEndpointRequest {
            endpoint_name: "victoriafalls".into(),
            endpoint_capability: vfs_server.create_client_cap().unwrap().release(),
        })
        .unwrap();

    let server_thread = spawn_server_thread(vfs_server);
    server_thread.join().unwrap();

    0
}
@@ -6,6 +6,7 @@ include!(concat!(env!("OUT_DIR"), "/yunq.rs"));

pub mod dir;
pub mod file;
pub mod server;

static mut VFS_CLIENT: Option<VFSClient> = None;
rust/sys/victoriafalls/src/server.rs (new file, 75 lines)
@@ -0,0 +1,75 @@
use alloc::{string::String, vec::Vec};
use ext2::Ext2Driver;
use mammoth::{debug, zion::ZError};

use crate::{Directory, GetDirectoryRequest, OpenFileRequest, OpenFileResponse, VFSServerHandler};

pub struct VictoriaFallsServerImpl {
    ext2_driver: Ext2Driver,
}

impl VictoriaFallsServerImpl {
    pub fn new(ext2_driver: Ext2Driver) -> Self {
        VictoriaFallsServerImpl { ext2_driver }
    }

    fn find_path_in_dir(&mut self, inode_num: u32, file_name: &str) -> Result<u32, ZError> {
        let files = self.ext2_driver.read_directory(inode_num)?;

        files
            .iter()
            .find(|fi| fi.name == file_name)
            .map(|fi| fi.inode)
            .ok_or(ZError::NOT_FOUND)
    }
}

impl VFSServerHandler for VictoriaFallsServerImpl {
    fn open_file(&mut self, req: OpenFileRequest) -> Result<OpenFileResponse, ZError> {
        debug!("Reading {}", req.path);
        let mut tokens = req.path.split('/');
        if tokens.next() != Some("") {
            debug!("Path must be absolute");
            return Err(ZError::INVALID_ARGUMENT);
        }

        let mut inode_num = 2; // Start with root.

        while let Some(path_token) = tokens.next() {
            inode_num = self.find_path_in_dir(inode_num, path_token)?;
        }

        let inode = self.ext2_driver.get_inode(inode_num);
        Ok(OpenFileResponse {
            path: req.path,
            memory: self.ext2_driver.read_file(inode_num)?.release(),
            size: inode.size as u64,
        })
    }

    fn get_directory(&mut self, req: GetDirectoryRequest) -> Result<Directory, ZError> {
        debug!("Reading dir {}", req.path);
        let mut tokens = req.path.split('/');
        if tokens.next() != Some("") {
            debug!("Path must be absolute");
            return Err(ZError::INVALID_ARGUMENT);
        }

        let mut inode_num = 2; // Start with root.

        while let Some(path_token) = tokens.next() {
            inode_num = self.find_path_in_dir(inode_num, path_token)?;
        }

        let files: Vec<String> = self
            .ext2_driver
            .read_directory(inode_num)?
            .into_iter()
            .map(|fi| fi.name)
            .collect();

        Ok(Directory {
            filenames: files.join(","),
        })
    }
}
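Both handlers resolve a path the same way: split on '/', require an empty leading token (absolute paths only), then walk one component at a time from the ext2 root inode (2) via find_path_in_dir. A self-contained sketch of that walk, with a toy lookup closure standing in for the directory reads (the names and inode numbers here are invented for illustration):

// Resolve an absolute path to an inode number by repeated directory lookups.
// `lookup` plays the role of find_path_in_dir: (dir inode, name) -> child inode.
fn resolve(path: &str, lookup: impl Fn(u32, &str) -> Option<u32>) -> Option<u32> {
    let mut tokens = path.split('/');
    if tokens.next() != Some("") {
        return None; // reject relative paths, like the server does
    }
    let mut inode = 2u32; // ext2 root directory inode
    for token in tokens {
        inode = lookup(inode, token)?;
    }
    Some(inode)
}

fn main() {
    // Toy directory tree: / -> bin (inode 11) -> init (inode 14).
    let lookup = |dir: u32, name: &str| -> Option<u32> {
        match (dir, name) {
            (2, "bin") => Some(11),
            (11, "init") => Some(14),
            _ => None,
        }
    };
    assert_eq!(resolve("/bin/init", &lookup), Some(14));
    assert_eq!(resolve("bin/init", &lookup), None); // not absolute
}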