OP-TEE Advanced Storage Features - Analysis
Overview
This document analyzes advanced storage features in OP-TEE, including compression mechanisms, deduplication strategies, caching optimizations, storage snapshots, backup capabilities, and replication/redundancy features. While some features are implemented, others represent architectural foundations for future development.
Storage Compression Mechanisms
Block-Level Compression Considerations
OP-TEE’s storage architecture could support compression at the block level:
// Current block management in the REE FS
#define BLOCK_SHIFT 12 /* 4KB blocks */
#define BLOCK_SIZE (1 << BLOCK_SHIFT)

// Potential compression integration point (abridged excerpt)
static TEE_Result out_of_place_write(struct tee_fs_fd *fdp, size_t pos,
                                     const void *buf_core, const void *buf_user,
                                     size_t len)
{
    TEE_Result res = TEE_SUCCESS;
    size_t start_block_num = pos >> BLOCK_SHIFT;
    uint8_t *block = get_tmp_block(); /* 4KB buffer for compression */

    /*
     * Current: direct block writes.
     * Potential: compress the block before encryption:
     *
     * if (compression_enabled) {
     *     compressed_size = compress_block(block, compressed_block);
     *     if (compressed_size < BLOCK_SIZE) {
     *         // Use compressed version with size metadata
     *     }
     * }
     */
    res = tee_fs_htree_write_block(&fdp->ht, start_block_num, block);
    /* ... */
    return res;
}
Directory entries and metadata could benefit from compression:
// Current directory entry structure
struct dirfile_entry {
    TEE_UUID uuid;                        /* 16 bytes */
    uint8_t oid[TEE_OBJECT_ID_MAX_LEN];   /* 64 bytes (often sparse) */
    uint32_t oidlen;                      /* 4 bytes */
    uint8_t hash[TEE_FS_HTREE_HASH_SIZE]; /* 32 bytes */
    uint32_t file_number;                 /* 4 bytes */
    /* Total: 120 bytes, potential for compression */
};
// Object IDs often have patterns that could be compressed
// UUIDs have structured format suitable for compression
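As a hedged sketch of the saving available from the sparse oid field alone, a packed on-disk form could store only the used oidlen bytes of the object ID. struct packed_dirfile_entry and pack_dirfile_entry() below are hypothetical, not existing OP-TEE code:
// Hypothetical packed form: drop the unused tail of oid[], which is
// usually much shorter than TEE_OBJECT_ID_MAX_LEN
struct packed_dirfile_entry {
    TEE_UUID uuid;
    uint32_t oidlen;
    uint8_t hash[TEE_FS_HTREE_HASH_SIZE];
    uint32_t file_number;
    uint8_t oid[]; /* oidlen bytes instead of a fixed 64 */
};

static size_t pack_dirfile_entry(const struct dirfile_entry *e,
                                 struct packed_dirfile_entry *p)
{
    p->uuid = e->uuid;
    p->oidlen = e->oidlen;
    memcpy(p->hash, e->hash, sizeof(p->hash));
    p->file_number = e->file_number;
    memcpy(p->oid, e->oid, e->oidlen);

    /* 56 fixed bytes plus oidlen, versus a fixed 120 bytes today */
    return sizeof(*p) + e->oidlen;
}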
Encryption-Compression Interaction
Compression must occur before encryption to be effective, because well-encrypted ciphertext is indistinguishable from random data and therefore incompressible:
// Proper order: Compress -> Encrypt -> Store
// Current encryption primitive:
TEE_Result tee_fs_crypt_block(const TEE_UUID *uuid, uint8_t *out,
                              const uint8_t *in, size_t size,
                              uint16_t blk_idx, const uint8_t *encrypted_fek,
                              TEE_OperationMode mode);

// Compression would occur before encryption:
//     User data -> Compress -> Encrypt -> Store
// Decompression would occur after decryption:
//     Decrypt -> Decompress -> Return to user
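A minimal sketch of how a compressed write path could wrap this primitive; compress_block(), its signature, and the size-metadata handling are assumptions for illustration, not existing OP-TEE APIs:
// Hypothetical compress-then-encrypt helper. A real design must also
// persist the compressed size so reads know how much to decrypt and
// decompress.
static TEE_Result write_block_compressed(const TEE_UUID *uuid,
                                         const uint8_t *encrypted_fek,
                                         uint16_t blk_idx,
                                         const uint8_t *plain, uint8_t *out)
{
    uint8_t tmp[BLOCK_SIZE];
    size_t csize = compress_block(plain, BLOCK_SIZE, tmp, sizeof(tmp));

    if (csize && csize < BLOCK_SIZE)
        /* Compressible: encrypt the smaller image */
        return tee_fs_crypt_block(uuid, out, tmp, csize, blk_idx,
                                  encrypted_fek, TEE_MODE_ENCRYPT);

    /* Incompressible block: encrypt as-is */
    return tee_fs_crypt_block(uuid, out, plain, BLOCK_SIZE, blk_idx,
                              encrypted_fek, TEE_MODE_ENCRYPT);
}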
Storage Deduplication Strategies
Hash-Based Deduplication Framework
OP-TEE’s hash tree architecture provides a natural foundation for deduplication:
// Current hash tree node structure
struct tee_fs_htree_node_image {
    uint8_t hash[TEE_FS_HTREE_HASH_SIZE]; /* SHA-256 hash */
    uint8_t iv[TEE_FS_HTREE_IV_SIZE];
    uint8_t tag[TEE_FS_HTREE_TAG_SIZE];
    uint16_t flags;
};
// Block-level deduplication potential:
// 1. Calculate SHA-256 of block content
// 2. Check if hash exists in dedup table
// 3. If exists, reference existing block
// 4. If not, store new block and add to table
Content-Addressed Storage
Hash-based addressing enables natural deduplication:
// Theoretical content-addressed block storage
struct content_block {
    uint8_t content_hash[TEE_SHA256_HASH_SIZE]; /* Block identifier */
    uint32_t ref_count;                         /* Reference counter */
    uint32_t physical_address;                  /* Storage location */
    uint16_t actual_size;                       /* Compressed size */
    uint16_t flags;                             /* Compression/encryption flags */
};

// Block lookup by content hash
static TEE_Result find_content_block(const uint8_t *content_hash,
                                     struct content_block **block)
{
    /*
     * Search the content hash table:
     * - if found, increment the reference count,
     * - if not found, allocate a new block.
     */
    return TEE_ERROR_NOT_IMPLEMENTED; /* sketch only */
}
Per-TA Deduplication Scope
Security considerations limit deduplication scope:
// Deduplication must respect TA isolation
TEE_Result deduplicate_block_for_ta(const TEE_UUID *uuid,
                                    const uint8_t *block_data,
                                    size_t block_size,
                                    uint32_t *block_ref)
{
    uint8_t content_hash[TEE_SHA256_HASH_SIZE];
    uint8_t ta_scoped_hash[TEE_SHA256_HASH_SIZE];
    TEE_Result res;

    /* Calculate the content hash */
    res = tee_hash_createdigest(TEE_ALG_SHA256, block_data, block_size,
                                content_hash, sizeof(content_hash));
    if (res != TEE_SUCCESS)
        return res;

    /* Scope the hash to the TA to prevent cross-TA data inference */
    res = do_hmac(ta_scoped_hash, sizeof(ta_scoped_hash),
                  content_hash, sizeof(content_hash),
                  uuid, sizeof(*uuid));
    if (res != TEE_SUCCESS)
        return res;

    /* Use the scoped hash for the dedup lookup */
    return find_or_create_block(ta_scoped_hash, block_data, block_size,
                                block_ref);
}
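Scoping the lookup key with the TA UUID deliberately gives up cross-TA deduplication: identical blocks owned by different TAs map to different keys, so one TA cannot probe whether another TA already stores a given plaintext by observing dedup hits. Note that do_hmac() and find_or_create_block() above are illustrative helpers rather than existing OP-TEE APIs.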
Caching Optimization Features
RPMB Cache Implementation
The RPMB file system caches FAT entries in secure memory to avoid repeated RPMB reads:
// RPMB caching configuration
#define RPMB_BUF_MAX_ENTRIES (CFG_RPMB_FS_CACHE_ENTRIES + CFG_RPMB_FS_RD_ENTRIES)

struct rpmb_fat_entry_dir {
    struct rpmb_fat_entry *rpmb_fat_entry_buf; /* Cache buffer */
    uint32_t idx_curr;                         /* Current index */
    uint32_t num_buffered;                     /* Cached entries */
    uint32_t num_total_read;                   /* Total read operations */
    bool last_reached;                         /* End-of-data marker */
};
// Cache-aware FAT entry reading
static TEE_Result read_fat_entry(struct rpmb_fat_entry_dir *fed,
                                 struct rpmb_fat_entry *entry)
{
    /* Check whether the entry is in the cache */
    if (fed->idx_curr < fed->num_buffered) {
        *entry = fed->rpmb_fat_entry_buf[fed->idx_curr];
        fed->idx_curr++;
        return TEE_SUCCESS;
    }

    /* Cache miss: read from storage */
    return read_fat_entries_from_rpmb(fed, entry);
}
Memory Pool Caching
Storage operations draw temporary block buffers from a shared memory pool:
// Block allocation with pooling
static void *get_tmp_block(void)
{
    /* Uses the default memory pool for efficient allocation */
    return mempool_alloc(mempool_default, BLOCK_SIZE);
}

static void put_tmp_block(void *tmp_block)
{
    /* Returns the buffer to the pool for reuse */
    mempool_free(mempool_default, tmp_block);
}

// Potential enhancement: block content caching
struct block_cache_entry {
    uint32_t block_number;
    uint8_t *block_data;
    bool dirty;
    uint32_t last_access;
};
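A minimal sketch of how such a cache could sit in front of tee_fs_htree_read_block(); the direct-mapped layout, slot count, and malloc-based population are assumptions, not existing OP-TEE code:
// Hypothetical direct-mapped read cache in front of the hash tree
#define BLOCK_CACHE_SLOTS 16 /* hypothetical, power of two */

static struct block_cache_entry block_cache[BLOCK_CACHE_SLOTS];

static TEE_Result cached_read_block(struct tee_fs_htree **ht,
                                    size_t block_num, void *block)
{
    struct block_cache_entry *e =
        &block_cache[block_num % BLOCK_CACHE_SLOTS];
    TEE_Result res;

    if (e->block_data && e->block_number == block_num) {
        /* Cache hit: serve from memory, skip the hash tree walk */
        memcpy(block, e->block_data, BLOCK_SIZE);
        return TEE_SUCCESS;
    }

    /* Cache miss: read through and populate the slot */
    res = tee_fs_htree_read_block(ht, block_num, block);
    if (res != TEE_SUCCESS)
        return res;

    if (!e->block_data)
        e->block_data = malloc(BLOCK_SIZE);
    if (e->block_data) {
        /* A dirty entry would need write-back before being reused */
        memcpy(e->block_data, block, BLOCK_SIZE);
        e->block_number = block_num;
        e->dirty = false;
    }
    return TEE_SUCCESS;
}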
Directory Handle Caching
Directory handles are cached with reference counting:
static TEE_Result get_dirh(struct tee_fs_dirfile_dirh **dirh)
{
    if (!ree_fs_dirh) {
        /* Create a new directory handle */
        TEE_Result res = open_dirh(&ree_fs_dirh);

        if (res) {
            *dirh = NULL;
            return res;
        }
    }
    ree_fs_dirh_refcount++; /* Reference counting for caching */
    *dirh = ree_fs_dirh;
    return TEE_SUCCESS;
}

// Cache eviction on zero references
static void put_dirh_primitive(bool close)
{
    ree_fs_dirh_refcount--;
    if (ree_fs_dirh && (!ree_fs_dirh_refcount || close))
        close_dirh(&ree_fs_dirh); /* Evict from cache */
}
Storage Snapshots and Backup
Versioned Storage Foundation
The hash tree's dual-version node storage provides a foundation for snapshots:
// Each node maintains two versions
struct htree_node {
    size_t id;
    bool dirty;
    bool block_updated;
    struct tee_fs_htree_node_image node;
    struct htree_node *parent;
    struct htree_node *child[2];
};

// Version selection mechanism
#define HTREE_NODE_COMMITTED_BLOCK BIT32(0)
#define HTREE_NODE_COMMITTED_CHILD(n) BIT32(1 + (n))

// Potential snapshot extension:
struct storage_snapshot {
    uint32_t snapshot_id;
    uint32_t base_counter;
    uint8_t root_hash[TEE_FS_HTREE_HASH_SIZE];
    uint64_t timestamp;
    uint32_t ref_count;
};
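A hedged sketch of capturing such a snapshot: tee_fs_htree_sync_to_storage() is the existing sync entry point (shown later in this document), while create_snapshot(), the id counter, and the timestamp handling are assumptions:
// Hypothetical snapshot capture: flush dirty state, then record the
// root hash and counter that identify this exact tree version
static uint32_t next_snapshot_id;

TEE_Result create_snapshot(struct tee_fs_htree **ht,
                           struct storage_snapshot *snap)
{
    uint8_t root_hash[TEE_FS_HTREE_HASH_SIZE];
    uint32_t counter = 0;
    TEE_Result res;

    /* Make sure the on-storage state matches the in-memory tree */
    res = tee_fs_htree_sync_to_storage(ht, root_hash, &counter);
    if (res != TEE_SUCCESS)
        return res;

    snap->snapshot_id = next_snapshot_id++;
    snap->base_counter = counter;
    memcpy(snap->root_hash, root_hash, sizeof(snap->root_hash));
    snap->timestamp = 0; /* from a secure time source, if available */
    snap->ref_count = 1;
    return TEE_SUCCESS;
}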
Copy-on-Write Implementation
The current out-of-place write mechanism provides a copy-on-write (CoW) foundation:
// Current out-of-place write mechanism (abridged excerpt)
static TEE_Result out_of_place_write(struct tee_fs_fd *fdp, size_t pos,
                                     const void *buf_core, const void *buf_user,
                                     size_t len)
{
    /* ... (local declarations elided) ... */

    /* Read the original block, or start from zeroes past end of file */
    if (start_block_num * BLOCK_SIZE < ROUNDUP(meta->length, BLOCK_SIZE))
        res = tee_fs_htree_read_block(&fdp->ht, start_block_num, block);
    else
        memset(block, 0, BLOCK_SIZE); /* New block */

    /* Modify the block content */
    if (data_core_ptr)
        memcpy(block + offset, data_core_ptr, size_to_write);

    /* Write the modified block to a new location */
    res = tee_fs_htree_write_block(&fdp->ht, start_block_num, block);
    /* This naturally implements CoW, as required for snapshots */
    /* ... */
}
Incremental Backup Strategy
Hash trees enable efficient incremental backups:
// Backup process using hash comparison
TEE_Result create_incremental_backup(struct tee_fs_htree *current_tree,
                                     struct tee_fs_htree *base_tree,
                                     struct backup_manifest *manifest)
{
    /* Compare root hashes: identical roots mean identical trees */
    if (!memcmp(current_tree->root.node.hash, base_tree->root.node.hash,
                TEE_FS_HTREE_HASH_SIZE)) {
        manifest->type = BACKUP_TYPE_UNCHANGED;
        return TEE_SUCCESS;
    }

    /* Traverse the trees to find changed blocks */
    manifest->type = BACKUP_TYPE_INCREMENTAL;
    return traverse_and_backup_changes(current_tree, base_tree, manifest);
}
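The traversal itself can prune any subtree whose hash matches, since a node's hash covers everything below it. A minimal sketch over the htree_node structure shown earlier, with add_changed_block() as a hypothetical manifest helper:
// Hypothetical recursive diff of two hash trees
static TEE_Result diff_nodes(struct htree_node *cur, struct htree_node *base,
                             struct backup_manifest *manifest)
{
    TEE_Result res;
    size_t n;

    /* Identical subtree hashes mean nothing below has changed */
    if (base && !memcmp(cur->node.hash, base->node.hash,
                        TEE_FS_HTREE_HASH_SIZE))
        return TEE_SUCCESS;

    /* Conservatively record this node's block as changed (or new) */
    res = add_changed_block(manifest, cur->id);
    if (res != TEE_SUCCESS)
        return res;

    /* Recurse into both children */
    for (n = 0; n < 2; n++) {
        if (!cur->child[n])
            continue;
        res = diff_nodes(cur->child[n],
                         base ? base->child[n] : NULL, manifest);
        if (res != TEE_SUCCESS)
            return res;
    }

    return TEE_SUCCESS;
}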
Storage Replication and Redundancy
Multi-Backend Storage
The OP-TEE architecture supports multiple storage backends behind a common abstraction:
// Storage backend abstraction
struct tee_fs_htree_storage {
    size_t block_size;
    TEE_Result (*rpc_read_init)(void *aux, struct tee_fs_rpc_operation *op,
                                enum tee_fs_htree_type type, size_t idx,
                                uint8_t vers, void **data);
    TEE_Result (*rpc_write_init)(void *aux, struct tee_fs_rpc_operation *op,
                                 enum tee_fs_htree_type type, size_t idx,
                                 uint8_t vers, void **data);
    TEE_Result (*rpc_read_final)(struct tee_fs_rpc_operation *op,
                                 size_t *bytes);
    TEE_Result (*rpc_write_final)(struct tee_fs_rpc_operation *op);
};

// Potential replication wrapper
struct replicated_storage {
    struct tee_fs_htree_storage primary;
    struct tee_fs_htree_storage secondary;
    enum replication_mode mode; /* SYNC, ASYNC, FAILOVER */
};
RAID-Like Redundancy
The hash tree storage abstraction also supports RAID-like redundancy:
// Theoretical RAID-1 implementation
TEE_Result raid1_write_block(struct raid1_context *ctx, size_t block_num,
                             const void *block)
{
    TEE_Result res1, res2;

    /* Write to both mirrors */
    res1 = tee_fs_htree_write_block(&ctx->mirror1, block_num, block);
    res2 = tee_fs_htree_write_block(&ctx->mirror2, block_num, block);

    /* Require both writes to succeed */
    if (res1 != TEE_SUCCESS || res2 != TEE_SUCCESS) {
        EMSG("RAID-1 write failure: mirror1=%x, mirror2=%x", res1, res2);
        return TEE_ERROR_STORAGE_NOT_AVAILABLE;
    }
    return TEE_SUCCESS;
}

// RAID-1 read with failover
TEE_Result raid1_read_block(struct raid1_context *ctx, size_t block_num,
                            void *block)
{
    TEE_Result res;

    /* Try the primary mirror first */
    res = tee_fs_htree_read_block(&ctx->mirror1, block_num, block);
    if (res == TEE_SUCCESS)
        return res;

    DMSG("Primary mirror failed, trying secondary");

    /* Fail over to the secondary mirror */
    res = tee_fs_htree_read_block(&ctx->mirror2, block_num, block);
    if (res == TEE_SUCCESS) {
        /* Mark the primary for rebuild */
        ctx->primary_degraded = true;
        return res;
    }

    EMSG("Both RAID-1 mirrors failed");
    return TEE_ERROR_STORAGE_NOT_AVAILABLE;
}
Storage can replicate across different backends:
// Cross-backend replication
struct cross_platform_storage {
    struct tee_fs_htree_storage ree_storage;  /* REE file system */
    struct tee_fs_htree_storage rpmb_storage; /* RPMB secure storage */
    enum sync_policy policy; /* PREFER_SECURE, PREFER_FAST, BOTH */
};

TEE_Result cross_platform_sync(struct cross_platform_storage *cps)
{
    uint8_t ree_hash[TEE_FS_HTREE_HASH_SIZE];
    uint8_t rpmb_hash[TEE_FS_HTREE_HASH_SIZE];

    /* Get root hashes from both backends */
    get_root_hash(&cps->ree_storage, ree_hash);
    get_root_hash(&cps->rpmb_storage, rpmb_hash);

    /* Synchronize if they differ */
    if (memcmp(ree_hash, rpmb_hash, sizeof(ree_hash)))
        return synchronize_storages(cps);
    return TEE_SUCCESS;
}
Lazy Synchronization
The current implementation includes lazy synchronization for performance:
// Dirty tracking for batch operations
struct htree_node {
    bool dirty;         /* Node needs sync */
    bool block_updated; /* Block data changed */
    /* ... */
};

// Batch synchronization (abridged excerpt)
TEE_Result tee_fs_htree_sync_to_storage(struct tee_fs_htree **ht_arg,
                                        uint8_t *hash, uint32_t *counter)
{
    struct tee_fs_htree *ht = *ht_arg;
    TEE_Result res;

    if (!ht->dirty)
        return TEE_SUCCESS; /* No changes to sync */

    /* ... (traverse context setup elided) ... */
    /* Sync all dirty nodes in a single post-order traversal */
    res = htree_traverse_post_order(ht, htree_sync_node_to_storage, ctx);
    /* ... */
    return res;
}
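This deferral matters because REE FS block and metadata writes are serviced over RPC by the normal-world tee-supplicant: flushing every node as soon as it is dirtied would multiply world switches, while a single post-order traversal updates children before their parents (whose hashes depend on them) and amortizes the round trips.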
Asynchronous I/O Framework
Storage operations could benefit from async I/O:
// Theoretical async I/O for storage
struct async_storage_op;

typedef TEE_Result (*async_callback_t)(struct async_storage_op *op,
                                       TEE_Result result);

struct async_storage_op {
    enum op_type type; /* ASYNC_READ, ASYNC_WRITE or ASYNC_SYNC */
    size_t block_num;
    void *buffer;
    async_callback_t callback;
    void *user_data;
};

// Async write operation
TEE_Result async_write_block(struct tee_fs_htree *ht, size_t block_num,
                             const void *block, async_callback_t callback)
{
    struct async_storage_op *op = alloc_async_op();

    op->type = ASYNC_WRITE;
    op->block_num = block_num;
    op->buffer = dup_block(block);
    op->callback = callback;
    return queue_async_operation(op);
}
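A hedged usage sketch on top of the hypothetical API above; on_write_done(), the buffer-freeing convention, and queue_block_write() are all assumptions:
// Hypothetical completion callback: release the duplicated buffer and
// surface failures when the queued operation completes
static TEE_Result on_write_done(struct async_storage_op *op, TEE_Result result)
{
    if (result != TEE_SUCCESS)
        EMSG("async write of block %zu failed", op->block_num);
    free(op->buffer);
    return result;
}

// Example caller: queue the write and return without blocking
static TEE_Result queue_block_write(struct tee_fs_htree *ht, size_t block_num,
                                    const void *block)
{
    return async_write_block(ht, block_num, block, on_write_done);
}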
Advanced Security Features
Zero-Knowledge Storage
Content-based addressing with encrypted blocks:
// Zero-knowledge content addressing
TEE_Result store_zero_knowledge_block(const void *plaintext, size_t size,
                                      uint8_t *block_id)
{
    uint8_t content_key[TEE_SHA256_HASH_SIZE];
    uint8_t encrypted_block[BLOCK_SIZE];

    /* Derive the key from the content itself */
    tee_hash_createdigest(TEE_ALG_SHA256, plaintext, size,
                          content_key, sizeof(content_key));

    /* Encrypt the block with the content-derived key */
    encrypt_block(content_key, plaintext, size, encrypted_block);

    /* The block ID is the hash of the encrypted data */
    tee_hash_createdigest(TEE_ALG_SHA256, encrypted_block, BLOCK_SIZE,
                          block_id, TEE_SHA256_HASH_SIZE);

    /* Store the encrypted block indexed by block_id */
    return store_content_block(block_id, encrypted_block);
}
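This is essentially convergent encryption: identical plaintexts yield identical ciphertexts and block IDs, so deduplication works without the store learning the content, though anyone able to guess a plaintext can confirm its presence. Reading back requires the caller to retain or re-derive the content key. A hedged retrieval sketch, with load_content_block() and decrypt_block() as hypothetical counterparts of the helpers above:
// Hypothetical retrieval path for a zero-knowledge block
TEE_Result load_zero_knowledge_block(const uint8_t *block_id,
                                     const uint8_t *content_key,
                                     void *plaintext, size_t size)
{
    uint8_t encrypted_block[BLOCK_SIZE];
    TEE_Result res;

    /* Fetch the ciphertext by its content-derived ID */
    res = load_content_block(block_id, encrypted_block);
    if (res != TEE_SUCCESS)
        return res;

    /* Decrypt with the caller-supplied content key */
    return decrypt_block(content_key, encrypted_block, size, plaintext);
}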
Homomorphic Storage Operations
Theoretical support for operations on encrypted data:
// Homomorphic operations on encrypted storage
TEE_Result homomorphic_search(const struct search_criteria *criteria,
                              uint8_t *result_hash)
{
    /*
     * Perform search operations over encrypted directory entries
     * without decrypting individual entries.
     */
    return perform_encrypted_search(criteria, result_hash);
}
Conclusion
This advanced storage feature analysis demonstrates OP-TEE’s architectural flexibility and potential for implementing sophisticated storage optimizations while maintaining security guarantees. The modular design enables incremental addition of advanced features as requirements evolve.