/*
The cache needs to be able to provide the following operating modes:
- read sector(s) into an already allocated buffer
  NOTE: we can provide a hint to the cache whether the sectors should be cached if they are not already in the cache
- write sector(s) from an already filled buffer
  NOTE: same remark
- read a sector and lock a buffer to it
- flag a locked sector as dirty
- clean a locked sector (write it back if needed)
- flag a locked sector as inconsistent (prevent writeback)
- remove an inconsistency flag (should be done ASAP)
- unlock a sector (maybe with a flag indicating whether it should be cleaned)
- clean everything (and optionally flush the storage layer)
- invalidate everything? (if we respect locks, this might cause deadlocks; if not, it might cause all sorts of trouble. yet we need this, to clean things out after e.g. changing microSD cards. just fail (and block card removal) if there are still open handles?)
*/

struct sectorcache_entry
{
    bool valid;           /* entry holds data for a sector */
    bool free;            /* entry is not in use and may be reclaimed */
    int locks;            /* number of handles holding this entry */
    bool exclusive;       /* a handle holds this entry exclusively */
    bool dirty;           /* buffer differs from what is on storage */
    bool consistent;      /* false prevents writeback of this entry */
    IF_MD2(int drive;)    /* owning drive (multi-drive builds only) */
    unsigned long sector; /* sector number this entry caches */
    unsigned char* buffer; /* points into sectorcache_buffer[] */
};

static struct mutex sectorcache_mutex SHAREDBSS_ATTR;
static struct wakeup sectorcache_block SHAREDBSS_ATTR;
static struct sectorcache_entry sectorcache_entry[SECTORCACHE_SIZE];
static unsigned char sectorcache_buffer[SECTORCACHE_SIZE][SECTOR_SIZE];

void sectorcache_init(void)
{
    int i;
    memset(sectorcache_entry, 0, sizeof(sectorcache_entry));
    mutex_init(&sectorcache_mutex);
    wakeup_init(&sectorcache_block);
    for (i = 0; i < SECTORCACHE_SIZE; i++)
        sectorcache_entry[i].buffer = sectorcache_buffer[i];
}

/* Release the cache mutex, block until another thread signals
   sectorcache_block (see sectorcache_markconsistent), then reacquire
   the mutex. Must be called with the mutex held. */
void sectorcache_wait_block(void)
{
    mutex_unlock(&sectorcache_mutex);
    wakeup_wait(&sectorcache_block, TIMEOUT_BLOCK);
    mutex_lock(&sectorcache_mutex);
}

int sectorcache_readthrough(IF_MD2(int drive,) unsigned long start, int count, void* buf, bool keepincache);
int sectorcache_writethrough(IF_MD2(int drive,) unsigned long start, int count, const void* buf, bool keepincache);
int sectorcache_lock(IF_MD2(int drive,) unsigned long sector, struct sectorcache_entry** handle, bool exclusive);
int sectorcache_unlock(struct sectorcache_entry** handle, bool clean, bool keepincache);

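/* A minimal sketch of how sectorcache_lock could look. This is a
   sketch under several assumptions, not the final design: entries with
   valid == false are treated as available, eviction of clean unlocked
   entries is left out, re-validation of the entry after waking from a
   blocked lock is omitted, sectorcache_unlock is assumed to signal
   sectorcache_block when it drops a lock, and storage_read_sectors()
   is assumed to be the underlying read primitive. Error codes are
   placeholders. */
int sectorcache_lock(IF_MD2(int drive,) unsigned long sector,
                     struct sectorcache_entry** handle, bool exclusive)
{
    int i, rc;
    struct sectorcache_entry* entry = NULL;

    mutex_lock(&sectorcache_mutex);

    /* hit: the sector is already cached
       (multi-drive builds would also compare the entry's drive) */
    for (i = 0; i < SECTORCACHE_SIZE; i++)
        if (sectorcache_entry[i].valid
            && sectorcache_entry[i].sector == sector)
        {
            entry = &sectorcache_entry[i];
            break;
        }

    if (entry)
    {
        /* block until no conflicting lock is held */
        while (entry->exclusive || (exclusive && entry->locks > 0))
            sectorcache_wait_block();
    }
    else
    {
        /* miss: take the first available entry and fill it from storage */
        for (i = 0; i < SECTORCACHE_SIZE; i++)
            if (!sectorcache_entry[i].valid)
            {
                entry = &sectorcache_entry[i];
                break;
            }
        if (!entry)
        {
            mutex_unlock(&sectorcache_mutex);
            return -1; /* cache full; a real version would evict */
        }
        rc = storage_read_sectors(IF_MD2(drive,) sector, 1, entry->buffer);
        if (rc < 0)
        {
            mutex_unlock(&sectorcache_mutex);
            return rc;
        }
        entry->valid = true;
        entry->free = false;
        IF_MD2(entry->drive = drive;)
        entry->sector = sector;
        entry->dirty = false;
        entry->consistent = true;
    }

    entry->locks++;
    entry->exclusive = exclusive;
    *handle = entry;
    mutex_unlock(&sectorcache_mutex);
    return 0;
}
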
int sectorcache_markdirty(struct sectorcache_entry* handle)
{
    mutex_lock(&sectorcache_mutex);
    handle->dirty = true;
    mutex_unlock(&sectorcache_mutex);
    return 0;
}

int sectorcache_markinconsistent(struct sectorcache_entry* handle)
{
    mutex_lock(&sectorcache_mutex);
    handle->consistent = false;
    mutex_unlock(&sectorcache_mutex);
    return 0;
}

int sectorcache_markconsistent(struct sectorcache_entry* handle)
{
    mutex_lock(&sectorcache_mutex);
    handle->consistent = true;
    mutex_unlock(&sectorcache_mutex);
    wakeup_signal(&sectorcache_block);
    return 0;
}

int sectorcache_clean(struct sectorcache_entry* handle);

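/* A minimal sketch of sectorcache_clean, assuming
   storage_write_sectors() is the underlying write primitive and that
   the caller already holds sectorcache_mutex (as sectorcache_clean_all
   below does). Inconsistent entries are skipped, which is exactly what
   the inconsistency flag is meant to enforce. */
int sectorcache_clean(struct sectorcache_entry* handle)
{
    int rc = 0;
    if (handle->dirty && handle->consistent)
    {
        rc = storage_write_sectors(IF_MD2(handle->drive,) handle->sector,
                                   1, handle->buffer);
        if (rc >= 0)
            handle->dirty = false;
    }
    return rc;
}
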
int sectorcache_clean_all(bool flushstorage)
{
    int i;
    bool retry = true;
    mutex_lock(&sectorcache_mutex);
    while (retry)
    {
        retry = false;
        for (i = 0; i < SECTORCACHE_SIZE; i++)
            if (sectorcache_entry[i].dirty)
            {
                if (!sectorcache_entry[i].consistent)
                {
                    /* wait until the holder restores consistency, then
                       rescan; don't write back an inconsistent entry */
                    sectorcache_wait_block();
                    retry = true;
                    continue;
                }
                sectorcache_clean(&sectorcache_entry[i]);
            }
    }
#ifdef HAVE_STORAGE_FLUSH
    if (flushstorage) storage_flush();
#endif
    mutex_unlock(&sectorcache_mutex);
    return 0;
}

int sectorcache_invalidate(IF_MD2(int drive));
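
/* A minimal sketch of sectorcache_invalidate following the note at the
   top: refuse (so the caller can block card removal) if any entry is
   still locked, otherwise drop everything. Filtering by drive in
   multi-drive builds is omitted for brevity; the error code is a
   placeholder. */
int sectorcache_invalidate(IF_MD2(int drive))
{
    int i;
    mutex_lock(&sectorcache_mutex);
    for (i = 0; i < SECTORCACHE_SIZE; i++)
        if (sectorcache_entry[i].locks > 0)
        {
            mutex_unlock(&sectorcache_mutex);
            return -1; /* open handles: refuse to invalidate */
        }
    for (i = 0; i < SECTORCACHE_SIZE; i++)
        sectorcache_entry[i].valid = false;
    mutex_unlock(&sectorcache_mutex);
    return 0;
}

/* Hypothetical caller code (not part of the cache itself) illustrating
   the intended locking pattern: lock a sector exclusively, modify the
   cached data in place, mark it dirty, then unlock with cleaning. */
void example_update_sector(IF_MD2(int drive,) unsigned long sector)
{
    struct sectorcache_entry* handle;
    if (sectorcache_lock(IF_MD2(drive,) sector, &handle, true) < 0)
        return;
    handle->buffer[0] ^= 0xff; /* modify the cached sector data */
    sectorcache_markdirty(handle);
    sectorcache_unlock(&handle, true /* clean */, true /* keepincache */);
}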