View difference between Paste ID: LshWrjr6 and
SHOW: | | - or go back to the newest paste.
1-
1+
/*
The cache needs to be able to provide the following operating modes:
- read sector(s) to an already allocated buffer
  NOTE: we can provide a hint to the cache whether the sectors should be cached, if they are not already in the cache
- write sector(s) from an already filled buffer
  NOTE: same remark
- read a sector and lock a buffer to it
- flag a locked sector as dirty
- clean a locked sector (write it back if needed)
- flag a locked sector as inconsistent (prevents writeback)
- remove an inconsistency flag (should be done ASAP)
- unlock a sector (maybe with a flag indicating whether it should be cleaned)
- clean everything (and optionally flush the storage layer)
- invalidate everything? (if we respect locks, this might cause deadlocks; if not, it might cause all hell of trouble. Yet we need this, to clean things out after e.g. changing microSD cards. Just fail (and block card removal) if there are still open handles?)
*/
16
17
/* One cache slot: bookkeeping flags plus a pointer to its sector-sized buffer
 * (wired to sectorcache_buffer[] in sectorcache_init). */
struct sectorcache_entry
{
    bool valid;       /* entry currently holds a cached sector */
    bool free;        /* preferred for reuse by sectorcache_allocate_entry
                       * (NOTE(review): exact semantics vs. 'valid' not shown
                       * in this chunk -- confirm) */
    int locks;        /* outstanding lock count; 0 means replaceable */
    bool exclusive;   /* locked for exclusive access */
    bool dirty;       /* buffer modified; needs writeback */
    bool consistent;  /* false prevents writeback (see sectorcache_markinconsistent) */
    IF_MD2(int drive;)    /* drive number, multi-drive builds only */
    unsigned long sector; /* sector number on the medium */
    unsigned char* buffer; /* SECTOR_SIZE bytes, points into sectorcache_buffer[] */
};
29
30
static struct mutex sectorcache_mutex SHAREDBSS_ATTR;
31
static struct wakeup sectorcache_block SHAREDBSS_ATTR;
32
static int sectorcache_replace_idx;
33
static struct sectorcache_entry[SECTORCACHE_SIZE];
34
static unsigned char sectorcache_buffer[SECTORCACHE_SIZE][SECTOR_SIZE];
35
36
void sectorcache_init(void)
37
{
38
    int i;
39
    sectorcache_replace_idx = 0;
40
    memset(sectorcache_entry, 0, sizeof(sectorcache_entry));
41
    mutex_init(&sectorcache_mutex);
42
    wakeup_init(&sectorcache_block);
43
    for (i = 0; i < SECTORCACHE_SIZE; i++)
44
        sectorcache_entry[i].buffer = sectorcache_buffer[i];
45
}
46
47
static void sectorcache_wait_block()
48
{
49
    mutex_unlock(&sectorcache_mutex);
50
    wakeup_wait(&sectorcache_block, TIMEOUT_BLOCK);
51
    mutex_lock(&sectorcache_mutex);
52
}
53
54
/* Linear scan for a valid entry caching 'sector' (and 'drive' on multi-drive
 * builds). Returns the entry, or NULL when the sector is not cached.
 * Caller must hold sectorcache_mutex.
 * BUG FIX: original returned 'null', which is not a C identifier; use NULL. */
static struct sectorcache_entry* sectorcache_find_entry(IF_MD2(int drive,) unsigned long sector)
{
    int i;
    for (i = 0; i < SECTORCACHE_SIZE; i++)
        if (sectorcache_entry[i].valid && sectorcache_entry[i].sector == sector
            IF_MD2( && sectorcache_entry[i].drive == drive))
            return &sectorcache_entry[i];
    return NULL;
}
63
64
static struct sectorcache_entry* sectorcache_allocate_entry(void)
65
{
66
    int i, idx;
67
    sectorcache_replace_idx++;
68
    for (int i = 0; i < SECTORCACHE_SIZE; i++)
69
    {
70
        idx = (sectorcache_replace_idx + i) % SECTORCACHE_SIZE;
71
        if (!sectorcache_entry[idx].valid)
72
            return &sectorcache_entry[idx];
73
    }
74
    for (int i = 0; i < SECTORCACHE_SIZE; i++)
75
    {
76
        idx = (sectorcache_replace_idx + i) % SECTORCACHE_SIZE;
77
        if (sectorcache_entry[idx].free)
78
            return &sectorcache_entry[idx];
79
    }
80
    for (int i = 0; i < SECTORCACHE_SIZE; i++)
81
    {
82
        idx = (sectorcache_replace_idx + i) % SECTORCACHE_SIZE;
83
        if (!sectorcache_entry[idx].locks)
84
            return &sectorcache_entry[idx];
85
    }
86
    return null;
87
}
88
89
/* Read 'count' sectors starting at 'start' into 'buf'; 'keepincache' hints
 * whether sectors not already cached should be added to the cache. */
int sectorcache_readthrough(IF_MD2(int drive,) unsigned long start, int count, void* buf, bool keepincache);
/* Write 'count' sectors starting at 'start' from 'buf'; same caching hint. */
int sectorcache_writethrough(IF_MD2(int drive,) unsigned long start, int count, const void* buf, bool keepincache);
/* Read 'sector' into the cache and lock its entry; *handle receives the entry. */
int sectorcache_lock(IF_MD2(int drive,) unsigned long sector, struct sectorcache_entry** handle, bool exclusive);
/* Release a lock; 'clean' requests writeback, 'keepincache' keeps the entry cached. */
int sectorcache_unlock(struct sectorcache_entry** handle, bool clean, bool keepincache);
93
94
int sectorcache_markdirty(struct sectorcache_entry* handle)
95
{
96
    mutex_lock(&sectorcache_mutex);
97
    handle->dirty = true;
98
    mutex_unlock(&sectorcache_mutex);
99
}
100
101
int sectorcache_markinconsistent(struct sectorcache_entry* handle)
102
{
103
    mutex_lock(&sectorcache_mutex);
104
    handle->consistent = false;
105
    mutex_unlock(&sectorcache_mutex);
106
}
107
108
int sectorcache_markconsistent(struct sectorcache_entry* handle)
109
{
110
    mutex_lock(&sectorcache_mutex);
111
    handle->consistent = true;
112
    mutex_unlock(&sectorcache_mutex);
113
    wakeup_signal(&sectorcache_block);
114
}
115
116
/* Write a single entry back to storage if needed. */
int sectorcache_clean(struct sectorcache_entry* handle);
117
118
/* Write back every dirty entry, waiting (with the mutex dropped) for
 * inconsistent entries to become consistent, then optionally flush the
 * storage layer. Returns 0 on success.
 * BUG FIXES: an inconsistent entry was cleaned immediately after the wait,
 * even though it may still be inconsistent (wait_block returns after any
 * signal, or a timeout) -- now we skip it and let the retry pass re-check.
 * Also: declared int but had no return statement. */
int sectorcache_clean_all(bool flushstorage)
{
    int i;
    bool retry = true;
    mutex_lock(&sectorcache_mutex);
    while (retry)
    {
        retry = false;
        for (i = 0; i < SECTORCACHE_SIZE; i++)
            if (sectorcache_entry[i].dirty)
            {
                if (!sectorcache_entry[i].consistent)
                {
                    /* mutex is released during the wait, so entry state may
                     * have changed arbitrarily; re-examine on the retry pass */
                    sectorcache_wait_block();
                    retry = true;
                    continue;
                }
                sectorcache_clean(&sectorcache_entry[i]);
            }
    }
#ifdef HAVE_STORAGE_FLUSH
    if (flushstorage) storage_flush();
#endif
    mutex_unlock(&sectorcache_mutex);
    return 0;
}
142
143
/* Drop all cached state for a drive, e.g. after media change -- see the
 * design notes at the top of the file regarding locks and open handles. */
int sectorcache_invalidate(IF_MD2(int drive));