/*
 * cache.c
 *
 * Copyright (C) 2016 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <device.h>
#include <cache.h>
#include <heap.h>
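
/*
 * Recursively walks the AVL tree of cache entries and writes every dirty
 * block back to the backing store through write_proc. An entry is marked
 * clean only when the write succeeds and the whole block was written.
 */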
static void flush_cache_internal(cache_descriptor_t *cache, avl_node_t *node, void *context)
{
    if (node == NULL) return;

    cache_entry_t *entry = CONTAINER_OF(node, cache_entry_t, node);
    if (entry->dirty)
    {
        dword_t written;
        dword_t ret = cache->write_proc(context, entry->data, entry->address * cache->block_size, cache->block_size, &written);
        if ((ret == ERR_SUCCESS) && (written == cache->block_size)) entry->dirty = FALSE;
    }

    flush_cache_internal(cache, node->left, context);
    flush_cache_internal(cache, node->right, context);
}
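
/*
 * Recursively frees every cache entry in the subtree rooted at node,
 * releasing both children before the node itself.
 */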
static void cleanup_cache_internal(avl_node_t *node)
{
    if (node == NULL) return;

    cleanup_cache_internal(node->left);
    cleanup_cache_internal(node->right);

    cache_entry_t *entry = CONTAINER_OF(node, cache_entry_t, node);
    heap_free(&evictable_heap, entry);
}
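
/*
 * AVL tree comparison callback: orders cache entries by their 64-bit
 * block address.
 */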
static int compare_address(const void *key1, const void *key2)
{
    const qword_t first = *(const qword_t*)key1;
    const qword_t second = *(const qword_t*)key2;

    if (first < second) return -1;
    else if (first == second) return 0;
    else return 1;
}
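
/*
 * Initializes a cache descriptor with the given block size, flags and
 * backing read/write callbacks, and creates an empty AVL tree of cache
 * entries keyed by block address.
 */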
void init_cache(cache_descriptor_t *cache,
                dword_t flags,
                dword_t block_size,
                read_write_buffer_proc_t read_proc,
                read_write_buffer_proc_t write_proc)
{
    cache->enabled = TRUE;
    cache->resource = 0;
    cache->flags = flags;
    cache->block_size = block_size;
    cache->read_proc = read_proc;
    cache->write_proc = write_proc;

    AVL_TREE_INIT(&cache->entries, cache_entry_t, node, address, compare_address);
}
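
/*
 * Frees all cache entries and leaves the tree empty. Dirty blocks are not
 * written back here; call flush_cache() first if their contents matter.
 */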
void cleanup_cache(cache_descriptor_t *cache)
{
    cleanup_cache_internal(cache->entries.root);
    cache->entries.root = NULL;
}
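
/*
 * Reads length bytes starting at offset through the cache, one block at a
 * time. Cached blocks are copied directly; missing blocks are loaded from
 * the backing store (after upgrading to the exclusive lock) and inserted
 * into the tree before being copied out.
 */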
dword_t read_cache(cache_descriptor_t *cache, void *context, byte_t *buffer, qword_t offset, dword_t length, dword_t *bytes_read)
{
    dword_t ret = ERR_SUCCESS;
    qword_t i;
    qword_t first_block = offset / (qword_t)cache->block_size;
    qword_t last_block = (offset + length - 1) / (qword_t)cache->block_size;
    bool_t exclusive = FALSE;

    acquire_resource_shared(&cache->resource);
    *bytes_read = 0;

    for (i = first_block; i <= last_block; i++)
    {
        dword_t start_offset = 0, bytes_to_copy = cache->block_size;
        avl_node_t *element = avl_tree_lookup(&cache->entries, &i);

        if (element == NULL && !exclusive)
        {
            release_resource(&cache->resource);
            acquire_resource_exclusive(&cache->resource);
            exclusive = TRUE;
            element = avl_tree_lookup(&cache->entries, &i);
        }

        if (element == NULL)
        {
            cache_entry_t *new_entry = (cache_entry_t*)heap_alloc(&evictable_heap, sizeof(cache_entry_t) + cache->block_size);
            if (new_entry == NULL)
            {
                ret = ERR_NOMEMORY;
                break;
            }

            new_entry->address = i;
            new_entry->dirty = FALSE;

            ret = cache->read_proc(context, new_entry->data, i * (qword_t)cache->block_size, cache->block_size, NULL);
            if (ret != ERR_SUCCESS)
            {
                heap_free(&evictable_heap, new_entry);
                break;
            }

            avl_tree_insert(&cache->entries, &new_entry->node);
            element = &new_entry->node;
        }

        cache_entry_t *entry = CONTAINER_OF(element, cache_entry_t, node);

        if (first_block == last_block)
        {
            start_offset = (dword_t)(offset % (qword_t)cache->block_size);
            bytes_to_copy = length;
        }
        else if (i == first_block)
        {
            start_offset = (dword_t)(offset % (qword_t)cache->block_size);
            bytes_to_copy -= start_offset;
        }
        else if (i == last_block)
        {
            bytes_to_copy = length - *bytes_read;
        }

        memcpy(&buffer[*bytes_read], &entry->data[start_offset], bytes_to_copy);
        *bytes_read += bytes_to_copy;
    }

    release_resource(&cache->resource);
    return ret;
}
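
/*
 * Writes length bytes starting at offset through the cache. Blocks not yet
 * cached are first read from the backing store so that partial block writes
 * do not lose data. With CACHE_WRITE_THROUGH the block is written back
 * immediately and marked dirty only if that write fails; otherwise it is
 * simply marked dirty for a later flush_cache().
 */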
dword_t write_cache(cache_descriptor_t *cache, void *context, const byte_t *buffer, qword_t offset, dword_t length, dword_t *bytes_written)
{
    dword_t ret = ERR_SUCCESS;
    qword_t i;
    qword_t first_block = offset / (qword_t)cache->block_size;
    qword_t last_block = (offset + length - 1) / (qword_t)cache->block_size;

    acquire_resource_exclusive(&cache->resource);
    *bytes_written = 0;

    for (i = first_block; i <= last_block; i++)
    {
        dword_t start_offset = 0, bytes_to_copy = cache->block_size;
        avl_node_t *element = avl_tree_lookup(&cache->entries, &i);

        if (element == NULL)
        {
            cache_entry_t *new_entry = (cache_entry_t*)heap_alloc(&evictable_heap, sizeof(cache_entry_t) + cache->block_size);
            if (new_entry == NULL)
            {
                ret = ERR_NOMEMORY;
                break;
            }

            new_entry->address = i;
            new_entry->dirty = FALSE;

            ret = cache->read_proc(context, new_entry->data, i * (qword_t)cache->block_size, cache->block_size, NULL);
            if (ret != ERR_SUCCESS)
            {
                heap_free(&evictable_heap, new_entry);
                break;
            }

            avl_tree_insert(&cache->entries, &new_entry->node);
            element = &new_entry->node;
        }

        cache_entry_t *entry = CONTAINER_OF(element, cache_entry_t, node);

        if (first_block == last_block)
        {
            start_offset = (dword_t)(offset % (qword_t)cache->block_size);
            bytes_to_copy = length;
        }
        else if (i == first_block)
        {
            start_offset = (dword_t)(offset % (qword_t)cache->block_size);
            bytes_to_copy -= start_offset;
        }
        else if (i == last_block)
        {
            bytes_to_copy = length - *bytes_written;
        }

        memcpy(&entry->data[start_offset], &buffer[*bytes_written], bytes_to_copy);
        *bytes_written += bytes_to_copy;

        if (cache->flags & CACHE_WRITE_THROUGH)
        {
            dword_t written;
            ret = cache->write_proc(context, entry->data, i * (qword_t)cache->block_size, cache->block_size, &written);
            if ((ret != ERR_SUCCESS) || (written != cache->block_size)) entry->dirty = TRUE;
        }
        else
        {
            entry->dirty = TRUE;
        }
    }

    release_resource(&cache->resource);
    return ret;
}
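
/*
 * Writes every dirty cache entry back to the backing store. Entries remain
 * in the tree so later reads can still be served from the cache.
 */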
dword_t flush_cache(cache_descriptor_t *cache, void *context)
{
    flush_cache_internal(cache, cache->entries.root, context);
    return ERR_SUCCESS;
}
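
/*
 * Usage sketch (hypothetical, not part of this file): a block device driver
 * might wire its transfer routines into a cache roughly like this. The
 * disk_read/disk_write callbacks, the device pointer and the 512-byte block
 * size are assumptions made for illustration only.
 *
 *     cache_descriptor_t cache;
 *     init_cache(&cache, CACHE_WRITE_THROUGH, 512, disk_read, disk_write);
 *
 *     byte_t sector[512];
 *     dword_t count;
 *     if (read_cache(&cache, device, sector, 0, sizeof(sector), &count) == ERR_SUCCESS)
 *     {
 *         // the first 512 bytes of the device are now in sector[]
 *     }
 *
 *     flush_cache(&cache, device);
 *     cleanup_cache(&cache);
 */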