[kernel] Fix block copying in the cache
[monolithium.git] / kernel / src / cache.c
/*
 * cache.c
 *
 * Copyright (C) 2016 Aleksandar Andrejevic <theflash@sdf.lonestar.org>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <device.h>
#include <cache.h>
#include <heap.h>

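/* Recursively walk the cache's AVL tree and write every dirty block back
 * to the backing store through write_proc. */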
static void flush_cache_internal(cache_descriptor_t *cache, avl_tree_t *node, void *context)
{
    if (node == NULL) return;

    cache_entry_t *entry = CONTAINER_OF(node, cache_entry_t, tree);
    if (entry->dirty)
    {
        dword_t written;
        dword_t ret = cache->write_proc(context, entry->data, entry->tree.key * (qword_t)cache->block_size, cache->block_size, &written);

        /* Clear the dirty flag only when the write succeeded and covered the whole block */
        if ((ret == ERR_SUCCESS) && (written == cache->block_size)) entry->dirty = FALSE;
    }

    flush_cache_internal(cache, node->left, context);
    flush_cache_internal(cache, node->right, context);
}

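/* Free every entry in the tree, children first, since freeing a node
 * invalidates its left and right pointers. */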
static void cleanup_cache_internal(avl_tree_t *node)
{
    if (node == NULL) return;

    cleanup_cache_internal(node->left);
    cleanup_cache_internal(node->right);

    cache_entry_t *entry = CONTAINER_OF(node, cache_entry_t, tree);
    heap_free(&evictable_heap, entry);
}

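/* Discard the entire cache. Dirty blocks are not written back, so call
 * flush_cache first if their contents must survive. */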
dword_t cleanup_cache(cache_descriptor_t *cache)
{
    cleanup_cache_internal(cache->root);
    cache->root = NULL;

    return ERR_SUCCESS;
}

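/* Copy length bytes starting at offset into buffer, loading any missing
 * blocks from the backing store. The cache resource is taken shared and
 * only upgraded to exclusive when a new block has to be inserted. */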
dword_t read_cache(cache_descriptor_t *cache, void *context, byte_t *buffer, qword_t offset, dword_t length, dword_t *bytes_read)
{
    dword_t ret = ERR_SUCCESS;
    qword_t i;
    qword_t first_block = offset / (qword_t)cache->block_size;
    qword_t last_block = (offset + length - 1) / (qword_t)cache->block_size;
    bool_t exclusive = FALSE;

    *bytes_read = 0;

    /* A zero-length read would otherwise underflow the last_block calculation above */
    if (length == 0) return ERR_SUCCESS;

    acquire_resource_shared(&cache->resource);

    for (i = first_block; i <= last_block; i++)
    {
        dword_t start_offset = 0, bytes_to_copy = cache->block_size;
        avl_tree_t *element = avl_tree_lookup(cache->root, i);

        if (element == NULL && !exclusive)
        {
            /* Upgrade to an exclusive lock before inserting, then look the
             * block up again in case another thread inserted it meanwhile */
            release_resource(&cache->resource);
            acquire_resource_exclusive(&cache->resource);
            exclusive = TRUE;
            element = avl_tree_lookup(cache->root, i);
        }

        if (element == NULL)
        {
            cache_entry_t *new_entry = (cache_entry_t*)heap_alloc(&evictable_heap, sizeof(cache_entry_t) + cache->block_size);
            if (new_entry == NULL)
            {
                ret = ERR_NOMEMORY;
                break;
            }

            new_entry->tree.key = i;
            new_entry->dirty = FALSE;

            ret = cache->read_proc(context, new_entry->data, i * (qword_t)cache->block_size, cache->block_size, NULL);
            if (ret != ERR_SUCCESS)
            {
                heap_free(&evictable_heap, new_entry);
                break;
            }

            avl_tree_insert(&cache->root, &new_entry->tree);
            element = &new_entry->tree;
        }

        cache_entry_t *entry = CONTAINER_OF(element, cache_entry_t, tree);

        /* Clip the copy to the requested range within the first and last blocks */
        if (first_block == last_block)
        {
            start_offset = (dword_t)(offset % (qword_t)cache->block_size);
            bytes_to_copy = length;
        }
        else if (i == first_block)
        {
            start_offset = (dword_t)(offset % (qword_t)cache->block_size);
            bytes_to_copy -= start_offset;
        }
        else if (i == last_block)
        {
            bytes_to_copy = length - *bytes_read;
        }

        memcpy(&buffer[*bytes_read], &entry->data[start_offset], bytes_to_copy);
        *bytes_read += bytes_to_copy;
    }

    release_resource(&cache->resource);
    return ret;
}

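/* Copy length bytes from buffer into the cache starting at offset. Partial
 * blocks are read from the backing store before being modified. With
 * CACHE_WRITE_THROUGH set, each block is also written to the device
 * immediately; otherwise it is only marked dirty. */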
dword_t write_cache(cache_descriptor_t *cache, void *context, const byte_t *buffer, qword_t offset, dword_t length, dword_t *bytes_written)
{
    dword_t ret = ERR_SUCCESS;
    qword_t i;
    qword_t first_block = offset / (qword_t)cache->block_size;
    qword_t last_block = (offset + length - 1) / (qword_t)cache->block_size;

    *bytes_written = 0;

    /* A zero-length write would otherwise underflow the last_block calculation above */
    if (length == 0) return ERR_SUCCESS;

    acquire_resource_exclusive(&cache->resource);

    for (i = first_block; i <= last_block; i++)
    {
        dword_t start_offset = 0, bytes_to_copy = cache->block_size;
        avl_tree_t *element = avl_tree_lookup(cache->root, i);

        if (element == NULL)
        {
            cache_entry_t *new_entry = (cache_entry_t*)heap_alloc(&evictable_heap, sizeof(cache_entry_t) + cache->block_size);
            if (new_entry == NULL)
            {
                ret = ERR_NOMEMORY;
                break;
            }

            new_entry->tree.key = i;
            new_entry->dirty = FALSE;

            /* Read the old contents first so a partial write leaves the
             * rest of the block intact */
            ret = cache->read_proc(context, new_entry->data, i * (qword_t)cache->block_size, cache->block_size, NULL);
            if (ret != ERR_SUCCESS)
            {
                heap_free(&evictable_heap, new_entry);
                break;
            }

            avl_tree_insert(&cache->root, &new_entry->tree);
            element = &new_entry->tree;
        }

        cache_entry_t *entry = CONTAINER_OF(element, cache_entry_t, tree);

        /* Clip the copy to the requested range within the first and last blocks */
        if (first_block == last_block)
        {
            start_offset = (dword_t)(offset % (qword_t)cache->block_size);
            bytes_to_copy = length;
        }
        else if (i == first_block)
        {
            start_offset = (dword_t)(offset % (qword_t)cache->block_size);
            bytes_to_copy -= start_offset;
        }
        else if (i == last_block)
        {
            bytes_to_copy = length - *bytes_written;
        }

        memcpy(&entry->data[start_offset], &buffer[*bytes_written], bytes_to_copy);
        *bytes_written += bytes_to_copy;

        if (cache->flags & CACHE_WRITE_THROUGH)
        {
            /* Keep the entry dirty if the device write fails or is short,
             * so that a later flush can retry it */
            dword_t written;
            ret = cache->write_proc(context, entry->data, i * (qword_t)cache->block_size, cache->block_size, &written);
            if ((ret != ERR_SUCCESS) || (written != cache->block_size)) entry->dirty = TRUE;
        }
        else
        {
            entry->dirty = TRUE;
        }
    }

    release_resource(&cache->resource);
    return ret;
}

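/* Write all dirty blocks back to the backing store. Blocks that fail to
 * write stay dirty so that a subsequent flush can retry them. */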
dword_t flush_cache(cache_descriptor_t *cache, void *context)
{
    flush_cache_internal(cache, cache->root, context);
    return ERR_SUCCESS;
}