static void change_attributes(struct inode *inode, struct fuse_attr *attr)
{
- if(S_ISREG(inode->i_mode) && inode->i_size != attr->size) {
+ if(S_ISREG(inode->i_mode) && i_size_read(inode) != attr->size) {
#ifdef KERNEL_2_6
invalidate_inode_pages(inode->i_mapping);
#else
inode->i_nlink = attr->nlink;
inode->i_uid = attr->uid;
inode->i_gid = attr->gid;
- inode->i_size = attr->size;
+ i_size_write(inode, attr->size);
inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_blocks = attr->blocks;
#ifdef KERNEL_2_6
static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
{
inode->i_mode = attr->mode & S_IFMT;
- inode->i_size = attr->size;
+ i_size_write(inode, attr->size);
if(S_ISREG(inode->i_mode)) {
inode->i_op = &fuse_file_inode_operations;
fuse_init_file_inode(inode);
if(!out.h.error) {
if(attr->ia_valid & ATTR_SIZE &&
- outarg.attr.size < inode->i_size)
+ outarg.attr.size < i_size_read(inode))
vmtruncate(inode, outarg.attr.size);
change_attributes(inode, &outarg.attr);
size_t outsize = out.args[0].size;
if(outsize < PAGE_CACHE_SIZE)
memset(buffer + outsize, 0, PAGE_CACHE_SIZE - outsize);
+ flush_dcache_page(page);
SetPageUptodate(page);
}
{
size_t index = bl_index << FUSE_BLOCK_PAGE_SHIFT;
size_t end_index = ((bl_index + 1) << FUSE_BLOCK_PAGE_SHIFT) - 1;
- size_t file_end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+ size_t file_end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
if (end_index > file_end_index)
end_index = file_end_index;
{
size_t start_index = bl_index << FUSE_BLOCK_PAGE_SHIFT;
size_t end_index = ((bl_index + 1) << FUSE_BLOCK_PAGE_SHIFT) - 1;
- size_t file_end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+ size_t file_end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
int i;
{
size_t bl_index = pos >> FUSE_BLOCK_SHIFT;
size_t bl_end_index = (pos + count) >> FUSE_BLOCK_SHIFT;
- size_t bl_file_end_index = inode->i_size >> FUSE_BLOCK_SHIFT;
+ size_t bl_file_end_index = i_size_read(inode) >> FUSE_BLOCK_SHIFT;
if (bl_end_index > bl_file_end_index)
bl_end_index = bl_file_end_index;
static int get_write_count(struct inode *inode, struct page *page)
{
unsigned long end_index;
+ loff_t size = i_size_read(inode);
int count;
- end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+ end_index = size >> PAGE_CACHE_SHIFT;
if(page->index < end_index)
count = PAGE_CACHE_SIZE;
else {
- count = inode->i_size & (PAGE_CACHE_SIZE - 1);
+ count = size & (PAGE_CACHE_SIZE - 1);
if(page->index > end_index || count == 0)
return 0;
}
err = write_buffer(inode, page, offset, to - offset);
if(!err) {
loff_t pos = (page->index << PAGE_CACHE_SHIFT) + to;
- if(pos > inode->i_size)
- inode->i_size = pos;
+ if(pos > i_size_read(inode))
+ i_size_write(inode, pos);
}
return err;
}
#include <linux/list.h>
#include <linux/spinlock.h>
+#ifndef KERNEL_2_6
+#define i_size_read(inode) ((inode)->i_size)
+#define i_size_write(inode, size) do { (inode)->i_size = (size); } while(0)
+#endif
+
/** Read combining parameters */
#define FUSE_BLOCK_SHIFT 16
#define FUSE_BLOCK_SIZE 65536