@@ -173,7 +173,6 @@ static void nvfs_mgroup_free(nvfs_mgroup_ptr_t nvfs_mgroup, bool from_dma)
         nvfs_dbg("freeing base_index %lx(ref:%d) found \n",
                  nvfs_mgroup->base_index, atomic_read(&nvfs_mgroup->ref));
         kfree(nvfs_mgroup);
-        nvfs_mgroup = NULL;
 }
 
 
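The dropped assignment was a dead store: nvfs_mgroup is a pointer passed by value, so nulling it after kfree() only clears the callee's local copy and leaves the caller's pointer untouched. A minimal sketch of that point, using a hypothetical struct foo purely for illustration:

#include <linux/slab.h>

struct foo { int x; };

static void free_foo(struct foo *p)
{
        kfree(p);
        p = NULL;   /* dead store: only this local copy becomes NULL */
}

static void caller(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        free_foo(f);
        /* f still holds the stale address here; the callee cannot reset it. */
}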
@@ -225,7 +224,7 @@ void nvfs_mgroup_put_dma(nvfs_mgroup_ptr_t nvfs_mgroup) {
 
 static nvfs_mgroup_ptr_t nvfs_get_mgroup_from_vaddr_internal(u64 cpuvaddr)
 {
-        struct page *page;
+        struct page *page = NULL;
         int ret;
         unsigned long cur_base_index = 0;
         nvfs_mgroup_ptr_t nvfs_mgroup = NULL;
@@ -371,12 +370,12 @@ nvfs_mgroup_ptr_t nvfs_mgroup_pin_shadow_pages(u64 cpuvaddr, unsigned long lengt
         count = DIV_ROUND_UP(length, PAGE_SIZE);
         block_count = DIV_ROUND_UP(length, NVFS_BLOCK_SIZE);
         pages = (struct page **) kmalloc(count * sizeof(struct page *), GFP_KERNEL);
-
         if (!pages) {
                 nvfs_err("%s:%d shadow buffer pages allocation failed\n",
                          __func__, __LINE__);
                 goto out;
         }
+        memset(pages, 0, count * sizeof(struct page *));
 
 #ifdef CONFIG_FAULT_INJECTION
         if (nvfs_fault_trigger(&nvfs_pin_shadow_pages_error)) {
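In the hunk above, the zero fill of the shadow page-pointer array now happens only after the allocation check, so memset() can never run on a NULL pages pointer and every unused slot starts out NULL. A minimal sketch of the same pattern under those assumptions, with a hypothetical helper name; kcalloc() would be the usual single-call way to get an equivalently zeroed allocation:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm_types.h>

static struct page **alloc_page_array(unsigned long count)
{
        struct page **pages;

        pages = kmalloc(count * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return NULL;    /* bail out before touching the buffer */

        /* Zero only after the check; unused slots stay NULL for cleanup. */
        memset(pages, 0, count * sizeof(struct page *));
        return pages;
}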
@@ -625,7 +624,7 @@ static int nvfs_mgroup_mmap_internal(struct file *filp, struct vm_area_struct *v
         nvfs_mgroup_ptr_t nvfs_mgroup, nvfs_new_mgroup;
         struct nvfs_gpu_args *gpu_info;
         int os_pages_count;
-        vm_flags_t vm_flags;
+        vm_flags_t vm_flags, vm_flags_to_set;
 
         nvfs_stat64(&nvfs_n_mmap);
         /* check length - do not allow larger mappings than the number of
@@ -668,11 +667,13 @@ static int nvfs_mgroup_mmap_internal(struct file *filp, struct vm_area_struct *v
                 goto error;
         }
 
+
+        vm_flags_to_set = VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY;
         /* dont allow mremap to expand and dont allow copy on fork */
 #ifdef NVFS_VM_FLAGS_NOT_CONSTANT
-        vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY;
+        vma->vm_flags |= vm_flags_to_set;
 #else
-        vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY);
+        vm_flags_set(vma, vm_flags_to_set);
 #endif
         vma->vm_ops = &nvfs_mmap_ops;
         nvfs_new_mgroup = (nvfs_mgroup_ptr_t)kzalloc(sizeof(struct nvfs_io_mgroup), GFP_KERNEL);
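The two hunks above factor the mmap flag mask into a single vm_flags_to_set variable (and drop VM_IO from it), so both branches of the NVFS_VM_FLAGS_NOT_CONSTANT switch apply the same mask. The #ifdef exists because mainline kernels from 6.3 onward no longer let drivers write vma->vm_flags directly and provide vm_flags_set() instead. A hedged sketch of that compatibility pattern, using a hypothetical HAVE_VM_FLAGS_SET macro in place of whatever this driver's build detection actually defines:

#include <linux/mm.h>

/* HAVE_VM_FLAGS_SET is hypothetical: assume it is defined when
 * vm_flags_set() exists (kernels >= 6.3, where vma->vm_flags is
 * no longer directly writable). */
static void example_set_vma_flags(struct vm_area_struct *vma)
{
        const vm_flags_t flags = VM_MIXEDMAP | VM_DONTEXPAND |
                                 VM_DONTDUMP | VM_DONTCOPY;

#ifdef HAVE_VM_FLAGS_SET
        vm_flags_set(vma, flags);       /* accessor added in 6.3 */
#else
        vma->vm_flags |= flags;         /* older kernels: field is writable */
#endif
}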
@@ -697,7 +698,6 @@ static int nvfs_mgroup_mmap_internal(struct file *filp, struct vm_area_struct *v
                 nvfs_mgroup = nvfs_mgroup_get_unlocked(base_index);
                 if (unlikely(nvfs_mgroup && tries--)) {
                         nvfs_mgroup_put(nvfs_mgroup);
-                        continue;
                 } else {
                         nvfs_new_mgroup->base_index = base_index;
                         atomic_set(&nvfs_new_mgroup->ref, 1);
@@ -744,7 +744,6 @@ static int nvfs_mgroup_mmap_internal(struct file *filp, struct vm_area_struct *v
                 BUG_ON(vma->vm_private_data != NULL);
         }
 
-        j = 0;
         for (i = 0; i < nvfs_blocks_count; i++) {
                 j = i / nvfs_block_count_per_page;
                 if (nvfs_mgroup->nvfs_ppages[j] == NULL) {
@@ -1051,7 +1050,7 @@ int nvfs_mgroup_fill_mpages(nvfs_mgroup_ptr_t nvfs_mgroup, unsigned nr_blocks)
                 nvfs_mgroup_fill_mpage(nvfs_mgroup->nvfs_ppages[j/nvfs_block_count_per_page],
                                        &nvfs_mgroup->nvfs_metadata[j], nvfsio);
         }
-        nvfsio->nvfs_active_blocks_end = j - 1;
+        nvfsio->nvfs_active_blocks_end = (j > 0 ? j - 1 : 0);
 
         // clear the state for unqueued pages
         for (; j < nvfs_mgroup->nvfs_blocks_count; j++) {
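The final hunk clamps the active-block end marker: if the fill loop never advanced j past 0 (e.g. when there are no blocks to fill), j - 1 would give -1, or wrap to a huge value if the index type is unsigned (the declaration is not shown in this diff), so the end index is pinned at 0 instead. A tiny standalone sketch of the unsigned wrap-around the guard avoids:

#include <stdio.h>

int main(void)
{
        unsigned long j = 0;                          /* loop body never ran */
        unsigned long bad  = j - 1;                   /* wraps to ULONG_MAX */
        unsigned long good = (j > 0 ? j - 1 : 0);     /* clamped end marker */

        printf("bad=%lu good=%lu\n", bad, good);
        return 0;
}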