@@ -955,17 +955,34 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 	struct drm_gem_object *gobj;
 	struct amdgpu_vm_bo_base *base;
 	struct amdgpu_bo *robj;
+	struct drm_exec exec;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	int r;
 
+	if (args->padding)
+		return -EINVAL;
+
 	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (!gobj)
 		return -ENOENT;
 
 	robj = gem_to_amdgpu_bo(gobj);
 
-	r = amdgpu_bo_reserve(robj, false);
-	if (unlikely(r))
-		goto out;
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+		      DRM_EXEC_IGNORE_DUPLICATES, 0);
+	drm_exec_until_all_locked(&exec) {
+		r = drm_exec_lock_obj(&exec, gobj);
+		drm_exec_retry_on_contention(&exec);
+		if (r)
+			goto out_exec;
+
+		if (args->op == AMDGPU_GEM_OP_GET_MAPPING_INFO) {
+			r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
+			drm_exec_retry_on_contention(&exec);
+			if (r)
+				goto out_exec;
+		}
+	}
 
 	switch (args->op) {
 	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
@@ -976,7 +993,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
 		info.domains = robj->preferred_domains;
 		info.domain_flags = robj->flags;
-		amdgpu_bo_unreserve(robj);
+		drm_exec_fini(&exec);
 		if (copy_to_user(out, &info, sizeof(info)))
 			r = -EFAULT;
 		break;
@@ -985,20 +1002,17 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		if (drm_gem_is_imported(&robj->tbo.base) &&
 		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
 			r = -EINVAL;
-			amdgpu_bo_unreserve(robj);
-			break;
+			goto out_exec;
 		}
 		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
 			r = -EPERM;
-			amdgpu_bo_unreserve(robj);
-			break;
+			goto out_exec;
 		}
 		for (base = robj->vm_bo; base; base = base->next)
 			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
 						  amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
 				r = -EINVAL;
-				amdgpu_bo_unreserve(robj);
-				goto out;
+				goto out_exec;
 			}
 
 
@@ -1011,15 +1025,63 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 
 		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
 			amdgpu_vm_bo_invalidate(robj, true);
+		drm_exec_fini(&exec);
+		break;
+	case AMDGPU_GEM_OP_GET_MAPPING_INFO: {
+		struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj);
+		struct drm_amdgpu_gem_vm_entry *vm_entries;
+		struct amdgpu_bo_va_mapping *mapping;
+		int num_mappings = 0;
+		/*
+		 * num_entries is set as an input to the size of the user-allocated array of
+		 * drm_amdgpu_gem_vm_entry stored at args->value.
+		 * num_entries is sent back as output as the number of mappings the bo has.
+		 * If that number is larger than the size of the array, the ioctl must
+		 * be retried.
+		 */
+		vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL);
+		if (!vm_entries)
+			return -ENOMEM;
+
+		amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
+			if (num_mappings < args->num_entries) {
+				vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].offset = mapping->offset;
+				vm_entries[num_mappings].flags = mapping->flags;
+			}
+			num_mappings += 1;
+		}
+
+		amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
+			if (num_mappings < args->num_entries) {
+				vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].offset = mapping->offset;
+				vm_entries[num_mappings].flags = mapping->flags;
+			}
+			num_mappings += 1;
+		}
 
-		amdgpu_bo_unreserve(robj);
+		drm_exec_fini(&exec);
+
+		if (num_mappings > 0 && num_mappings <= args->num_entries)
+			r = copy_to_user(u64_to_user_ptr(args->value), vm_entries, num_mappings * sizeof(*vm_entries));
+
+		args->num_entries = num_mappings;
+
+		kvfree(vm_entries);
 		break;
+	}
 	default:
-		amdgpu_bo_unreserve(robj);
+		drm_exec_fini(&exec);
 		r = -EINVAL;
 	}
 
-out:
+	drm_gem_object_put(gobj);
+	return r;
+out_exec:
+	drm_exec_fini(&exec);
 	drm_gem_object_put(gobj);
 	return r;
 }
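
For context, here is a minimal userspace sketch of the retry protocol that the new AMDGPU_GEM_OP_GET_MAPPING_INFO comment describes: num_entries carries the caller's array size in and the BO's real mapping count out, so the caller grows the buffer and retries whenever the kernel reports more mappings than fit. This is illustrative only, not part of the patch; it assumes the drm_amdgpu_gem_op uapi struct gains the num_entries field this patch reads, that the header exposes AMDGPU_GEM_OP_GET_MAPPING_INFO and struct drm_amdgpu_gem_vm_entry, and that fd is an open amdgpu render node.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>	/* or <amdgpu_drm.h> via libdrm's include path */

/*
 * Hypothetical helper, not part of this patch: fetch all VM mappings of a
 * GEM handle via AMDGPU_GEM_OP_GET_MAPPING_INFO, growing the entry array
 * until it is large enough. On success, *entries_out and *count_out hold
 * the result and the caller frees the array.
 */
static int get_mapping_info(int fd, uint32_t handle,
			    struct drm_amdgpu_gem_vm_entry **entries_out,
			    uint32_t *count_out)
{
	struct drm_amdgpu_gem_vm_entry *entries = NULL, *tmp;
	struct drm_amdgpu_gem_op args = {
		.handle = handle,
		.op = AMDGPU_GEM_OP_GET_MAPPING_INFO,
		.num_entries = 4,	/* initial guess at the mapping count */
	};

	for (;;) {
		uint32_t asked = args.num_entries;

		tmp = realloc(entries, asked * sizeof(*entries));
		if (!tmp) {
			free(entries);
			return -1;
		}
		entries = tmp;
		args.value = (uintptr_t)entries;	/* user array the kernel fills */

		if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_OP, &args)) {
			free(entries);
			return -1;
		}

		/* On return, num_entries holds the BO's real mapping count. */
		if (args.num_entries <= asked)
			break;
		/* Array was too small: retry with the size the kernel reported. */
	}

	*entries_out = entries;
	*count_out = args.num_entries;
	return 0;
}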