The .so files for the vdso are named vdso*.so, and these structures
are images of the corresponding vdso. Naming them accordingly is more
consistent, very slightly more compact (by one character...), and
simplifies the Makefile just a little bit.

Signed-off-by: H. Peter Anvin (Intel) <[email protected]>
---
 arch/x86/entry/syscall_32.c  |  2 +-
 arch/x86/entry/vdso/Makefile |  8 ++++----
 arch/x86/entry/vdso/vma.c    | 10 +++++-----
 arch/x86/include/asm/elf.h   |  2 +-
 arch/x86/include/asm/vdso.h  |  6 +++---
 arch/x86/kernel/process_64.c |  6 +++---
 arch/x86/kernel/signal_32.c  |  4 ++--
 7 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index 2b15ea17bb7c..eff33a4e0adc 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -318,7 +318,7 @@ __visible noinstr bool do_fast_syscall_32(struct pt_regs 
*regs)
         * convention.  Adjust regs so it looks like we entered using int80.
         */
        unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
-                                       vdso_image_32.sym_int80_landing_pad;
+                                       vdso32_image.sym_int80_landing_pad;
 
        /*
         * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index f247f5f5cb44..7f833026d5b2 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -16,9 +16,9 @@ vobjs-$(CONFIG_X86_SGX)       += vsgx.o
 obj-y                                          += vma.o extable.o
 
 # vDSO images to build:
-obj-$(CONFIG_X86_64)                           += vdso-image-64.o
-obj-$(CONFIG_X86_X32_ABI)                      += vdso-image-x32.o
-obj-$(CONFIG_COMPAT_32)                                += vdso-image-32.o 
vdso32-setup.o
+obj-$(CONFIG_X86_64)                           += vdso64-image.o
+obj-$(CONFIG_X86_X32_ABI)                      += vdsox32-image.o
+obj-$(CONFIG_COMPAT_32)                                += vdso32-image.o 
vdso32-setup.o
 
 vobjs := $(addprefix $(obj)/, $(vobjs-y))
 vobjs32 := $(addprefix $(obj)/, $(vobjs32-y))
@@ -44,7 +44,7 @@ hostprogs += vdso2c
 quiet_cmd_vdso2c = VDSO2C  $@
       cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
 
-$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
+$(obj)/vdso%-image.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
        $(call if_changed,vdso2c)
 
 #
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index afe105b2f907..8f98c2d7c7a9 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -65,7 +65,7 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping 
*sm,
 static void vdso_fix_landing(const struct vdso_image *image,
                struct vm_area_struct *new_vma)
 {
-       if (in_ia32_syscall() && image == &vdso_image_32) {
+       if (in_ia32_syscall() && image == &vdso32_image) {
                struct pt_regs *regs = current_pt_regs();
                unsigned long vdso_land = image->sym_int80_landing_pad;
                unsigned long old_land_addr = vdso_land +
@@ -230,7 +230,7 @@ static int load_vdso32(void)
        if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
                return 0;
 
-       return map_vdso(&vdso_image_32, 0);
+       return map_vdso(&vdso32_image, 0);
 }
 
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
@@ -239,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, 
int uses_interp)
                if (!vdso64_enabled)
                        return 0;
 
-               return map_vdso(&vdso_image_64, 0);
+               return map_vdso(&vdso64_image, 0);
        }
 
        return load_vdso32();
@@ -252,7 +252,7 @@ int compat_arch_setup_additional_pages(struct linux_binprm 
*bprm,
        if (IS_ENABLED(CONFIG_X86_X32_ABI) && x32) {
                if (!vdso64_enabled)
                        return 0;
-               return map_vdso(&vdso_image_x32, 0);
+               return map_vdso(&vdsox32_image, 0);
        }
 
        if (IS_ENABLED(CONFIG_IA32_EMULATION))
@@ -267,7 +267,7 @@ bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
        const struct vdso_image *image = current->mm->context.vdso_image;
        unsigned long vdso = (unsigned long) current->mm->context.vdso;
 
-       if (in_ia32_syscall() && image == &vdso_image_32) {
+       if (in_ia32_syscall() && image == &vdso32_image) {
                if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad 
||
                    regs->ip == vdso + 
image->sym_vdso32_rt_sigreturn_landing_pad)
                        return true;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 6c8fdc96be7e..2ba5f166e58f 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -361,7 +361,7 @@ else if (IS_ENABLED(CONFIG_IA32_EMULATION))                 
        \
 
 #define VDSO_ENTRY                                                     \
        ((unsigned long)current->mm->context.vdso +                     \
-        vdso_image_32.sym___kernel_vsyscall)
+        vdso32_image.sym___kernel_vsyscall)
 
 struct linux_binprm;
 
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index b7253ef3205a..e8afbe9faa5b 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -27,9 +27,9 @@ struct vdso_image {
        long sym_vdso32_rt_sigreturn_landing_pad;
 };
 
-extern const struct vdso_image vdso_image_64;
-extern const struct vdso_image vdso_image_x32;
-extern const struct vdso_image vdso_image_32;
+extern const struct vdso_image vdso64_image;
+extern const struct vdso_image vdsox32_image;
+extern const struct vdso_image vdso32_image;
 
 extern int __init init_vdso_image(const struct vdso_image *image);
 
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 52a5c03c353c..ae00c788962a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -942,14 +942,14 @@ long do_arch_prctl_64(struct task_struct *task, int 
option, unsigned long arg2)
 #ifdef CONFIG_CHECKPOINT_RESTORE
 # ifdef CONFIG_X86_X32_ABI
        case ARCH_MAP_VDSO_X32:
-               return prctl_map_vdso(&vdso_image_x32, arg2);
+               return prctl_map_vdso(&vdsox32_image, arg2);
 # endif
 # ifdef CONFIG_IA32_EMULATION
        case ARCH_MAP_VDSO_32:
-               return prctl_map_vdso(&vdso_image_32, arg2);
+               return prctl_map_vdso(&vdso32_image, arg2);
 # endif
        case ARCH_MAP_VDSO_64:
-               return prctl_map_vdso(&vdso_image_64, arg2);
+               return prctl_map_vdso(&vdso64_image, arg2);
 #endif
 #ifdef CONFIG_ADDRESS_MASKING
        case ARCH_GET_UNTAG_MASK:
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 42bbc42bd350..e55cf19e68fe 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -282,7 +282,7 @@ int ia32_setup_frame(struct ksignal *ksig, struct pt_regs 
*regs)
                /* Return stub is in 32bit vsyscall page */
                if (current->mm->context.vdso)
                        restorer = current->mm->context.vdso +
-                               vdso_image_32.sym___kernel_sigreturn;
+                               vdso32_image.sym___kernel_sigreturn;
                else
                        restorer = &frame->retcode;
        }
@@ -368,7 +368,7 @@ int ia32_setup_rt_frame(struct ksignal *ksig, struct 
pt_regs *regs)
                restorer = ksig->ka.sa.sa_restorer;
        else
                restorer = current->mm->context.vdso +
-                       vdso_image_32.sym___kernel_rt_sigreturn;
+                       vdso32_image.sym___kernel_rt_sigreturn;
        unsafe_put_user(ptr_to_compat(restorer), &frame->pretcode, Efault);
 
        /*
-- 
2.51.1


Reply via email to