diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -191,27 +191,23 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
-EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
{
/* might as well deliver this straight away */
kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
}
-EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
{
kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_SYSCALL, 0);
}
-EXPORT_SYMBOL(kvmppc_core_queue_syscall);
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
/* might as well deliver this straight away */
kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
-EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
@@ -235,19 +231,16 @@ void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
-EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
-EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);
void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
-EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
@@ -290,13 +283,11 @@ void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
kvmppc_set_dsisr(vcpu, flags);
kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
}
-EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);
void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
}
-EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
unsigned int priority)
@@ -428,7 +419,6 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
return 0;
}
-EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
bool *writable)
@@ -454,7 +444,6 @@ kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
-EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
@@ -501,7 +490,6 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
else
return EMULATE_AGAIN;
}
-EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
@@ -788,7 +776,6 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
-EXPORT_SYMBOL_GPL(kvmppc_set_msr);
int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
@@ -964,7 +951,6 @@ int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
return H_SUCCESS;
}
-EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
@@ -1004,7 +990,6 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
return H_SUCCESS;
}
-EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);
int kvmppc_core_check_processor_compat(void)
{
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -81,7 +81,6 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
return ret;
}
-EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);
static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
void *to, void *from, unsigned long n)
@@ -117,14 +116,12 @@ long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
return ret;
}
-EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);
long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
unsigned long n)
{
return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
}
-EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);
int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *gpte, u64 root,
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -606,7 +606,6 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
return ret;
}
-EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
unsigned long liobn, unsigned long ioba,
@@ -703,7 +702,6 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
return ret;
}
-EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
unsigned long liobn, unsigned long ioba,
@@ -752,4 +750,3 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
return ret;
}
-EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -294,7 +294,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
*/
return rc;
}
-EXPORT_SYMBOL_GPL(kvmppc_rtas_hcall);
void kvmppc_rtas_tokens_free(struct kvm *kvm)
{
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -870,7 +870,6 @@ int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
return H_SUCCESS;
}
-EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);
int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
@@ -917,7 +916,6 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
return rc;
}
-EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
/* -- Initialisation code etc. -- */
@@ -1498,7 +1496,6 @@ void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
ics->irq_state[idx].host_irq = host_irq;
ics->irq_state[idx].intr_cpu = -1;
}
-EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);
void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
unsigned long host_irq)
@@ -1513,4 +1510,3 @@ void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
ics->irq_state[idx].host_irq = 0;
}
-EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -126,7 +126,6 @@ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
vcpu->arch.xive_esc_on = 0;
}
}
-EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
/*
* Pull a vcpu's context from the XIVE on guest exit.
@@ -157,7 +156,6 @@ void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
vcpu->arch.xive_pushed = 0;
eieio();
}
-EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
{
@@ -191,7 +189,6 @@ void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
}
mb();
}
-EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);
/*
* This is a simple trigger for a generic XIVE IRQ. This must
@@ -1016,7 +1013,6 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
return 0;
}
-EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
struct irq_desc *host_desc)
@@ -1097,7 +1093,6 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
return 0;
}
-EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
@@ -2169,7 +2164,6 @@ int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
return H_UNSUPPORTED;
}
-EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);
int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
{
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -304,4 +304,3 @@ int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
return emulated;
}
-EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -41,9 +41,7 @@
#include "trace.h"
struct kvmppc_ops *kvmppc_hv_ops;
-EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
-EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
@@ -135,7 +133,6 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
local_irq_enable();
return r;
}
-EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
@@ -248,7 +245,6 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
return r;
}
-EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
@@ -277,7 +273,6 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
vcpu->arch.sane = r;
return r ? 0 : -EINVAL;
}
-EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
@@ -319,7 +314,6 @@ int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
return r;
}
-EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data)
@@ -362,7 +356,6 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
return EMULATE_DONE;
}
-EXPORT_SYMBOL_GPL(kvmppc_st);
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data)
@@ -411,7 +404,6 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
return EMULATE_DONE;
}
-EXPORT_SYMBOL_GPL(kvmppc_ld);
int kvm_arch_hardware_enable(void)
{
@@ -1281,7 +1273,6 @@ int kvmppc_handle_load(struct kvm_vcpu *vcpu,
{
return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
}
-EXPORT_SYMBOL_GPL(kvmppc_handle_load);
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
@@ -1378,7 +1369,6 @@ int kvmppc_handle_store(struct kvm_vcpu *vcpu,
return EMULATE_DO_MMIO;
}
-EXPORT_SYMBOL_GPL(kvmppc_handle_store);
#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
@@ -2478,26 +2468,22 @@ long kvmppc_alloc_lpid(void)
return lpid;
}
-EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
void kvmppc_claim_lpid(long lpid)
{
set_bit(lpid, lpid_inuse);
}
-EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
void kvmppc_free_lpid(long lpid)
{
clear_bit(lpid, lpid_inuse);
}
-EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
-EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
int kvm_arch_init(void *opaque)
{
Now that we have only one kvm module, we can stop exporting some symbols.

Signed-off-by: Fabiano Rosas <farosas@linux.ibm.com>
---
 arch/powerpc/kvm/book3s.c              | 15 ---------------
 arch/powerpc/kvm/book3s_64_mmu_radix.c |  3 ---
 arch/powerpc/kvm/book3s_64_vio.c       |  3 ---
 arch/powerpc/kvm/book3s_rtas.c         |  1 -
 arch/powerpc/kvm/book3s_xics.c         |  4 ----
 arch/powerpc/kvm/book3s_xive.c         |  6 ------
 arch/powerpc/kvm/emulate.c             |  1 -
 arch/powerpc/kvm/powerpc.c             | 14 --------------
 8 files changed, 47 deletions(-)
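
Background on why these lines can simply be deleted: EXPORT_SYMBOL_GPL() only matters when a symbol has to be resolved across module boundaries at module load time. Once every caller is linked into the same kvm module, the reference resolves at link time like any other intra-module call and the export table entry is dead weight. A minimal sketch of the pattern, using a hypothetical helper name that is not part of this patch:

#include <linux/module.h>

/* Hypothetical helper, for illustration only (not from this series). */
int kvmppc_example_helper(int x)
{
	return x + 1;
}

/*
 * The export below was only needed while a separately loaded,
 * GPL-compatible module called the helper:
 *
 *	EXPORT_SYMBOL_GPL(kvmppc_example_helper);
 *
 * With all callers now built into the single kvm module, the plain
 * function definition is sufficient and the export line is removed,
 * which is all the hunks above do.
 */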