KVM: ia64: Clean up vmm_ivt.S using tabs to indent every line

Use tabs for indentation throughout vmm_ivt.S.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Author:     Xiantao Zhang <xiantao.zhang@intel.com>
Date:       2008-11-21 21:04:37 +08:00
Committed:  Avi Kivity <avi@redhat.com>
Parent:     9f7d5bb5e2
Commit:     8fe0736763

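The patch itself is a mechanical whitespace conversion, and the commit does not say how it was generated. A minimal sketch of the kind of retabbing it performs is below; the script, the retab helper, and the 8-column tab stop (which matches kernel coding style) are assumptions for illustration, not part of the commit. GNU coreutils' unexpand -t 8 performs a comparable leading-whitespace conversion.

import sys

TABSTOP = 8  # assumed: Linux kernel style uses 8-column tabs

def retab(line: str) -> str:
    """Convert a line's leading spaces to tabs, one tab per TABSTOP columns.

    Leftover spaces that do not fill a full tab stop are kept as spaces,
    so alignment within a partial tab stop is preserved.
    """
    body = line.lstrip(" ")
    indent = len(line) - len(body)
    return "\t" * (indent // TABSTOP) + " " * (indent % TABSTOP) + body

if __name__ == "__main__":
    # Filter usage: python retab.py < vmm_ivt.S > vmm_ivt.S.new
    for line in sys.stdin:
        sys.stdout.write(retab(line.rstrip("\n")) + "\n")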

--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -1,5 +1,5 @@
 /*
- * /ia64/kvm_ivt.S
+ * arch/ia64/kvm/vmm_ivt.S
  *
  * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
  *	Stephane Eranian <eranian@hpl.hp.com>
@@ -115,7 +115,6 @@ ENTRY(kvm_vhpt_miss)
 	KVM_FAULT(0)
 END(kvm_vhpt_miss)
-
 	.org kvm_ia64_ivt+0x400
 ////////////////////////////////////////////////////////////////
 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
@@ -240,7 +239,7 @@ ENTRY(kvm_break_fault)
 	;;
 	KVM_SAVE_MIN_WITH_COVER_R19
 	;;
-	alloc r14=ar.pfs,0,0,4,0	// now it's safe (must be first in insn group!)
+	alloc r14=ar.pfs,0,0,4,0	//(must be first in insn group!)
 	mov out0=cr.ifa
 	mov out2=cr.isr		// FIXME: pity to make this slow access twice
 	mov out3=cr.iim		// FIXME: pity to make this slow access twice
@@ -456,13 +455,11 @@ END(kvm_virtual_exirq)
 	KVM_FAULT(14)
 	// this code segment is from 2.6.16.13
-
 	.org kvm_ia64_ivt+0x3c00
 ///////////////////////////////////////////////////////////////////////
 // 0x3c00 Entry 15 (size 64 bundles) Reserved
 	KVM_FAULT(15)
-
 	.org kvm_ia64_ivt+0x4000
 ///////////////////////////////////////////////////////////////////////
 // 0x4000 Entry 16 (size 64 bundles) Reserved
@@ -893,7 +890,7 @@ ENTRY(kvm_dispatch_virtualization_fault)
 	;;
 	KVM_SAVE_MIN_WITH_COVER_R19
 	;;
-	alloc r14=ar.pfs,0,0,2,0	// now it's safe (must be first in insn group!)
+	alloc r14=ar.pfs,0,0,2,0	// (must be first in insn group!)
 	mov out0=r13		//vcpu
 	adds r3=8,r2		// set up second base pointer
 	;;
@@ -917,7 +914,6 @@ ENTRY(kvm_dispatch_interrupt)
 	KVM_SAVE_MIN_WITH_COVER_R19	// uses r31; defines r2 and r3
 	;;
 	alloc r14=ar.pfs,0,0,1,0	// must be first in an insn group
-	//mov out0=cr.ivr		// pass cr.ivr as first arg
 	adds r3=8,r2		// set up second base pointer for SAVE_REST
 	;;
 	ssm psr.ic
@@ -934,9 +930,6 @@ ENTRY(kvm_dispatch_interrupt)
 	br.call.sptk.many b6=kvm_ia64_handle_irq
 END(kvm_dispatch_interrupt)
-
-
-
 GLOBAL_ENTRY(ia64_leave_nested)
 	rsm psr.i
 	;;
@@ -1065,13 +1058,10 @@ GLOBAL_ENTRY(ia64_leave_nested)
 	rfi
 END(ia64_leave_nested)
-
-
 GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
 	/*
 	 * work.need_resched etc. mustn't get changed
 	 *by this CPU before it returns to
-	;;
 	 * user- or fsys-mode, hence we disable interrupts early on:
 	 */
 	adds r2 = PT(R4)+16,r12
@@ -1298,8 +1288,6 @@ GLOBAL_ENTRY(ia64_vmm_entry)
 	;;
 END(ia64_vmm_entry)
-
-
 /*
  * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
  *		u64 arg3, u64 arg4, u64 arg5,