[coreboot] r3586 - trunk/coreboot-v2/src/northbridge/amd/amdk8

svn at coreboot.org
Fri Sep 19 23:19:46 CEST 2008


Author: mjones
Date: 2008-09-19 23:19:46 +0200 (Fri, 19 Sep 2008)
New Revision: 3586

Modified:
   trunk/coreboot-v2/src/northbridge/amd/amdk8/raminit_f.c
Log:
Whitespace and style cleanup. (trivial)

Signed-off-by: Marc Jones <marc.jones at amd.com>
Acked-by: Marc Jones <marc.jones at amd.com>



Modified: trunk/coreboot-v2/src/northbridge/amd/amdk8/raminit_f.c
===================================================================
--- trunk/coreboot-v2/src/northbridge/amd/amdk8/raminit_f.c	2008-09-19 20:16:25 UTC (rev 3585)
+++ trunk/coreboot-v2/src/northbridge/amd/amdk8/raminit_f.c	2008-09-19 21:19:46 UTC (rev 3586)
@@ -36,9 +36,9 @@
 static inline void print_raminit(const char *strval, uint32_t val)
 {
 #if CONFIG_USE_PRINTK_IN_CAR
-        printk_debug("%s%08x\r\n", strval, val);
+	printk_debug("%s%08x\r\n", strval, val);
 #else
-        print_debug(strval); print_debug_hex32(val); print_debug("\r\n");
+	print_debug(strval); print_debug_hex32(val); print_debug("\r\n");
 #endif
 }
 
@@ -55,7 +55,7 @@
 static inline void print_t(const char *strval)
 {
 #if RAM_TIMING_DEBUG == 1
-        print_debug(strval); 
+	print_debug(strval);
 #endif
 }
 
@@ -68,11 +68,12 @@
 #include "amdk8_f_pci.c"
 
 
-	// for PCI_ADDR(0, 0x18, 2, 0x98) index, and PCI_ADDR(0x, 0x18, 2, 0x9c) data
+	/* for PCI_ADDR(0, 0x18, 2, 0x98) index,
+	 and PCI_ADDR(0x, 0x18, 2, 0x9c) data */
 	/*
-		index: 
+		index:
 		[29: 0] DctOffset (Dram Controller Offset)
-		[30:30] DctAccessWrite (Dram Controller Read/Write Select)	
+		[30:30] DctAccessWrite (Dram Controller Read/Write Select)
 			0 = read access
 			1 = write access
 		[31:31] DctAccessDone (Dram Controller Access Done)
@@ -81,23 +82,23 @@
 
 		Data:
 		[31: 0] DctOffsetData (Dram Controller Offset Data)
-		
+
 		Read:
-			- Write the register num to DctOffset with DctAccessWrite = 0
+			- Write the register num to DctOffset with
+			  DctAccessWrite = 0
 			- poll the DctAccessDone until it = 1
 			- Read the data from DctOffsetData
 		Write:
 			- Write the data to DctOffsetData
 			- Write register num to DctOffset with DctAccessWrite = 1
 			- poll the DctAccessDone untio it = 1
-		
 	*/
 
 
 static void setup_resource_map(const unsigned int *register_values, int max)
 {
 	int i;
-	for(i = 0; i < max; i += 3) {
+	for (i = 0; i < max; i += 3) {
 		device_t dev;
 		unsigned where;
 		unsigned long reg;
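
As a reference for the index/data handshake documented in the comment above, here is a minimal sketch of the two access sequences. It is illustrative only and not taken from this patch: the mask names are invented, while pci_read_config32()/pci_write_config32() are the same primitives already used throughout this file.

#define DCT_ACCESS_WRITE (1U << 30)	/* [30:30] DctAccessWrite */
#define DCT_ACCESS_DONE  (1U << 31)	/* [31:31] DctAccessDone */

static uint32_t dct_read(device_t f2, uint32_t offset)
{
	/* Read: register number with DctAccessWrite = 0, poll
	 * DctAccessDone, then fetch the data register. */
	pci_write_config32(f2, 0x98, offset & ~DCT_ACCESS_WRITE);
	while (!(pci_read_config32(f2, 0x98) & DCT_ACCESS_DONE))
		;
	return pci_read_config32(f2, 0x9c);
}

static void dct_write(device_t f2, uint32_t offset, uint32_t data)
{
	/* Write: data first, then the register number with
	 * DctAccessWrite = 1, then poll DctAccessDone. */
	pci_write_config32(f2, 0x9c, data);
	pci_write_config32(f2, 0x98, offset | DCT_ACCESS_WRITE);
	while (!(pci_read_config32(f2, 0x98) & DCT_ACCESS_DONE))
		;
}
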
@@ -112,14 +113,15 @@
 
 static int controller_present(const struct mem_controller *ctrl)
 {
-        return pci_read_config32(ctrl->f0, 0) == 0x11001022;
+	return pci_read_config32(ctrl->f0, 0) == 0x11001022;
 }
 
 static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo)
 {
 	static const unsigned int register_values[] = {
 
-	/* Careful set limit registers before base registers which contain the enables */
+	/* Careful set limit registers before base registers which
+	   contain the enables */
 	/* DRAM Limit i Registers
 	 * F1:0x44 i = 0
 	 * F1:0x4C i = 1
@@ -238,67 +240,82 @@
 	 * [28:19] Address Mask (36-27)
 	 *	   The bits with an address mask of 1 are excluded from address comparison
 	 * [31:29] Reserved
-	 * 
+	 *
 	 */
 	PCI_ADDR(0, 0x18, 2, 0x60), 0xe007c01f, 0x00000000,
 	PCI_ADDR(0, 0x18, 2, 0x64), 0xe007c01f, 0x00000000,
 	PCI_ADDR(0, 0x18, 2, 0x68), 0xe007c01f, 0x00000000,
 	PCI_ADDR(0, 0x18, 2, 0x6C), 0xe007c01f, 0x00000000,
 
-        /* DRAM Control Register
-         * F2:0x78
-         * [ 3: 0] RdPtrInit ( Read Pointer Initial Value)
+	/* DRAM Control Register
+	 * F2:0x78
+	 * [ 3: 0] RdPtrInit ( Read Pointer Initial Value)
 	 * 	0x03-0x00: reserved
 	 * [ 6: 4] RdPadRcvFifoDly (Read Delay from Pad Receive FIFO)
 	 *	000 = reserved
 	 *	001 = reserved
-	 *      010 = 1.5 Memory Clocks
+	 *	010 = 1.5 Memory Clocks
 	 *	011 = 2 Memory Clocks
 	 *	100 = 2.5 Memory Clocks
 	 *	101 = 3 Memory Clocks
 	 *	110 = 3.5 Memory Clocks
 	 *	111 = Reseved
-         * [15: 7] Reserved
-         * [16:16] AltVidC3MemClkTriEn (AltVID Memory Clock Tristate Enable)
-	 *	Enables the DDR memory clocks to be tristated when alternate VID mode is enabled. This bit has no effect if the DisNbClkRamp bit (F3, 0x88) is set
+	 * [15: 7] Reserved
+	 * [16:16] AltVidC3MemClkTriEn (AltVID Memory Clock Tristate Enable)
+	 *	Enables the DDR memory clocks to be tristated when alternate VID
+	 *	mode is enabled. This bit has no effect if the DisNbClkRamp bit
+	 *	(F3, 0x88) is set
 	 * [17:17] DllTempAdjTime (DLL Temperature Adjust Cycle Time)
 	 *	0 = 5 ms
 	 *	1 = 1 ms
 	 * [18:18] DqsRcvEnTrain (DQS Receiver Enable Training Mode)
 	 *	0 = Normal DQS Receiver enable operation
 	 *	1 = DQS receiver enable training mode
-         * [31:19] reverved
-        */
-        PCI_ADDR(0, 0x18, 2, 0x78), 0xfff80000, (6<<4)|(6<<0),
+	  * [31:19] reverved
+	 */
+	PCI_ADDR(0, 0x18, 2, 0x78), 0xfff80000, (6<<4)|(6<<0),
 
-        /* DRAM Initialization Register
-         * F2:0x7C
-         * [15: 0] MrsAddress (Address for MRS/EMRS Commands)
-         *      this field specifies the dsata driven on the DRAM address pins 15-0 for MRS and EMRS commands
-         * [18:16] MrsBank (Bank Address for MRS/EMRS Commands)
-	 *	this files specifies the data driven on the DRAM bank pins for the MRS and EMRS commands
+	/* DRAM Initialization Register
+	 * F2:0x7C
+	 * [15: 0] MrsAddress (Address for MRS/EMRS Commands)
+	 *	this field specifies the dsata driven on the DRAM address pins
+	 *	15-0 for MRS and EMRS commands
+	 * [18:16] MrsBank (Bank Address for MRS/EMRS Commands)
+	 *	this files specifies the data driven on the DRAM bank pins for
+	 *	the MRS and EMRS commands
 	 * [23:19] reverved
-         * [24:24] SendPchgAll (Send Precharge All Command)
-	 *	Setting this bit causes the DRAM controller to send a precharge all command. This bit is cleared by the hardware after the command completes
+	 * [24:24] SendPchgAll (Send Precharge All Command)
+	 *	Setting this bit causes the DRAM controller to send a precharge
+	 *	all command. This bit is cleared by the hardware after the
+	 *	command completes
 	 * [25:25] SendAutoRefresh (Send Auto Refresh Command)
-	 *	Setting this bit causes the DRAM controller to send an auto refresh command. This bit is cleared by the hardware after the command completes
+	 *	Setting this bit causes the DRAM controller to send an auto
+	 *	refresh command. This bit is cleared by the hardware after the
+	 *	command completes
 	 * [26:26] SendMrsCmd (Send MRS/EMRS Command)
-	 *	Setting this bit causes the DRAM controller to send the MRS or EMRS command defined by the MrsAddress and MrsBank fields. This bit is cleared by the hardware adter the commmand completes
+	 *	Setting this bit causes the DRAM controller to send the MRS or
+	 *	EMRS command defined by the MrsAddress and MrsBank fields. This
+	 *	bit is cleared by the hardware adter the commmand completes
 	 * [27:27] DeassertMemRstX (De-assert Memory Reset)
-	 * 	Setting this bit causes the DRAM controller to de-assert the memory reset pin. This bit cannot be used to assert the memory reset pin
+	 * 	Setting this bit causes the DRAM controller to de-assert the
+	 *	memory reset pin. This bit cannot be used to assert the memory
+	 *	reset pin
 	 * [28:28] AssertCke (Assert CKE)
-	 *	setting this bit causes the DRAM controller to assert the CKE pins. This bit cannot be used to de-assert the CKE pins
+	 *	setting this bit causes the DRAM controller to assert the CKE
+	 *	pins. This bit cannot be used to de-assert the CKE pins
 	 * [30:29] reverved
 	 * [31:31] EnDramInit (Enable DRAM Initialization)
-	 *	Setting this bit puts the DRAM controller in a BIOS controlled DRAM initialization mode. BIOS must clear this bit aster DRAM initialization is complete.
-        */
-//        PCI_ADDR(0, 0x18, 2, 0x7C), 0x60f80000, 0, 
+	 *	Setting this bit puts the DRAM controller in a BIOS controlled
+	 *	DRAM initialization mode. BIOS must clear this bit aster DRAM
+	 *	initialization is complete.
+	 */
+//	PCI_ADDR(0, 0x18, 2, 0x7C), 0x60f80000, 0,
 
 
 	/* DRAM Bank Address Mapping Register
 	 * F2:0x80
 	 * Specify the memory module size
-	 * [ 3: 0] CS1/0 
+	 * [ 3: 0] CS1/0
 	 * [ 7: 4] CS3/2
 	 * [11: 8] CS5/4
 	 * [15:12] CS7/6
@@ -316,7 +333,7 @@
 	  9:  15     11     3    :4G
 	 10:  16     10     3    :4G
 	 11:  16     11     3    :8G
-	*/
+	 */
 	PCI_ADDR(0, 0x18, 2, 0x80), 0xffff0000, 0x00000000,
 	/* DRAM Timing Low Register
 	 * F2:0x88
@@ -335,7 +352,7 @@
 	 *	   01 = 4 clocks
 	 *	   10 = 5 clocks
 	 *	   11 = 6 clocks
-         * [ 7: 6] Reserved
+	 * [ 7: 6] Reserved
 	 * [ 9: 8] Trp (Row Precharge Time, Precharge-to-Active or Auto-Refresh)
 	 *	   00 = 3 clocks
 	 *	   01 = 4 clocks
@@ -344,36 +361,39 @@
 	 * [10:10] Reserved
 	 * [11:11] Trtp (Read to Precharge Time, read Cas# to precharge time)
 	 *	   0 = 2 clocks for Burst Length of 32 Bytes
-	 *             4 clocks for Burst Length of 64 Bytes
-         *         1 = 3 clocks for Burst Length of 32 Bytes
-         *             5 clocks for Burst Length of 64 Bytes
+	 *	       4 clocks for Burst Length of 64 Bytes
+	 *	   1 = 3 clocks for Burst Length of 32 Bytes
+	 *	       5 clocks for Burst Length of 64 Bytes
 	 * [15:12] Tras (Minimum Ras# Active Time)
-         *         0000 = reserved
+	 *	   0000 = reserved
 	 *	   0001 = reserved
-         *         0010 = 5 bus clocks
-         *         ...
-         *         1111 = 18 bus clocks
-	 * [19:16] Trc (Row Cycle Time, Ras#-active to Ras#-active or auto refresh of the same bank)
+	 *	   0010 = 5 bus clocks
+	 *	   ...
+	 *	   1111 = 18 bus clocks
+	 * [19:16] Trc (Row Cycle Time, Ras#-active to Ras#-active or auto
+	 * refresh of the same bank)
 	 *	   0000 = 11 bus clocks
 	 *	   0010 = 12 bus clocks
-         *	   ...
+	 *	   ...
 	 *	   1110 = 25 bus clocks
 	 *	   1111 = 26 bus clocks
-	 * [21:20] Twr (Write Recovery Time, From the last data to precharge, writes can go back-to-back) 
-         *         00 = 3 bus clocks
-         *         01 = 4 bus clocks
-         *         10 = 5 bus clocks
-         *         11 = 6 bus clocks
-         * [23:22] Trrd (Active-to-active (Ras#-to-Ras#) Delay of different banks)
-         *         00 = 2 bus clocks
-         *         01 = 3 bus clocks
-         *         10 = 4 bus clocks
-         *         11 = 5 bus clocks
-	 * [31:24] MemClkDis ( Disable the MEMCLK outputs for DRAM channel A, BIOS should set it to reduce the power consumption)
-         *         Bit		F(1207)		M2 Package	S1g1 Package
-	 *          0           N/A             MA1_CLK1	N/A
+	 * [21:20] Twr (Write Recovery Time, From the last data to precharge,
+	 * writes can go back-to-back)
+	 *	   00 = 3 bus clocks
+	 *	   01 = 4 bus clocks
+	 *	   10 = 5 bus clocks
+	 *	   11 = 6 bus clocks
+	 * [23:22] Trrd (Active-to-active(Ras#-to-Ras#) Delay of different banks)
+	 *	   00 = 2 bus clocks
+	 *	   01 = 3 bus clocks
+	 *	   10 = 4 bus clocks
+	 *	   11 = 5 bus clocks
+	 * [31:24] MemClkDis ( Disable the MEMCLK outputs for DRAM channel A,
+	 * BIOS should set it to reduce the power consumption)
+	 *	  Bit		F(1207)		M2 Package	S1g1 Package
+	 *	    0		N/A		MA1_CLK1	N/A
 	 *	    1		N/A		MA0_CLK1	MA0_CLK1
-         *	    2		MA3_CLK		N/A		N/A
+	 *	    2		MA3_CLK		N/A		N/A
 	 *	    3		MA2_CLK		N/A		N/A
 	 *	    4		MA1_CLK		MA1_CLK0	N/A
 	 *	    5		MA0_CLK		MA0_CLK0	MA0_CLK0
@@ -394,84 +414,95 @@
 	 *	   110 = 8 bus clocks
 	 *	   111 = 9 bus clocks
 	 * [ 7: 7] Reserved
-	 * [ 9: 8] Twtr (Internal DRAM Write-to-Read Command Delay, minium write-to-read delay when both access the same chip select)
-         *         00 = Reserved
-         *         01 = 1 bus clocks
-         *         10 = 2 bus clocks
-         *         11 = 3 bus clocks
-	 * [11:10] Twrrd (Write to Read DIMM Termination Turnaround, minimum write-to-read delay when accessing two different DIMMs)
-         *         00 = 0 bus clocks
-         *         01 = 1 bus clocks
-         *         10 = 2 bus clocks
-         *         11 = 3 bus clocks
-         * [13:12] Twrwr (Write to Write Timing)
-         *         00 = 1 bus clocks ( 0 idle cycle on the bus)
-         *         01 = 2 bus clocks ( 1 idle cycle on the bus)
-         *         10 = 3 bus clocks ( 2 idle cycles on the bus)
-         *         11 = Reserved
-         * [15:14] Trdrd ( Read to Read Timing)
-         *         00 = 2 bus clocks ( 1 idle cycle on the bus)
-         *         01 = 3 bus clocks ( 2 idle cycles on the bus)
-         *         10 = 4 bus clocks ( 3 idle cycles on the bus)
-         *         11 = 5 bus clocks ( 4 idel cycles on the bus)
-         * [17:16] Tref (Refresh Rate)
-         *         00 = Undefined behavior
-         *         01 = Reserved
-         *         10 = Refresh interval of 7.8 microseconds
-         *         11 = Refresh interval of 3.9 microseconds
+	 * [ 9: 8] Twtr (Internal DRAM Write-to-Read Command Delay, 
+	 * minium write-to-read delay when both access the same chip select)
+	 *	   00 = Reserved
+	 *	   01 = 1 bus clocks
+	 *	   10 = 2 bus clocks
+	 *	   11 = 3 bus clocks
+	 * [11:10] Twrrd (Write to Read DIMM Termination Turnaround, minimum
+	 * write-to-read delay when accessing two different DIMMs)
+	 *	   00 = 0 bus clocks
+	 *	   01 = 1 bus clocks
+	 *	   10 = 2 bus clocks
+	 *	   11 = 3 bus clocks
+	 * [13:12] Twrwr (Write to Write Timing)
+	 *	   00 = 1 bus clocks ( 0 idle cycle on the bus)
+	 *	   01 = 2 bus clocks ( 1 idle cycle on the bus)
+	 *	   10 = 3 bus clocks ( 2 idle cycles on the bus)
+	 *	   11 = Reserved
+	 * [15:14] Trdrd ( Read to Read Timing)
+	 *	   00 = 2 bus clocks ( 1 idle cycle on the bus)
+	 *	   01 = 3 bus clocks ( 2 idle cycles on the bus)
+	 *	   10 = 4 bus clocks ( 3 idle cycles on the bus)
+	 *	   11 = 5 bus clocks ( 4 idel cycles on the bus)
+	 * [17:16] Tref (Refresh Rate)
+	 *	   00 = Undefined behavior
+	 *	   01 = Reserved
+	 *	   10 = Refresh interval of 7.8 microseconds
+	 *	   11 = Refresh interval of 3.9 microseconds
 	 * [19:18] Reserved
-	 * [22:20] Trfc0 ( Auto-Refresh Row Cycle Time for the Logical DIMM0, based on DRAM density and speed)
-         *         000 = 75 ns (all speeds, 256Mbit)
-         *         001 = 105 ns (all speeds, 512Mbit)
-         *         010 = 127.5 ns (all speeds, 1Gbit)
-         *         011 = 195 ns (all speeds, 2Gbit)
-         *         100 = 327.5 ns (all speeds, 4Gbit)
-         *         101 = reserved
-         *         110 = reserved
-         *         111 = reserved
-	 * [25:23] Trfc1 ( Auto-Refresh Row Cycle Time for the Logical DIMM1, based on DRAM density and speed)
-	 * [28:26] Trfc2 ( Auto-Refresh Row Cycle Time for the Logical DIMM2, based on DRAM density and speed)
-	 * [31:29] Trfc3 ( Auto-Refresh Row Cycle Time for the Logical DIMM3, based on DRAM density and speed)
+	 * [22:20] Trfc0 ( Auto-Refresh Row Cycle Time for the Logical DIMM0,
+	 *	based on DRAM density and speed)
+	 *	   000 = 75 ns (all speeds, 256Mbit)
+	 *	   001 = 105 ns (all speeds, 512Mbit)
+	 *	   010 = 127.5 ns (all speeds, 1Gbit)
+	 *	   011 = 195 ns (all speeds, 2Gbit)
+	 *	   100 = 327.5 ns (all speeds, 4Gbit)
+	 *	   101 = reserved
+	 *	   110 = reserved
+	 *	   111 = reserved
+	 * [25:23] Trfc1 ( Auto-Refresh Row Cycle Time for the Logical DIMM1,
+	 *	based on DRAM density and speed)
+	 * [28:26] Trfc2 ( Auto-Refresh Row Cycle Time for the Logical DIMM2,
+	 *	based on DRAM density and speed)
+	 * [31:29] Trfc3 ( Auto-Refresh Row Cycle Time for the Logical DIMM3,
+	 *	based on DRAM density and speed)
 	 */
 	PCI_ADDR(0, 0x18, 2, 0x8c), 0x000c008f, (2 << 16)|(1 << 8),
 	/* DRAM Config Low Register
 	 * F2:0x90
 	 * [ 0: 0] InitDram (Initialize DRAM)
-	 *	   1 = write 1 cause DRAM controller to execute the DRAM initialization, when done it read to 0
+	 *	   1 = write 1 cause DRAM controller to execute the DRAM
+	 *	       initialization, when done it read to 0
 	 * [ 1: 1] ExitSelfRef ( Exit Self Refresh Command )
-	 *	   1 = write 1 causes the DRAM controller to bring the DRAMs out fo self refresh mode
+	 *	   1 = write 1 causes the DRAM controller to bring the DRAMs out
+	 *	       for self refresh mode
 	 * [ 3: 2] Reserved
 	 * [ 5: 4] DramTerm (DRAM Termination)
-         *         00 = On die termination disabled
-         *         01 = 75 ohms
-         *         10 = 150 ohms
-         *         11 = 50 ohms
+	 *	   00 = On die termination disabled
+	 *	   01 = 75 ohms
+	 *	   10 = 150 ohms
+	 *	   11 = 50 ohms
 	 * [ 6: 6] Reserved
 	 * [ 7: 7] DramDrvWeak ( DRAM Drivers Weak Mode)
 	 *	   0 = Normal drive strength mode.
 	 *	   1 = Weak drive strength mode
 	 * [ 8: 8] ParEn (Parity Enable)
-	 *	   1 = Enable address parity computation output, PAR, and enables the parity error input, ERR
-         * [ 9: 9] SelfRefRateEn (Faster Self Refresh Rate Enable)
-         *         1 = Enable high temperature ( two times normal ) self refresh rate
+	 *	   1 = Enable address parity computation output, PAR,
+	 *	       and enables the parity error input, ERR
+	 * [ 9: 9] SelfRefRateEn (Faster Self Refresh Rate Enable)
+	 * 	  1 = Enable high temperature ( two times normal )
+	 *	      self refresh rate
 	 * [10:10] BurstLength32 ( DRAM Burst Length Set for 32 Bytes)
 	 *	   0 = 64-byte mode
 	 *	   1 = 32-byte mode
-	 * [11:11] Width128 ( Width of DRAM interface) 
+	 * [11:11] Width128 ( Width of DRAM interface)
 	 *	   0 = the controller DRAM interface is 64-bits wide
 	 *	   1 = the controller DRAM interface is 128-bits wide
-         * [12:12] X4Dimm (DIMM 0 is x4)
-         * [13:13] X4Dimm (DIMM 1 is x4)
-         * [14:14] X4Dimm (DIMM 2 is x4)
-         * [15:15] X4Dimm (DIMM 3 is x4)
-         *         0 = DIMM is not x4
-         *         1 = x4 DIMM present
+	 * [12:12] X4Dimm (DIMM 0 is x4)
+	 * [13:13] X4Dimm (DIMM 1 is x4)
+	 * [14:14] X4Dimm (DIMM 2 is x4)
+	 * [15:15] X4Dimm (DIMM 3 is x4)
+	 *         0 = DIMM is not x4
+	 *         1 = x4 DIMM present
 	 * [16:16] UnBuffDimm ( Unbuffered DIMMs)
 	 *	   0 = Buffered DIMMs
 	 *	   1 = Unbuffered DIMMs
 	 * [18:17] Reserved
 	 * [19:19] DimmEccEn ( DIMM ECC Enable )
-		   1 =  ECC checking is being enabled for all DIMMs on the DRAM controller ( Through F3 0x44[EccEn])
+	 *	   1 =  ECC checking is being enabled for all DIMMs on the DRAM
+	 *	        controller ( Through F3 0x44[EccEn])
 	 * [31:20] Reserved
 	 */
 	PCI_ADDR(0, 0x18, 2, 0x90), 0xfff6004c, 0x00000010,
@@ -481,96 +512,121 @@
 	 *	   000 = 200MHz
 	 *	   001 = 266MHz
 	 *	   010 = 333MHz
-         *	   011 = reserved
+	 *	   011 = reserved
 	 *	   1xx = reserved
 	 * [ 3: 3] MemClkFreqVal (Memory Clock Freqency Valid)
-	 *	   1 = BIOS need to set the bit when setting up MemClkFreq to the proper value  
+	 *	   1 = BIOS need to set the bit when setting up MemClkFreq to
+	 *	       the proper value
 	 * [ 7: 4] MaxAsyncLat ( Maximum Asynchronous Latency)
-         *         0000 = 0 ns
-         *         ...
-         *         1111 = 15 ns
-         * [11: 8] Reserved
-         * [12:12] RDqsEn ( Read DQS Enable) This bit is only be set if x8 registered DIMMs are present in the system
-         *         0 = DM pins function as data mask pins
-         *         1 = DM pins function as read DQS pins
-         * [13:13] Reserved
-         * [14:14] DisDramInterface ( Disable the DRAM interface ) When this bit is set, the DRAM controller is disabled, and interface in low power state
-         *         0 = Enabled (default)
-         *         1 = Disabled
-         * [15:15] PowerDownEn ( Power Down Mode Enable ) 
-         *         0 = Disabled (default)
-         *         1 = Enabled
-         * [16:16] PowerDown ( Power Down Mode )
-         *         0 = Channel CKE Control
-         *         1 = Chip Select CKE Control
-         * [17:17] FourRankSODimm (Four Rank SO-DIMM) 
-         *         1 = this bit is set by BIOS to indicate that a four rank SO-DIMM is present
-         * [18:18] FourRankRDimm (Four Rank Registered DIMM)
-         *         1 = this bit is set by BIOS to indicate that a four rank registered DIMM is present
+	 *         0000 = 0 ns
+	 *         ...
+	 *         1111 = 15 ns
+	 * [11: 8] Reserved
+	 * [12:12] RDqsEn ( Read DQS Enable) This bit is only be set if x8
+	 *	   registered DIMMs are present in the system
+	 *	   0 = DM pins function as data mask pins
+	 *	   1 = DM pins function as read DQS pins
+	 * [13:13] Reserved
+	 * [14:14] DisDramInterface ( Disable the DRAM interface ) When this bit
+	 * is set, the DRAM controller is disabled, and interface in low power
+	 * state
+	 *	   0 = Enabled (default)
+	 *	   1 = Disabled
+	 * [15:15] PowerDownEn ( Power Down Mode Enable )
+	 *	   0 = Disabled (default)
+	 *	   1 = Enabled
+	 * [16:16] PowerDown ( Power Down Mode )
+	 *	   0 = Channel CKE Control
+	 *	   1 = Chip Select CKE Control
+	 * [17:17] FourRankSODimm (Four Rank SO-DIMM)
+	 *	   1 = this bit is set by BIOS to indicate that a four rank
+	 *	       SO-DIMM is present
+	 * [18:18] FourRankRDimm (Four Rank Registered DIMM)
+	 *         1 = this bit is set by BIOS to indicate that a four rank
+	 *	       registered DIMM is present
 	 * [19:19] Reserved
-         * [20:20] SlowAccessMode (Slow Access Mode (2T Mode))
-         *         0 = DRAM address and control signals are driven for one MEMCLK cycle
-         *         1 = One additional MEMCLK of setup time is provided on all DRAM address and control signals except CS, CKE, and ODT; i.e., these signals are drivern for two MEMCLK cycles rather than one
- 	 * [21:21] Reserved
-         * [22:22] BankSwizzleMode ( Bank Swizzle Mode), 
-         *         0 = Disabled (default)
-         *         1 = Enabled
-         * [23:23] Reserved
+	 * [20:20] SlowAccessMode (Slow Access Mode (2T Mode))
+	 *	   0 = DRAM address and control signals are driven for one 
+	 *	       MEMCLK cycle
+	 *	   1 = One additional MEMCLK of setup time is provided on all
+	 *	       DRAM address and control signals except CS, CKE, and ODT;
+	 *	       i.e., these signals are drivern for two MEMCLK cycles
+	 *	       rather than one
+	 * [21:21] Reserved
+	 * [22:22] BankSwizzleMode ( Bank Swizzle Mode),
+	 *         0 = Disabled (default)
+	 *         1 = Enabled
+	 * [23:23] Reserved
 	 * [27:24] DcqBypassMax ( DRAM Controller Queue Bypass Maximum)
 	 *	   0000 = No bypass; the oldest request is never bypassed
 	 *	   0001 = The oldest request may be bypassed no more than 1 time
 	 *	   ...
-	 *	   1111 = The oldest request may be bypassed no more than 15 times
-         * [31:28] FourActWindow ( Four Bank Activate Window) , not more than 4 banks in a 8 bank device are activated
-         *         0000 = No tFAW window restriction 
-         *         0001 = 8 MEMCLK cycles
-         *	   0010 = 9 MEMCLK cycles
-         *         ...
-         *         1101 = 20 MEMCLK cycles
+	 *	   1111 = The oldest request may be bypassed no more than 15\
+	 *	          times
+	 * [31:28] FourActWindow ( Four Bank Activate Window) , not more than
+	 *	   4 banks in a 8 bank device are activated
+	 *	   0000 = No tFAW window restriction
+	 *	   0001 = 8 MEMCLK cycles
+	 *	   0010 = 9 MEMCLK cycles
+	 *	   ...
+	 *	   1101 = 20 MEMCLK cycles
 	 *	   111x = reserved
 	 */
 	PCI_ADDR(0, 0x18, 2, 0x94), 0x00a82f00,0x00008000,
 	/* DRAM Delay Line Register
 	 * F2:0xa0
-	 * [ 0: 0] MemClrStatus (Memory Clear Status) :    ---------Readonly 
-	 *	   when set, this bit indicates that the memory clear function is complete. Only clear by reset. BIOS should not write or read the DRAM until this bit is set by hardware
+	 * [ 0: 0] MemClrStatus (Memory Clear Status) : Readonly
+	 *	   when set, this bit indicates that the memory clear function
+	 *	   is complete. Only clear by reset. BIOS should not write or
+	 *	   read the DRAM until this bit is set by hardware
 	 * [ 1: 1] DisableJitter ( Disable Jitter)
-	 *	   When set the DDR compensation circuit will not change the values unless the change is more than one step from the current value 
+	 *	   When set the DDR compensation circuit will not change the
+	 *	   values unless the change is more than one step from the
+	 *	   current value
 	 * [ 3: 2] RdWrQByp ( Read/Write Queue Bypass Count)
 	 *	   00 = 2
 	 *	   01 = 4
 	 *	   10 = 8
 	 *	   11 = 16
 	 * [ 4: 4] Mode64BitMux (Mismatched DIMM Support Enable)
-	 *	   1 When bit enables support for mismatched DIMMs when using 128-bit DRAM interface, the Width128 no effect, only for M2 and s1g1
+	 *	   1 When bit enables support for mismatched DIMMs when using
+	 *	   128-bit DRAM interface, the Width128 no effect, only for
+	 *	   AM2 and s1g1
 	 * [ 5: 5] DCC_EN ( Dynamica Idle Cycle Counter Enable)
-	 *	   When set to 1, indicates that each entry in the page tables dynamically adjusts the idle cycle limit based on page Conflict/Page Miss (PC/PM) traffic
+	 *	   When set to 1, indicates that each entry in the page tables
+	 *	   dynamically adjusts the idle cycle limit based on page
+	 *	    Conflict/Page Miss (PC/PM) traffic
 	 * [ 8: 6] ILD_lmt ( Idle Cycle Limit)
 	 *	   000 = 0 cycles
 	 *	   001 = 4 cycles
 	 *	   010 = 8 cycles
 	 *	   011 = 16 cycles
 	 *	   100 = 32 cycles
-	 * 	   101 = 64 cycles
+	 *	   101 = 64 cycles
 	 *	   110 = 128 cycles
-	 *	   111 = 256 cycles 
+	 *	   111 = 256 cycles
 	 * [ 9: 9] DramEnabled ( DRAM Enabled)
-	 *	   When Set, this bit indicates that the DRAM is enabled, this bit is set by hardware after DRAM initialization or on an exit from self refresh. The DRAM controller is intialized after the
-	 *	   hardware-controlled initialization process ( initiated by the F2 0x90[DramInit]) completes or when the BIOS-controlled initialization process completes (F2 0x7c(EnDramInit] is 
+	 *	   When Set, this bit indicates that the DRAM is enabled, this
+	 *	   bit is set by hardware after DRAM initialization or on an exit
+	 *	   from self refresh. The DRAM controller is intialized after the
+	 *	   hardware-controlled initialization process ( initiated by the
+	 *	   F2 0x90[DramInit]) completes or when the BIOS-controlled
+	 *	   initialization process completes (F2 0x7c(EnDramInit] is
 	 *	   written from 1 to 0)
 	 * [23:10] Reserved
-         * [31:24] MemClkDis ( Disable the MEMCLK outputs for DRAM channel B, BIOS should set it to reduce the power consumption)
-         *         Bit          F(1207)         M2 Package      S1g1 Package
-         *          0           N/A             MA1_CLK1        N/A
-         *          1           N/A             MA0_CLK1        MA0_CLK1
-         *          2           MA3_CLK         N/A             N/A
-         *          3           MA2_CLK         N/A             N/A
-         *          4           MA1_CLK         MA1_CLK0        N/A
-         *          5           MA0_CLK         MA0_CLK0        MA0_CLK0
-         *          6           N/A             MA1_CLK2        N/A
-         *          7           N/A             MA0_CLK2        MA0_CLK2
+	 * [31:24] MemClkDis ( Disable the MEMCLK outputs for DRAM channel B,
+	 *	   BIOS should set it to reduce the power consumption)
+	 *         Bit          F(1207)         M2 Package      S1g1 Package
+	 *          0           N/A             MA1_CLK1        N/A
+	 *          1           N/A             MA0_CLK1        MA0_CLK1
+	 *          2           MA3_CLK         N/A             N/A
+	 *          3           MA2_CLK         N/A             N/A
+	 *          4           MA1_CLK         MA1_CLK0        N/A
+	 *          5           MA0_CLK         MA0_CLK0        MA0_CLK0
+	 *          6           N/A             MA1_CLK2        N/A
+	 *          7           N/A             MA0_CLK2        MA0_CLK2
 	 */
-	PCI_ADDR(0, 0x18, 2, 0xa0), 0x00fffc00, 0xff000000, 
+	PCI_ADDR(0, 0x18, 2, 0xa0), 0x00fffc00, 0xff000000,
 
 	/* DRAM Scrub Control Register
 	 * F3:0x58
@@ -622,11 +678,12 @@
 	 */
 	PCI_ADDR(0, 0x18, 3, 0x60), 0xffffff00, 0x00000000,
 	};
-	// for PCI_ADDR(0, 0x18, 2, 0x98) index, and PCI_ADDR(0x, 0x18, 2, 0x9c) data
+	/* for PCI_ADDR(0, 0x18, 2, 0x98) index,
+	 and PCI_ADDR(0x, 0x18, 2, 0x9c) data */
 	/*
-		index: 
+		index:
 		[29: 0] DctOffset (Dram Controller Offset)
-		[30:30] DctAccessWrite (Dram Controller Read/Write Select)	
+		[30:30] DctAccessWrite (Dram Controller Read/Write Select)
 			0 = read access
 			1 = write access
 		[31:31] DctAccessDone (Dram Controller Access Done)
@@ -635,7 +692,7 @@
 
 		Data:
 		[31: 0] DctOffsetData (Dram Controller Offset Data)
-		
+
 		Read:
 			- Write the register num to DctOffset with DctAccessWrite = 0
 			- poll the DctAccessDone until it = 1
@@ -644,22 +701,22 @@
 			- Write the data to DctOffsetData
 			- Write register num to DctOffset with DctAccessWrite = 1
 			- poll the DctAccessDone untio it = 1
-		
+
 	*/
 	int i;
 	int max;
 
-        if (!controller_present(ctrl)) {
+	if (!controller_present(ctrl)) {
 		sysinfo->ctrl_present[ctrl->node_id] = 0;
-                return;
-        }
+		return;
+	}
 	sysinfo->ctrl_present[ctrl->node_id] = 1;
-	
+
 	print_spew("setting up CPU");
 	print_spew_hex8(ctrl->node_id);
 	print_spew(" northbridge registers\r\n");
 	max = sizeof(register_values)/sizeof(register_values[0]);
-	for(i = 0; i < max; i += 3) {
+	for (i = 0; i < max; i += 3) {
 		device_t dev;
 		unsigned where;
 		unsigned long reg;
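
The loop above walks register_values[] three entries at a time: a PCI_ADDR() target, an AND mask of bits to preserve, and an OR value of bits to set. A rough sketch of the read-modify-write each triple receives (the helper name is hypothetical, and decoding the encoded PCI_ADDR into the actual device and offset is omitted):

static void apply_triple(device_t dev, unsigned where,
			 uint32_t and_mask, uint32_t or_value)
{
	unsigned long reg;

	reg = pci_read_config32(dev, where);
	reg &= and_mask;	/* keep only the bits the mask preserves */
	reg |= or_value;	/* then set the requested bits */
	pci_write_config32(dev, where, reg);
}
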
@@ -674,6 +731,7 @@
 	print_spew("done.\r\n");
 }
 
+
 static int is_dual_channel(const struct mem_controller *ctrl)
 {
 	uint32_t dcl;
@@ -681,9 +739,10 @@
 	return dcl & DCL_Width128;
 }
 
+
 static int is_opteron(const struct mem_controller *ctrl)
 {
-	/* Test to see if I am an Opteron.  
+	/* Test to see if I am an Opteron.
 	 * FIXME Testing dual channel capability is correct for now
 	 * but a better test is probably required.
 	 * m2 and s1g1 support dual channel too. but only support unbuffered dimm
@@ -694,6 +753,7 @@
 	return !!(nbcap & NBCAP_128Bit);
 }
 
+
 static int is_registered(const struct mem_controller *ctrl)
 {
 	/* Test to see if we are dealing with registered SDRAM.
@@ -705,6 +765,7 @@
 	return !(dcl & DCL_UnBuffDimm);
 }
 
+
 static void spd_get_dimm_size(unsigned device, struct dimm_size *sz)
 {
 	/* Calculate the log base 2 size of a DIMM in bits */
@@ -716,13 +777,13 @@
 
 	value = spd_read_byte(device, SPD_ROW_NUM);	/* rows */
 	if (value < 0) goto hw_err;
-	if ((value & 0xff) == 0) goto val_err; // max is 16 ?
+	if ((value & 0xff) == 0) goto val_err; /* max is 16 ? */
 	sz->per_rank += value & 0xff;
 	sz->rows = value & 0xff;
 
 	value = spd_read_byte(device, SPD_COL_NUM);	/* columns */
 	if (value < 0) goto hw_err;
-	if ((value & 0xff) == 0) goto val_err;  //max is 11
+	if ((value & 0xff) == 0) goto val_err;  /* max is 11 */
 	sz->per_rank += value & 0xff;
 	sz->col = value & 0xff;
 
@@ -733,16 +794,17 @@
 	sz->per_rank += sz->bank;
 
 	/* Get the module data width and convert it to a power of two */
-	value = spd_read_byte(device, SPD_DATA_WIDTH);	
+	value = spd_read_byte(device, SPD_DATA_WIDTH);
 	if (value < 0) goto hw_err;
 	value &= 0xff;
 	if ((value != 72) && (value != 64)) goto val_err;
 	sz->per_rank += log2(value) - 3; //64 bit So another 3 lines
 
 	/* How many ranks? */
-	value = spd_read_byte(device, SPD_MOD_ATTRIB_RANK);	/* number of physical banks */
+	/* number of physical banks */
+	value = spd_read_byte(device, SPD_MOD_ATTRIB_RANK);
 	if (value < 0) goto hw_err;
-//	value >>= SPD_MOD_ATTRIB_RANK_NUM_SHIFT;
+/*	value >>= SPD_MOD_ATTRIB_RANK_NUM_SHIFT; */
 	value &= SPD_MOD_ATTRIB_RANK_NUM_MASK;
 	value += SPD_MOD_ATTRIB_RANK_NUM_BASE; // 0-->1, 1-->2, 3-->4
 	/*
@@ -756,16 +818,16 @@
 	}
 	sz->rank = value;
 
-	/* verify if per_rank is equal byte 31 
+	/* verify if per_rank is equal byte 31
 	  it has the DIMM size as a multiple of 128MB.
-         */
-        value = spd_read_byte(device, SPD_RANK_SIZE);
-        if (value < 0) goto hw_err;
-        value &= 0xff;
+	  */
+	value = spd_read_byte(device, SPD_RANK_SIZE);
+	if (value < 0) goto hw_err;
+	value &= 0xff;
 	value = log2(value);
-	if(value <=4 ) value += 8; // add back to 1G to high
+	if (value <=4 ) value += 8; // add back to 1G to high
 	value += (27-5); // make 128MB to the real lines
-	if( value != (sz->per_rank)) { 
+	if ( value != (sz->per_rank)) {
 		print_err("Bad RANK Size --\r\n");
 		goto val_err;
 	}
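
As a concrete instance of the byte-31 cross-check above: a 1 GB rank reports 0x01 in SPD_RANK_SIZE, log2(0x01) = 0 is <= 4 so 8 is added (per the DDR2 SPD rank-density encoding, the low bits cover the 1 GB-and-up sizes), and the fixed (27-5) = 22 brings it to 30, the log2 of 1 GB in bytes. That matches a per_rank accumulated from, say, 14 row bits, 10 column bits, 3 bank bits and 3 bits for the 64-bit data width.
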
@@ -775,7 +837,7 @@
  val_err:
 	die("Bad SPD value\r\n");
 	/* If an hw_error occurs report that I have no memory */
-hw_err:
+ hw_err:
 	sz->per_rank = 0;
 	sz->rows = 0;
 	sz->col = 0;
@@ -785,24 +847,25 @@
 	return;
 }
 
-static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size *sz, unsigned index, int is_Width128)
+
+static void set_dimm_size(const struct mem_controller *ctrl,
+			 struct dimm_size *sz, unsigned index, int is_Width128)
 {
 	uint32_t base0, base1;
 
-
 	/* For each base register.
 	 * Place the dimm size in 32 MB quantities in the bits 31 - 21.
 	 * The initialize dimm size is in bits.
 	 * Set the base enable bit0.
 	 */
-	
+
 	base0 = base1 = 0;
 
 	/* Make certain side1 of the dimm is at least 128MB */
 	if (sz->per_rank >= 27) {
 		base0 = (1 << ((sz->per_rank - 27 ) + 19)) | 1;
 	}
-	
+
 	/* Make certain side2 of the dimm is at least 128MB */
 	if (sz->rank > 1) { // 2 ranks or 4 ranks
 		base1 = (1 << ((sz->per_rank - 27 ) + 19)) | 1;
@@ -822,7 +885,7 @@
 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), base0);
 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), base1);
 #if QRANK_DIMM_SUPPORT == 1
-	if(sz->rank == 4) {
+	if (sz->rank == 4) {
 		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), base0);
 		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), base1);
 	}
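
As an example of the size encoding above: a 1 GB rank has per_rank = 30, so base0 = (1 << ((30 - 27) + 19)) | 1 = 0x00400001. Bit 19 of a chip-select base/mask register corresponds to address bit 27 (the 128 MB granularity noted in the DRAM CS Mask comment earlier), so bit 22 stands for 1 GB, and bit 0 is the chip-select enable. At this stage the CS base registers only carry sizes; real base addresses are assigned later when the chip selects are ordered or interleaved.
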
@@ -838,26 +901,26 @@
 		ClkDis0 = DTL_MemClkDis0_AM2;
 #elif CPU_SOCKET_TYPE == 0x12	/* S1G1 */
 		ClkDis0 = DTL_MemClkDis0_S1g1;
-#endif 
+#endif
 
 		dword = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW); //Channel A
 		dword &= ~(ClkDis0 >> index);
 #if QRANK_DIMM_SUPPORT == 1
-		if(sz->rank == 4) {
+		if (sz->rank == 4) {
 			dword &= ~(ClkDis0 >> (index+2));
 		}
 #endif
 		pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dword);
-	
-		if (is_Width128) { //Channel B	
-	                dword = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
-	                dword &= ~(ClkDis0 >> index);
+
+		if (is_Width128) { //Channel B
+			dword = pci_read_config32(ctrl->f2, DRAM_CTRL_MISC);
+			dword &= ~(ClkDis0 >> index);
 #if QRANK_DIMM_SUPPORT == 1
-	                if(sz->rank == 4) {
-        	                dword &= ~(ClkDis0 >> (index+2));
-                	}
+			if (sz->rank == 4) {
+				dword &= ~(ClkDis0 >> (index+2));
+			}
 #endif
-	                pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dword);
+			pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dword);
 		}
 
 	}
@@ -878,53 +941,57 @@
  11:  16     11     3    :8G
 */
 
-static void set_dimm_cs_map(const struct mem_controller *ctrl, struct dimm_size *sz, unsigned index)
+
+static void set_dimm_cs_map(const struct mem_controller *ctrl,
+			     struct dimm_size *sz, unsigned index)
 {
 	static const uint8_t cs_map_aaa[24] = {
-	        /* (bank=2, row=13, col=9)(3, 16, 11) ---> (0, 0, 0) (1, 3, 2) */
+		/* (bank=2, row=13, col=9)(3, 16, 11) ---> (0, 0, 0) (1, 3, 2) */
 	//Bank2
-	        0, 1, 3,
-	        0, 2, 6,
-	        0, 0, 0,
-	        0, 0, 0,
+		0, 1, 3,
+		0, 2, 6,
+		0, 0, 0,
+		0, 0, 0,
 	//Bank3
-	        0, 4, 0,
-	        0, 5, 8,
-	        0, 7, 9,
-	        0,10,11,
+		0, 4, 0,
+		0, 5, 8,
+		0, 7, 9,
+		0,10,11,
 	};
 
-        uint32_t map;
+	uint32_t map;
 
-        map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
-        map &= ~(0xf << (index * 4));
+	map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
+	map &= ~(0xf << (index * 4));
 #if QRANK_DIMM_SUPPORT == 1
-        if(sz->rank == 4) {
-                map &= ~(0xf << ( (index + 2) * 4));
-        }
+	if (sz->rank == 4) {
+		map &= ~(0xf << ( (index + 2) * 4));
+	}
 #endif
 
-        /* Make certain side1 of the dimm is at least 128MB */
-        if (sz->per_rank >= 27) {
-                unsigned temp_map;
-                temp_map = cs_map_aaa[(sz->bank-2)*3*4 + (sz->rows - 13)*3 + (sz->col - 9) ];
-                map |= temp_map << (index*4);
+	/* Make certain side1 of the dimm is at least 128MB */
+	if (sz->per_rank >= 27) {
+		unsigned temp_map;
+		temp_map = cs_map_aaa[(sz->bank-2)*3*4 + (sz->rows - 13)*3 + (sz->col - 9) ];
+		map |= temp_map << (index*4);
 #if QRANK_DIMM_SUPPORT == 1
-                if(sz->rank == 4) {
-                       map |=  temp_map << ( (index + 2) * 4);
-                }
+		if (sz->rank == 4) {
+			map |=  temp_map << ( (index + 2) * 4);
+		}
 #endif
-        }
+	}
 
-        pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
+	pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
 
 }
 
-static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask, struct mem_info *meminfo)
+
+static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask,
+			      struct mem_info *meminfo)
 {
 	int i;
-	
-	for(i = 0; i < DIMM_SOCKETS; i++) {
+
+	for (i = 0; i < DIMM_SOCKETS; i++) {
 		struct dimm_size *sz = &(meminfo->sz[i]);
 		if (!(dimm_mask & (1 << i))) {
 			continue;
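
To make the cs_map_aaa[] lookup in set_dimm_cs_map() above concrete: a rank with 3 bank-address bits, 14 row bits and 11 column bits indexes cs_map_aaa[(3-2)*3*4 + (14-13)*3 + (11-9)] = cs_map_aaa[17] = 8, selecting bank-address-map mode 8. That is a 2 GB chip select, since 14 + 11 + 3 address bits over the 8-byte data path give 2^28 * 8 bytes.
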
@@ -939,8 +1006,9 @@
 	return dimm_mask;
 }
 
+
 static void route_dram_accesses(const struct mem_controller *ctrl,
-	unsigned long base_k, unsigned long limit_k)
+		                 unsigned long base_k, unsigned long limit_k)
 {
 	/* Route the addresses to the controller node */
 	unsigned node_id;
@@ -962,12 +1030,14 @@
 
 	limit_reg = 0x44 + index;
 	base_reg = 0x40 + index;
-	for(device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1); device += PCI_DEV(0, 1, 0)) {
+	for (device = PCI_DEV(0, 0x18, 1); device <= PCI_DEV(0, 0x1f, 1);
+	     device += PCI_DEV(0, 1, 0)) {
 		pci_write_config32(device, limit_reg, limit);
 		pci_write_config32(device, base_reg, base);
 	}
 }
 
+
 static void set_top_mem(unsigned tom_k, unsigned hole_startk)
 {
 	/* Error if I don't have memory */
@@ -981,7 +1051,7 @@
 	print_debug(" KB\r\n");
 
 	msr_t msr;
-	if(tom_k > (4*1024*1024)) {
+	if (tom_k > (4*1024*1024)) {
 		/* Now set top of memory */
 		msr.lo = (tom_k & 0x003fffff) << 10;
 		msr.hi = (tom_k & 0xffc00000) >> 22;
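
Since tom_k is in KB, the top-of-memory byte address is tom_k << 10, split across the two MSR halves. With 8 GB installed, tom_k = 0x800000, so msr.lo = (0x800000 & 0x003fffff) << 10 = 0 and msr.hi = (0x800000 & 0xffc00000) >> 22 = 2, together the 64-bit value 0x200000000 (8 GB).
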
@@ -993,8 +1063,8 @@
 	 */
 	if (tom_k >= 0x003f0000) {
 #if HW_MEM_HOLE_SIZEK != 0
-		if(hole_startk != 0) {
-	                tom_k = hole_startk;
+		if (hole_startk != 0) {
+			tom_k = hole_startk;
 		} else
 #endif
 		tom_k = 0x3f0000;
@@ -1008,20 +1078,20 @@
 {
 	/* 35 - 27 */
 
-        static const uint8_t csbase_low_f0_shift[] = {
-        /* 128MB */       (14 - (13-5)),
-        /* 256MB */       (15 - (13-5)),
-        /* 512MB */       (15 - (13-5)),
-	/* 512MB */       (16 - (13-5)),
-        /* 512MB */       (16 - (13-5)),
-        /* 1GB   */       (16 - (13-5)),
-        /* 1GB   */       (16 - (13-5)),
-        /* 2GB   */       (16 - (13-5)),
-        /* 2GB   */       (17 - (13-5)),
-	/* 4GB   */       (17 - (13-5)),
-        /* 4GB   */       (16 - (13-5)),
-	/* 8GB   */       (17 - (13-5)),
-        };
+	static const uint8_t csbase_low_f0_shift[] = {
+	 /* 128MB */       (14 - (13-5)),
+	 /* 256MB */       (15 - (13-5)),
+	 /* 512MB */       (15 - (13-5)),
+	 /* 512MB */       (16 - (13-5)),
+	 /* 512MB */       (16 - (13-5)),
+	 /* 1GB   */       (16 - (13-5)),
+	 /* 1GB   */       (16 - (13-5)),
+	 /* 2GB   */       (16 - (13-5)),
+	 /* 2GB   */       (17 - (13-5)),
+	 /* 4GB   */       (17 - (13-5)),
+	 /* 4GB   */       (16 - (13-5)),
+	 /* 8GB   */       (17 - (13-5)),
+	};
 
 	/* cs_base_high is not changed */
 
@@ -1038,13 +1108,13 @@
 	chip_selects = 0;
 	common_size = 0;
 	common_cs_mode = 0xff;
-	for(index = 0; index < 8; index++) {
+	for (index = 0; index < 8; index++) {
 		unsigned size;
 		unsigned cs_mode;
 		uint32_t value;
-		
+
 		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
-		
+
 		/* Is it enabled? */
 		if (!(value & 1)) {
 			continue;
@@ -1060,39 +1130,40 @@
 		}
 
 		value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
-                cs_mode =( value >> ((index>>1)*4)) & 0xf;
-                if(common_cs_mode == 0xff) {
-                	common_cs_mode = cs_mode;
-                }
-                /* The cs_mode differed fail */
-                if(common_cs_mode != cs_mode) {
-                        return 0;
-                }
+		cs_mode =( value >> ((index>>1)*4)) & 0xf;
+		if (common_cs_mode == 0xff) {
+			common_cs_mode = cs_mode;
+		}
+		/* The cs_mode differed fail */
+		if (common_cs_mode != cs_mode) {
+			return 0;
+		}
 	}
 
 	/* Chip selects can only be interleaved when there is
 	 * more than one and their is a power of two of them.
 	 */
 	bits = log2(chip_selects);
-	if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) { //chip_selects max = 8
+	if (((1 << bits) != chip_selects) || (bits < 1) || (bits > 3)) {
+		//chip_selects max = 8
 		return 0;
 	}
 
 	/* Find the bits of csbase that we need to interleave on */
 	csbase_inc = 1 << (csbase_low_f0_shift[common_cs_mode]);
-	if(is_Width128) {
+	if (is_Width128) {
 		csbase_inc <<=1;
-        }   
-	
+	}
 
-	/* Compute the initial values for csbase and csbask. 
+
+	/* Compute the initial values for csbase and csbask.
 	 * In csbase just set the enable bit and the base to zero.
 	 * In csmask set the mask bits for the size and page level interleave.
 	 */
 	csbase = 0 | 1;
 	csmask = (((common_size  << bits) - 1) << 19);
 	csmask |= 0x3fe0 & ~((csbase_inc << bits) - csbase_inc);
-	for(index = 0; index < 8; index++) {
+	for (index = 0; index < 8; index++) {
 		uint32_t value;
 
 		value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
@@ -1101,15 +1172,15 @@
 			continue;
 		}
 		pci_write_config32(ctrl->f2, DRAM_CSBASE + (index << 2), csbase);
-		if((index & 1) == 0) {  //only have 4 CSMASK
+		if ((index & 1) == 0) {  //only have 4 CSMASK
 			pci_write_config32(ctrl->f2, DRAM_CSMASK + ((index>>1) << 2), csmask);
 		}
 		csbase += csbase_inc;
 	}
-	
+
 	print_debug("Interleaved\r\n");
 
-	/* Return the memory size in K */ 
+	/* Return the memory size in K */
 	return common_size << ((27-10) + bits);
 }
 static unsigned long order_chip_selects(const struct mem_controller *ctrl)
@@ -1118,14 +1189,14 @@
 
 	/* Remember which registers we have used in the high 8 bits of tom */
 	tom = 0;
-	for(;;) {
+	for (;;) {
 		/* Find the largest remaining canidate */
 		unsigned index, canidate;
 		uint32_t csbase, csmask;
 		unsigned size;
 		csbase = 0;
 		canidate = 0;
-		for(index = 0; index < 8; index++) {
+		for (index = 0; index < 8; index++) {
 			uint32_t value;
 			value = pci_read_config32(ctrl->f2, DRAM_CSBASE + (index << 2));
 
@@ -1133,12 +1204,12 @@
 			if (!(value & 1)) {
 				continue;
 			}
-			
+
 			/* Is it greater? */
 			if (value <= csbase) {
 				continue;
 			}
-			
+
 			/* Has it already been selected */
 			if (tom & (1 << (index + 24))) {
 				continue;
@@ -1147,6 +1218,7 @@
 			csbase = value;
 			canidate = index;
 		}
+		
 		/* See if I have found a new canidate */
 		if (csbase == 0) {
 			break;
@@ -1171,12 +1243,12 @@
 		/* Write the new base register */
 		pci_write_config32(ctrl->f2, DRAM_CSBASE + (canidate << 2), csbase);
 		/* Write the new mask register */
-                if((canidate & 1) == 0) {  //only have 4 CSMASK
-                        pci_write_config32(ctrl->f2, DRAM_CSMASK + ((canidate>>1) << 2), csmask);
-                }
-		
+		 if ((canidate & 1) == 0) {  //only have 4 CSMASK
+			 pci_write_config32(ctrl->f2, DRAM_CSMASK + ((canidate>>1) << 2), csmask);
+		 }
+
 	}
-	/* Return the memory size in K */ 
+	/* Return the memory size in K */
 	return (tom & ~0xff000000) << (27-10);
 }
 
@@ -1186,7 +1258,7 @@
 	unsigned end_k;
 	/* Find the last memory address used */
 	end_k = 0;
-	for(node_id = 0; node_id < max_node_id; node_id++) {
+	for (node_id = 0; node_id < max_node_id; node_id++) {
 		uint32_t limit, base;
 		unsigned index;
 		index = node_id << 3;
@@ -1200,19 +1272,24 @@
 	return end_k;
 }
 
-static void order_dimms(const struct mem_controller *ctrl, struct mem_info *meminfo)
+
+static void order_dimms(const struct mem_controller *ctrl,
+			 struct mem_info *meminfo)
 {
 	unsigned long tom_k, base_k;
 
-	if (read_option(CMOS_VSTART_interleave_chip_selects, CMOS_VLEN_interleave_chip_selects, 1) != 0) {
+	if (read_option(CMOS_VSTART_interleave_chip_selects,
+	    CMOS_VLEN_interleave_chip_selects, 1) != 0) {
 		tom_k = interleave_chip_selects(ctrl, meminfo->is_Width128);
 	} else {
 		print_debug("Interleaving disabled\r\n");
 		tom_k = 0;
 	}
+	
 	if (!tom_k) {
 		tom_k = order_chip_selects(ctrl);
 	}
+	
 	/* Compute the memory base address */
 	base_k = memory_end_k(ctrl, ctrl->node_id);
 	tom_k += base_k;
@@ -1220,52 +1297,60 @@
 	set_top_mem(tom_k, 0);
 }
 
-static long disable_dimm(const struct mem_controller *ctrl, unsigned index, long dimm_mask, struct mem_info *meminfo)
+
+static long disable_dimm(const struct mem_controller *ctrl, unsigned index,
+			  long dimm_mask, struct mem_info *meminfo)
 {
-	print_debug("disabling dimm"); 
-	print_debug_hex8(index); 
+	print_debug("disabling dimm");
+	print_debug_hex8(index);
 	print_debug("\r\n");
 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+0)<<2), 0);
 	pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+1)<<2), 0);
 #if QRANK_DIMM_SUPPORT == 1
-        if(meminfo->sz[index].rank == 4) {
-                pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), 0);
-                pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), 0);
-        }
+	if (meminfo->sz[index].rank == 4) {
+		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+4)<<2), 0);
+		pci_write_config32(ctrl->f2, DRAM_CSBASE + (((index << 1)+5)<<2), 0);
+	}
 #endif
 
 	dimm_mask &= ~(1 << index);
 	return dimm_mask;
 }
 
-static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl, long dimm_mask, struct mem_info *meminfo)
+
+static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl,
+					long dimm_mask, struct mem_info *meminfo)
 {
 	int i;
 	uint32_t registered;
 	uint32_t dcl;
 	registered = 0;
-	for(i = 0; (i < DIMM_SOCKETS); i++) {
+	for (i = 0; (i < DIMM_SOCKETS); i++) {
 		int value;
 		if (!(dimm_mask & (1 << i))) {
 			continue;
 		}
+
 		value = spd_read_byte(ctrl->channel0[i], SPD_DIMM_TYPE);
 		if (value < 0) {
 			return -1;
 		}
+
 		/* Registered dimm ? */
 		value &= 0x3f;
-		if ((value == SPD_DIMM_TYPE_RDIMM) || (value == SPD_DIMM_TYPE_mRDIMM)) {
-			//check SPD_MOD_ATTRIB to verify it is SPD_MOD_ATTRIB_REGADC (0x11)?
+		if ((value == SPD_DIMM_TYPE_RDIMM) ||
+		    (value == SPD_DIMM_TYPE_mRDIMM)) {
+			/* check SPD_MOD_ATTRIB to verify it is
+			   SPD_MOD_ATTRIB_REGADC (0x11)? */
 			registered |= (1<<i);
-		} 
+		}
 	}
 
 	if (is_opteron(ctrl)) {
 #if 0
 		if ( registered != (dimm_mask & ((1<<DIMM_SOCKETS)-1)) ) {
 			dimm_mask &= (registered | (registered << DIMM_SOCKETS) ); //disable unbuffed dimm
-	//		die("Mixed buffered and registered dimms not supported");
+//			die("Mixed buffered and registered dimms not supported");
 		}
 		//By yhlu for debug M2, s1g1 can do dual channel, but it use unbuffer DIMM
 		if (!registered) {
@@ -1283,7 +1368,7 @@
 		meminfo->is_registered = 0;
 	}
 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
-	
+
 #if 1
 	if (meminfo->is_registered) {
 		print_debug("Registered\r\n");
@@ -1294,12 +1379,13 @@
 	return dimm_mask;
 }
 
+
 static unsigned int spd_detect_dimms(const struct mem_controller *ctrl)
 {
 	unsigned dimm_mask;
 	int i;
 	dimm_mask = 0;
-	for(i = 0; i < DIMM_SOCKETS; i++) {
+	for (i = 0; i < DIMM_SOCKETS; i++) {
 		int byte;
 		unsigned device;
 		device = ctrl->channel0[i];
@@ -1320,6 +1406,7 @@
 	return dimm_mask;
 }
 
+
 static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_mask, struct mem_info *meminfo)
 {
 	int i;
@@ -1336,7 +1423,7 @@
 		13,	/* *Pri SDRAM Width */
 		17,	/* *Logical Banks */
 		18,	/* *Supported CAS Latencies */
-		20,     /* *DIMM Type Info */
+		20,	/* *DIMM Type Info */
 		21,	/* *SDRAM Module Attributes */
 		23,	/* *Cycle time at CAS Latnecy (CLX - 1) */
 		26,	/* *Cycle time at CAS Latnecy (CLX - 2) */
@@ -1345,23 +1432,24 @@
 		29,	/* *tRCD RAS to CAS */
 		30,	/* *tRAS Activate to Precharge */
 		36,	/* *Write recovery time (tWR) */
-		37, 	/* *Internal write to read command delay (tRDP) */
-		38, 	/* *Internal read to precharge commanfd delay (tRTP) */
-		41, 	/* *Extension of Byte 41 tRC and Byte 42 tRFC */
+		37,	/* *Internal write to read command delay (tRDP) */
+		38,	/* *Internal read to precharge commanfd delay (tRTP) */
+		41,	/* *Extension of Byte 41 tRC and Byte 42 tRFC */
 		41,	/* *Minimum Active to Active/Auto Refresh Time(Trc) */
 		42,	/* *Minimum Auto Refresh Command Time(Trfc) */
 	};
 	/* If the dimms are not in pairs do not do dual channels */
 	if ((dimm_mask & ((1 << DIMM_SOCKETS) - 1)) !=
-		((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) { 
+		((dimm_mask >> DIMM_SOCKETS) & ((1 << DIMM_SOCKETS) - 1))) {
 		goto single_channel;
 	}
-	/* If the cpu is not capable of doing dual channels don't do dual channels */
+	/* If the cpu is not capable of doing dual channels
+	   don't do dual channels */
 	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
 	if (!(nbcap & NBCAP_128Bit)) {
 		goto single_channel;
 	}
-	for(i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
+	for (i = 0; (i < 4) && (ctrl->channel0[i]); i++) {
 		unsigned device0, device1;
 		int value0, value1;
 		int j;
@@ -1371,7 +1459,7 @@
 		}
 		device0 = ctrl->channel0[i];
 		device1 = ctrl->channel1[i];
-		for(j = 0; j < sizeof(addresses)/sizeof(addresses[0]); j++) {
+		for (j = 0; j < sizeof(addresses)/sizeof(addresses[0]); j++) {
 			unsigned addr;
 			addr = addresses[j];
 			value0 = spd_read_byte(device0, addr);
@@ -1406,10 +1494,10 @@
 	uint16_t cycle_time;
 	uint8_t divisor; /* In 1/40 ns increments */
 	uint8_t TrwtTO;
-        uint8_t Twrrd;
-        uint8_t Twrwr;
-        uint8_t Trdrd;
-        uint8_t DcqByPassMax;
+	uint8_t Twrrd;
+	uint8_t Twrwr;
+	uint8_t Trdrd;
+	uint8_t DcqByPassMax;
 	uint32_t dch_memclk;
 	char name[9];
 };
@@ -1418,7 +1506,7 @@
 		{
 			.name	    = "200Mhz\r\n",
 			.cycle_time = 0x500,
-			.divisor    = 200, // how many 1/40ns per clock 
+			.divisor    = 200, // how many 1/40ns per clock
 			.dch_memclk = DCH_MemClkFreq_200MHz, //0
 			.TrwtTO	    = 7,
 			.Twrrd	    = 2,
@@ -1432,34 +1520,34 @@
 			.cycle_time = 0x375,
 			.divisor    = 150, //????
 			.dch_memclk = DCH_MemClkFreq_266MHz, //1
-                        .TrwtTO     = 7,
-                        .Twrrd      = 2,
-                        .Twrwr      = 2,
-                        .Trdrd      = 3,
-                        .DcqByPassMax = 4,
+			.TrwtTO     = 7,
+			.Twrrd      = 2,
+			.Twrwr      = 2,
+			.Trdrd      = 3,
+			.DcqByPassMax = 4,
 		},
-                {
-                        .name       = "333Mhz\r\n",
-                        .cycle_time = 0x300,
-                        .divisor    = 120,
-                        .dch_memclk = DCH_MemClkFreq_333MHz, //2
-                        .TrwtTO     = 7,
-                        .Twrrd      = 2,
-                        .Twrwr      = 2,
-                        .Trdrd      = 3,
-                        .DcqByPassMax = 4,
+		 {
+			.name       = "333Mhz\r\n",
+			.cycle_time = 0x300,
+			.divisor    = 120,
+			.dch_memclk = DCH_MemClkFreq_333MHz, //2
+			.TrwtTO     = 7,
+			.Twrrd      = 2,
+			.Twrwr      = 2,
+			.Trdrd      = 3,
+			.DcqByPassMax = 4,
 
-                },
+		 },
 		{
 			.name	    = "400Mhz\r\n",
 			.cycle_time = 0x250,
 			.divisor    = 100,
 			.dch_memclk = DCH_MemClkFreq_400MHz,//3
-                        .TrwtTO     = 7,
-                        .Twrrd      = 2,
-                        .Twrwr      = 2,
-                        .Trdrd      = 3,
-                        .DcqByPassMax = 4,
+			.TrwtTO     = 7,
+			.Twrrd      = 2,
+			.Twrwr      = 2,
+			.Trdrd      = 3,
+			.DcqByPassMax = 4,
 		},
 		{
 			.cycle_time = 0x000,
@@ -1470,7 +1558,7 @@
 {
 
 	const struct mem_param *param;
-	for(param = &speed[0]; param->cycle_time ; param++) {
+	for (param = &speed[0]; param->cycle_time ; param++) {
 		if (min_cycle_time > (param+1)->cycle_time) {
 			break;
 		}
@@ -1495,52 +1583,54 @@
 	 /*6 */   200, 166, 166, 100,
 	 /*7 */   200, 171, 142, 100,
 
-         /*8 */   200, 150, 125, 100,
-         /*9 */   200, 156, 133, 100,
-         /*10*/   200, 160, 120, 100,
-         /*11*/   200, 163, 127, 100,
+	  /*8 */   200, 150, 125, 100,
+	  /*9 */   200, 156, 133, 100,
+	  /*10*/   200, 160, 120, 100,
+	  /*11*/   200, 163, 127, 100,
 
-         /*12*/   200, 150, 133, 100,
-         /*13*/   200, 153, 123, 100,
-         /*14*/   200, 157, 128, 100,
-         /*15*/   200, 160, 120, 100,
-	}; 
-	
+	  /*12*/   200, 150, 133, 100,
+	  /*13*/   200, 153, 123, 100,
+	  /*14*/   200, 157, 128, 100,
+	  /*15*/   200, 160, 120, 100,
+	};
+
 	unsigned fid_cur;
 	int index;
-	
+
 	msr_t msr;
 	msr = rdmsr(0xc0010042);
-        fid_cur = msr.lo & 0x3f;
+	fid_cur = msr.lo & 0x3f;
 
 	index = fid_cur>>1;
 
-	if(index>12) return divisor;
+	if (index>12) return divisor;
 
-	if(i>3) return divisor;
+	if (i>3) return divisor;
 
 	return dv_a[index * 4+i];
 
 }
 
+
 struct spd_set_memclk_result {
 	const struct mem_param *param;
 	long dimm_mask;
 };
 
+
 static unsigned convert_to_linear(unsigned value)
 {
-         static const unsigned fraction[] = { 0x25, 0x33, 0x66, 0x75 };
-         unsigned valuex;
+	static const unsigned fraction[] = { 0x25, 0x33, 0x66, 0x75 };
+	unsigned valuex;
 
-         /* We need to convert value to more readable */
-         if((value & 0xf) < 10) { //no .25, .33, .66, .75
-                value <<= 4;
-         } else {
+	/* We need to convert value to more readable */
+	if ((value & 0xf) < 10) { //no .25, .33, .66, .75
+		value <<= 4;
+	} else {
 		valuex = ((value & 0xf0) << 4) | fraction [(value & 0xf)-10];
-                value = valuex;
-         }
-	 return value;
+		value = valuex;
+	}
+	return value;
 }
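
convert_to_linear() unpacks the packed SPD cycle-time byte into the fixed-point form used by the speed[] table above: the upper nibble is whole nanoseconds, a low nibble below 10 is tenths, and 10 through 13 select the .25/.33/.66/.75 fractions. For example 0x3D (3.75 ns, DDR2-533) becomes (0x30 << 4) | 0x75 = 0x375, exactly the cycle_time of the 266 MHz entry.
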
 
 static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *ctrl, long dimm_mask, struct mem_info *meminfo)
@@ -1568,14 +1658,14 @@
 	if (bios_cycle_time > min_cycle_time) {
 		min_cycle_time = bios_cycle_time;
 	}
-	min_latency = 3; 
+	min_latency = 3;
 
-        print_tx("1 min_cycle_time:", min_cycle_time); 
+	print_tx("1 min_cycle_time:", min_cycle_time);
 
 	/* Compute the least latency with the fastest clock supported
 	 * by both the memory controller and the dimms.
 	 */
-	for(i = 0; i < DIMM_SOCKETS; i++) {
+	for (i = 0; i < DIMM_SOCKETS; i++) {
 		int new_cycle_time, new_latency;
 		int index;
 		int latencies;
@@ -1604,7 +1694,7 @@
 		latency = log2(latencies) - 2;
 
 		/* Loop through and find a fast clock with a low latency */
-		for(index = 0; index < 3; index++, latency++) {
+		for (index = 0; index < 3; index++, latency++) {
 			int value;
 			if ((latency < 3) || (latency > 6) ||
 				(!(latencies & (1 << latency)))) {
@@ -1614,52 +1704,56 @@
 			if (value < 0) {
 				goto hw_error;
 			}
-			print_tx("\tindex:", index); 	
-			print_tx("\t\tlatency:", latency); 
-			print_tx("\t\tvalue1:", value); 	
 
+			print_tx("\tindex:", index);
+			print_tx("\t\tlatency:", latency);
+			print_tx("\t\tvalue1:", value);
+
 			value = convert_to_linear(value);
 
-			print_tx("\t\tvalue2:", value); 
+			print_tx("\t\tvalue2:", value);
 
 			/* Only increase the latency if we decreas the clock */
 			if (value >= min_cycle_time ) {
-				if(value < new_cycle_time) {
+				if (value < new_cycle_time) {
 					new_cycle_time = value;
 					new_latency = latency;
 				} else if (value == new_cycle_time) {
-					if(new_latency > latency) {
+					if (new_latency > latency) {
 						new_latency = latency;
 					}
 				}
 			}
-                        print_tx("\t\tnew_cycle_time:", new_cycle_time);
-                        print_tx("\t\tnew_latency:", new_latency);
+			print_tx("\t\tnew_cycle_time:", new_cycle_time);
+			print_tx("\t\tnew_latency:", new_latency);
 
 		}
+
 		if (new_latency > 6){
 			continue;
 		}
+
 		/* Does min_latency need to be increased? */
 		if (new_cycle_time > min_cycle_time) {
 			min_cycle_time = new_cycle_time;
 		}
+
 		/* Does min_cycle_time need to be increased? */
 		if (new_latency > min_latency) {
 			min_latency = new_latency;
 		}
 
-		print_tx("2 min_cycle_time:", min_cycle_time); 
-		print_tx("2 min_latency:", min_latency); 
+		print_tx("2 min_cycle_time:", min_cycle_time);
+		print_tx("2 min_latency:", min_latency);
 	}
 	/* Make a second pass through the dimms and disable
 	 * any that cannot support the selected memclk and cas latency.
 	 */
 
-	print_tx("3 min_cycle_time:", min_cycle_time); 
-	print_tx("3 min_latency:", min_latency); 
-	
-	for(i = 0; (i < DIMM_SOCKETS) && (ctrl->channel0[i]); i++) {
+	print_tx("3 min_cycle_time:", min_cycle_time);
+	print_tx("3 min_latency:", min_latency);
+
+	for (i = 0; (i < DIMM_SOCKETS) && (ctrl->channel0[i]); i++) {
 		int latencies;
 		int latency;
 		int index;
@@ -1667,6 +1761,7 @@
 		if (!(dimm_mask & (1 << i))) {
 			continue;
 		}
+
 		latencies = spd_read_byte(ctrl->channel0[i], SPD_CAS_LAT);
 		if (latencies < 0) goto hw_error;
 		if (latencies == 0) {
@@ -1677,7 +1772,7 @@
 		latency = log2(latencies) -2;
 
 		/* Walk through searching for the selected latency */
-		for(index = 0; index < 3; index++, latency++) {
+		for (index = 0; index < 3; index++, latency++) {
 			if (!(latencies & (1 << latency))) {
 				continue;
 			}
@@ -1688,13 +1783,13 @@
 		if ((latency != min_latency) || (index >= 3)) {
 			goto dimm_err;
 		}
-	
+
 		/* Read the min_cycle_time for this latency */
 		value = spd_read_byte(ctrl->channel0[i], latency_indicies[index]);
 		if (value < 0) goto hw_error;
-	
-		value = convert_to_linear(value);	
-		/* All is good if the selected clock speed 
+
+		value = convert_to_linear(value);
+		/* All is good if the selected clock speed
 		 * is what I need or slower.
 		 */
 		if (value <= min_cycle_time) {
@@ -1705,8 +1800,8 @@
 		dimm_mask = disable_dimm(ctrl, i, dimm_mask, meminfo);
 	}
 
-	print_tx("4 min_cycle_time:", min_cycle_time); 
-	
+	print_tx("4 min_cycle_time:", min_cycle_time);
+
 	/* Now that I know the minimum cycle time lookup the memory parameters */
 	result.param = get_mem_param(min_cycle_time);
 
@@ -1724,7 +1819,7 @@
 	value &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
 	value |= (min_latency - DTL_TCL_BASE)  << DTL_TCL_SHIFT;
 	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, value);
-	
+
 	result.dimm_mask = dimm_mask;
 	return result;
  hw_error:
@@ -1735,12 +1830,12 @@
 
 static unsigned convert_to_1_4(unsigned value)
 {
-         static const uint8_t fraction[] = { 0, 1, 2, 2, 3, 3, 0 };
-         unsigned valuex;
+	static const uint8_t fraction[] = { 0, 1, 2, 2, 3, 3, 0 };
+	unsigned valuex;
 
-         /* We need to convert value to more readable */
-         valuex =  fraction [value & 0x7];
-         return valuex;
+	/* We need to convert the value to something more readable */
+	valuex =  fraction [value & 0x7];
+	return valuex;
 }
 static int update_dimm_Trc(const struct mem_controller *ctrl, const struct mem_param *param, int i)
 {
@@ -1758,7 +1853,7 @@
 	value *=10;
 
 	clocks = (value + param->divisor - 1)/param->divisor;
-	
+
 	if (clocks < DTL_TRC_MIN) {
 		clocks = DTL_TRC_MIN;
 	}
@@ -1768,7 +1863,7 @@
 
 	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
 	old_clocks = ((dtl >> DTL_TRC_SHIFT) & DTL_TRC_MASK) + DTL_TRC_BASE;
-	if (old_clocks >= clocks) {  //?? someone did it 
+	if (old_clocks >= clocks) { //?? someone already did it
 		// clocks = old_clocks;
 		return 1;
 	}
@@ -1786,9 +1881,9 @@
 
 	//get the cs_size --> logic dimm size
 	value = spd_read_byte(ctrl->channel0[i], SPD_PRI_WIDTH);
-        if (value < 0) {
-                return -1;
-        }
+	if (value < 0) {
+		return -1;
+	}
 
 	value = 6 - log2(value); //4-->4, 8-->3, 16-->2
 
@@ -1806,46 +1901,52 @@
 	return 1;
 }
 
-static int update_dimm_TT_1_4(const struct mem_controller *ctrl, const struct mem_param *param, int i, 
-					unsigned TT_REG, 
+static int update_dimm_TT_1_4(const struct mem_controller *ctrl, const struct mem_param *param, int i,
+					unsigned TT_REG,
 					unsigned SPD_TT, unsigned TT_SHIFT, unsigned TT_MASK, unsigned TT_BASE, unsigned TT_MIN, unsigned TT_MAX )
 {
-        unsigned clocks, old_clocks;
-        uint32_t dtl;
-        int value;
-        value = spd_read_byte(ctrl->channel0[i], SPD_TT); //already in 1/4 ns
-        if (value < 0) return -1;
+	unsigned clocks, old_clocks;
+	uint32_t dtl;
+	int value;
+	value = spd_read_byte(ctrl->channel0[i], SPD_TT); //already in 1/4 ns
+	if (value < 0) return -1;
 	value *=10;
-        clocks = (value + param->divisor -1)/param->divisor;
-        if (clocks < TT_MIN) {
-                clocks = TT_MIN;
-        }
-        if (clocks > TT_MAX) {
-                return 0;
-        }
-        dtl = pci_read_config32(ctrl->f2, TT_REG);
+	clocks = (value + param->divisor -1)/param->divisor;
+	if (clocks < TT_MIN) {
+		clocks = TT_MIN;
+	}
+
+	if (clocks > TT_MAX) {
+		return 0;
+	}
 
-        old_clocks = ((dtl >> TT_SHIFT) & TT_MASK) + TT_BASE;
-        if (old_clocks >= clocks) { //some one did it?
-//              clocks = old_clocks;
-                return 1;
-        }
-        dtl &= ~(TT_MASK << TT_SHIFT);
-        dtl |= ((clocks - TT_BASE) << TT_SHIFT);
-        pci_write_config32(ctrl->f2, TT_REG, dtl);
-        return 1;
+	dtl = pci_read_config32(ctrl->f2, TT_REG);
+
+	old_clocks = ((dtl >> TT_SHIFT) & TT_MASK) + TT_BASE;
+	if (old_clocks >= clocks) { //someone already did it?
+//		clocks = old_clocks;
+		return 1;
+	}
+	dtl &= ~(TT_MASK << TT_SHIFT);
+	dtl |= ((clocks - TT_BASE) << TT_SHIFT);
+	pci_write_config32(ctrl->f2, TT_REG, dtl);
+	return 1;
 }
 
-static int update_dimm_Trcd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
+
+static int update_dimm_Trcd(const struct mem_controller *ctrl,
+			     const struct mem_param *param, int i)
 {
 	return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TRCD, DTL_TRCD_SHIFT, DTL_TRCD_MASK, DTL_TRCD_BASE, DTL_TRCD_MIN, DTL_TRCD_MAX);
 }
 
+
 static int update_dimm_Trrd(const struct mem_controller *ctrl, const struct mem_param *param, int i)
 {
 	return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TRRD, DTL_TRRD_SHIFT, DTL_TRRD_MASK, DTL_TRRD_BASE, DTL_TRRD_MIN, DTL_TRRD_MAX);
 }
 
+
 static int update_dimm_Tras(const struct mem_controller *ctrl, const struct mem_param *param, int i)
 {
 	unsigned clocks, old_clocks;
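
Aside, not part of the patch: the unit bookkeeping in update_dimm_TT_1_4() is easy to misread. The SPD byte is in quarter-ns, it is multiplied by 10, and param->divisor appears to be the MEMCLK period in 1/40 ns steps (set_DramTerm() further down treats divisor == 100 as DDR2-800, i.e. 2.5 ns). A worked example under that assumption, with 120 for DDR2-667 being my extrapolation rather than a value taken from this file:

    /* tRCD = 15 ns -> SPD value 60 quarter-ns -> 60 * 10 = 600 */
    /* DDR2-667: clocks = ceil(600 / 120) = 5                   */
    /* DDR2-800: clocks = ceil(600 / 100) = 6                   */

The result is then clamped to TT_MIN, rejected above TT_MAX, and only ever raised relative to what is already programmed in the register.
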
@@ -1853,48 +1954,54 @@
 	int value;
 	value = spd_read_byte(ctrl->channel0[i], SPD_TRAS); //in 1 ns
 	if (value < 0) return -1;
-	print_tx("update_dimm_Tras: 0 value=", value); 
-	
-	value<<=2; //convert it to in 1/4ns
+	print_tx("update_dimm_Tras: 0 value=", value);
 
+	value <<= 2; //convert it to in 1/4ns
+
 	value *= 10;
-	print_tx("update_dimm_Tras:  1 value=", value); 
+	print_tx("update_dimm_Tras:  1 value=", value);
 
 	clocks = (value  + param->divisor - 1)/param->divisor;
-	print_tx("update_dimm_Tras: divisor=", param->divisor); 
-	print_tx("update_dimm_Tras: clocks=", clocks); 
+	print_tx("update_dimm_Tras: divisor=", param->divisor);
+	print_tx("update_dimm_Tras: clocks=", clocks);
 	if (clocks < DTL_TRAS_MIN) {
 		clocks = DTL_TRAS_MIN;
 	}
+
 	if (clocks > DTL_TRAS_MAX) {
 		return 0;
 	}
+
 	dtl = pci_read_config32(ctrl->f2, DRAM_TIMING_LOW);
 	old_clocks = ((dtl >> DTL_TRAS_SHIFT) & DTL_TRAS_MASK) + DTL_TRAS_BASE;
 	if (old_clocks >= clocks) { // someone did it?
 		return 1;
 	}
+
 	dtl &= ~(DTL_TRAS_MASK << DTL_TRAS_SHIFT);
 	dtl |= ((clocks - DTL_TRAS_BASE) << DTL_TRAS_SHIFT);
 	pci_write_config32(ctrl->f2, DRAM_TIMING_LOW, dtl);
 	return 1;
 }
 
-static int update_dimm_Trp(const struct mem_controller *ctrl, const struct mem_param *param, int i)
+static int update_dimm_Trp(const struct mem_controller *ctrl,
+			    const struct mem_param *param, int i)
 {
 	return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TRP, DTL_TRP_SHIFT, DTL_TRP_MASK, DTL_TRP_BASE, DTL_TRP_MIN, DTL_TRP_MAX);
 }
 
-static int update_dimm_Trtp(const struct mem_controller *ctrl, const struct mem_param *param, int i, struct mem_info *meminfo)
+
+static int update_dimm_Trtp(const struct mem_controller *ctrl,
+		const struct mem_param *param, int i, struct mem_info *meminfo)
 {
-	//need to figure if it is 32 byte burst or 64 bytes burst
+	/* need to figure out whether it is a 32-byte or 64-byte burst */
 	int offset = 2;
-	if(!meminfo->is_Width128) {
+	if (!meminfo->is_Width128) {
 		uint32_t dword;
 		dword = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
-		if((dword &  DCL_BurstLength32)) offset = 0;
+		if ((dword &  DCL_BurstLength32)) offset = 0;
 	}
-        return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TRTP, DTL_TRTP_SHIFT, DTL_TRTP_MASK, DTL_TRTP_BASE+offset, DTL_TRTP_MIN+offset, DTL_TRTP_MAX+offset);
+	return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_LOW, SPD_TRTP, DTL_TRTP_SHIFT, DTL_TRTP_MASK, DTL_TRTP_BASE+offset, DTL_TRTP_MIN+offset, DTL_TRTP_MAX+offset);
 }
 
 
@@ -1904,14 +2011,15 @@
 }
 
 
-static int update_dimm_Tref(const struct mem_controller *ctrl, const struct mem_param *param, int i)
+static int update_dimm_Tref(const struct mem_controller *ctrl,
+			     const struct mem_param *param, int i)
 {
-        uint32_t dth, dth_old;
-        int value;
-        value = spd_read_byte(ctrl->channel0[i], SPD_TREF); // 0: 15.625us, 1: 3.9us 2: 7.8 us....
-        if (value < 0) return -1;
+	uint32_t dth, dth_old;
+	int value;
+	value = spd_read_byte(ctrl->channel0[i], SPD_TREF); // 0: 15.625 us, 1: 3.9 us, 2: 7.8 us, ...
+	if (value < 0) return -1;
 
-	if(value == 1 ) {
+	if (value == 1 ) {
 		value = 3;
 	} else {
 		value = 2;
@@ -1922,111 +2030,113 @@
 	dth_old = dth;
 	dth &= ~(DTH_TREF_MASK << DTH_TREF_SHIFT);
 	dth |= (value << DTH_TREF_SHIFT);
-	if(dth_old != dth) {
+	if (dth_old != dth) {
 		pci_write_config32(ctrl->f2, DRAM_TIMING_HIGH, dth);
 	}
 	return 1;
 }
 
-static void set_4RankRDimm(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
+
+static void set_4RankRDimm(const struct mem_controller *ctrl,
+			const struct mem_param *param, struct mem_info *meminfo)
 {
 #if QRANK_DIMM_SUPPRT == 1
-        int value;
+	int value;
 	int i;
-	
 
-	if(!(meminfo->is_registered)) return; 
 
+	if (!(meminfo->is_registered)) return;
+
 	value = 0;
 
-        for(i = 0; i < DIMM_SOCKETS; i++) {
-                if (!(dimm_mask & (1 << i))) {
-                        continue;
-                }
+	for (i = 0; i < DIMM_SOCKETS; i++) {
+		if (!(dimm_mask & (1 << i))) {
+			continue;
+		}
 
-		if(meminfo->sz.rank == 4) {
+		if (meminfo->sz.rank == 4) {
 			value = 1;
 			break;
 		}
 	}
 
-	if(value == 1) {
+	if (value == 1) {
 		uint32_t dch;
-	        dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
+		dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
 		dch |= DCH_FourRankRDimm;
-	        pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
+		pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
 	}
 #endif
 }
 
 
-static uint32_t get_extra_dimm_mask(const struct mem_controller *ctrl, struct mem_info *meminfo)
+static uint32_t get_extra_dimm_mask(const struct mem_controller *ctrl,
+				     struct mem_info *meminfo)
 {
-        int i;
+	int i;
 
-        uint32_t mask_x4;
+	uint32_t mask_x4;
 	uint32_t mask_x16;
 	uint32_t mask_single_rank;
 	uint32_t mask_page_1k;
 	int value;
 #if QRANK_DIMM_SUPPORT == 1
-        int rank;
+	int rank;
 #endif
 
 	long dimm_mask = meminfo->dimm_mask;
 
 
-        mask_x4 = 0;
+	mask_x4 = 0;
 	mask_x16 = 0;
 	mask_single_rank = 0;
 	mask_page_1k = 0;
 
-        for(i = 0; i < DIMM_SOCKETS; i++) {
-		
-                if (!(dimm_mask & (1 << i))) {
-                        continue;
-                }
+	for (i = 0; i < DIMM_SOCKETS; i++) {
+		if (!(dimm_mask & (1 << i))) {
+			continue;
+		}
 
-		if(meminfo->sz[i].rank == 1) {
+		if (meminfo->sz[i].rank == 1) {
 			mask_single_rank |= 1<<i;
 		}
 
-                if(meminfo->sz[i].col==10) {
-                        mask_page_1k |= 1<<i;
-                }
+		if (meminfo->sz[i].col==10) {
+			mask_page_1k |= 1<<i;
+		}
 
 
-	        value = spd_read_byte(ctrl->channel0[i], SPD_PRI_WIDTH);
+		value = spd_read_byte(ctrl->channel0[i], SPD_PRI_WIDTH);
 
 		#if QRANK_DIMM_SUPPORT == 1
-       		rank = meminfo->sz[i].rank;
+			rank = meminfo->sz[i].rank;
 		#endif
 
-		if(value==4) {
+		if (value==4) {
 			mask_x4 |= (1<<i);
 			#if QRANK_DIMM_SUPPORT == 1
-		        if(rank==4) {
-                		mask_x4 |= 1<<(i+2);
-        		}
+			if (rank==4) {
+				mask_x4 |= 1<<(i+2);
+			}
 			#endif
-		} else if(value==16) {
+		} else if (value==16) {
 			mask_x16 |= (1<<i);
 			#if QRANK_DIMM_SUPPORT == 1
-                        if(rank==4) {
-                                mask_x16 |= 1<<(i+2);
-                        }
+			if (rank==4) {
+				mask_x16 |= 1<<(i+2);
+			}
 			#endif
 		}
-                
-        }
-	
-        meminfo->x4_mask= mask_x4;
+
+	}
+
+	meminfo->x4_mask= mask_x4;
 	meminfo->x16_mask = mask_x16;
-	
+
 	meminfo->single_rank_mask = mask_single_rank;
 	meminfo->page_1k_mask = mask_page_1k;
 
-        return mask_x4;
+	return mask_x4;
 
 }
 
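
Aside, not part of the patch: the masks built by get_extra_dimm_mask() are per-socket bitmaps, with quad-rank x4/x16 DIMMs additionally setting bit i+2 when QRANK_DIMM_SUPPORT is enabled. A hypothetical population, purely for illustration:

    /* socket 0: single-rank x16 with 10 column bits; socket 1: double-rank x8 */
    /*   x16_mask         = 0x01                                               */
    /*   x4_mask          = 0x00                                               */
    /*   single_rank_mask = 0x01                                               */
    /*   page_1k_mask     = 0x01   (only socket 0 has col == 10)               */
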
@@ -2040,155 +2150,168 @@
 	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
 }
 
+
 static int count_ones(uint32_t dimm_mask)
 {
-        int dimms;
-        unsigned index;
-        dimms = 0;
-        for(index = 0; index < DIMM_SOCKETS; index++, dimm_mask>>=1) {
-                if (dimm_mask & 1) {
-                        dimms++;
-                }
-        }
-        return dimms;
+	int dimms;
+	unsigned index;
+	dimms = 0;
+	for (index = 0; index < DIMM_SOCKETS; index++, dimm_mask>>=1) {
+		if (dimm_mask & 1) {
+			dimms++;
+		}
+	}
+	return dimms;
 }
 
 
-static void set_DramTerm(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
+static void set_DramTerm(const struct mem_controller *ctrl,
+			const struct mem_param *param, struct mem_info *meminfo)
 {
-        uint32_t dcl;
+	uint32_t dcl;
 	unsigned odt;
 	odt = 1; // 75 ohms
 
-	if(param->divisor == 100) { //DDR2 800
-		if(meminfo->is_Width128) {
-			if(count_ones(meminfo->dimm_mask & 0x0f)==2) {
+	if (param->divisor == 100) { //DDR2 800
+		if (meminfo->is_Width128) {
+			if (count_ones(meminfo->dimm_mask & 0x0f)==2) {
 				odt = 3;  //50 ohms
 			}
 		}
 
-	}	
+	}
 
 
 #if DIMM_SUPPORT == 0x0204
 	odt = 0x2;		/* 150 ohms */
 #endif
 
-    dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
-    dcl &= ~(DCL_DramTerm_MASK<<DCL_DramTerm_SHIFT);
-    dcl |= (odt & DCL_DramTerm_MASK) << (DCL_DramTerm_SHIFT);
-    pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
+	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
+	dcl &= ~(DCL_DramTerm_MASK<<DCL_DramTerm_SHIFT);
+	dcl |= (odt & DCL_DramTerm_MASK) << (DCL_DramTerm_SHIFT);
+	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
 }
 
 
-static void set_ecc(const struct mem_controller *ctrl,const struct mem_param *param, long dimm_mask, struct mem_info *meminfo)
+static void set_ecc(const struct mem_controller *ctrl,
+	const struct mem_param *param, long dimm_mask, struct mem_info *meminfo)
 {
 	int i;
 	int value;
-	
-        uint32_t dcl, nbcap;
-        nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
-        dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
-        dcl &= ~DCL_DimmEccEn;
-        if (nbcap & NBCAP_ECC) {
-                dcl |= DCL_DimmEccEn;
-        }
-        if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
-                dcl &= ~DCL_DimmEccEn;
-        }
-        pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
 
+	uint32_t dcl, nbcap;
+	nbcap = pci_read_config32(ctrl->f3, NORTHBRIDGE_CAP);
+	dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
+	dcl &= ~DCL_DimmEccEn;
+	if (nbcap & NBCAP_ECC) {
+		dcl |= DCL_DimmEccEn;
+	}
+	if (read_option(CMOS_VSTART_ECC_memory, CMOS_VLEN_ECC_memory, 1) == 0) {
+		dcl &= ~DCL_DimmEccEn;
+	}
+	pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
+
 	meminfo->is_ecc = 1;
-	if(!(dcl & DCL_DimmEccEn)) {
+	if (!(dcl & DCL_DimmEccEn)) {
 		meminfo->is_ecc = 0;
 		return; // already disabled the ECC, so don't need to read SPD any more
 	}
 
-        for(i = 0; i < DIMM_SOCKETS; i++) {
-		
-                if (!(dimm_mask & (1 << i))) {
-                        continue;
-                }
+	for (i = 0; i < DIMM_SOCKETS; i++) {
 
+		if (!(dimm_mask & (1 << i))) {
+			continue;
+		}
+
 		value = spd_read_byte(ctrl->channel0[i], SPD_DIMM_CONF_TYPE);
 
-		if(!(value & SPD_DIMM_CONF_TYPE_ECC)) {
-	                dcl &= ~DCL_DimmEccEn;
-	                pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
+		if (!(value & SPD_DIMM_CONF_TYPE_ECC)) {
+			dcl &= ~DCL_DimmEccEn;
+			pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
 			meminfo->is_ecc = 0;
 			return;
 		}
 
-        }
+	}
 }
 
-static int update_dimm_Twtr(const struct mem_controller *ctrl, const struct mem_param *param, int i)
+
+static int update_dimm_Twtr(const struct mem_controller *ctrl,
+			     const struct mem_param *param, int i)
 {
 
 	return update_dimm_TT_1_4(ctrl, param, i, DRAM_TIMING_HIGH, SPD_TWTR, DTH_TWTR_SHIFT, DTH_TWTR_MASK, DTH_TWTR_BASE, DTH_TWTR_MIN, DTH_TWTR_MAX);
-
 }
 
-static void set_TT(const struct mem_controller *ctrl, const struct mem_param *param, unsigned TT_REG,
-                unsigned TT_SHIFT, unsigned TT_MASK, unsigned TT_BASE, unsigned TT_MIN, unsigned TT_MAX, unsigned val, const char *str)
+
+static void set_TT(const struct mem_controller *ctrl,
+	const struct mem_param *param, unsigned TT_REG, unsigned TT_SHIFT,
+	unsigned TT_MASK, unsigned TT_BASE, unsigned TT_MIN, unsigned TT_MAX,
+	unsigned val, const char *str)
 {
-        uint32_t reg;
+	uint32_t reg;
 
-        if ((val < TT_MIN) || (val > TT_MAX)) {
-                print_err(str);
-                die(" Unknown\r\n");
-        }
+	if ((val < TT_MIN) || (val > TT_MAX)) {
+		print_err(str);
+		die(" Unknown\r\n");
+	}
 
-        reg = pci_read_config32(ctrl->f2, TT_REG);
-        reg &= ~(TT_MASK << TT_SHIFT);
-        reg |= ((val - TT_BASE) << TT_SHIFT);
-        pci_write_config32(ctrl->f2, TT_REG, reg);
-        return;
+	reg = pci_read_config32(ctrl->f2, TT_REG);
+	reg &= ~(TT_MASK << TT_SHIFT);
+	reg |= ((val - TT_BASE) << TT_SHIFT);
+	pci_write_config32(ctrl->f2, TT_REG, reg);
+	return;
 }
 
-static void set_TrwtTO(const struct mem_controller *ctrl, const struct mem_param *param)
+
+static void set_TrwtTO(const struct mem_controller *ctrl,
+			const struct mem_param *param)
 {
 	set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TRWTTO_SHIFT, DTH_TRWTTO_MASK,DTH_TRWTTO_BASE, DTH_TRWTTO_MIN, DTH_TRWTTO_MAX, param->TrwtTO, "TrwtTO");
 }
 
+
 static void set_Twrrd(const struct mem_controller *ctrl, const struct mem_param *param)
 {
-        set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TWRRD_SHIFT, DTH_TWRRD_MASK,DTH_TWRRD_BASE, DTH_TWRRD_MIN, DTH_TWRRD_MAX, param->Twrrd, "Twrrd");
+	set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TWRRD_SHIFT, DTH_TWRRD_MASK,DTH_TWRRD_BASE, DTH_TWRRD_MIN, DTH_TWRRD_MAX, param->Twrrd, "Twrrd");
 }
 
+
 static void set_Twrwr(const struct mem_controller *ctrl, const struct mem_param *param)
 {
-        set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TWRWR_SHIFT, DTH_TWRWR_MASK,DTH_TWRWR_BASE, DTH_TWRWR_MIN, DTH_TWRWR_MAX, param->Twrwr, "Twrwr");
+	set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TWRWR_SHIFT, DTH_TWRWR_MASK,DTH_TWRWR_BASE, DTH_TWRWR_MIN, DTH_TWRWR_MAX, param->Twrwr, "Twrwr");
 }
 
+
 static void set_Trdrd(const struct mem_controller *ctrl, const struct mem_param *param)
 {
-        set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TRDRD_SHIFT, DTH_TRDRD_MASK,DTH_TRDRD_BASE, DTH_TRDRD_MIN, DTH_TRDRD_MAX, param->Trdrd, "Trdrd");
+	set_TT(ctrl, param, DRAM_TIMING_HIGH, DTH_TRDRD_SHIFT, DTH_TRDRD_MASK,DTH_TRDRD_BASE, DTH_TRDRD_MIN, DTH_TRDRD_MAX, param->Trdrd, "Trdrd");
 }
 
+
 static void set_DcqBypassMax(const struct mem_controller *ctrl, const struct mem_param *param)
 {
-        set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_DcqBypassMax_SHIFT, DCH_DcqBypassMax_MASK,DCH_DcqBypassMax_BASE, DCH_DcqBypassMax_MIN, DCH_DcqBypassMax_MAX, param->DcqByPassMax, "DcqBypassMax"); // value need to be in CMOS
+	set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_DcqBypassMax_SHIFT, DCH_DcqBypassMax_MASK,DCH_DcqBypassMax_BASE, DCH_DcqBypassMax_MIN, DCH_DcqBypassMax_MAX, param->DcqByPassMax, "DcqBypassMax"); // value need to be in CMOS
 }
 
+
 static void set_Tfaw(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
 {
-        static const uint8_t faw_1k[] = {8, 10, 13, 14};
-        static const uint8_t faw_2k[] = {10, 14, 17, 18};
-        unsigned memclkfreq_index;
-        unsigned faw;
+	static const uint8_t faw_1k[] = {8, 10, 13, 14};
+	static const uint8_t faw_2k[] = {10, 14, 17, 18};
+	unsigned memclkfreq_index;
+	unsigned faw;
 
 
-        memclkfreq_index = param->dch_memclk;
+	memclkfreq_index = param->dch_memclk;
 
-        if(meminfo->page_1k_mask != 0) { //1k page
-                faw = faw_1k[memclkfreq_index];
-        }
-        else {
-                faw = faw_2k[memclkfreq_index];
-        }
+	if (meminfo->page_1k_mask != 0) { //1k page
+		faw = faw_1k[memclkfreq_index];
+	} else {
+		faw = faw_2k[memclkfreq_index];
+	}
 
-        set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_FourActWindow_SHIFT, DCH_FourActWindow_MASK, DCH_FourActWindow_BASE, DCH_FourActWindow_MIN, DCH_FourActWindow_MAX, faw, "FourActWindow");
+	set_TT(ctrl, param, DRAM_CONFIG_HIGH, DCH_FourActWindow_SHIFT, DCH_FourActWindow_MASK, DCH_FourActWindow_BASE, DCH_FourActWindow_MIN, DCH_FourActWindow_MAX, faw, "FourActWindow");
 
 }
 
@@ -2203,22 +2326,23 @@
 	dch &= ~(DCH_MaxAsyncLat_MASK << DCH_MaxAsyncLat_SHIFT);
 
 	//FIXME: We need to use Max of DqsRcvEnDelay + 6ns here: After trainning and get that from index reg 0x10, 0x13, 0x16, 0x19, 0x30, 0x33, 0x36, 0x39
-	async_lat = 6+6;
-	
-	
+	async_lat = 6 + 6;
+
+
 	dch |= ((async_lat - DCH_MaxAsyncLat_BASE) << DCH_MaxAsyncLat_SHIFT);
 	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
 }
 
+
 static void set_SlowAccessMode(const struct mem_controller *ctrl)
 {
-        uint32_t dch;
+	uint32_t dch;
 
-        dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
+	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
 
-        dch |= (1<<20);
+	dch |= (1<<20);
 
-        pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
+	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
 }
 
 
@@ -2228,32 +2352,32 @@
 */
 static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info *meminfo)
 {
-        uint32_t dword;
+	uint32_t dword;
 	uint32_t dwordx;
 	unsigned SlowAccessMode = 0;
 
 	long dimm_mask = meminfo->dimm_mask & 0x0f;
 
 #if DIMM_SUPPORT==0x0104   /* DDR2 and REG */
-	/* for REG DIMM */	
-        dword = 0x00111222;
-        dwordx = 0x002f0000;
-        switch (meminfo->memclk_set) {
-        case DCH_MemClkFreq_266MHz:
-                if( (dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) { 
-                	dwordx = 0x002f2700; 
-                }
-                break;
-        case DCH_MemClkFreq_333MHz:
-	        if( (dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
-                        if ((meminfo->single_rank_mask & 0x03)!=0x03) { //any double rank there?
-                                dwordx = 0x002f2f00;
-                        }
-                }
-                break;
-        case DCH_MemClkFreq_400MHz:
-                dwordx = 0x002f3300;
-                break;
+	/* for REG DIMM */
+	dword = 0x00111222;
+	dwordx = 0x002f0000;
+	switch (meminfo->memclk_set) {
+	case DCH_MemClkFreq_266MHz:
+		if ((dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
+			dwordx = 0x002f2700;
+		}
+		break;
+	case DCH_MemClkFreq_333MHz:
+		if ((dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
+			if ((meminfo->single_rank_mask & 0x03)!=0x03) { //any double rank there?
+				dwordx = 0x002f2f00;
+			}
+		}
+		break;
+	case DCH_MemClkFreq_400MHz:
+		dwordx = 0x002f3300;
+		break;
 	}
 
 #endif
@@ -2276,7 +2400,7 @@
 		   && (meminfo->x16_mask == 1))	/* SR x16 */
 			dwordx = 0x00272700;
 		else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0)
-		         && (meminfo->single_rank_mask == 0)) {	/* DR x8 */
+			 && (meminfo->single_rank_mask == 0)) {	/* DR x8 */
 			SlowAccessMode = 1;
 			dwordx = 0x00002800;
 		} else {	/* SR x8, DR x16 */
@@ -2288,7 +2412,7 @@
 		   && (meminfo->x16_mask == 1))	/* SR x16 */
 			dwordx = 0x00292900;
 		else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0)
-		         && (meminfo->single_rank_mask == 0)) {	/* DR x8 */
+			 && (meminfo->single_rank_mask == 0)) {	/* DR x8 */
 			SlowAccessMode = 1;
 			dwordx = 0x00002A00;
 		} else {	/* SR x8, DR x16 */
@@ -2299,21 +2423,21 @@
 #endif
 
 #if DIMM_SUPPORT==0x0004  /* DDR2 and unbuffered */
-        /* for UNBUF DIMM */
-        dword = 0x00111222;
+	/* for UNBUF DIMM */
+	dword = 0x00111222;
 	dwordx = 0x002f2f00;
 	switch (meminfo->memclk_set) {
 	case DCH_MemClkFreq_200MHz:
-		if(dimm_mask == 0x03) {
+		if (dimm_mask == 0x03) {
 			SlowAccessMode = 1;
 			dword = 0x00111322;
 		}
 		break;
 	case DCH_MemClkFreq_266MHz:
-		if(dimm_mask == 0x03) {
+		if (dimm_mask == 0x03) {
 			SlowAccessMode = 1;
 			dword = 0x00111322;
-			if((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0)) {
+			if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0)) {
 				switch (meminfo->single_rank_mask) {
 				case 0x03:
 					dwordx = 0x00002f00; //x8 single Rank
@@ -2321,18 +2445,17 @@
 				case 0x00:
 					dwordx = 0x00342f00; //x8 double Rank
 					break;
-				default: 
+				default:
 					dwordx = 0x00372f00; //x8 single Rank and double Rank mixed
 				}
-			} else if((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x01) && (meminfo->single_rank_mask == 0x01)) {
-                                        dwordx = 0x00382f00; //x8 Double Rank and x16 single Rank mixed
-                        } else if((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x02) && (meminfo->single_rank_mask == 0x02)) {
-                                        dwordx = 0x00382f00; //x16 single Rank and x8 double Rank mixed
+			} else if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x01) && (meminfo->single_rank_mask == 0x01)) {
+				dwordx = 0x00382f00; //x8 Double Rank and x16 single Rank mixed
+			} else if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x02) && (meminfo->single_rank_mask == 0x02)) {
+				dwordx = 0x00382f00; //x16 single Rank and x8 double Rank mixed
 			}
 
-		}
-		else {
-			if((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x00) && ((meminfo->single_rank_mask == 0x01)||(meminfo->single_rank_mask == 0x02)))  { //x8 single rank
+		} else {
+			if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x00) && ((meminfo->single_rank_mask == 0x01)||(meminfo->single_rank_mask == 0x02)))  { //x8 single rank
 				dwordx = 0x002f2f00;
 			} else {
 				dwordx = 0x002b2f00;
@@ -2341,37 +2464,37 @@
 		break;
 	case DCH_MemClkFreq_333MHz:
 		dwordx = 0x00202220;
-                if(dimm_mask == 0x03) {
-                        SlowAccessMode = 1;
-                        dword = 0x00111322;
-                        if((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0)) {
-                                switch (meminfo->single_rank_mask) {
-                                case 0x03:
-                                        dwordx = 0x00302220; //x8 single Rank
-                                        break;
-                                case 0x00:
-                                        dwordx = 0x002b2220; //x8 double Rank
-                                        break;
-                                default:
-                                        dwordx = 0x002a2220; //x8 single Rank and double Rank mixed
-                                }
-                        } else if((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x01) && (meminfo->single_rank_mask == 0x01)) {
-                                        dwordx = 0x002c2220; //x8 Double Rank and x16 single Rank mixed
-                        } else if((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x02) && (meminfo->single_rank_mask == 0x02)) {
-                                        dwordx = 0x002c2220; //x16 single Rank and x8 double Rank mixed
-                        }
-                }
-                break;
+		if (dimm_mask == 0x03) {
+			SlowAccessMode = 1;
+			dword = 0x00111322;
+			if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0)) {
+				switch (meminfo->single_rank_mask) {
+				case 0x03:
+					dwordx = 0x00302220; //x8 single Rank
+					break;
+				case 0x00:
+					dwordx = 0x002b2220; //x8 double Rank
+					break;
+				default:
+					dwordx = 0x002a2220; //x8 single Rank and double Rank mixed
+				}
+			} else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x01) && (meminfo->single_rank_mask == 0x01)) {
+				dwordx = 0x002c2220; //x8 Double Rank and x16 single Rank mixed
+			} else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x02) && (meminfo->single_rank_mask == 0x02)) {
+				dwordx = 0x002c2220; //x16 single Rank and x8 double Rank mixed
+			}
+		}
+		break;
 	case DCH_MemClkFreq_400MHz:
-                dwordx = 0x00202520;
+		dwordx = 0x00202520;
 		SlowAccessMode = 1;
-                if(dimm_mask == 0x03) {
-                        dword = 0x00113322;
-                } else {
-                        dword = 0x00113222;
+		if (dimm_mask == 0x03) {
+			dword = 0x00113322;
+		} else {
+			dword = 0x00113222;
 		}
-                break;
-	} 
+		break;
+	}
 
 	print_raminit("\tdimm_mask = ", meminfo->dimm_mask);
 	print_raminit("\tx4_mask = ", meminfo->x4_mask);
@@ -2382,45 +2505,47 @@
 #endif
 
 #if (DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
-	if(SlowAccessMode) {
+	if (SlowAccessMode) {
 		set_SlowAccessMode(ctrl);
 	}
 #endif
 
-        /* Program the Output Driver Compensation Control Registers (Function 2:Offset 0x9c, index 0, 0x20) */
-        pci_write_config32_index_wait(ctrl->f2, 0x98, 0, dword);
-	if(meminfo->is_Width128) {	
-	        pci_write_config32_index_wait(ctrl->f2, 0x98, 0x20, dword);
+	/* Program the Output Driver Compensation Control Registers (Function 2:Offset 0x9c, index 0, 0x20) */
+	pci_write_config32_index_wait(ctrl->f2, 0x98, 0, dword);
+	if (meminfo->is_Width128) {
+		pci_write_config32_index_wait(ctrl->f2, 0x98, 0x20, dword);
 	}
 
-        /* Program the Address Timing Control Registers (Function 2:Offset 0x9c, index 4, 0x24) */
-        pci_write_config32_index_wait(ctrl->f2, 0x98, 4, dwordx);
-	if(meminfo->is_Width128) {
-	        pci_write_config32_index_wait(ctrl->f2, 0x98, 0x24, dwordx);
+	/* Program the Address Timing Control Registers (Function 2:Offset 0x9c, index 4, 0x24) */
+	pci_write_config32_index_wait(ctrl->f2, 0x98, 4, dwordx);
+	if (meminfo->is_Width128) {
+		pci_write_config32_index_wait(ctrl->f2, 0x98, 0x24, dwordx);
 	}
 
 }
 
 
-static void set_RDqsEn(const struct mem_controller *ctrl, const struct mem_param *param, struct mem_info *meminfo)
+static void set_RDqsEn(const struct mem_controller *ctrl,
+			const struct mem_param *param, struct mem_info *meminfo)
 {
 #if CPU_SOCKET_TYPE==0x10
 	//only need to set for reg and x8
-        uint32_t dch;
+	uint32_t dch;
 
-        dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
+	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
 
 	dch &= ~DCH_RDqsEn;
-	if((!meminfo->x4_mask) && (!meminfo->x16_mask)) {
-	        dch |= DCH_RDqsEn;
+	if ((!meminfo->x4_mask) && (!meminfo->x16_mask)) {
+		dch |= DCH_RDqsEn;
 	}
 
-       	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
+	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
 #endif
 }
 
 
-static void set_idle_cycle_limit(const struct mem_controller *ctrl, const struct mem_param *param)
+static void set_idle_cycle_limit(const struct mem_controller *ctrl,
+				  const struct mem_param *param)
 {
 	uint32_t dcm;
 	/* AMD says to Hardcode this */
@@ -2431,18 +2556,19 @@
 	pci_write_config32(ctrl->f2, DRAM_CTRL_MISC, dcm);
 }
 
-static void set_RdWrQByp(const struct mem_controller *ctrl, const struct mem_param *param)
+
+static void set_RdWrQByp(const struct mem_controller *ctrl,
+			  const struct mem_param *param)
 {
 	set_TT(ctrl, param, DRAM_CTRL_MISC, DCM_RdWrQByp_SHIFT, DCM_RdWrQByp_MASK,0, 0, 3, 2, "RdWrQByp");
 }
 
 
-
 static long spd_set_dram_timing(const struct mem_controller *ctrl, const struct mem_param *param, long dimm_mask, struct mem_info *meminfo)
 {
 	int i;
 
-	for(i = 0; i < DIMM_SOCKETS; i++) {
+	for (i = 0; i < DIMM_SOCKETS; i++) {
 		int rc;
 		if (!(dimm_mask & (1 << i))) {
 			continue;
@@ -2500,7 +2626,7 @@
 	set_Twrrd (ctrl, param);
 	set_Twrwr (ctrl, param);
 	set_Trdrd (ctrl, param);
-        
+
 	set_4RankRDimm(ctrl, param, meminfo);
 
 	/* DRAM Config High */
@@ -2513,7 +2639,7 @@
 	set_ecc(ctrl, param, dimm_mask, meminfo);
 	set_dimm_x4(ctrl, param, meminfo);
 	set_DramTerm(ctrl, param, meminfo);
-	
+
 	/* DRAM Control Misc */
 	set_idle_cycle_limit(ctrl, param);
 	set_RdWrQByp(ctrl, param);
@@ -2521,7 +2647,8 @@
 	return dimm_mask;
 }
 
-static void sdram_set_spd_registers(const struct mem_controller *ctrl, struct sys_info *sysinfo) 
+static void sdram_set_spd_registers(const struct mem_controller *ctrl,
+				     struct sys_info *sysinfo)
 {
 	struct spd_set_memclk_result result;
 	const struct mem_param *param;
@@ -2536,39 +2663,39 @@
 	meminfo = &sysinfo->meminfo[ctrl->node_id];
 
 	print_debug_addr("sdram_set_spd_registers: paramx :", &paramx);
-	
+
 	activate_spd_rom(ctrl);
 	dimm_mask = spd_detect_dimms(ctrl);
 	if (!(dimm_mask & ((1 << DIMM_SOCKETS) - 1))) {
 		print_debug("No memory for this cpu\r\n");
 		return;
 	}
-	dimm_mask = spd_enable_2channels(ctrl, dimm_mask, meminfo);        
-	if (dimm_mask < 0) 
+	dimm_mask = spd_enable_2channels(ctrl, dimm_mask, meminfo);
+	if (dimm_mask < 0)
 		goto hw_spd_err;
-	dimm_mask = spd_set_ram_size(ctrl , dimm_mask, meminfo);           
-	if (dimm_mask < 0) 
+	dimm_mask = spd_set_ram_size(ctrl, dimm_mask, meminfo);
+	if (dimm_mask < 0)
 		goto hw_spd_err;
-	dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask, meminfo); 
-	if (dimm_mask < 0) 
+	dimm_mask = spd_handle_unbuffered_dimms(ctrl, dimm_mask, meminfo);
+	if (dimm_mask < 0)
 		goto hw_spd_err;
 	result = spd_set_memclk(ctrl, dimm_mask, meminfo);
 	param     = result.param;
 	dimm_mask = result.dimm_mask;
-	if (dimm_mask < 0) 
+	if (dimm_mask < 0)
 		goto hw_spd_err;
 
 	//store memclk set to sysinfo, incase we need rebuilt param again
 	meminfo->memclk_set = param->dch_memclk;
 
 	memcpy(&paramx, param, sizeof(paramx));
-	
+
 	paramx.divisor = get_exact_divisor(param->dch_memclk, paramx.divisor);
 
 	dimm_mask = spd_set_dram_timing(ctrl, &paramx , dimm_mask, meminfo); // dimm_mask will be stored to meminfo->dimm_mask
 	if (dimm_mask < 0)
 		goto hw_spd_err;
-	
+
 	order_dimms(ctrl, meminfo);
 
 	return;
@@ -2586,120 +2713,121 @@
 #if HW_MEM_HOLE_SIZEK != 0
 static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
 {
-        int ii;
-        uint32_t carry_over;
-        device_t dev;
-        uint32_t base, limit;
-        uint32_t basek;
-        uint32_t hoist;
+	int ii;
+	uint32_t carry_over;
+	device_t dev;
+	uint32_t base, limit;
+	uint32_t basek;
+	uint32_t hoist;
 	int j;
 
-        carry_over = (4*1024*1024) - hole_startk;
+	carry_over = (4*1024*1024) - hole_startk;
 
-        for(ii=controllers - 1;ii>i;ii--) {
-                base  = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
-                if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
-                        continue;
-                }
+	for (ii=controllers - 1;ii>i;ii--) {
+		base  = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
+		if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
+			continue;
+		}
 		limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
 		limit += (carry_over << 2 );
 		base  += (carry_over << 2 );
-		for(j = 0; j < controllers; j++) {	
-                	pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit);
-                	pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base );
+		for (j = 0; j < controllers; j++) {
+			pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit);
+			pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base );
 		}
-        }
-        limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
+	}
+	limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
 	limit += (carry_over << 2);
-	for(j = 0; j < controllers; j++) {
-	        pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit);
+	for (j = 0; j < controllers; j++) {
+		pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit);
 	}
-        dev = ctrl[i].f1;
-        base  = pci_read_config32(dev, 0x40 + (i << 3));
-        basek  = (base & 0xffff0000) >> 2;
-        if(basek == hole_startk) {
-                //don't need set memhole here, because hole off set will be 0, overflow
-                //so need to change base reg instead, new basek will be 4*1024*1024
-                base &= 0x0000ffff;
-                base |= (4*1024*1024)<<2;
-		for(j = 0; j < controllers; j++) {
-	                pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
+	dev = ctrl[i].f1;
+	base  = pci_read_config32(dev, 0x40 + (i << 3));
+	basek  = (base & 0xffff0000) >> 2;
+	if (basek == hole_startk) {
+		//no need to set the memhole here, because the hole offset would be 0 (overflow),
+		//so change the base reg instead; the new basek will be 4*1024*1024
+		base &= 0x0000ffff;
+		base |= (4*1024*1024)<<2;
+		for (j = 0; j < controllers; j++) {
+			pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
 		}
-        }
-        else
-        {
-        	hoist = /* hole start address */
-	                ((hole_startk << 10) & 0xff000000) +
-        	        /* hole address to memory controller address */
-	                (((basek + carry_over) >> 6) & 0x0000ff00) +
-        	        /* enable */
-	                1;
-        	pci_write_config32(dev, 0xf0, hoist);
-	}	
+	} else {
+		hoist = /* hole start address */
+			((hole_startk << 10) & 0xff000000) +
+			/* hole address to memory controller address */
+			(((basek + carry_over) >> 6) & 0x0000ff00) +
+			/* enable */
+			1;
+		pci_write_config32(dev, 0xf0, hoist);
+	}
 
-        return carry_over;
+	return carry_over;
 }
 
 static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
 {
 
-        uint32_t hole_startk;
+	uint32_t hole_startk;
 	int i;
 
-        hole_startk = 4*1024*1024 - HW_MEM_HOLE_SIZEK;
+	hole_startk = 4*1024*1024 - HW_MEM_HOLE_SIZEK;
 
 #if HW_MEM_HOLE_SIZE_AUTO_INC == 1
-	//We need to double check if the hole_startk is valid, if it is equal to basek, we need to decrease it some
+	/* We need to double-check that hole_startk is valid; if it is equal
+	   to basek, we need to decrease it a bit */
 	uint32_t basek_pri;
-        for(i=0; i<controllers; i++) {
-                        uint32_t base;
-                        unsigned base_k;
-                        base  = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
-                        if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
-                                continue;
-                        }
-                        base_k = (base & 0xffff0000) >> 2;
-                        if(base_k == hole_startk) {
-                                hole_startk -= (base_k - basek_pri)>>1; // decrease mem hole startk to make sure it is on middle of previous node
-                                break; //only one hole
-                        }
+	for (i=0; i<controllers; i++) {
+			uint32_t base;
+			unsigned base_k;
+			base  = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
+			if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
+				continue;
+			}
+			base_k = (base & 0xffff0000) >> 2;
+			if (base_k == hole_startk) {
+				/* decrease mem hole startk to make sure it is
+				   in the middle of the previous node */
+				hole_startk -= (base_k - basek_pri) >> 1;
+				break; //only one hole
+			}
 			basek_pri = base_k;
-        }
+	}
 #endif
-        //find node index that need do set hole
-        for(i=0; i<controllers; i++) {
-                        uint32_t base, limit;
-                        unsigned base_k, limit_k;
-                        base  = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
-                        if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
-                                continue;
-                        }
-                        limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
-                        base_k = (base & 0xffff0000) >> 2;
-                        limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
-                        if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
-                                unsigned end_k;
-                                hoist_memory(controllers, ctrl, hole_startk, i);
-				end_k = memory_end_k(ctrl, controllers);
-                                set_top_mem(end_k, hole_startk);
-				break; //only one hole
-                        }
-        }
+	/* find the node index that needs the hole set */
+	for (i=0; i < controllers; i++) {
+		uint32_t base, limit;
+		unsigned base_k, limit_k;
+		base  = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
+		if ((base & ((1 << 1) | (1 << 0))) != ((1 << 1) | (1 << 0))) {
+			continue;
+		}
+		limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
+		base_k = (base & 0xffff0000) >> 2;
+		limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
+		if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
+			unsigned end_k;
+			hoist_memory(controllers, ctrl, hole_startk, i);
+			end_k = memory_end_k(ctrl, controllers);
+			set_top_mem(end_k, hole_startk);
+			break; //only one hole
+		}
+	}
 
 }
-
 #endif
 
-static void sdram_enable(int controllers, const struct mem_controller *ctrl, struct sys_info *sysinfo)
+
+static void sdram_enable(int controllers, const struct mem_controller *ctrl,
+			  struct sys_info *sysinfo)
 {
 	int i;
 
-
 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
-        unsigned cpu_f0_f1[8];
+	unsigned cpu_f0_f1[8];
 	/* FIXME: How about 32 node machine later? */
 	tsc_t tsc, tsc0[8];
-	
+
 	print_debug_addr("sdram_enable: tsc0[8]: ", &tsc0[0]);
 #endif
 	uint32_t dword;
@@ -2710,23 +2838,22 @@
 	}
 
 	/* Before enabling memory start the memory clocks */
-	for(i = 0; i < controllers; i++) {
+	for (i = 0; i < controllers; i++) {
 		uint32_t dtl, dch;
 		if (!sysinfo->ctrl_present[ i ])
 			continue;
-                dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
+		dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
 
-		// if no memory installed, disabled the interface
-		if(sysinfo->meminfo[i].dimm_mask==0x00){
-                        dch |= DCH_DisDramInterface;
-                        pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
+		/* if no memory installed, disable the interface */
+		if (sysinfo->meminfo[i].dimm_mask==0x00){
+			dch |= DCH_DisDramInterface;
+			pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
 
-                }
-		else {
-                        dch |= DCH_MemClkFreqVal;
-                        pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
-                	/* address timing and Output driver comp Control */
-	                set_misc_timing(ctrl+i, sysinfo->meminfo+i );
+		} else {
+			dch |= DCH_MemClkFreqVal;
+			pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
+			/* address timing and Output driver comp Control */
+			set_misc_timing(ctrl+i, sysinfo->meminfo+i);
 		}
 	}
 
@@ -2734,14 +2861,14 @@
 	memreset(controllers, ctrl);
 #if 0
 	print_debug("prepare to InitDram:");
-	for(i=0; i<10; i++) {
+	for (i=0; i<10; i++) {
 		print_debug_hex32(i);
 		print_debug("\b\b\b\b\b\b\b\b");
 	}
 	print_debug("\r\n");
 #endif
 
-	for(i = 0; i < controllers; i++) {
+	for (i = 0; i < controllers; i++) {
 		uint32_t dcl, dch;
 		if (!sysinfo->ctrl_present[ i ])
 			continue;
@@ -2765,38 +2892,37 @@
 		}
 
 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
-	        cpu_f0_f1[i] = is_cpu_pre_f2_in_bsp(i);
-	        if(cpu_f0_f1[i]) {
+		cpu_f0_f1[i] = is_cpu_pre_f2_in_bsp(i);
+		if (cpu_f0_f1[i]) {
 			//Rev F0/F1 workaround
 #if 1
-			        /* Set the DqsRcvEnTrain bit */
-		        dword = pci_read_config32(ctrl[i].f2, DRAM_CTRL);
-		        dword |= DC_DqsRcvEnTrain;
-		        pci_write_config32(ctrl[i].f2, DRAM_CTRL, dword);
+			/* Set the DqsRcvEnTrain bit */
+			dword = pci_read_config32(ctrl[i].f2, DRAM_CTRL);
+			dword |= DC_DqsRcvEnTrain;
+			pci_write_config32(ctrl[i].f2, DRAM_CTRL, dword);
 #endif
-			tsc0[i] = rdtsc();			
+			tsc0[i] = rdtsc();
 		}
 #endif
 
 #if 0
-                               /* Set the DqsRcvEnTrain bit */
-                        dword = pci_read_config32(ctrl[i].f2, DRAM_CTRL);
-                        dword |= DC_DqsRcvEnTrain;
-                        pci_write_config32(ctrl[i].f2, DRAM_CTRL, dword);
+		/* Set the DqsRcvEnTrain bit */
+		dword = pci_read_config32(ctrl[i].f2, DRAM_CTRL);
+		dword |= DC_DqsRcvEnTrain;
+		pci_write_config32(ctrl[i].f2, DRAM_CTRL, dword);
 #endif
 
 		pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
 		dcl |= DCL_InitDram;
 		pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, dcl);
-
 	}
 
-	for(i = 0; i < controllers; i++) {
+	for (i = 0; i < controllers; i++) {
 		uint32_t dcl, dch, dcm;
 		if (!sysinfo->ctrl_present[ i ])
 			continue;
 		/* Skip everything if I don't have any memory on this controller */
-		if(sysinfo->meminfo[i].dimm_mask==0x00) continue;
+		if (sysinfo->meminfo[i].dimm_mask==0x00) continue;
 
 		print_debug("Initializing memory: ");
 		int loops = 0;
@@ -2818,59 +2944,59 @@
 		} while(((dcm & DCM_MemClrStatus) == 0) /* || ((dcm & DCM_DramEnabled) == 0)*/ );
 
 #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
-		if(cpu_f0_f1[i]) {
-	                tsc= rdtsc();
+		if (cpu_f0_f1[i]) {
+			tsc = rdtsc();
 
-			print_debug_dqs_tsc("\r\nbegin tsc0", i, tsc0[i].hi, tsc0[i].lo, 2); 
-			print_debug_dqs_tsc("end   tsc ", i, tsc.hi, tsc.lo, 2); 
+			print_debug_dqs_tsc("\r\nbegin tsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
+			print_debug_dqs_tsc("end   tsc ", i, tsc.hi, tsc.lo, 2);
 
-        	        if(tsc.lo<tsc0[i].lo) {
-                	        tsc.hi--;
-	                }
-        	        tsc.lo -= tsc0[i].lo;
-	                tsc.hi -= tsc0[i].hi;
+			if (tsc.lo<tsc0[i].lo) {
+				tsc.hi--;
+			}
+			tsc.lo -= tsc0[i].lo;
+			tsc.hi -= tsc0[i].hi;
 
-        	        tsc0[i].lo = tsc.lo;
-	                tsc0[i].hi = tsc.hi;
+			tsc0[i].lo = tsc.lo;
+			tsc0[i].hi = tsc.hi;
 
-        	        print_debug_dqs_tsc("     dtsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
+			print_debug_dqs_tsc("     dtsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
 		}
 #endif
 		print_debug(" done\r\n");
 	}
 
 #if HW_MEM_HOLE_SIZEK != 0
-	 // init hw mem hole here
+	/* init hw mem hole here */
 	/* DramHoleValid bit only can be set after MemClrStatus is set by Hardware */
 	set_hw_mem_hole(controllers, ctrl);
 #endif
-	
-        //store tom to sysinfo, and it will be used by dqs_timing
-        {
-                msr_t msr;
-                //[1M, TOM)
-                msr = rdmsr(TOP_MEM);
-                sysinfo->tom_k = ((msr.hi<<24) | (msr.lo>>8))>>2;
 
-                //[4G, TOM2)
-                msr = rdmsr(TOP_MEM2);
-                sysinfo->tom2_k = ((msr.hi<<24)| (msr.lo>>8))>>2;
-        }
+	/* store tom to sysinfo, and it will be used by dqs_timing */
+	{
+		msr_t msr;
+		//[1M, TOM)
+		msr = rdmsr(TOP_MEM);
+		sysinfo->tom_k = ((msr.hi<<24) | (msr.lo>>8))>>2;
 
-        for(i = 0; i < controllers; i++) {
-                sysinfo->mem_trained[i] = 0;
+		//[4G, TOM2)
+		msr = rdmsr(TOP_MEM2);
+		sysinfo->tom2_k = ((msr.hi<<24)| (msr.lo>>8))>>2;
+	}
 
-                if (!sysinfo->ctrl_present[ i ])
-                        continue;
+	for (i = 0; i < controllers; i++) {
+		sysinfo->mem_trained[i] = 0;
 
-                /* Skip everything if I don't have any memory on this controller */
-                if(sysinfo->meminfo[i].dimm_mask==0x00) 
+		if (!sysinfo->ctrl_present[ i ])
 			continue;
 
-                sysinfo->mem_trained[i] = 0x80; // mem need to be trained
-        }
+		/* Skip everything if I don't have any memory on this controller */
+		if (sysinfo->meminfo[i].dimm_mask==0x00)
+			continue;
 
+		sysinfo->mem_trained[i] = 0x80; // mem needs to be trained
+	}
 
+
 #if MEM_TRAIN_SEQ ==  0
    #if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
 	dqs_timing(controllers, ctrl, tsc0, sysinfo);
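
Aside, not part of the patch: the TOP_MEM/TOP_MEM2 conversion a few lines up packs the 40-bit byte address into KB -- ((msr.hi << 24) | (msr.lo >> 8)) is the address shifted right by 8, and the final >> 2 makes it >> 10 in total. For example:

    /* TOP_MEM = 2 GB: msr.hi = 0, msr.lo = 0x80000000      */
    /* (0 << 24) | (0x80000000 >> 8) = 0x00800000            */
    /* tom_k = 0x00800000 >> 2       = 0x00200000 KB (2 GB)  */
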
@@ -2879,26 +3005,26 @@
    #endif
 #else
 
-   #if MEM_TRAIN_SEQ == 2
-           //need to enable mtrr, so dqs training could access the test address
-        setup_mtrr_dqs(sysinfo->tom_k, sysinfo->tom2_k);
-   #endif
+#if MEM_TRAIN_SEQ == 2
+	/* need to enable MTRRs so DQS training can access the test address */
+	setup_mtrr_dqs(sysinfo->tom_k, sysinfo->tom2_k);
+#endif
 
-        for(i = 0; i < controllers; i++) {
-                /* Skip everything if I don't have any memory on this controller */
-                if(sysinfo->mem_trained[i]!=0x80) 
+	for (i = 0; i < controllers; i++) {
+		/* Skip everything if I don't have any memory on this controller */
+		if (sysinfo->mem_trained[i]!=0x80)
 			continue;
 
-                dqs_timing(i, &ctrl[i], sysinfo, 1);
+		dqs_timing(i, &ctrl[i], sysinfo, 1);
 
-   #if MEM_TRAIN_SEQ == 1
-                break; // only train the first node with ram
-   #endif
-        }
+#if MEM_TRAIN_SEQ == 1
+		break; // only train the first node with ram
+#endif
+	}
 
-   #if MEM_TRAIN_SEQ == 2
-        clear_mtrr_dqs(sysinfo->tom2_k);
-   #endif
+#if MEM_TRAIN_SEQ == 2
+	clear_mtrr_dqs(sysinfo->tom2_k);
+#endif
 
 #endif
 
@@ -2907,24 +3033,27 @@
 #endif
 
 }
-static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a, const uint16_t *spd_addr)
+
+
+static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
+			   const uint16_t *spd_addr)
 {
-	int i; 
+	int i;
 	int j;
 	struct mem_controller *ctrl;
-        for(i=0;i<controllers; i++) {
-                ctrl = &ctrl_a[i];
-                ctrl->node_id = i;
-                ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
-                ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
-                ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
-                ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
+	for (i=0;i<controllers; i++) {
+		ctrl = &ctrl_a[i];
+		ctrl->node_id = i;
+		ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
+		ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
+		ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
+		ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
 
-		if(spd_addr == (void *)0) continue;
+		if (spd_addr == (void *)0) continue;
 
-                for(j=0;j<DIMM_SOCKETS;j++) {
-                        ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
-                        ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];
-                }
-        }
+		for (j=0;j<DIMM_SOCKETS;j++) {
+			ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
+			ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];
+		}
+	}
 }
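
Aside, not part of the patch: a sketch of how a board feeds fill_mem_ctrl(), assuming a two-node board with DIMM_SOCKETS == 2; the SMBus addresses below are placeholders, not values from this file. The spd_addr table is laid out channel0 then channel1 for node 0, followed by the same pair for node 1:

	/* hypothetical caller, e.g. in the mainboard's cache-as-RAM init code */
	static const uint16_t spd_addr[] = {
		/* node 0 */ 0x50, 0x51,   /* channel0 */
		             0x52, 0x53,   /* channel1 */
		/* node 1 */ 0x54, 0x55,   /* channel0 */
		             0x56, 0x57,   /* channel1 */
	};
	struct mem_controller ctrl[2];
	fill_mem_ctrl(2, ctrl, spd_addr);
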
